4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
120 #include <linux/btrfs.h>
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
126 #include "linux_loop.h"
130 #include "user-internals.h"
132 #include "signal-common.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
145 #define CLONE_IO 0x80000000 /* Clone io context */
148 /* We can't directly call the host clone syscall, because this will
149 * badly confuse libc (breaking mutexes, for example). So we must
150 * divide clone flags into:
151 * * flag combinations that look like pthread_create()
152 * * flag combinations that look like fork()
153 * * flags we can implement within QEMU itself
154 * * flags we can't support and will return an error for
156 /* For thread creation, all these flags must be present; for
157 * fork, none must be present.
159 #define CLONE_THREAD_FLAGS \
160 (CLONE_VM | CLONE_FS | CLONE_FILES | \
161 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 /* These flags are ignored:
164 * CLONE_DETACHED is now ignored by the kernel;
165 * CLONE_IO is just an optimisation hint to the I/O scheduler
167 #define CLONE_IGNORED_FLAGS \
168 (CLONE_DETACHED | CLONE_IO)
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS \
172 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
173 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS \
177 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
178 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 #define CLONE_INVALID_FORK_FLAGS \
181 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 #define CLONE_INVALID_THREAD_FLAGS \
184 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
185 CLONE_IGNORED_FLAGS))
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188 * have almost all been allocated. We cannot support any of
189 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191 * The checks against the invalid thread masks above will catch these.
192 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196 * once. This exercises the codepaths for restart.
198 //#define DEBUG_ERESTARTSYS
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
/* Wrapper-generator macros: define a static function of the given arity
 * that invokes the raw host syscall(2) directly.  Used where glibc has
 * no wrapper, or where the libc wrapper would get in the way.
 */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
275 #define __NR_sys_statx __NR_statx
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid
)
294 /* For the 64-bit guest on 32-bit host case we must emulate
295 * getdents using getdents64, because otherwise the host
296 * might hand us back more dirent records than we can fit
297 * into the guest buffer after structure format conversion.
298 * Otherwise we emulate getdents with getdents if the host has it.
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
307 #if (defined(TARGET_NR_getdents) && \
308 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
314 loff_t
*, res
, uint
, wh
);
316 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
317 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
319 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group
,int,error_code
)
323 #if defined(__NR_futex)
324 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
325 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
327 #if defined(__NR_futex_time64)
328 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
329 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
331 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
332 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
333 unsigned long *, user_mask_ptr
);
334 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
335 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
336 unsigned long *, user_mask_ptr
);
/* sched_attr is not defined in glibc, so mirror the kernel UAPI layout
 * here for use with the sched_getattr/sched_setattr raw syscalls.
 */
struct sched_attr {
    uint32_t size;           /* size of this structure, for extensibility */
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;      /* for SCHED_OTHER/SCHED_BATCH */
    uint32_t sched_priority; /* for SCHED_FIFO/SCHED_RR */
    /* remaining fields are for SCHED_DEADLINE and utilization clamping */
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
350 #define __NR_sys_sched_getattr __NR_sched_getattr
351 _syscall4(int, sys_sched_getattr
, pid_t
, pid
, struct sched_attr
*, attr
,
352 unsigned int, size
, unsigned int, flags
);
353 #define __NR_sys_sched_setattr __NR_sched_setattr
354 _syscall3(int, sys_sched_setattr
, pid_t
, pid
, struct sched_attr
*, attr
,
355 unsigned int, flags
);
356 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
357 _syscall1(int, sys_sched_getscheduler
, pid_t
, pid
);
358 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
359 _syscall3(int, sys_sched_setscheduler
, pid_t
, pid
, int, policy
,
360 const struct sched_param
*, param
);
361 #define __NR_sys_sched_getparam __NR_sched_getparam
362 _syscall2(int, sys_sched_getparam
, pid_t
, pid
,
363 struct sched_param
*, param
);
364 #define __NR_sys_sched_setparam __NR_sched_setparam
365 _syscall2(int, sys_sched_setparam
, pid_t
, pid
,
366 const struct sched_param
*, param
);
367 #define __NR_sys_getcpu __NR_getcpu
368 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
369 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
371 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
372 struct __user_cap_data_struct
*, data
);
373 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
374 struct __user_cap_data_struct
*, data
);
375 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
376 _syscall2(int, ioprio_get
, int, which
, int, who
)
378 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
379 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
381 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
382 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
385 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
386 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
387 unsigned long, idx1
, unsigned long, idx2
)
391 * It is assumed that struct statx is architecture independent.
393 #if defined(TARGET_NR_statx) && defined(__NR_statx)
394 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
395 unsigned int, mask
, struct target_statx
*, statxbuf
)
397 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
398 _syscall2(int, membarrier
, int, cmd
, int, flags
)
401 static const bitmask_transtbl fcntl_flags_tbl
[] = {
402 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
403 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
404 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
405 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
406 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
407 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
408 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
409 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
410 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
411 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
412 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
413 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
414 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
415 #if defined(O_DIRECT)
416 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
418 #if defined(O_NOATIME)
419 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
421 #if defined(O_CLOEXEC)
422 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
425 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
427 #if defined(O_TMPFILE)
428 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
430 /* Don't terminate the list prematurely on 64-bit host+guest. */
431 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
432 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
437 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Fallback stub when the host kernel headers lack utimensat:
 * report ENOSYS so the guest sees the syscall as unimplemented.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Fallback when the host lacks renameat2: a zero-flags call degrades to
 * plain renameat(); any non-zero flags cannot be honoured, so ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers and return its index,
 * or -1 if all slots are in use.  The slot is marked busy by storing
 * a non-zero placeholder until the real timer id replaces it.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
516 static inline int host_to_target_errno(int host_errno
)
518 switch (host_errno
) {
519 #define E(X) case X: return TARGET_##X;
520 #include "errnos.c.inc"
527 static inline int target_to_host_errno(int target_errno
)
529 switch (target_errno
) {
530 #define E(X) case TARGET_##X: return X;
531 #include "errnos.c.inc"
538 abi_long
get_errno(abi_long ret
)
541 return -host_to_target_errno(errno
);
546 const char *target_strerror(int err
)
548 if (err
== QEMU_ERESTARTSYS
) {
549 return "To be restarted";
551 if (err
== QEMU_ESIGRETURN
) {
552 return "Successful exit from sigreturn";
555 return strerror(target_to_host_errno(err
));
558 static int check_zeroed_user(abi_long addr
, size_t ksize
, size_t usize
)
562 if (usize
<= ksize
) {
565 for (i
= ksize
; i
< usize
; i
++) {
566 if (get_user_u8(b
, addr
+ i
)) {
567 return -TARGET_EFAULT
;
/* Wrapper-generator macros for "safe" syscalls: these go through
 * safe_syscall() so that a guest signal arriving mid-call restarts or
 * interrupts the syscall correctly (see user/safe-syscall.h).
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
623 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
624 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
625 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
626 int, flags
, mode_t
, mode
)
627 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
628 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
629 struct rusage
*, rusage
)
631 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
632 int, options
, struct rusage
*, rusage
)
633 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
634 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
635 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
636 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
637 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
639 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
640 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
641 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
644 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
645 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
647 #if defined(__NR_futex)
648 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
649 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
651 #if defined(__NR_futex_time64)
652 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
653 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
655 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
656 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
657 safe_syscall2(int, tkill
, int, tid
, int, sig
)
658 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
659 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
660 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
661 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
662 unsigned long, pos_l
, unsigned long, pos_h
)
663 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
664 unsigned long, pos_l
, unsigned long, pos_h
)
665 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
667 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
668 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
669 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
670 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
671 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
672 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
673 safe_syscall2(int, flock
, int, fd
, int, operation
)
674 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
675 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
676 const struct timespec
*, uts
, size_t, sigsetsize
)
678 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
680 #if defined(TARGET_NR_nanosleep)
681 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
682 struct timespec
*, rem
)
684 #if defined(TARGET_NR_clock_nanosleep) || \
685 defined(TARGET_NR_clock_nanosleep_time64)
686 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
687 const struct timespec
*, req
, struct timespec
*, rem
)
691 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
694 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
695 void *, ptr
, long, fifth
)
699 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
703 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
704 long, msgtype
, int, flags
)
706 #ifdef __NR_semtimedop
707 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
708 unsigned, nsops
, const struct timespec
*, timeout
)
710 #if defined(TARGET_NR_mq_timedsend) || \
711 defined(TARGET_NR_mq_timedsend_time64)
712 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
713 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
715 #if defined(TARGET_NR_mq_timedreceive) || \
716 defined(TARGET_NR_mq_timedreceive_time64)
717 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
718 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
720 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
721 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
722 int, outfd
, loff_t
*, poutoff
, size_t, length
,
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
742 static inline int host_to_target_sock_type(int host_type
)
746 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
748 target_type
= TARGET_SOCK_DGRAM
;
751 target_type
= TARGET_SOCK_STREAM
;
754 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
758 #if defined(SOCK_CLOEXEC)
759 if (host_type
& SOCK_CLOEXEC
) {
760 target_type
|= TARGET_SOCK_CLOEXEC
;
764 #if defined(SOCK_NONBLOCK)
765 if (host_type
& SOCK_NONBLOCK
) {
766 target_type
|= TARGET_SOCK_NONBLOCK
;
773 static abi_ulong target_brk
;
774 static abi_ulong target_original_brk
;
775 static abi_ulong brk_page
;
777 void target_set_brk(abi_ulong new_brk
)
779 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
780 brk_page
= HOST_PAGE_ALIGN(target_brk
);
783 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
784 #define DEBUGF_BRK(message, args...)
786 /* do_brk() must return target values and target errnos. */
787 abi_long
do_brk(abi_ulong new_brk
)
789 abi_long mapped_addr
;
790 abi_ulong new_alloc_size
;
792 /* brk pointers are always untagged */
794 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
797 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
800 if (new_brk
< target_original_brk
) {
801 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
806 /* If the new brk is less than the highest page reserved to the
807 * target heap allocation, set it and we're almost done... */
808 if (new_brk
<= brk_page
) {
809 /* Heap contents are initialized to zero, as for anonymous
811 if (new_brk
> target_brk
) {
812 memset(g2h_untagged(target_brk
), 0, new_brk
- target_brk
);
814 target_brk
= new_brk
;
815 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
819 /* We need to allocate more memory after the brk... Note that
820 * we don't use MAP_FIXED because that will map over the top of
821 * any existing mapping (like the one with the host libc or qemu
822 * itself); instead we treat "mapped but at wrong address" as
823 * a failure and unmap again.
825 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
826 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
827 PROT_READ
|PROT_WRITE
,
828 MAP_ANON
|MAP_PRIVATE
, 0, 0));
830 if (mapped_addr
== brk_page
) {
831 /* Heap contents are initialized to zero, as for anonymous
832 * mapped pages. Technically the new pages are already
833 * initialized to zero since they *are* anonymous mapped
834 * pages, however we have to take care with the contents that
835 * come from the remaining part of the previous page: it may
836 * contains garbage data due to a previous heap usage (grown
838 memset(g2h_untagged(target_brk
), 0, brk_page
- target_brk
);
840 target_brk
= new_brk
;
841 brk_page
= HOST_PAGE_ALIGN(target_brk
);
842 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
845 } else if (mapped_addr
!= -1) {
846 /* Mapped but at wrong address, meaning there wasn't actually
847 * enough space for this brk.
849 target_munmap(mapped_addr
, new_alloc_size
);
851 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
854 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
857 #if defined(TARGET_ALPHA)
858 /* We (partially) emulate OSF/1 on Alpha, which requires we
859 return a proper errno, not an unchanged brk value. */
860 return -TARGET_ENOMEM
;
862 /* For everything else, return the previous break. */
866 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
867 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
868 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
869 abi_ulong target_fds_addr
,
873 abi_ulong b
, *target_fds
;
875 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
876 if (!(target_fds
= lock_user(VERIFY_READ
,
878 sizeof(abi_ulong
) * nw
,
880 return -TARGET_EFAULT
;
884 for (i
= 0; i
< nw
; i
++) {
885 /* grab the abi_ulong */
886 __get_user(b
, &target_fds
[i
]);
887 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
888 /* check the bit inside the abi_ulong */
895 unlock_user(target_fds
, target_fds_addr
, 0);
900 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
901 abi_ulong target_fds_addr
,
904 if (target_fds_addr
) {
905 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
906 return -TARGET_EFAULT
;
914 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
920 abi_ulong
*target_fds
;
922 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
923 if (!(target_fds
= lock_user(VERIFY_WRITE
,
925 sizeof(abi_ulong
) * nw
,
927 return -TARGET_EFAULT
;
930 for (i
= 0; i
< nw
; i
++) {
932 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
933 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
936 __put_user(v
, &target_fds
[i
]);
939 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
945 #if defined(__alpha__)
951 static inline abi_long
host_to_target_clock_t(long ticks
)
953 #if HOST_HZ == TARGET_HZ
956 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
960 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
961 const struct rusage
*rusage
)
963 struct target_rusage
*target_rusage
;
965 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
966 return -TARGET_EFAULT
;
967 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
968 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
969 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
970 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
971 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
972 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
973 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
974 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
975 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
976 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
977 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
978 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
979 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
980 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
981 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
982 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
983 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
984 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
985 unlock_user_struct(target_rusage
, target_addr
, 1);
#ifdef TARGET_NR_setrlimit
/* Convert a guest rlimit value to the host rlim_t, mapping the target's
 * RLIM_INFINITY (and any value that doesn't survive the round trip
 * through rlim_t) to the host's RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/* Convert a host rlim_t to the guest representation, mapping the host's
 * RLIM_INFINITY (and any value that doesn't fit in abi_long) to the
 * target's RLIM_INFINITY, then byte-swapping for the guest.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
1024 static inline int target_to_host_resource(int code
)
1027 case TARGET_RLIMIT_AS
:
1029 case TARGET_RLIMIT_CORE
:
1031 case TARGET_RLIMIT_CPU
:
1033 case TARGET_RLIMIT_DATA
:
1035 case TARGET_RLIMIT_FSIZE
:
1036 return RLIMIT_FSIZE
;
1037 case TARGET_RLIMIT_LOCKS
:
1038 return RLIMIT_LOCKS
;
1039 case TARGET_RLIMIT_MEMLOCK
:
1040 return RLIMIT_MEMLOCK
;
1041 case TARGET_RLIMIT_MSGQUEUE
:
1042 return RLIMIT_MSGQUEUE
;
1043 case TARGET_RLIMIT_NICE
:
1045 case TARGET_RLIMIT_NOFILE
:
1046 return RLIMIT_NOFILE
;
1047 case TARGET_RLIMIT_NPROC
:
1048 return RLIMIT_NPROC
;
1049 case TARGET_RLIMIT_RSS
:
1051 case TARGET_RLIMIT_RTPRIO
:
1052 return RLIMIT_RTPRIO
;
1053 #ifdef RLIMIT_RTTIME
1054 case TARGET_RLIMIT_RTTIME
:
1055 return RLIMIT_RTTIME
;
1057 case TARGET_RLIMIT_SIGPENDING
:
1058 return RLIMIT_SIGPENDING
;
1059 case TARGET_RLIMIT_STACK
:
1060 return RLIMIT_STACK
;
1066 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1067 abi_ulong target_tv_addr
)
1069 struct target_timeval
*target_tv
;
1071 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1072 return -TARGET_EFAULT
;
1075 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1076 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1078 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1083 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1084 const struct timeval
*tv
)
1086 struct target_timeval
*target_tv
;
1088 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1089 return -TARGET_EFAULT
;
1092 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1093 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1095 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1100 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1101 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1102 abi_ulong target_tv_addr
)
1104 struct target__kernel_sock_timeval
*target_tv
;
1106 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1107 return -TARGET_EFAULT
;
1110 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1111 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1113 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1119 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1120 const struct timeval
*tv
)
1122 struct target__kernel_sock_timeval
*target_tv
;
1124 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1125 return -TARGET_EFAULT
;
1128 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1129 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1131 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1136 #if defined(TARGET_NR_futex) || \
1137 defined(TARGET_NR_rt_sigtimedwait) || \
1138 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1139 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1140 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1141 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1142 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1143 defined(TARGET_NR_timer_settime) || \
1144 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1145 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1146 abi_ulong target_addr
)
1148 struct target_timespec
*target_ts
;
1150 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1151 return -TARGET_EFAULT
;
1153 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1154 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1155 unlock_user_struct(target_ts
, target_addr
, 0);
1160 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1161 defined(TARGET_NR_timer_settime64) || \
1162 defined(TARGET_NR_mq_timedsend_time64) || \
1163 defined(TARGET_NR_mq_timedreceive_time64) || \
1164 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1165 defined(TARGET_NR_clock_nanosleep_time64) || \
1166 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1167 defined(TARGET_NR_utimensat) || \
1168 defined(TARGET_NR_utimensat_time64) || \
1169 defined(TARGET_NR_semtimedop_time64) || \
1170 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1171 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1172 abi_ulong target_addr
)
1174 struct target__kernel_timespec
*target_ts
;
1176 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1177 return -TARGET_EFAULT
;
1179 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1180 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1181 /* in 32bit mode, this drops the padding */
1182 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1183 unlock_user_struct(target_ts
, target_addr
, 0);
1188 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1189 struct timespec
*host_ts
)
1191 struct target_timespec
*target_ts
;
1193 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1194 return -TARGET_EFAULT
;
1196 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1197 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1198 unlock_user_struct(target_ts
, target_addr
, 1);
1202 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1203 struct timespec
*host_ts
)
1205 struct target__kernel_timespec
*target_ts
;
1207 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1208 return -TARGET_EFAULT
;
1210 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1211 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1212 unlock_user_struct(target_ts
, target_addr
, 1);
1216 #if defined(TARGET_NR_gettimeofday)
1217 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1218 struct timezone
*tz
)
1220 struct target_timezone
*target_tz
;
1222 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1223 return -TARGET_EFAULT
;
1226 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1227 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1229 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1235 #if defined(TARGET_NR_settimeofday)
1236 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1237 abi_ulong target_tz_addr
)
1239 struct target_timezone
*target_tz
;
1241 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1242 return -TARGET_EFAULT
;
1245 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1246 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1248 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1254 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1257 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1258 abi_ulong target_mq_attr_addr
)
1260 struct target_mq_attr
*target_mq_attr
;
1262 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1263 target_mq_attr_addr
, 1))
1264 return -TARGET_EFAULT
;
1266 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1267 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1268 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1269 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1271 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1276 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1277 const struct mq_attr
*attr
)
1279 struct target_mq_attr
*target_mq_attr
;
1281 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1282 target_mq_attr_addr
, 0))
1283 return -TARGET_EFAULT
;
1285 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1286 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1287 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1288 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1290 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1296 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1297 /* do_select() must return target values and target errnos. */
1298 static abi_long
do_select(int n
,
1299 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1300 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1302 fd_set rfds
, wfds
, efds
;
1303 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1305 struct timespec ts
, *ts_ptr
;
1308 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1312 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1316 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1321 if (target_tv_addr
) {
1322 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1323 return -TARGET_EFAULT
;
1324 ts
.tv_sec
= tv
.tv_sec
;
1325 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1331 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1334 if (!is_error(ret
)) {
1335 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1336 return -TARGET_EFAULT
;
1337 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1338 return -TARGET_EFAULT
;
1339 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1340 return -TARGET_EFAULT
;
1342 if (target_tv_addr
) {
1343 tv
.tv_sec
= ts
.tv_sec
;
1344 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1345 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1346 return -TARGET_EFAULT
;
1354 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1355 static abi_long
do_old_select(abi_ulong arg1
)
1357 struct target_sel_arg_struct
*sel
;
1358 abi_ulong inp
, outp
, exp
, tvp
;
1361 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1362 return -TARGET_EFAULT
;
1365 nsel
= tswapal(sel
->n
);
1366 inp
= tswapal(sel
->inp
);
1367 outp
= tswapal(sel
->outp
);
1368 exp
= tswapal(sel
->exp
);
1369 tvp
= tswapal(sel
->tvp
);
1371 unlock_user_struct(sel
, arg1
, 0);
1373 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1378 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6/pselect6_time64.  @time64 selects whether the
 * timespec at arg5 uses the 64-bit __kernel_timespec layout.
 * Returns target errnos.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else if (target_to_host_timespec(&ts, ts_addr)) {
            return -TARGET_EFAULT;
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            /* Temporarily install the requested signal mask. */
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        /* Write back the remaining timeout in the caller's layout. */
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
            return -TARGET_EFAULT;
        }
    }
    return ret;
}
1490 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1491 defined(TARGET_NR_ppoll_time64)
/*
 * Common implementation for poll, ppoll and ppoll_time64.
 * @ppoll selects the ppoll semantics (timespec + sigmask); @time64
 * selects the 64-bit timespec layout.  Returns target errnos.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll,
                         bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* Reject counts whose pollfd array size would overflow an int. */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        /* Fetch the timeout; a NULL guest pointer means block forever. */
        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Temporarily install the caller's signal mask. */
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            /* Write back the remaining timeout. */
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else if (host_to_target_timespec(arg3, timeout_ts)) {
                return -TARGET_EFAULT;
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
/*
 * Implement pipe/pipe2.  For the original pipe syscall some targets
 * return the second descriptor in a CPU register instead of through
 * the user buffer; those per-target conventions are handled below.
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic path: store both descriptors into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1623 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1624 abi_ulong target_addr
,
1627 struct target_ip_mreqn
*target_smreqn
;
1629 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1631 return -TARGET_EFAULT
;
1632 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1633 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1634 if (len
== sizeof(struct target_ip_mreqn
))
1635 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1636 unlock_user(target_smreqn
, target_addr
, 0);
/*
 * Convert a guest sockaddr at @target_addr into the host buffer @addr.
 * An fd-specific translator (if registered for @fd) takes precedence.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Include the terminating NUL if it is just past the
             * claimed length. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    /* Families with multi-byte fields beyond sa_family need their
     * fields byte-swapped individually. */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/*
 * Copy the host sockaddr @addr out to the guest buffer at @target_addr,
 * byte-swapping the multi-byte fields of the families that need it.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the buffer is long enough to contain it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/*
 * Convert the ancillary-data (control message) chain of a guest msghdr
 * into the host msghdr @msgh.  Only SCM_RIGHTS and SCM_CREDENTIALS get
 * field-wise conversion; other payloads are copied verbatim with a
 * LOG_UNIMP warning.  Returns 0 on success, -TARGET_EFAULT on a bad
 * guest control buffer.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length excludes the (guest-sized) header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptors are passed as an array of ints. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/*
 * Convert the ancillary-data chain of the host msghdr @msgh back into
 * the guest msghdr @target_msgh, truncating (and reporting MSG_CTRUNC)
 * when the guest control buffer is too small.  Returns 0 on success,
 * -TARGET_EFAULT on a bad guest control buffer.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error: sock_extended_err followed by the
                 * offending peer's address. */
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: copy raw bytes, zero-padding any extra
             * guest space. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2056 /* do_setsockopt() Must return target values and target errnos. */
2057 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2058 abi_ulong optval_addr
, socklen_t optlen
)
2062 struct ip_mreqn
*ip_mreq
;
2063 struct ip_mreq_source
*ip_mreq_source
;
2068 /* TCP and UDP options all take an 'int' value. */
2069 if (optlen
< sizeof(uint32_t))
2070 return -TARGET_EINVAL
;
2072 if (get_user_u32(val
, optval_addr
))
2073 return -TARGET_EFAULT
;
2074 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2081 case IP_ROUTER_ALERT
:
2085 case IP_MTU_DISCOVER
:
2092 case IP_MULTICAST_TTL
:
2093 case IP_MULTICAST_LOOP
:
2095 if (optlen
>= sizeof(uint32_t)) {
2096 if (get_user_u32(val
, optval_addr
))
2097 return -TARGET_EFAULT
;
2098 } else if (optlen
>= 1) {
2099 if (get_user_u8(val
, optval_addr
))
2100 return -TARGET_EFAULT
;
2102 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2104 case IP_ADD_MEMBERSHIP
:
2105 case IP_DROP_MEMBERSHIP
:
2106 if (optlen
< sizeof (struct target_ip_mreq
) ||
2107 optlen
> sizeof (struct target_ip_mreqn
))
2108 return -TARGET_EINVAL
;
2110 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2111 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2112 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2115 case IP_BLOCK_SOURCE
:
2116 case IP_UNBLOCK_SOURCE
:
2117 case IP_ADD_SOURCE_MEMBERSHIP
:
2118 case IP_DROP_SOURCE_MEMBERSHIP
:
2119 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2120 return -TARGET_EINVAL
;
2122 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2123 if (!ip_mreq_source
) {
2124 return -TARGET_EFAULT
;
2126 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2127 unlock_user (ip_mreq_source
, optval_addr
, 0);
2136 case IPV6_MTU_DISCOVER
:
2139 case IPV6_RECVPKTINFO
:
2140 case IPV6_UNICAST_HOPS
:
2141 case IPV6_MULTICAST_HOPS
:
2142 case IPV6_MULTICAST_LOOP
:
2144 case IPV6_RECVHOPLIMIT
:
2145 case IPV6_2292HOPLIMIT
:
2148 case IPV6_2292PKTINFO
:
2149 case IPV6_RECVTCLASS
:
2150 case IPV6_RECVRTHDR
:
2151 case IPV6_2292RTHDR
:
2152 case IPV6_RECVHOPOPTS
:
2153 case IPV6_2292HOPOPTS
:
2154 case IPV6_RECVDSTOPTS
:
2155 case IPV6_2292DSTOPTS
:
2157 case IPV6_ADDR_PREFERENCES
:
2158 #ifdef IPV6_RECVPATHMTU
2159 case IPV6_RECVPATHMTU
:
2161 #ifdef IPV6_TRANSPARENT
2162 case IPV6_TRANSPARENT
:
2164 #ifdef IPV6_FREEBIND
2167 #ifdef IPV6_RECVORIGDSTADDR
2168 case IPV6_RECVORIGDSTADDR
:
2171 if (optlen
< sizeof(uint32_t)) {
2172 return -TARGET_EINVAL
;
2174 if (get_user_u32(val
, optval_addr
)) {
2175 return -TARGET_EFAULT
;
2177 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2178 &val
, sizeof(val
)));
2182 struct in6_pktinfo pki
;
2184 if (optlen
< sizeof(pki
)) {
2185 return -TARGET_EINVAL
;
2188 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2189 return -TARGET_EFAULT
;
2192 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2194 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2195 &pki
, sizeof(pki
)));
2198 case IPV6_ADD_MEMBERSHIP
:
2199 case IPV6_DROP_MEMBERSHIP
:
2201 struct ipv6_mreq ipv6mreq
;
2203 if (optlen
< sizeof(ipv6mreq
)) {
2204 return -TARGET_EINVAL
;
2207 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2208 return -TARGET_EFAULT
;
2211 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2213 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2214 &ipv6mreq
, sizeof(ipv6mreq
)));
2225 struct icmp6_filter icmp6f
;
2227 if (optlen
> sizeof(icmp6f
)) {
2228 optlen
= sizeof(icmp6f
);
2231 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2232 return -TARGET_EFAULT
;
2235 for (val
= 0; val
< 8; val
++) {
2236 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2239 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2251 /* those take an u32 value */
2252 if (optlen
< sizeof(uint32_t)) {
2253 return -TARGET_EINVAL
;
2256 if (get_user_u32(val
, optval_addr
)) {
2257 return -TARGET_EFAULT
;
2259 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2260 &val
, sizeof(val
)));
2267 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2272 char *alg_key
= g_malloc(optlen
);
2275 return -TARGET_ENOMEM
;
2277 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2279 return -TARGET_EFAULT
;
2281 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2286 case ALG_SET_AEAD_AUTHSIZE
:
2288 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2297 case TARGET_SOL_SOCKET
:
2299 case TARGET_SO_RCVTIMEO
:
2303 optname
= SO_RCVTIMEO
;
2306 if (optlen
!= sizeof(struct target_timeval
)) {
2307 return -TARGET_EINVAL
;
2310 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2311 return -TARGET_EFAULT
;
2314 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2318 case TARGET_SO_SNDTIMEO
:
2319 optname
= SO_SNDTIMEO
;
2321 case TARGET_SO_ATTACH_FILTER
:
2323 struct target_sock_fprog
*tfprog
;
2324 struct target_sock_filter
*tfilter
;
2325 struct sock_fprog fprog
;
2326 struct sock_filter
*filter
;
2329 if (optlen
!= sizeof(*tfprog
)) {
2330 return -TARGET_EINVAL
;
2332 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2333 return -TARGET_EFAULT
;
2335 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2336 tswapal(tfprog
->filter
), 0)) {
2337 unlock_user_struct(tfprog
, optval_addr
, 1);
2338 return -TARGET_EFAULT
;
2341 fprog
.len
= tswap16(tfprog
->len
);
2342 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2343 if (filter
== NULL
) {
2344 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2345 unlock_user_struct(tfprog
, optval_addr
, 1);
2346 return -TARGET_ENOMEM
;
2348 for (i
= 0; i
< fprog
.len
; i
++) {
2349 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2350 filter
[i
].jt
= tfilter
[i
].jt
;
2351 filter
[i
].jf
= tfilter
[i
].jf
;
2352 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2354 fprog
.filter
= filter
;
2356 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2357 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2360 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2361 unlock_user_struct(tfprog
, optval_addr
, 1);
2364 case TARGET_SO_BINDTODEVICE
:
2366 char *dev_ifname
, *addr_ifname
;
2368 if (optlen
> IFNAMSIZ
- 1) {
2369 optlen
= IFNAMSIZ
- 1;
2371 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2373 return -TARGET_EFAULT
;
2375 optname
= SO_BINDTODEVICE
;
2376 addr_ifname
= alloca(IFNAMSIZ
);
2377 memcpy(addr_ifname
, dev_ifname
, optlen
);
2378 addr_ifname
[optlen
] = 0;
2379 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2380 addr_ifname
, optlen
));
2381 unlock_user (dev_ifname
, optval_addr
, 0);
2384 case TARGET_SO_LINGER
:
2387 struct target_linger
*tlg
;
2389 if (optlen
!= sizeof(struct target_linger
)) {
2390 return -TARGET_EINVAL
;
2392 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2393 return -TARGET_EFAULT
;
2395 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2396 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2397 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2399 unlock_user_struct(tlg
, optval_addr
, 0);
2402 /* Options with 'int' argument. */
2403 case TARGET_SO_DEBUG
:
2406 case TARGET_SO_REUSEADDR
:
2407 optname
= SO_REUSEADDR
;
2410 case TARGET_SO_REUSEPORT
:
2411 optname
= SO_REUSEPORT
;
2414 case TARGET_SO_TYPE
:
2417 case TARGET_SO_ERROR
:
2420 case TARGET_SO_DONTROUTE
:
2421 optname
= SO_DONTROUTE
;
2423 case TARGET_SO_BROADCAST
:
2424 optname
= SO_BROADCAST
;
2426 case TARGET_SO_SNDBUF
:
2427 optname
= SO_SNDBUF
;
2429 case TARGET_SO_SNDBUFFORCE
:
2430 optname
= SO_SNDBUFFORCE
;
2432 case TARGET_SO_RCVBUF
:
2433 optname
= SO_RCVBUF
;
2435 case TARGET_SO_RCVBUFFORCE
:
2436 optname
= SO_RCVBUFFORCE
;
2438 case TARGET_SO_KEEPALIVE
:
2439 optname
= SO_KEEPALIVE
;
2441 case TARGET_SO_OOBINLINE
:
2442 optname
= SO_OOBINLINE
;
2444 case TARGET_SO_NO_CHECK
:
2445 optname
= SO_NO_CHECK
;
2447 case TARGET_SO_PRIORITY
:
2448 optname
= SO_PRIORITY
;
2451 case TARGET_SO_BSDCOMPAT
:
2452 optname
= SO_BSDCOMPAT
;
2455 case TARGET_SO_PASSCRED
:
2456 optname
= SO_PASSCRED
;
2458 case TARGET_SO_PASSSEC
:
2459 optname
= SO_PASSSEC
;
2461 case TARGET_SO_TIMESTAMP
:
2462 optname
= SO_TIMESTAMP
;
2464 case TARGET_SO_RCVLOWAT
:
2465 optname
= SO_RCVLOWAT
;
2470 if (optlen
< sizeof(uint32_t))
2471 return -TARGET_EINVAL
;
2473 if (get_user_u32(val
, optval_addr
))
2474 return -TARGET_EFAULT
;
2475 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2480 case NETLINK_PKTINFO
:
2481 case NETLINK_ADD_MEMBERSHIP
:
2482 case NETLINK_DROP_MEMBERSHIP
:
2483 case NETLINK_BROADCAST_ERROR
:
2484 case NETLINK_NO_ENOBUFS
:
2485 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2486 case NETLINK_LISTEN_ALL_NSID
:
2487 case NETLINK_CAP_ACK
:
2488 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2489 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2490 case NETLINK_EXT_ACK
:
2491 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2493 case NETLINK_GET_STRICT_CHK
:
2494 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2500 if (optlen
< sizeof(uint32_t)) {
2501 return -TARGET_EINVAL
;
2503 if (get_user_u32(val
, optval_addr
)) {
2504 return -TARGET_EFAULT
;
2506 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2509 #endif /* SOL_NETLINK */
2512 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2514 ret
= -TARGET_ENOPROTOOPT
;
2519 /* do_getsockopt() Must return target values and target errnos. */
2520 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2521 abi_ulong optval_addr
, abi_ulong optlen
)
2528 case TARGET_SOL_SOCKET
:
2531 /* These don't just return a single integer */
2532 case TARGET_SO_PEERNAME
:
2534 case TARGET_SO_RCVTIMEO
: {
2538 optname
= SO_RCVTIMEO
;
2541 if (get_user_u32(len
, optlen
)) {
2542 return -TARGET_EFAULT
;
2545 return -TARGET_EINVAL
;
2549 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2554 if (len
> sizeof(struct target_timeval
)) {
2555 len
= sizeof(struct target_timeval
);
2557 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2558 return -TARGET_EFAULT
;
2560 if (put_user_u32(len
, optlen
)) {
2561 return -TARGET_EFAULT
;
2565 case TARGET_SO_SNDTIMEO
:
2566 optname
= SO_SNDTIMEO
;
2568 case TARGET_SO_PEERCRED
: {
2571 struct target_ucred
*tcr
;
2573 if (get_user_u32(len
, optlen
)) {
2574 return -TARGET_EFAULT
;
2577 return -TARGET_EINVAL
;
2581 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2589 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2590 return -TARGET_EFAULT
;
2592 __put_user(cr
.pid
, &tcr
->pid
);
2593 __put_user(cr
.uid
, &tcr
->uid
);
2594 __put_user(cr
.gid
, &tcr
->gid
);
2595 unlock_user_struct(tcr
, optval_addr
, 1);
2596 if (put_user_u32(len
, optlen
)) {
2597 return -TARGET_EFAULT
;
2601 case TARGET_SO_PEERSEC
: {
2604 if (get_user_u32(len
, optlen
)) {
2605 return -TARGET_EFAULT
;
2608 return -TARGET_EINVAL
;
2610 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2612 return -TARGET_EFAULT
;
2615 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2617 if (put_user_u32(lv
, optlen
)) {
2618 ret
= -TARGET_EFAULT
;
2620 unlock_user(name
, optval_addr
, lv
);
2623 case TARGET_SO_LINGER
:
2627 struct target_linger
*tlg
;
2629 if (get_user_u32(len
, optlen
)) {
2630 return -TARGET_EFAULT
;
2633 return -TARGET_EINVAL
;
2637 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2645 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2646 return -TARGET_EFAULT
;
2648 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2649 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2650 unlock_user_struct(tlg
, optval_addr
, 1);
2651 if (put_user_u32(len
, optlen
)) {
2652 return -TARGET_EFAULT
;
2656 /* Options with 'int' argument. */
2657 case TARGET_SO_DEBUG
:
2660 case TARGET_SO_REUSEADDR
:
2661 optname
= SO_REUSEADDR
;
2664 case TARGET_SO_REUSEPORT
:
2665 optname
= SO_REUSEPORT
;
2668 case TARGET_SO_TYPE
:
2671 case TARGET_SO_ERROR
:
2674 case TARGET_SO_DONTROUTE
:
2675 optname
= SO_DONTROUTE
;
2677 case TARGET_SO_BROADCAST
:
2678 optname
= SO_BROADCAST
;
2680 case TARGET_SO_SNDBUF
:
2681 optname
= SO_SNDBUF
;
2683 case TARGET_SO_RCVBUF
:
2684 optname
= SO_RCVBUF
;
2686 case TARGET_SO_KEEPALIVE
:
2687 optname
= SO_KEEPALIVE
;
2689 case TARGET_SO_OOBINLINE
:
2690 optname
= SO_OOBINLINE
;
2692 case TARGET_SO_NO_CHECK
:
2693 optname
= SO_NO_CHECK
;
2695 case TARGET_SO_PRIORITY
:
2696 optname
= SO_PRIORITY
;
2699 case TARGET_SO_BSDCOMPAT
:
2700 optname
= SO_BSDCOMPAT
;
2703 case TARGET_SO_PASSCRED
:
2704 optname
= SO_PASSCRED
;
2706 case TARGET_SO_TIMESTAMP
:
2707 optname
= SO_TIMESTAMP
;
2709 case TARGET_SO_RCVLOWAT
:
2710 optname
= SO_RCVLOWAT
;
2712 case TARGET_SO_ACCEPTCONN
:
2713 optname
= SO_ACCEPTCONN
;
2715 case TARGET_SO_PROTOCOL
:
2716 optname
= SO_PROTOCOL
;
2718 case TARGET_SO_DOMAIN
:
2719 optname
= SO_DOMAIN
;
2727 /* TCP and UDP options all take an 'int' value. */
2729 if (get_user_u32(len
, optlen
))
2730 return -TARGET_EFAULT
;
2732 return -TARGET_EINVAL
;
2734 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2737 if (optname
== SO_TYPE
) {
2738 val
= host_to_target_sock_type(val
);
2743 if (put_user_u32(val
, optval_addr
))
2744 return -TARGET_EFAULT
;
2746 if (put_user_u8(val
, optval_addr
))
2747 return -TARGET_EFAULT
;
2749 if (put_user_u32(len
, optlen
))
2750 return -TARGET_EFAULT
;
2757 case IP_ROUTER_ALERT
:
2761 case IP_MTU_DISCOVER
:
2767 case IP_MULTICAST_TTL
:
2768 case IP_MULTICAST_LOOP
:
2769 if (get_user_u32(len
, optlen
))
2770 return -TARGET_EFAULT
;
2772 return -TARGET_EINVAL
;
2774 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2777 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2779 if (put_user_u32(len
, optlen
)
2780 || put_user_u8(val
, optval_addr
))
2781 return -TARGET_EFAULT
;
2783 if (len
> sizeof(int))
2785 if (put_user_u32(len
, optlen
)
2786 || put_user_u32(val
, optval_addr
))
2787 return -TARGET_EFAULT
;
2791 ret
= -TARGET_ENOPROTOOPT
;
2797 case IPV6_MTU_DISCOVER
:
2800 case IPV6_RECVPKTINFO
:
2801 case IPV6_UNICAST_HOPS
:
2802 case IPV6_MULTICAST_HOPS
:
2803 case IPV6_MULTICAST_LOOP
:
2805 case IPV6_RECVHOPLIMIT
:
2806 case IPV6_2292HOPLIMIT
:
2809 case IPV6_2292PKTINFO
:
2810 case IPV6_RECVTCLASS
:
2811 case IPV6_RECVRTHDR
:
2812 case IPV6_2292RTHDR
:
2813 case IPV6_RECVHOPOPTS
:
2814 case IPV6_2292HOPOPTS
:
2815 case IPV6_RECVDSTOPTS
:
2816 case IPV6_2292DSTOPTS
:
2818 case IPV6_ADDR_PREFERENCES
:
2819 #ifdef IPV6_RECVPATHMTU
2820 case IPV6_RECVPATHMTU
:
2822 #ifdef IPV6_TRANSPARENT
2823 case IPV6_TRANSPARENT
:
2825 #ifdef IPV6_FREEBIND
2828 #ifdef IPV6_RECVORIGDSTADDR
2829 case IPV6_RECVORIGDSTADDR
:
2831 if (get_user_u32(len
, optlen
))
2832 return -TARGET_EFAULT
;
2834 return -TARGET_EINVAL
;
2836 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2839 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2841 if (put_user_u32(len
, optlen
)
2842 || put_user_u8(val
, optval_addr
))
2843 return -TARGET_EFAULT
;
2845 if (len
> sizeof(int))
2847 if (put_user_u32(len
, optlen
)
2848 || put_user_u32(val
, optval_addr
))
2849 return -TARGET_EFAULT
;
2853 ret
= -TARGET_ENOPROTOOPT
;
2860 case NETLINK_PKTINFO
:
2861 case NETLINK_BROADCAST_ERROR
:
2862 case NETLINK_NO_ENOBUFS
:
2863 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2864 case NETLINK_LISTEN_ALL_NSID
:
2865 case NETLINK_CAP_ACK
:
2866 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2867 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2868 case NETLINK_EXT_ACK
:
2869 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2871 case NETLINK_GET_STRICT_CHK
:
2872 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2873 if (get_user_u32(len
, optlen
)) {
2874 return -TARGET_EFAULT
;
2876 if (len
!= sizeof(val
)) {
2877 return -TARGET_EINVAL
;
2880 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2884 if (put_user_u32(lv
, optlen
)
2885 || put_user_u32(val
, optval_addr
)) {
2886 return -TARGET_EFAULT
;
2889 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2890 case NETLINK_LIST_MEMBERSHIPS
:
2894 if (get_user_u32(len
, optlen
)) {
2895 return -TARGET_EFAULT
;
2898 return -TARGET_EINVAL
;
2900 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2901 if (!results
&& len
> 0) {
2902 return -TARGET_EFAULT
;
2905 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2907 unlock_user(results
, optval_addr
, 0);
2910 /* swap host endianess to target endianess. */
2911 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2912 results
[i
] = tswap32(results
[i
]);
2914 if (put_user_u32(lv
, optlen
)) {
2915 return -TARGET_EFAULT
;
2917 unlock_user(results
, optval_addr
, 0);
2920 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2925 #endif /* SOL_NETLINK */
2928 qemu_log_mask(LOG_UNIMP
,
2929 "getsockopt level=%d optname=%d not yet supported\n",
2931 ret
= -TARGET_EOPNOTSUPP
;
2937 /* Convert target low/high pair representing file offset into the host
2938 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2939 * as the kernel doesn't handle them either.
2941 static void target_to_host_low_high(abi_ulong tlow
,
2943 unsigned long *hlow
,
2944 unsigned long *hhigh
)
2946 uint64_t off
= tlow
|
2947 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2948 TARGET_LONG_BITS
/ 2;
2951 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2954 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2955 abi_ulong count
, int copy
)
2957 struct target_iovec
*target_vec
;
2959 abi_ulong total_len
, max_len
;
2962 bool bad_address
= false;
2968 if (count
> IOV_MAX
) {
2973 vec
= g_try_new0(struct iovec
, count
);
2979 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2980 count
* sizeof(struct target_iovec
), 1);
2981 if (target_vec
== NULL
) {
2986 /* ??? If host page size > target page size, this will result in a
2987 value larger than what we can actually support. */
2988 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2991 for (i
= 0; i
< count
; i
++) {
2992 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2993 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2998 } else if (len
== 0) {
2999 /* Zero length pointer is ignored. */
3000 vec
[i
].iov_base
= 0;
3002 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3003 /* If the first buffer pointer is bad, this is a fault. But
3004 * subsequent bad buffers will result in a partial write; this
3005 * is realized by filling the vector with null pointers and
3007 if (!vec
[i
].iov_base
) {
3018 if (len
> max_len
- total_len
) {
3019 len
= max_len
- total_len
;
3022 vec
[i
].iov_len
= len
;
3026 unlock_user(target_vec
, target_addr
, 0);
3031 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3032 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3035 unlock_user(target_vec
, target_addr
, 0);
3042 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3043 abi_ulong count
, int copy
)
3045 struct target_iovec
*target_vec
;
3048 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3049 count
* sizeof(struct target_iovec
), 1);
3051 for (i
= 0; i
< count
; i
++) {
3052 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3053 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3057 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3059 unlock_user(target_vec
, target_addr
, 0);
3065 static inline int target_to_host_sock_type(int *type
)
3068 int target_type
= *type
;
3070 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3071 case TARGET_SOCK_DGRAM
:
3072 host_type
= SOCK_DGRAM
;
3074 case TARGET_SOCK_STREAM
:
3075 host_type
= SOCK_STREAM
;
3078 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3081 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3082 #if defined(SOCK_CLOEXEC)
3083 host_type
|= SOCK_CLOEXEC
;
3085 return -TARGET_EINVAL
;
3088 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3089 #if defined(SOCK_NONBLOCK)
3090 host_type
|= SOCK_NONBLOCK
;
3091 #elif !defined(O_NONBLOCK)
3092 return -TARGET_EINVAL
;
3099 /* Try to emulate socket type flags after socket creation. */
3100 static int sock_flags_fixup(int fd
, int target_type
)
3102 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3103 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3104 int flags
= fcntl(fd
, F_GETFL
);
3105 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3107 return -TARGET_EINVAL
;
3114 /* do_socket() Must return target values and target errnos. */
3115 static abi_long
do_socket(int domain
, int type
, int protocol
)
3117 int target_type
= type
;
3120 ret
= target_to_host_sock_type(&type
);
3125 if (domain
== PF_NETLINK
&& !(
3126 #ifdef CONFIG_RTNETLINK
3127 protocol
== NETLINK_ROUTE
||
3129 protocol
== NETLINK_KOBJECT_UEVENT
||
3130 protocol
== NETLINK_AUDIT
)) {
3131 return -TARGET_EPROTONOSUPPORT
;
3134 if (domain
== AF_PACKET
||
3135 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3136 protocol
= tswap16(protocol
);
3139 ret
= get_errno(socket(domain
, type
, protocol
));
3141 ret
= sock_flags_fixup(ret
, target_type
);
3142 if (type
== SOCK_PACKET
) {
3143 /* Manage an obsolete case :
3144 * if socket type is SOCK_PACKET, bind by name
3146 fd_trans_register(ret
, &target_packet_trans
);
3147 } else if (domain
== PF_NETLINK
) {
3149 #ifdef CONFIG_RTNETLINK
3151 fd_trans_register(ret
, &target_netlink_route_trans
);
3154 case NETLINK_KOBJECT_UEVENT
:
3155 /* nothing to do: messages are strings */
3158 fd_trans_register(ret
, &target_netlink_audit_trans
);
3161 g_assert_not_reached();
3168 /* do_bind() Must return target values and target errnos. */
3169 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3175 if ((int)addrlen
< 0) {
3176 return -TARGET_EINVAL
;
3179 addr
= alloca(addrlen
+1);
3181 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3185 return get_errno(bind(sockfd
, addr
, addrlen
));
3188 /* do_connect() Must return target values and target errnos. */
3189 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3195 if ((int)addrlen
< 0) {
3196 return -TARGET_EINVAL
;
3199 addr
= alloca(addrlen
+1);
3201 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3205 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3208 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3209 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3210 int flags
, int send
)
3216 abi_ulong target_vec
;
3218 if (msgp
->msg_name
) {
3219 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3220 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3221 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3222 tswapal(msgp
->msg_name
),
3224 if (ret
== -TARGET_EFAULT
) {
3225 /* For connected sockets msg_name and msg_namelen must
3226 * be ignored, so returning EFAULT immediately is wrong.
3227 * Instead, pass a bad msg_name to the host kernel, and
3228 * let it decide whether to return EFAULT or not.
3230 msg
.msg_name
= (void *)-1;
3235 msg
.msg_name
= NULL
;
3236 msg
.msg_namelen
= 0;
3238 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3239 msg
.msg_control
= alloca(msg
.msg_controllen
);
3240 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3242 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3244 count
= tswapal(msgp
->msg_iovlen
);
3245 target_vec
= tswapal(msgp
->msg_iov
);
3247 if (count
> IOV_MAX
) {
3248 /* sendrcvmsg returns a different errno for this condition than
3249 * readv/writev, so we must catch it here before lock_iovec() does.
3251 ret
= -TARGET_EMSGSIZE
;
3255 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3256 target_vec
, count
, send
);
3258 ret
= -host_to_target_errno(errno
);
3261 msg
.msg_iovlen
= count
;
3265 if (fd_trans_target_to_host_data(fd
)) {
3268 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3269 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3270 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3271 msg
.msg_iov
->iov_len
);
3273 msg
.msg_iov
->iov_base
= host_msg
;
3274 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3278 ret
= target_to_host_cmsg(&msg
, msgp
);
3280 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3284 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3285 if (!is_error(ret
)) {
3287 if (fd_trans_host_to_target_data(fd
)) {
3288 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3289 MIN(msg
.msg_iov
->iov_len
, len
));
3291 ret
= host_to_target_cmsg(msgp
, &msg
);
3293 if (!is_error(ret
)) {
3294 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3295 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3296 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3297 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3298 msg
.msg_name
, msg
.msg_namelen
);
3310 unlock_iovec(vec
, target_vec
, count
, !send
);
3315 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3316 int flags
, int send
)
3319 struct target_msghdr
*msgp
;
3321 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3325 return -TARGET_EFAULT
;
3327 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3328 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3332 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3333 * so it might not have this *mmsg-specific flag either.
3335 #ifndef MSG_WAITFORONE
3336 #define MSG_WAITFORONE 0x10000
3339 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3340 unsigned int vlen
, unsigned int flags
,
3343 struct target_mmsghdr
*mmsgp
;
3347 if (vlen
> UIO_MAXIOV
) {
3351 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3353 return -TARGET_EFAULT
;
3356 for (i
= 0; i
< vlen
; i
++) {
3357 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3358 if (is_error(ret
)) {
3361 mmsgp
[i
].msg_len
= tswap32(ret
);
3362 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3363 if (flags
& MSG_WAITFORONE
) {
3364 flags
|= MSG_DONTWAIT
;
3368 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3370 /* Return number of datagrams sent if we sent any at all;
3371 * otherwise return the error.
3379 /* do_accept4() Must return target values and target errnos. */
3380 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3381 abi_ulong target_addrlen_addr
, int flags
)
3383 socklen_t addrlen
, ret_addrlen
;
3388 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3390 if (target_addr
== 0) {
3391 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3394 /* linux returns EFAULT if addrlen pointer is invalid */
3395 if (get_user_u32(addrlen
, target_addrlen_addr
))
3396 return -TARGET_EFAULT
;
3398 if ((int)addrlen
< 0) {
3399 return -TARGET_EINVAL
;
3402 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3403 return -TARGET_EFAULT
;
3406 addr
= alloca(addrlen
);
3408 ret_addrlen
= addrlen
;
3409 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3410 if (!is_error(ret
)) {
3411 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3412 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3413 ret
= -TARGET_EFAULT
;
3419 /* do_getpeername() Must return target values and target errnos. */
3420 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3421 abi_ulong target_addrlen_addr
)
3423 socklen_t addrlen
, ret_addrlen
;
3427 if (get_user_u32(addrlen
, target_addrlen_addr
))
3428 return -TARGET_EFAULT
;
3430 if ((int)addrlen
< 0) {
3431 return -TARGET_EINVAL
;
3434 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3435 return -TARGET_EFAULT
;
3438 addr
= alloca(addrlen
);
3440 ret_addrlen
= addrlen
;
3441 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3442 if (!is_error(ret
)) {
3443 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3444 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3445 ret
= -TARGET_EFAULT
;
3451 /* do_getsockname() Must return target values and target errnos. */
3452 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3453 abi_ulong target_addrlen_addr
)
3455 socklen_t addrlen
, ret_addrlen
;
3459 if (get_user_u32(addrlen
, target_addrlen_addr
))
3460 return -TARGET_EFAULT
;
3462 if ((int)addrlen
< 0) {
3463 return -TARGET_EINVAL
;
3466 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3467 return -TARGET_EFAULT
;
3470 addr
= alloca(addrlen
);
3472 ret_addrlen
= addrlen
;
3473 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3474 if (!is_error(ret
)) {
3475 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3476 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3477 ret
= -TARGET_EFAULT
;
3483 /* do_socketpair() Must return target values and target errnos. */
3484 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3485 abi_ulong target_tab_addr
)
3490 target_to_host_sock_type(&type
);
3492 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3493 if (!is_error(ret
)) {
3494 if (put_user_s32(tab
[0], target_tab_addr
)
3495 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3496 ret
= -TARGET_EFAULT
;
3501 /* do_sendto() Must return target values and target errnos. */
3502 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3503 abi_ulong target_addr
, socklen_t addrlen
)
3507 void *copy_msg
= NULL
;
3510 if ((int)addrlen
< 0) {
3511 return -TARGET_EINVAL
;
3514 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3516 return -TARGET_EFAULT
;
3517 if (fd_trans_target_to_host_data(fd
)) {
3518 copy_msg
= host_msg
;
3519 host_msg
= g_malloc(len
);
3520 memcpy(host_msg
, copy_msg
, len
);
3521 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3527 addr
= alloca(addrlen
+1);
3528 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3532 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3534 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3539 host_msg
= copy_msg
;
3541 unlock_user(host_msg
, msg
, 0);
3545 /* do_recvfrom() Must return target values and target errnos. */
3546 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3547 abi_ulong target_addr
,
3548 abi_ulong target_addrlen
)
3550 socklen_t addrlen
, ret_addrlen
;
3558 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3560 return -TARGET_EFAULT
;
3564 if (get_user_u32(addrlen
, target_addrlen
)) {
3565 ret
= -TARGET_EFAULT
;
3568 if ((int)addrlen
< 0) {
3569 ret
= -TARGET_EINVAL
;
3572 addr
= alloca(addrlen
);
3573 ret_addrlen
= addrlen
;
3574 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3575 addr
, &ret_addrlen
));
3577 addr
= NULL
; /* To keep compiler quiet. */
3578 addrlen
= 0; /* To keep compiler quiet. */
3579 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3581 if (!is_error(ret
)) {
3582 if (fd_trans_host_to_target_data(fd
)) {
3584 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3585 if (is_error(trans
)) {
3591 host_to_target_sockaddr(target_addr
, addr
,
3592 MIN(addrlen
, ret_addrlen
));
3593 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3594 ret
= -TARGET_EFAULT
;
3598 unlock_user(host_msg
, msg
, len
);
3601 unlock_user(host_msg
, msg
, 0);
3606 #ifdef TARGET_NR_socketcall
3607 /* do_socketcall() must return target values and target errnos. */
3608 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3610 static const unsigned nargs
[] = { /* number of arguments per operation */
3611 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3612 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3613 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3614 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3615 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3616 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3617 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3618 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3619 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3620 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3621 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3622 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3623 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3624 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3625 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3626 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3627 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3628 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3629 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3630 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3632 abi_long a
[6]; /* max 6 args */
3635 /* check the range of the first argument num */
3636 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3637 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3638 return -TARGET_EINVAL
;
3640 /* ensure we have space for args */
3641 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3642 return -TARGET_EINVAL
;
3644 /* collect the arguments in a[] according to nargs[] */
3645 for (i
= 0; i
< nargs
[num
]; ++i
) {
3646 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3647 return -TARGET_EFAULT
;
3650 /* now when we have the args, invoke the appropriate underlying function */
3652 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3653 return do_socket(a
[0], a
[1], a
[2]);
3654 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3655 return do_bind(a
[0], a
[1], a
[2]);
3656 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3657 return do_connect(a
[0], a
[1], a
[2]);
3658 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3659 return get_errno(listen(a
[0], a
[1]));
3660 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3661 return do_accept4(a
[0], a
[1], a
[2], 0);
3662 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3663 return do_getsockname(a
[0], a
[1], a
[2]);
3664 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3665 return do_getpeername(a
[0], a
[1], a
[2]);
3666 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3667 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3668 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3669 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3670 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3671 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3672 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3673 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3674 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3675 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3676 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3677 return get_errno(shutdown(a
[0], a
[1]));
3678 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3679 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3680 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3681 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3682 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3683 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3684 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3685 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3686 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3687 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3688 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3689 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3690 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3691 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3693 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3694 return -TARGET_EINVAL
;
3699 #define N_SHM_REGIONS 32
3701 static struct shm_region
{
3705 } shm_regions
[N_SHM_REGIONS
];
3707 #ifndef TARGET_SEMID64_DS
3708 /* asm-generic version of this struct */
3709 struct target_semid64_ds
3711 struct target_ipc_perm sem_perm
;
3712 abi_ulong sem_otime
;
3713 #if TARGET_ABI_BITS == 32
3714 abi_ulong __unused1
;
3716 abi_ulong sem_ctime
;
3717 #if TARGET_ABI_BITS == 32
3718 abi_ulong __unused2
;
3720 abi_ulong sem_nsems
;
3721 abi_ulong __unused3
;
3722 abi_ulong __unused4
;
3726 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3727 abi_ulong target_addr
)
3729 struct target_ipc_perm
*target_ip
;
3730 struct target_semid64_ds
*target_sd
;
3732 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3733 return -TARGET_EFAULT
;
3734 target_ip
= &(target_sd
->sem_perm
);
3735 host_ip
->__key
= tswap32(target_ip
->__key
);
3736 host_ip
->uid
= tswap32(target_ip
->uid
);
3737 host_ip
->gid
= tswap32(target_ip
->gid
);
3738 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3739 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3740 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3741 host_ip
->mode
= tswap32(target_ip
->mode
);
3743 host_ip
->mode
= tswap16(target_ip
->mode
);
3745 #if defined(TARGET_PPC)
3746 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3748 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3750 unlock_user_struct(target_sd
, target_addr
, 0);
3754 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3755 struct ipc_perm
*host_ip
)
3757 struct target_ipc_perm
*target_ip
;
3758 struct target_semid64_ds
*target_sd
;
3760 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3761 return -TARGET_EFAULT
;
3762 target_ip
= &(target_sd
->sem_perm
);
3763 target_ip
->__key
= tswap32(host_ip
->__key
);
3764 target_ip
->uid
= tswap32(host_ip
->uid
);
3765 target_ip
->gid
= tswap32(host_ip
->gid
);
3766 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3767 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3768 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3769 target_ip
->mode
= tswap32(host_ip
->mode
);
3771 target_ip
->mode
= tswap16(host_ip
->mode
);
3773 #if defined(TARGET_PPC)
3774 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3776 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3778 unlock_user_struct(target_sd
, target_addr
, 1);
3782 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3783 abi_ulong target_addr
)
3785 struct target_semid64_ds
*target_sd
;
3787 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3788 return -TARGET_EFAULT
;
3789 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3790 return -TARGET_EFAULT
;
3791 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3792 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3793 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3794 unlock_user_struct(target_sd
, target_addr
, 0);
3798 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3799 struct semid_ds
*host_sd
)
3801 struct target_semid64_ds
*target_sd
;
3803 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3804 return -TARGET_EFAULT
;
3805 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3806 return -TARGET_EFAULT
;
3807 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3808 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3809 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3810 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (IPC_INFO/SEM_INFO result).
 * NOTE(review): member list reconstructed from host_to_target_seminfo
 * below — confirm against the original header. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3827 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3828 struct seminfo
*host_seminfo
)
3830 struct target_seminfo
*target_seminfo
;
3831 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3832 return -TARGET_EFAULT
;
3833 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3834 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3835 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3836 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3837 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3838 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3839 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3840 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3841 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3842 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3843 unlock_user_struct(target_seminfo
, target_addr
, 1);
3849 struct semid_ds
*buf
;
3850 unsigned short *array
;
3851 struct seminfo
*__buf
;
3854 union target_semun
{
3861 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3862 abi_ulong target_addr
)
3865 unsigned short *array
;
3867 struct semid_ds semid_ds
;
3870 semun
.buf
= &semid_ds
;
3872 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3874 return get_errno(ret
);
3876 nsems
= semid_ds
.sem_nsems
;
3878 *host_array
= g_try_new(unsigned short, nsems
);
3880 return -TARGET_ENOMEM
;
3882 array
= lock_user(VERIFY_READ
, target_addr
,
3883 nsems
*sizeof(unsigned short), 1);
3885 g_free(*host_array
);
3886 return -TARGET_EFAULT
;
3889 for(i
=0; i
<nsems
; i
++) {
3890 __get_user((*host_array
)[i
], &array
[i
]);
3892 unlock_user(array
, target_addr
, 0);
3897 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3898 unsigned short **host_array
)
3901 unsigned short *array
;
3903 struct semid_ds semid_ds
;
3906 semun
.buf
= &semid_ds
;
3908 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3910 return get_errno(ret
);
3912 nsems
= semid_ds
.sem_nsems
;
3914 array
= lock_user(VERIFY_WRITE
, target_addr
,
3915 nsems
*sizeof(unsigned short), 0);
3917 return -TARGET_EFAULT
;
3919 for(i
=0; i
<nsems
; i
++) {
3920 __put_user((*host_array
)[i
], &array
[i
]);
3922 g_free(*host_array
);
3923 unlock_user(array
, target_addr
, 1);
3928 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3929 abi_ulong target_arg
)
3931 union target_semun target_su
= { .buf
= target_arg
};
3933 struct semid_ds dsarg
;
3934 unsigned short *array
= NULL
;
3935 struct seminfo seminfo
;
3936 abi_long ret
= -TARGET_EINVAL
;
3943 /* In 64 bit cross-endian situations, we will erroneously pick up
3944 * the wrong half of the union for the "val" element. To rectify
3945 * this, the entire 8-byte structure is byteswapped, followed by
3946 * a swap of the 4 byte val field. In other cases, the data is
3947 * already in proper host byte order. */
3948 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3949 target_su
.buf
= tswapal(target_su
.buf
);
3950 arg
.val
= tswap32(target_su
.val
);
3952 arg
.val
= target_su
.val
;
3954 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3958 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3962 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3963 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3970 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3974 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3975 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3981 arg
.__buf
= &seminfo
;
3982 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3983 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3991 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf (one semop operation).
 * NOTE(review): sem_op/sem_flg types reconstructed — confirm they are
 * 'short' as in the asm-generic kernel definition. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4004 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4005 abi_ulong target_addr
,
4008 struct target_sembuf
*target_sembuf
;
4011 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4012 nsops
*sizeof(struct target_sembuf
), 1);
4014 return -TARGET_EFAULT
;
4016 for(i
=0; i
<nsops
; i
++) {
4017 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4018 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4019 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4022 unlock_user(target_sembuf
, target_addr
, 0);
4027 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4028 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4031 * This macro is required to handle the s390 variants, which passes the
4032 * arguments in a different order than default.
4035 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4036 (__nsops), (__timeout), (__sops)
4038 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4039 (__nsops), 0, (__sops), (__timeout)
4042 static inline abi_long
do_semtimedop(int semid
,
4045 abi_long timeout
, bool time64
)
4047 struct sembuf
*sops
;
4048 struct timespec ts
, *pts
= NULL
;
4054 if (target_to_host_timespec64(pts
, timeout
)) {
4055 return -TARGET_EFAULT
;
4058 if (target_to_host_timespec(pts
, timeout
)) {
4059 return -TARGET_EFAULT
;
4064 if (nsops
> TARGET_SEMOPM
) {
4065 return -TARGET_E2BIG
;
4068 sops
= g_new(struct sembuf
, nsops
);
4070 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4072 return -TARGET_EFAULT
;
4075 ret
= -TARGET_ENOSYS
;
4076 #ifdef __NR_semtimedop
4077 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4080 if (ret
== -TARGET_ENOSYS
) {
4081 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4082 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4090 struct target_msqid_ds
4092 struct target_ipc_perm msg_perm
;
4093 abi_ulong msg_stime
;
4094 #if TARGET_ABI_BITS == 32
4095 abi_ulong __unused1
;
4097 abi_ulong msg_rtime
;
4098 #if TARGET_ABI_BITS == 32
4099 abi_ulong __unused2
;
4101 abi_ulong msg_ctime
;
4102 #if TARGET_ABI_BITS == 32
4103 abi_ulong __unused3
;
4105 abi_ulong __msg_cbytes
;
4107 abi_ulong msg_qbytes
;
4108 abi_ulong msg_lspid
;
4109 abi_ulong msg_lrpid
;
4110 abi_ulong __unused4
;
4111 abi_ulong __unused5
;
4114 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4115 abi_ulong target_addr
)
4117 struct target_msqid_ds
*target_md
;
4119 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4120 return -TARGET_EFAULT
;
4121 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4122 return -TARGET_EFAULT
;
4123 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4124 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4125 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4126 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4127 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4128 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4129 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4130 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4131 unlock_user_struct(target_md
, target_addr
, 0);
4135 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4136 struct msqid_ds
*host_md
)
4138 struct target_msqid_ds
*target_md
;
4140 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4141 return -TARGET_EFAULT
;
4142 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4143 return -TARGET_EFAULT
;
4144 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4145 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4146 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4147 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4148 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4149 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4150 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4151 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4152 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of struct msginfo (IPC_INFO/MSG_INFO result).
 * NOTE(review): member list reconstructed from host_to_target_msginfo
 * below — confirm against the original header. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4167 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4168 struct msginfo
*host_msginfo
)
4170 struct target_msginfo
*target_msginfo
;
4171 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4172 return -TARGET_EFAULT
;
4173 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4174 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4175 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4176 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4177 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4178 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4179 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4180 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4181 unlock_user_struct(target_msginfo
, target_addr
, 1);
4185 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4187 struct msqid_ds dsarg
;
4188 struct msginfo msginfo
;
4189 abi_long ret
= -TARGET_EINVAL
;
4197 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4198 return -TARGET_EFAULT
;
4199 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4200 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4201 return -TARGET_EFAULT
;
4204 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4208 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4209 if (host_to_target_msginfo(ptr
, &msginfo
))
4210 return -TARGET_EFAULT
;
4217 struct target_msgbuf
{
4222 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4223 ssize_t msgsz
, int msgflg
)
4225 struct target_msgbuf
*target_mb
;
4226 struct msgbuf
*host_mb
;
4230 return -TARGET_EINVAL
;
4233 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4234 return -TARGET_EFAULT
;
4235 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4237 unlock_user_struct(target_mb
, msgp
, 0);
4238 return -TARGET_ENOMEM
;
4240 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4241 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4242 ret
= -TARGET_ENOSYS
;
4244 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4247 if (ret
== -TARGET_ENOSYS
) {
4249 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4252 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4258 unlock_user_struct(target_mb
, msgp
, 0);
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
4277 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4278 ssize_t msgsz
, abi_long msgtyp
,
4281 struct target_msgbuf
*target_mb
;
4283 struct msgbuf
*host_mb
;
4287 return -TARGET_EINVAL
;
4290 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4291 return -TARGET_EFAULT
;
4293 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4295 ret
= -TARGET_ENOMEM
;
4298 ret
= -TARGET_ENOSYS
;
4300 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4303 if (ret
== -TARGET_ENOSYS
) {
4304 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4305 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4310 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4311 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4312 if (!target_mtext
) {
4313 ret
= -TARGET_EFAULT
;
4316 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4317 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4320 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4324 unlock_user_struct(target_mb
, msgp
, 1);
4329 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4330 abi_ulong target_addr
)
4332 struct target_shmid_ds
*target_sd
;
4334 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4335 return -TARGET_EFAULT
;
4336 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4337 return -TARGET_EFAULT
;
4338 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4339 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4340 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4341 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4342 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4343 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4344 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4345 unlock_user_struct(target_sd
, target_addr
, 0);
4349 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4350 struct shmid_ds
*host_sd
)
4352 struct target_shmid_ds
*target_sd
;
4354 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4355 return -TARGET_EFAULT
;
4356 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4357 return -TARGET_EFAULT
;
4358 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4359 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4360 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4361 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4362 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4363 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4364 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4365 unlock_user_struct(target_sd
, target_addr
, 1);
4369 struct target_shminfo
{
4377 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4378 struct shminfo
*host_shminfo
)
4380 struct target_shminfo
*target_shminfo
;
4381 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4382 return -TARGET_EFAULT
;
4383 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4384 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4385 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4386 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4387 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4388 unlock_user_struct(target_shminfo
, target_addr
, 1);
4392 struct target_shm_info
{
4397 abi_ulong swap_attempts
;
4398 abi_ulong swap_successes
;
4401 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4402 struct shm_info
*host_shm_info
)
4404 struct target_shm_info
*target_shm_info
;
4405 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4406 return -TARGET_EFAULT
;
4407 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4408 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4409 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4410 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4411 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4412 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4413 unlock_user_struct(target_shm_info
, target_addr
, 1);
4417 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4419 struct shmid_ds dsarg
;
4420 struct shminfo shminfo
;
4421 struct shm_info shm_info
;
4422 abi_long ret
= -TARGET_EINVAL
;
4430 if (target_to_host_shmid_ds(&dsarg
, buf
))
4431 return -TARGET_EFAULT
;
4432 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4433 if (host_to_target_shmid_ds(buf
, &dsarg
))
4434 return -TARGET_EFAULT
;
4437 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4438 if (host_to_target_shminfo(buf
, &shminfo
))
4439 return -TARGET_EFAULT
;
4442 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4443 if (host_to_target_shm_info(buf
, &shm_info
))
4444 return -TARGET_EFAULT
;
4449 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4456 #ifndef TARGET_FORCE_SHMLBA
4457 /* For most architectures, SHMLBA is the same as the page size;
4458 * some architectures have larger values, in which case they should
4459 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4460 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4461 * and defining its own value for SHMLBA.
4463 * The kernel also permits SHMLBA to be set by the architecture to a
4464 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4465 * this means that addresses are rounded to the large size if
4466 * SHM_RND is set but addresses not aligned to that size are not rejected
4467 * as long as they are at least page-aligned. Since the only architecture
4468 * which uses this is ia64 this code doesn't provide for that oddity.
4470 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4472 return TARGET_PAGE_SIZE
;
4476 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4477 int shmid
, abi_ulong shmaddr
, int shmflg
)
4479 CPUState
*cpu
= env_cpu(cpu_env
);
4482 struct shmid_ds shm_info
;
4486 /* shmat pointers are always untagged */
4488 /* find out the length of the shared memory segment */
4489 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4490 if (is_error(ret
)) {
4491 /* can't get length, bail out */
4495 shmlba
= target_shmlba(cpu_env
);
4497 if (shmaddr
& (shmlba
- 1)) {
4498 if (shmflg
& SHM_RND
) {
4499 shmaddr
&= ~(shmlba
- 1);
4501 return -TARGET_EINVAL
;
4504 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4505 return -TARGET_EINVAL
;
4511 * We're mapping shared memory, so ensure we generate code for parallel
4512 * execution and flush old translations. This will work up to the level
4513 * supported by the host -- anything that requires EXCP_ATOMIC will not
4514 * be atomic with respect to an external process.
4516 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4517 cpu
->tcg_cflags
|= CF_PARALLEL
;
4522 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4524 abi_ulong mmap_start
;
4526 /* In order to use the host shmat, we need to honor host SHMLBA. */
4527 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4529 if (mmap_start
== -1) {
4531 host_raddr
= (void *)-1;
4533 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4534 shmflg
| SHM_REMAP
);
4537 if (host_raddr
== (void *)-1) {
4539 return get_errno((long)host_raddr
);
4541 raddr
=h2g((unsigned long)host_raddr
);
4543 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4544 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4545 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4547 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4548 if (!shm_regions
[i
].in_use
) {
4549 shm_regions
[i
].in_use
= true;
4550 shm_regions
[i
].start
= raddr
;
4551 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4561 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4566 /* shmdt pointers are always untagged */
4570 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4571 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4572 shm_regions
[i
].in_use
= false;
4573 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4577 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third,
                            TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth,
                            TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4705 /* kernel structure types definitions */
4707 #define STRUCT(name, ...) STRUCT_ ## name,
4708 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4710 #include "syscall_types.h"
4714 #undef STRUCT_SPECIAL
4716 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4717 #define STRUCT_SPECIAL(name)
4718 #include "syscall_types.h"
4720 #undef STRUCT_SPECIAL
4722 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4813 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4814 int fd
, int cmd
, abi_long arg
)
4816 const argtype
*arg_type
= ie
->arg_type
;
4820 struct ifconf
*host_ifconf
;
4822 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4823 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4824 int target_ifreq_size
;
4829 abi_long target_ifc_buf
;
4833 assert(arg_type
[0] == TYPE_PTR
);
4834 assert(ie
->access
== IOC_RW
);
4837 target_size
= thunk_type_size(arg_type
, 0);
4839 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4841 return -TARGET_EFAULT
;
4842 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4843 unlock_user(argptr
, arg
, 0);
4845 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4846 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4847 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4849 if (target_ifc_buf
!= 0) {
4850 target_ifc_len
= host_ifconf
->ifc_len
;
4851 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4852 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4854 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4855 if (outbufsz
> MAX_STRUCT_SIZE
) {
4857 * We can't fit all the extents into the fixed size buffer.
4858 * Allocate one that is large enough and use it instead.
4860 host_ifconf
= g_try_malloc(outbufsz
);
4862 return -TARGET_ENOMEM
;
4864 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4867 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4869 host_ifconf
->ifc_len
= host_ifc_len
;
4871 host_ifc_buf
= NULL
;
4873 host_ifconf
->ifc_buf
= host_ifc_buf
;
4875 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4876 if (!is_error(ret
)) {
4877 /* convert host ifc_len to target ifc_len */
4879 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4880 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4881 host_ifconf
->ifc_len
= target_ifc_len
;
4883 /* restore target ifc_buf */
4885 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4887 /* copy struct ifconf to target user */
4889 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4891 return -TARGET_EFAULT
;
4892 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4893 unlock_user(argptr
, arg
, target_size
);
4895 if (target_ifc_buf
!= 0) {
4896 /* copy ifreq[] to target user */
4897 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4898 for (i
= 0; i
< nb_ifreq
; i
++) {
4899 thunk_convert(argptr
+ i
* target_ifreq_size
,
4900 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4901 ifreq_arg_type
, THUNK_TARGET
);
4903 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4908 g_free(host_ifconf
);
4914 #if defined(CONFIG_USBFS)
4915 #if HOST_LONG_BITS > 64
4916 #error USBDEVFS thunks do not support >64 bit hosts yet.
4919 uint64_t target_urb_adr
;
4920 uint64_t target_buf_adr
;
4921 char *target_buf_ptr
;
4922 struct usbdevfs_urb host_urb
;
4925 static GHashTable
*usbdevfs_urb_hashtable(void)
4927 static GHashTable
*urb_hashtable
;
4929 if (!urb_hashtable
) {
4930 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4932 return urb_hashtable
;
4935 static void urb_hashtable_insert(struct live_urb
*urb
)
4937 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4938 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4941 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4943 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4944 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4947 static void urb_hashtable_remove(struct live_urb
*urb
)
4949 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4950 g_hash_table_remove(urb_hashtable
, urb
);
4954 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4955 int fd
, int cmd
, abi_long arg
)
4957 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4958 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4959 struct live_urb
*lurb
;
4963 uintptr_t target_urb_adr
;
4966 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4968 memset(buf_temp
, 0, sizeof(uint64_t));
4969 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4970 if (is_error(ret
)) {
4974 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4975 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4976 if (!lurb
->target_urb_adr
) {
4977 return -TARGET_EFAULT
;
4979 urb_hashtable_remove(lurb
);
4980 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4981 lurb
->host_urb
.buffer_length
);
4982 lurb
->target_buf_ptr
= NULL
;
4984 /* restore the guest buffer pointer */
4985 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4987 /* update the guest urb struct */
4988 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4991 return -TARGET_EFAULT
;
4993 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4994 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4996 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4997 /* write back the urb handle */
4998 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5001 return -TARGET_EFAULT
;
5004 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5005 target_urb_adr
= lurb
->target_urb_adr
;
5006 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5007 unlock_user(argptr
, arg
, target_size
);
5014 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5015 uint8_t *buf_temp
__attribute__((unused
)),
5016 int fd
, int cmd
, abi_long arg
)
5018 struct live_urb
*lurb
;
5020 /* map target address back to host URB with metadata. */
5021 lurb
= urb_hashtable_lookup(arg
);
5023 return -TARGET_EFAULT
;
5025 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
/*
 * USBDEVFS_SUBMITURB: build a host copy of the guest URB, lock the data
 * buffer in guest memory, submit it, and remember the wrapper in the URB
 * hash table so that REAPURB/DISCARDURB can find it again.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* submission failed: nothing to reap later, drop the wrapper */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5088 #endif /* CONFIG_USBFS */
/*
 * Device-mapper ioctls: the payload is a struct dm_ioctl header followed
 * by a variable-size, command-specific data area at data_start.  The data
 * can exceed MAX_STRUCT_SIZE, so it is staged through a temporary buffer
 * sized from the guest's data_size, converted in, and converted back out
 * after the host ioctl.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* convert the command-specific input payload, if any */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* leading 64-bit sector number needs byte-swapping */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        /* chain of dm_target_spec records, each followed by a string */
        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* convert the command-specific output payload back to the guest */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers =
                (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* write the converted header back to the guest */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/*
 * BLKPG: the argument is a struct blkpg_ioctl_arg whose data member points
 * at a struct blkpg_partition.  Convert both levels, then temporarily point
 * the host struct at the local partition copy before issuing the ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
/*
 * SIOCADDRT/SIOCDELRT: struct rtentry contains an embedded rt_dev string
 * pointer, so the generic thunk conversion cannot be used.  Walk the field
 * descriptors manually, lock the device-name string in guest memory for
 * the duration of the call, and convert every other field normally.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5439 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5440 int fd
, int cmd
, abi_long arg
)
5442 int sig
= target_to_host_signal(arg
);
5443 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
/*
 * SIOCGSTAMP: fetch the last-packet timestamp from the host, then copy
 * it out in either the old timeval layout or the time64 layout depending
 * on which flavour of the ioctl the guest issued.
 */
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
/*
 * SIOCGSTAMPNS: nanosecond-resolution variant of SIOCGSTAMP; copy the
 * host timespec out in the old or time64 guest layout as appropriate.
 */
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
5495 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5496 int fd
, int cmd
, abi_long arg
)
5498 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5499 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
/*
 * Release the three string buffers locked by target_to_host_drmversion().
 * With copy=true the host contents are written back to the guest; with
 * copy=false they are dropped (error path).
 */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}
/*
 * Prepare a host drm_version from the guest one: copy the three length
 * fields and lock the corresponding guest buffers (if non-empty) so the
 * host ioctl can fill them in directly.  On failure all locks taken so
 * far are released without copying back.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
/*
 * Copy the scalar results of DRM_IOCTL_VERSION back to the guest struct
 * and unlock the string buffers, committing their contents.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
/*
 * Generic DRM ioctls needing special handling.  Currently only
 * DRM_IOCTL_VERSION, whose struct contains guest pointers that must be
 * locked for the duration of the call.
 */
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
/*
 * DRM_IOCTL_I915_GETPARAM: the struct carries a pointer to an int result;
 * point it at a local, run the ioctl, then store the value back into the
 * guest-supplied location.
 */
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}
5617 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5618 int fd
, int cmd
, abi_long arg
)
5620 switch (ie
->host_cmd
) {
5621 case DRM_IOCTL_I915_GETPARAM
:
5622 return do_ioctl_drm_i915_getparam(ie
,
5623 (struct drm_i915_getparam
*)buf_temp
,
5626 return -TARGET_ENOSYS
;
/*
 * TUNSETTXFILTER: struct tun_filter has a flexible MAC-address array, so
 * read the fixed header first, then (if count is non-zero) copy in the
 * address list, checking that it fits in the conversion buffer.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* the variable-length address list must fit in buf_temp */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
/*
 * Master table of emulated ioctls, expanded from ioctls.h.  IOCTL() maps a
 * request through the generic thunk converter, IOCTL_SPECIAL() routes it to
 * a custom do_ioctl_*() handler, and IOCTL_IGNORE() accepts the request but
 * does nothing (host_cmd left 0).  Terminated by a zero entry.
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/*
 * do_ioctl() Must return target values and target errnos.
 *
 * Look the command up in ioctl_entries; hand special entries to their
 * custom handler, pass integer/pointer-opaque arguments straight through,
 * and for TYPE_PTR arguments convert the pointed-to struct in and/or out
 * through buf_temp according to the entry's access mode.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* read-only result: run first, convert out on success */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* write-only argument: convert in, then run */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* in/out argument: convert in, run, convert out on success */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag: guest<->host bit translation table (zero-terminated). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};
/* termios c_oflag: guest<->host bit translation table (zero-terminated).
 * The *DLY rows map multi-bit delay fields, one row per field value. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* termios c_cflag: guest<->host bit translation table (zero-terminated).
 * CBAUD and CSIZE rows translate the multi-bit baud/char-size fields. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* termios c_lflag: guest<->host bit translation table (zero-terminated). */
static const bitmask_transtbl lflag_tbl[] = {
  { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
  { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
  { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
  { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
  { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
  { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
  { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
  { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
  { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
  { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
  { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
  { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
  { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
  { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
  { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
  { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
  { 0, 0, 0, 0 }
};
/*
 * Convert a guest struct target_termios (src) into a host struct
 * host_termios (dst): translate the four flag words through the bitmask
 * tables and remap the control characters index by index, since the
 * TARGET_V* slots need not match the host V* slots.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Inverse of target_to_host_termios(): convert a host struct host_termios
 * (src) into a guest struct target_termios (dst).
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/*
 * Thunk descriptor for termios: conversion is done by the custom handler
 * pair above rather than by generic field tables.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
/* mmap(2) flags: guest<->host bit translation table (zero-terminated). */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
5974 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5975 * TARGET_I386 is defined if TARGET_X86_64 is defined
5977 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads; lazily allocated
   by write_ldt() the first time an entry is installed. */
static uint8_t *ldt_table;
/*
 * modify_ldt(func=0): copy up to bytecount bytes of the process LDT out
 * to the guest buffer; returns the number of bytes copied (0 if no LDT
 * has been allocated yet).
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/*
 * modify_ldt(func=1 or 0x11): install one LDT descriptor from a guest
 * struct user_desc.  oldmode selects the legacy modify_ldt(1) semantics
 * (no 'useable' bit, different empty-entry rules).  The LDT backing store
 * is allocated on first use and shared by all threads.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the flag bits, kernel user_desc layout */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* assemble the two descriptor words, kernel encoding */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes an entry
 * with legacy semantics, func 0x11 writes with the modern semantics.
 */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
6116 #if defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor into the GDT.  An
 * entry_number of -1 asks for the first free TLS slot, which is written
 * back into the guest struct.  Descriptor encoding mirrors the Linux
 * kernel's set_thread_area implementation.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* find a free TLS slot and report it back to the guest */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* unpack the flag bits, kernel user_desc layout */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* assemble the two descriptor words, kernel encoding */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6202 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6204 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6205 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6206 uint32_t base_addr
, limit
, flags
;
6207 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6208 int seg_not_present
, useable
, lm
;
6209 uint32_t *lp
, entry_1
, entry_2
;
6211 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6212 if (!target_ldt_info
)
6213 return -TARGET_EFAULT
;
6214 idx
= tswap32(target_ldt_info
->entry_number
);
6215 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6216 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6217 unlock_user_struct(target_ldt_info
, ptr
, 1);
6218 return -TARGET_EINVAL
;
6220 lp
= (uint32_t *)(gdt_table
+ idx
);
6221 entry_1
= tswap32(lp
[0]);
6222 entry_2
= tswap32(lp
[1]);
6224 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6225 contents
= (entry_2
>> 10) & 3;
6226 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6227 seg_32bit
= (entry_2
>> 22) & 1;
6228 limit_in_pages
= (entry_2
>> 23) & 1;
6229 useable
= (entry_2
>> 20) & 1;
6233 lm
= (entry_2
>> 21) & 1;
6235 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6236 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6237 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6238 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6239 base_addr
= (entry_1
>> 16) |
6240 (entry_2
& 0xff000000) |
6241 ((entry_2
& 0xff) << 16);
6242 target_ldt_info
->base_addr
= tswapal(base_addr
);
6243 target_ldt_info
->limit
= tswap32(limit
);
6244 target_ldt_info
->flags
= tswap32(flags
);
6245 unlock_user_struct(target_ldt_info
, ptr
, 1);
6249 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6251 return -TARGET_ENOSYS
;
6254 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6261 case TARGET_ARCH_SET_GS
:
6262 case TARGET_ARCH_SET_FS
:
6263 if (code
== TARGET_ARCH_SET_GS
)
6267 cpu_x86_load_seg(env
, idx
, 0);
6268 env
->segs
[idx
].base
= addr
;
6270 case TARGET_ARCH_GET_GS
:
6271 case TARGET_ARCH_GET_FS
:
6272 if (code
== TARGET_ARCH_GET_GS
)
6276 val
= env
->segs
[idx
].base
;
6277 if (put_user(val
, addr
, abi_ulong
))
6278 ret
= -TARGET_EFAULT
;
6281 ret
= -TARGET_EINVAL
;
6286 #endif /* defined(TARGET_ABI32 */
6287 #endif /* defined(TARGET_I386) */
6290 * These constants are generic. Supply any that are missing from the host.
6293 # define PR_SET_NAME 15
6294 # define PR_GET_NAME 16
6296 #ifndef PR_SET_FP_MODE
6297 # define PR_SET_FP_MODE 45
6298 # define PR_GET_FP_MODE 46
6299 # define PR_FP_MODE_FR (1 << 0)
6300 # define PR_FP_MODE_FRE (1 << 1)
6302 #ifndef PR_SVE_SET_VL
6303 # define PR_SVE_SET_VL 50
6304 # define PR_SVE_GET_VL 51
6305 # define PR_SVE_VL_LEN_MASK 0xffff
6306 # define PR_SVE_VL_INHERIT (1 << 17)
6308 #ifndef PR_PAC_RESET_KEYS
6309 # define PR_PAC_RESET_KEYS 54
6310 # define PR_PAC_APIAKEY (1 << 0)
6311 # define PR_PAC_APIBKEY (1 << 1)
6312 # define PR_PAC_APDAKEY (1 << 2)
6313 # define PR_PAC_APDBKEY (1 << 3)
6314 # define PR_PAC_APGAKEY (1 << 4)
6316 #ifndef PR_SET_TAGGED_ADDR_CTRL
6317 # define PR_SET_TAGGED_ADDR_CTRL 55
6318 # define PR_GET_TAGGED_ADDR_CTRL 56
6319 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6321 #ifndef PR_MTE_TCF_SHIFT
6322 # define PR_MTE_TCF_SHIFT 1
6323 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6324 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6325 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6326 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6327 # define PR_MTE_TAG_SHIFT 3
6328 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6330 #ifndef PR_SET_IO_FLUSHER
6331 # define PR_SET_IO_FLUSHER 57
6332 # define PR_GET_IO_FLUSHER 58
6334 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6335 # define PR_SET_SYSCALL_USER_DISPATCH 59
6337 #ifndef PR_SME_SET_VL
6338 # define PR_SME_SET_VL 63
6339 # define PR_SME_GET_VL 64
6340 # define PR_SME_VL_LEN_MASK 0xffff
6341 # define PR_SME_VL_INHERIT (1 << 17)
6344 #include "target_prctl.h"
6346 static abi_long
do_prctl_inval0(CPUArchState
*env
)
6348 return -TARGET_EINVAL
;
6351 static abi_long
do_prctl_inval1(CPUArchState
*env
, abi_long arg2
)
6353 return -TARGET_EINVAL
;
6356 #ifndef do_prctl_get_fp_mode
6357 #define do_prctl_get_fp_mode do_prctl_inval0
6359 #ifndef do_prctl_set_fp_mode
6360 #define do_prctl_set_fp_mode do_prctl_inval1
6362 #ifndef do_prctl_sve_get_vl
6363 #define do_prctl_sve_get_vl do_prctl_inval0
6365 #ifndef do_prctl_sve_set_vl
6366 #define do_prctl_sve_set_vl do_prctl_inval1
6368 #ifndef do_prctl_reset_keys
6369 #define do_prctl_reset_keys do_prctl_inval1
6371 #ifndef do_prctl_set_tagged_addr_ctrl
6372 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6374 #ifndef do_prctl_get_tagged_addr_ctrl
6375 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6377 #ifndef do_prctl_get_unalign
6378 #define do_prctl_get_unalign do_prctl_inval1
6380 #ifndef do_prctl_set_unalign
6381 #define do_prctl_set_unalign do_prctl_inval1
6383 #ifndef do_prctl_sme_get_vl
6384 #define do_prctl_sme_get_vl do_prctl_inval0
6386 #ifndef do_prctl_sme_set_vl
6387 #define do_prctl_sme_set_vl do_prctl_inval1
6390 static abi_long
do_prctl(CPUArchState
*env
, abi_long option
, abi_long arg2
,
6391 abi_long arg3
, abi_long arg4
, abi_long arg5
)
6396 case PR_GET_PDEATHSIG
:
6399 ret
= get_errno(prctl(PR_GET_PDEATHSIG
, &deathsig
,
6401 if (!is_error(ret
) &&
6402 put_user_s32(host_to_target_signal(deathsig
), arg2
)) {
6403 return -TARGET_EFAULT
;
6407 case PR_SET_PDEATHSIG
:
6408 return get_errno(prctl(PR_SET_PDEATHSIG
, target_to_host_signal(arg2
),
6412 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
6414 return -TARGET_EFAULT
;
6416 ret
= get_errno(prctl(PR_GET_NAME
, (uintptr_t)name
,
6418 unlock_user(name
, arg2
, 16);
6423 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
6425 return -TARGET_EFAULT
;
6427 ret
= get_errno(prctl(PR_SET_NAME
, (uintptr_t)name
,
6429 unlock_user(name
, arg2
, 0);
6432 case PR_GET_FP_MODE
:
6433 return do_prctl_get_fp_mode(env
);
6434 case PR_SET_FP_MODE
:
6435 return do_prctl_set_fp_mode(env
, arg2
);
6437 return do_prctl_sve_get_vl(env
);
6439 return do_prctl_sve_set_vl(env
, arg2
);
6441 return do_prctl_sme_get_vl(env
);
6443 return do_prctl_sme_set_vl(env
, arg2
);
6444 case PR_PAC_RESET_KEYS
:
6445 if (arg3
|| arg4
|| arg5
) {
6446 return -TARGET_EINVAL
;
6448 return do_prctl_reset_keys(env
, arg2
);
6449 case PR_SET_TAGGED_ADDR_CTRL
:
6450 if (arg3
|| arg4
|| arg5
) {
6451 return -TARGET_EINVAL
;
6453 return do_prctl_set_tagged_addr_ctrl(env
, arg2
);
6454 case PR_GET_TAGGED_ADDR_CTRL
:
6455 if (arg2
|| arg3
|| arg4
|| arg5
) {
6456 return -TARGET_EINVAL
;
6458 return do_prctl_get_tagged_addr_ctrl(env
);
6460 case PR_GET_UNALIGN
:
6461 return do_prctl_get_unalign(env
, arg2
);
6462 case PR_SET_UNALIGN
:
6463 return do_prctl_set_unalign(env
, arg2
);
6465 case PR_CAP_AMBIENT
:
6466 case PR_CAPBSET_READ
:
6467 case PR_CAPBSET_DROP
:
6468 case PR_GET_DUMPABLE
:
6469 case PR_SET_DUMPABLE
:
6470 case PR_GET_KEEPCAPS
:
6471 case PR_SET_KEEPCAPS
:
6472 case PR_GET_SECUREBITS
:
6473 case PR_SET_SECUREBITS
:
6476 case PR_GET_TIMERSLACK
:
6477 case PR_SET_TIMERSLACK
:
6479 case PR_MCE_KILL_GET
:
6480 case PR_GET_NO_NEW_PRIVS
:
6481 case PR_SET_NO_NEW_PRIVS
:
6482 case PR_GET_IO_FLUSHER
:
6483 case PR_SET_IO_FLUSHER
:
6484 /* Some prctl options have no pointer arguments and we can pass on. */
6485 return get_errno(prctl(option
, arg2
, arg3
, arg4
, arg5
));
6487 case PR_GET_CHILD_SUBREAPER
:
6488 case PR_SET_CHILD_SUBREAPER
:
6489 case PR_GET_SPECULATION_CTRL
:
6490 case PR_SET_SPECULATION_CTRL
:
6491 case PR_GET_TID_ADDRESS
:
6493 return -TARGET_EINVAL
;
6497 /* Was used for SPE on PowerPC. */
6498 return -TARGET_EINVAL
;
6505 case PR_GET_SECCOMP
:
6506 case PR_SET_SECCOMP
:
6507 case PR_SET_SYSCALL_USER_DISPATCH
:
6508 case PR_GET_THP_DISABLE
:
6509 case PR_SET_THP_DISABLE
:
6512 /* Disable to prevent the target disabling stuff we need. */
6513 return -TARGET_EINVAL
;
6516 qemu_log_mask(LOG_UNIMP
, "Unsupported prctl: " TARGET_ABI_FMT_ld
"\n",
6518 return -TARGET_EINVAL
;
6522 #define NEW_STACK_SIZE 0x40000
6525 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6528 pthread_mutex_t mutex
;
6529 pthread_cond_t cond
;
6532 abi_ulong child_tidptr
;
6533 abi_ulong parent_tidptr
;
6537 static void *clone_func(void *arg
)
6539 new_thread_info
*info
= arg
;
6544 rcu_register_thread();
6545 tcg_register_thread();
6549 ts
= (TaskState
*)cpu
->opaque
;
6550 info
->tid
= sys_gettid();
6552 if (info
->child_tidptr
)
6553 put_user_u32(info
->tid
, info
->child_tidptr
);
6554 if (info
->parent_tidptr
)
6555 put_user_u32(info
->tid
, info
->parent_tidptr
);
6556 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6557 /* Enable signals. */
6558 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6559 /* Signal to the parent that we're ready. */
6560 pthread_mutex_lock(&info
->mutex
);
6561 pthread_cond_broadcast(&info
->cond
);
6562 pthread_mutex_unlock(&info
->mutex
);
6563 /* Wait until the parent has finished initializing the tls state. */
6564 pthread_mutex_lock(&clone_lock
);
6565 pthread_mutex_unlock(&clone_lock
);
6571 /* do_fork() Must return host values and target errnos (unlike most
6572 do_*() functions). */
6573 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6574 abi_ulong parent_tidptr
, target_ulong newtls
,
6575 abi_ulong child_tidptr
)
6577 CPUState
*cpu
= env_cpu(env
);
6581 CPUArchState
*new_env
;
6584 flags
&= ~CLONE_IGNORED_FLAGS
;
6586 /* Emulate vfork() with fork() */
6587 if (flags
& CLONE_VFORK
)
6588 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6590 if (flags
& CLONE_VM
) {
6591 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6592 new_thread_info info
;
6593 pthread_attr_t attr
;
6595 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6596 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6597 return -TARGET_EINVAL
;
6600 ts
= g_new0(TaskState
, 1);
6601 init_task_state(ts
);
6603 /* Grab a mutex so that thread setup appears atomic. */
6604 pthread_mutex_lock(&clone_lock
);
6607 * If this is our first additional thread, we need to ensure we
6608 * generate code for parallel execution and flush old translations.
6609 * Do this now so that the copy gets CF_PARALLEL too.
6611 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6612 cpu
->tcg_cflags
|= CF_PARALLEL
;
6616 /* we create a new CPU instance. */
6617 new_env
= cpu_copy(env
);
6618 /* Init regs that differ from the parent. */
6619 cpu_clone_regs_child(new_env
, newsp
, flags
);
6620 cpu_clone_regs_parent(env
, flags
);
6621 new_cpu
= env_cpu(new_env
);
6622 new_cpu
->opaque
= ts
;
6623 ts
->bprm
= parent_ts
->bprm
;
6624 ts
->info
= parent_ts
->info
;
6625 ts
->signal_mask
= parent_ts
->signal_mask
;
6627 if (flags
& CLONE_CHILD_CLEARTID
) {
6628 ts
->child_tidptr
= child_tidptr
;
6631 if (flags
& CLONE_SETTLS
) {
6632 cpu_set_tls (new_env
, newtls
);
6635 memset(&info
, 0, sizeof(info
));
6636 pthread_mutex_init(&info
.mutex
, NULL
);
6637 pthread_mutex_lock(&info
.mutex
);
6638 pthread_cond_init(&info
.cond
, NULL
);
6640 if (flags
& CLONE_CHILD_SETTID
) {
6641 info
.child_tidptr
= child_tidptr
;
6643 if (flags
& CLONE_PARENT_SETTID
) {
6644 info
.parent_tidptr
= parent_tidptr
;
6647 ret
= pthread_attr_init(&attr
);
6648 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6649 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6650 /* It is not safe to deliver signals until the child has finished
6651 initializing, so temporarily block all signals. */
6652 sigfillset(&sigmask
);
6653 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6654 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6656 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6657 /* TODO: Free new CPU state if thread creation failed. */
6659 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6660 pthread_attr_destroy(&attr
);
6662 /* Wait for the child to initialize. */
6663 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6668 pthread_mutex_unlock(&info
.mutex
);
6669 pthread_cond_destroy(&info
.cond
);
6670 pthread_mutex_destroy(&info
.mutex
);
6671 pthread_mutex_unlock(&clone_lock
);
6673 /* if no CLONE_VM, we consider it is a fork */
6674 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6675 return -TARGET_EINVAL
;
6678 /* We can't support custom termination signals */
6679 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6680 return -TARGET_EINVAL
;
6683 if (block_signals()) {
6684 return -QEMU_ERESTARTSYS
;
6690 /* Child Process. */
6691 cpu_clone_regs_child(env
, newsp
, flags
);
6693 /* There is a race condition here. The parent process could
6694 theoretically read the TID in the child process before the child
6695 tid is set. This would require using either ptrace
6696 (not implemented) or having *_tidptr to point at a shared memory
6697 mapping. We can't repeat the spinlock hack used above because
6698 the child process gets its own copy of the lock. */
6699 if (flags
& CLONE_CHILD_SETTID
)
6700 put_user_u32(sys_gettid(), child_tidptr
);
6701 if (flags
& CLONE_PARENT_SETTID
)
6702 put_user_u32(sys_gettid(), parent_tidptr
);
6703 ts
= (TaskState
*)cpu
->opaque
;
6704 if (flags
& CLONE_SETTLS
)
6705 cpu_set_tls (env
, newtls
);
6706 if (flags
& CLONE_CHILD_CLEARTID
)
6707 ts
->child_tidptr
= child_tidptr
;
6709 cpu_clone_regs_parent(env
, flags
);
6716 /* warning : doesn't handle linux specific flags... */
6717 static int target_to_host_fcntl_cmd(int cmd
)
6722 case TARGET_F_DUPFD
:
6723 case TARGET_F_GETFD
:
6724 case TARGET_F_SETFD
:
6725 case TARGET_F_GETFL
:
6726 case TARGET_F_SETFL
:
6727 case TARGET_F_OFD_GETLK
:
6728 case TARGET_F_OFD_SETLK
:
6729 case TARGET_F_OFD_SETLKW
:
6732 case TARGET_F_GETLK
:
6735 case TARGET_F_SETLK
:
6738 case TARGET_F_SETLKW
:
6741 case TARGET_F_GETOWN
:
6744 case TARGET_F_SETOWN
:
6747 case TARGET_F_GETSIG
:
6750 case TARGET_F_SETSIG
:
6753 #if TARGET_ABI_BITS == 32
6754 case TARGET_F_GETLK64
:
6757 case TARGET_F_SETLK64
:
6760 case TARGET_F_SETLKW64
:
6764 case TARGET_F_SETLEASE
:
6767 case TARGET_F_GETLEASE
:
6770 #ifdef F_DUPFD_CLOEXEC
6771 case TARGET_F_DUPFD_CLOEXEC
:
6772 ret
= F_DUPFD_CLOEXEC
;
6775 case TARGET_F_NOTIFY
:
6779 case TARGET_F_GETOWN_EX
:
6784 case TARGET_F_SETOWN_EX
:
6789 case TARGET_F_SETPIPE_SZ
:
6792 case TARGET_F_GETPIPE_SZ
:
6797 case TARGET_F_ADD_SEALS
:
6800 case TARGET_F_GET_SEALS
:
6805 ret
= -TARGET_EINVAL
;
6809 #if defined(__powerpc64__)
6810 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6811 * is not supported by kernel. The glibc fcntl call actually adjusts
6812 * them to 5, 6 and 7 before making the syscall(). Since we make the
6813 * syscall directly, adjust to what is supported by the kernel.
6815 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6816 ret
-= F_GETLK64
- 5;
6823 #define FLOCK_TRANSTBL \
6825 TRANSTBL_CONVERT(F_RDLCK); \
6826 TRANSTBL_CONVERT(F_WRLCK); \
6827 TRANSTBL_CONVERT(F_UNLCK); \
6830 static int target_to_host_flock(int type
)
6832 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6834 #undef TRANSTBL_CONVERT
6835 return -TARGET_EINVAL
;
6838 static int host_to_target_flock(int type
)
6840 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6842 #undef TRANSTBL_CONVERT
6843 /* if we don't know how to convert the value coming
6844 * from the host we copy to the target field as-is
6849 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6850 abi_ulong target_flock_addr
)
6852 struct target_flock
*target_fl
;
6855 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6856 return -TARGET_EFAULT
;
6859 __get_user(l_type
, &target_fl
->l_type
);
6860 l_type
= target_to_host_flock(l_type
);
6864 fl
->l_type
= l_type
;
6865 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6866 __get_user(fl
->l_start
, &target_fl
->l_start
);
6867 __get_user(fl
->l_len
, &target_fl
->l_len
);
6868 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6869 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6873 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6874 const struct flock64
*fl
)
6876 struct target_flock
*target_fl
;
6879 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6880 return -TARGET_EFAULT
;
6883 l_type
= host_to_target_flock(fl
->l_type
);
6884 __put_user(l_type
, &target_fl
->l_type
);
6885 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6886 __put_user(fl
->l_start
, &target_fl
->l_start
);
6887 __put_user(fl
->l_len
, &target_fl
->l_len
);
6888 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6889 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6893 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6894 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6896 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6897 struct target_oabi_flock64
{
6905 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6906 abi_ulong target_flock_addr
)
6908 struct target_oabi_flock64
*target_fl
;
6911 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6912 return -TARGET_EFAULT
;
6915 __get_user(l_type
, &target_fl
->l_type
);
6916 l_type
= target_to_host_flock(l_type
);
6920 fl
->l_type
= l_type
;
6921 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6922 __get_user(fl
->l_start
, &target_fl
->l_start
);
6923 __get_user(fl
->l_len
, &target_fl
->l_len
);
6924 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6925 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6929 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6930 const struct flock64
*fl
)
6932 struct target_oabi_flock64
*target_fl
;
6935 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6936 return -TARGET_EFAULT
;
6939 l_type
= host_to_target_flock(fl
->l_type
);
6940 __put_user(l_type
, &target_fl
->l_type
);
6941 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6942 __put_user(fl
->l_start
, &target_fl
->l_start
);
6943 __put_user(fl
->l_len
, &target_fl
->l_len
);
6944 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6945 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6950 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6951 abi_ulong target_flock_addr
)
6953 struct target_flock64
*target_fl
;
6956 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6957 return -TARGET_EFAULT
;
6960 __get_user(l_type
, &target_fl
->l_type
);
6961 l_type
= target_to_host_flock(l_type
);
6965 fl
->l_type
= l_type
;
6966 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6967 __get_user(fl
->l_start
, &target_fl
->l_start
);
6968 __get_user(fl
->l_len
, &target_fl
->l_len
);
6969 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6970 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6974 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6975 const struct flock64
*fl
)
6977 struct target_flock64
*target_fl
;
6980 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6981 return -TARGET_EFAULT
;
6984 l_type
= host_to_target_flock(fl
->l_type
);
6985 __put_user(l_type
, &target_fl
->l_type
);
6986 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6987 __put_user(fl
->l_start
, &target_fl
->l_start
);
6988 __put_user(fl
->l_len
, &target_fl
->l_len
);
6989 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6990 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6994 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6996 struct flock64 fl64
;
6998 struct f_owner_ex fox
;
6999 struct target_f_owner_ex
*target_fox
;
7002 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
7004 if (host_cmd
== -TARGET_EINVAL
)
7008 case TARGET_F_GETLK
:
7009 ret
= copy_from_user_flock(&fl64
, arg
);
7013 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7015 ret
= copy_to_user_flock(arg
, &fl64
);
7019 case TARGET_F_SETLK
:
7020 case TARGET_F_SETLKW
:
7021 ret
= copy_from_user_flock(&fl64
, arg
);
7025 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7028 case TARGET_F_GETLK64
:
7029 case TARGET_F_OFD_GETLK
:
7030 ret
= copy_from_user_flock64(&fl64
, arg
);
7034 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7036 ret
= copy_to_user_flock64(arg
, &fl64
);
7039 case TARGET_F_SETLK64
:
7040 case TARGET_F_SETLKW64
:
7041 case TARGET_F_OFD_SETLK
:
7042 case TARGET_F_OFD_SETLKW
:
7043 ret
= copy_from_user_flock64(&fl64
, arg
);
7047 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7050 case TARGET_F_GETFL
:
7051 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7053 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
7057 case TARGET_F_SETFL
:
7058 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
7059 target_to_host_bitmask(arg
,
7064 case TARGET_F_GETOWN_EX
:
7065 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7067 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
7068 return -TARGET_EFAULT
;
7069 target_fox
->type
= tswap32(fox
.type
);
7070 target_fox
->pid
= tswap32(fox
.pid
);
7071 unlock_user_struct(target_fox
, arg
, 1);
7077 case TARGET_F_SETOWN_EX
:
7078 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
7079 return -TARGET_EFAULT
;
7080 fox
.type
= tswap32(target_fox
->type
);
7081 fox
.pid
= tswap32(target_fox
->pid
);
7082 unlock_user_struct(target_fox
, arg
, 0);
7083 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7087 case TARGET_F_SETSIG
:
7088 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
7091 case TARGET_F_GETSIG
:
7092 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
7095 case TARGET_F_SETOWN
:
7096 case TARGET_F_GETOWN
:
7097 case TARGET_F_SETLEASE
:
7098 case TARGET_F_GETLEASE
:
7099 case TARGET_F_SETPIPE_SZ
:
7100 case TARGET_F_GETPIPE_SZ
:
7101 case TARGET_F_ADD_SEALS
:
7102 case TARGET_F_GET_SEALS
:
7103 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7107 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
7115 static inline int high2lowuid(int uid
)
7123 static inline int high2lowgid(int gid
)
7131 static inline int low2highuid(int uid
)
7133 if ((int16_t)uid
== -1)
7139 static inline int low2highgid(int gid
)
7141 if ((int16_t)gid
== -1)
7146 static inline int tswapid(int id
)
7151 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7153 #else /* !USE_UID16 */
7154 static inline int high2lowuid(int uid
)
7158 static inline int high2lowgid(int gid
)
7162 static inline int low2highuid(int uid
)
7166 static inline int low2highgid(int gid
)
7170 static inline int tswapid(int id
)
7175 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7177 #endif /* USE_UID16 */
7179 /* We must do direct syscalls for setting UID/GID, because we want to
7180 * implement the Linux system call semantics of "change only for this thread",
7181 * not the libc/POSIX semantics of "change for all threads in process".
7182 * (See http://ewontfix.com/17/ for more details.)
7183 * We use the 32-bit version of the syscalls if present; if it is not
7184 * then either the host architecture supports 32-bit UIDs natively with
7185 * the standard syscall, or the 16-bit UID is the best we can do.
7187 #ifdef __NR_setuid32
7188 #define __NR_sys_setuid __NR_setuid32
7190 #define __NR_sys_setuid __NR_setuid
7192 #ifdef __NR_setgid32
7193 #define __NR_sys_setgid __NR_setgid32
7195 #define __NR_sys_setgid __NR_setgid
7197 #ifdef __NR_setresuid32
7198 #define __NR_sys_setresuid __NR_setresuid32
7200 #define __NR_sys_setresuid __NR_setresuid
7202 #ifdef __NR_setresgid32
7203 #define __NR_sys_setresgid __NR_setresgid32
7205 #define __NR_sys_setresgid __NR_setresgid
7208 _syscall1(int, sys_setuid
, uid_t
, uid
)
7209 _syscall1(int, sys_setgid
, gid_t
, gid
)
7210 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7211 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7213 void syscall_init(void)
7216 const argtype
*arg_type
;
7219 thunk_init(STRUCT_MAX
);
7221 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7222 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7223 #include "syscall_types.h"
7225 #undef STRUCT_SPECIAL
7227 /* we patch the ioctl size if necessary. We rely on the fact that
7228 no ioctl has all the bits at '1' in the size field */
7230 while (ie
->target_cmd
!= 0) {
7231 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7232 TARGET_IOC_SIZEMASK
) {
7233 arg_type
= ie
->arg_type
;
7234 if (arg_type
[0] != TYPE_PTR
) {
7235 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7240 size
= thunk_type_size(arg_type
, 0);
7241 ie
->target_cmd
= (ie
->target_cmd
&
7242 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7243 (size
<< TARGET_IOC_SIZESHIFT
);
7246 /* automatic consistency check if same arch */
7247 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7248 (defined(__x86_64__) && defined(TARGET_X86_64))
7249 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7250 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7251 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
7258 #ifdef TARGET_NR_truncate64
7259 static inline abi_long
target_truncate64(CPUArchState
*cpu_env
, const char *arg1
,
7264 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
7268 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7272 #ifdef TARGET_NR_ftruncate64
7273 static inline abi_long
target_ftruncate64(CPUArchState
*cpu_env
, abi_long arg1
,
7278 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
7282 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7286 #if defined(TARGET_NR_timer_settime) || \
7287 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7288 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_its
,
7289 abi_ulong target_addr
)
7291 if (target_to_host_timespec(&host_its
->it_interval
, target_addr
+
7292 offsetof(struct target_itimerspec
,
7294 target_to_host_timespec(&host_its
->it_value
, target_addr
+
7295 offsetof(struct target_itimerspec
,
7297 return -TARGET_EFAULT
;
7304 #if defined(TARGET_NR_timer_settime64) || \
7305 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7306 static inline abi_long
target_to_host_itimerspec64(struct itimerspec
*host_its
,
7307 abi_ulong target_addr
)
7309 if (target_to_host_timespec64(&host_its
->it_interval
, target_addr
+
7310 offsetof(struct target__kernel_itimerspec
,
7312 target_to_host_timespec64(&host_its
->it_value
, target_addr
+
7313 offsetof(struct target__kernel_itimerspec
,
7315 return -TARGET_EFAULT
;
7322 #if ((defined(TARGET_NR_timerfd_gettime) || \
7323 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7324 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7325 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7326 struct itimerspec
*host_its
)
7328 if (host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7330 &host_its
->it_interval
) ||
7331 host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7333 &host_its
->it_value
)) {
7334 return -TARGET_EFAULT
;
7340 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7341 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7342 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7343 static inline abi_long
host_to_target_itimerspec64(abi_ulong target_addr
,
7344 struct itimerspec
*host_its
)
7346 if (host_to_target_timespec64(target_addr
+
7347 offsetof(struct target__kernel_itimerspec
,
7349 &host_its
->it_interval
) ||
7350 host_to_target_timespec64(target_addr
+
7351 offsetof(struct target__kernel_itimerspec
,
7353 &host_its
->it_value
)) {
7354 return -TARGET_EFAULT
;
7360 #if defined(TARGET_NR_adjtimex) || \
7361 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7362 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7363 abi_long target_addr
)
7365 struct target_timex
*target_tx
;
7367 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7368 return -TARGET_EFAULT
;
7371 __get_user(host_tx
->modes
, &target_tx
->modes
);
7372 __get_user(host_tx
->offset
, &target_tx
->offset
);
7373 __get_user(host_tx
->freq
, &target_tx
->freq
);
7374 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7375 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7376 __get_user(host_tx
->status
, &target_tx
->status
);
7377 __get_user(host_tx
->constant
, &target_tx
->constant
);
7378 __get_user(host_tx
->precision
, &target_tx
->precision
);
7379 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7380 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7381 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7382 __get_user(host_tx
->tick
, &target_tx
->tick
);
7383 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7384 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7385 __get_user(host_tx
->shift
, &target_tx
->shift
);
7386 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7387 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7388 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7389 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7390 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7391 __get_user(host_tx
->tai
, &target_tx
->tai
);
7393 unlock_user_struct(target_tx
, target_addr
, 0);
7397 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7398 struct timex
*host_tx
)
7400 struct target_timex
*target_tx
;
7402 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7403 return -TARGET_EFAULT
;
7406 __put_user(host_tx
->modes
, &target_tx
->modes
);
7407 __put_user(host_tx
->offset
, &target_tx
->offset
);
7408 __put_user(host_tx
->freq
, &target_tx
->freq
);
7409 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7410 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7411 __put_user(host_tx
->status
, &target_tx
->status
);
7412 __put_user(host_tx
->constant
, &target_tx
->constant
);
7413 __put_user(host_tx
->precision
, &target_tx
->precision
);
7414 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7415 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7416 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7417 __put_user(host_tx
->tick
, &target_tx
->tick
);
7418 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7419 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7420 __put_user(host_tx
->shift
, &target_tx
->shift
);
7421 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7422 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7423 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7424 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7425 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7426 __put_user(host_tx
->tai
, &target_tx
->tai
);
7428 unlock_user_struct(target_tx
, target_addr
, 1);
7434 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7435 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7436 abi_long target_addr
)
7438 struct target__kernel_timex
*target_tx
;
7440 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7441 offsetof(struct target__kernel_timex
,
7443 return -TARGET_EFAULT
;
7446 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7447 return -TARGET_EFAULT
;
7450 __get_user(host_tx
->modes
, &target_tx
->modes
);
7451 __get_user(host_tx
->offset
, &target_tx
->offset
);
7452 __get_user(host_tx
->freq
, &target_tx
->freq
);
7453 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7454 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7455 __get_user(host_tx
->status
, &target_tx
->status
);
7456 __get_user(host_tx
->constant
, &target_tx
->constant
);
7457 __get_user(host_tx
->precision
, &target_tx
->precision
);
7458 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7459 __get_user(host_tx
->tick
, &target_tx
->tick
);
7460 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7461 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7462 __get_user(host_tx
->shift
, &target_tx
->shift
);
7463 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7464 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7465 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7466 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7467 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7468 __get_user(host_tx
->tai
, &target_tx
->tai
);
7470 unlock_user_struct(target_tx
, target_addr
, 0);
7474 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7475 struct timex
*host_tx
)
7477 struct target__kernel_timex
*target_tx
;
7479 if (copy_to_user_timeval64(target_addr
+
7480 offsetof(struct target__kernel_timex
, time
),
7482 return -TARGET_EFAULT
;
7485 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7486 return -TARGET_EFAULT
;
7489 __put_user(host_tx
->modes
, &target_tx
->modes
);
7490 __put_user(host_tx
->offset
, &target_tx
->offset
);
7491 __put_user(host_tx
->freq
, &target_tx
->freq
);
7492 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7493 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7494 __put_user(host_tx
->status
, &target_tx
->status
);
7495 __put_user(host_tx
->constant
, &target_tx
->constant
);
7496 __put_user(host_tx
->precision
, &target_tx
->precision
);
7497 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7498 __put_user(host_tx
->tick
, &target_tx
->tick
);
7499 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7500 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7501 __put_user(host_tx
->shift
, &target_tx
->shift
);
7502 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7503 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7504 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7505 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7506 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7507 __put_user(host_tx
->tai
, &target_tx
->tai
);
7509 unlock_user_struct(target_tx
, target_addr
, 1);
7514 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7515 #define sigev_notify_thread_id _sigev_un._tid
7518 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7519 abi_ulong target_addr
)
7521 struct target_sigevent
*target_sevp
;
7523 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7524 return -TARGET_EFAULT
;
7527 /* This union is awkward on 64 bit systems because it has a 32 bit
7528 * integer and a pointer in it; we follow the conversion approach
7529 * used for handling sigval types in signal.c so the guest should get
7530 * the correct value back even if we did a 64 bit byteswap and it's
7531 * using the 32 bit integer.
7533 host_sevp
->sigev_value
.sival_ptr
=
7534 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7535 host_sevp
->sigev_signo
=
7536 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7537 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7538 host_sevp
->sigev_notify_thread_id
= tswap32(target_sevp
->_sigev_un
._tid
);
7540 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits into the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to a guest 64-bit stat structure.
 * On 32-bit ARM the layout differs between EABI and OABI, so the EABI
 * case is handled by a dedicated branch.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy an already host-filled struct target_statx out to guest memory,
 * byte-swapping every field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);
    return 0;
}
#endif
/*
 * Invoke the raw host futex syscall, choosing between __NR_futex and
 * __NR_futex_time64 depending on the host's time_t width.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7708 static int do_safe_futex(int *uaddr
, int op
, int val
,
7709 const struct timespec
*timeout
, int *uaddr2
,
7712 #if HOST_LONG_BITS == 64
7713 #if defined(__NR_futex)
7714 /* always a 64-bit time_t, it doesn't define _time64 version */
7715 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7717 #else /* HOST_LONG_BITS == 64 */
7718 #if defined(__NR_futex_time64)
7719 if (sizeof(timeout
->tv_sec
) == 8) {
7720 /* _time64 function on 32bit arch */
7721 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7725 #if defined(__NR_futex)
7726 /* old function on 32bit arch */
7727 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7729 #endif /* HOST_LONG_BITS == 64 */
7730 return -TARGET_ENOSYS
;
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
/*
 * 64-bit-time_t variant of do_futex(): identical dispatch, but timeouts
 * are read from the guest with target_to_host_timespec64().
 */
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest's handle_bytes, run the
 * host syscall into a scratch buffer, then copy the (opaque) handle and
 * byte-swapped header fields plus the mount id back to the guest.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest's file handle into a host
 * buffer (swapping the header fields) and issue the host syscall with
 * translated open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Emulate signalfd4(2): validate the guest flags, convert the guest
 * signal mask and flags to host form, create the host signalfd, and
 * register its fd translator so reads get byte-swapped.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    abi_long ret;
    target_sigset_t *target_mask;
    sigset_t host_mask;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7970 static int open_self_cmdline(CPUArchState
*cpu_env
, int fd
)
7972 CPUState
*cpu
= env_cpu(cpu_env
);
7973 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7976 for (i
= 0; i
< bprm
->argc
; i
++) {
7977 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7979 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7987 static int open_self_maps(CPUArchState
*cpu_env
, int fd
)
7989 CPUState
*cpu
= env_cpu(cpu_env
);
7990 TaskState
*ts
= cpu
->opaque
;
7991 GSList
*map_info
= read_self_maps();
7995 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7996 MapInfo
*e
= (MapInfo
*) s
->data
;
7998 if (h2g_valid(e
->start
)) {
7999 unsigned long min
= e
->start
;
8000 unsigned long max
= e
->end
;
8001 int flags
= page_get_flags(h2g(min
));
8004 max
= h2g_valid(max
- 1) ?
8005 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
8007 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
8011 if (h2g(min
) == ts
->info
->stack_limit
) {
8017 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
8018 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
8019 h2g(min
), h2g(max
- 1) + 1,
8020 (flags
& PAGE_READ
) ? 'r' : '-',
8021 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
8022 (flags
& PAGE_EXEC
) ? 'x' : '-',
8023 e
->is_priv
? 'p' : 's',
8024 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
8026 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
8033 free_self_maps(map_info
);
8035 #ifdef TARGET_VSYSCALL_PAGE
8037 * We only support execution from the vsyscall page.
8038 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8040 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
8041 " --xp 00000000 00:00 0",
8042 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
8043 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
8049 static int open_self_stat(CPUArchState
*cpu_env
, int fd
)
8051 CPUState
*cpu
= env_cpu(cpu_env
);
8052 TaskState
*ts
= cpu
->opaque
;
8053 g_autoptr(GString
) buf
= g_string_new(NULL
);
8056 for (i
= 0; i
< 44; i
++) {
8059 g_string_printf(buf
, FMT_pid
" ", getpid());
8060 } else if (i
== 1) {
8062 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
8063 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
8064 g_string_printf(buf
, "(%.15s) ", bin
);
8065 } else if (i
== 3) {
8067 g_string_printf(buf
, FMT_pid
" ", getppid());
8068 } else if (i
== 21) {
8070 g_string_printf(buf
, "%" PRIu64
" ", ts
->start_boottime
);
8071 } else if (i
== 27) {
8073 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
8075 /* for the rest, there is MasterCard */
8076 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
8079 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
8087 static int open_self_auxv(CPUArchState
*cpu_env
, int fd
)
8089 CPUState
*cpu
= env_cpu(cpu_env
);
8090 TaskState
*ts
= cpu
->opaque
;
8091 abi_ulong auxv
= ts
->info
->saved_auxv
;
8092 abi_ulong len
= ts
->info
->auxv_len
;
8096 * Auxiliary vector is stored in target process stack.
8097 * read in whole auxv vector and copy it to file
8099 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
8103 r
= write(fd
, ptr
, len
);
8110 lseek(fd
, 0, SEEK_SET
);
8111 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 when filename names the given entry of this process's own
 * /proc directory — either "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * with <pid> equal to getpid() — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
8141 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8142 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used for absolute /proc paths (e.g. "/proc/cpuinfo"). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/*
 * Back /proc/net/route for cross-endian guests: copy the host file
 * through, byte-swapping the hex address columns (dest/gateway/mask)
 * so the guest sees them in its own byte order.
 */
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
/* Back /proc/cpuinfo for SPARC guests with a minimal fixed entry. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
#if defined(TARGET_HPPA)
/* Back /proc/cpuinfo for HPPA guests, advertising a fixed PA7300LC model. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Back /proc/hardware for m68k guests with a fixed model string. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
8222 static int do_openat(CPUArchState
*cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8225 const char *filename
;
8226 int (*fill
)(CPUArchState
*cpu_env
, int fd
);
8227 int (*cmp
)(const char *s1
, const char *s2
);
8229 const struct fake_open
*fake_open
;
8230 static const struct fake_open fakes
[] = {
8231 { "maps", open_self_maps
, is_proc_myself
},
8232 { "stat", open_self_stat
, is_proc_myself
},
8233 { "auxv", open_self_auxv
, is_proc_myself
},
8234 { "cmdline", open_self_cmdline
, is_proc_myself
},
8235 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8236 { "/proc/net/route", open_net_route
, is_proc
},
8238 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8239 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8241 #if defined(TARGET_M68K)
8242 { "/proc/hardware", open_hardware
, is_proc
},
8244 { NULL
, NULL
, NULL
}
8247 if (is_proc_myself(pathname
, "exe")) {
8248 int execfd
= qemu_getauxval(AT_EXECFD
);
8249 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
8252 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8253 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8258 if (fake_open
->filename
) {
8260 char filename
[PATH_MAX
];
8263 /* create temporary file to map stat to */
8264 tmpdir
= getenv("TMPDIR");
8267 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8268 fd
= mkstemp(filename
);
8274 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8280 lseek(fd
, 0, SEEK_SET
);
8285 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8288 #define TIMER_MAGIC 0x0caf0000
8289 #define TIMER_MAGIC_MASK 0xffff0000
8291 /* Convert QEMU provided timer ID back to internal 16bit index format */
8292 static target_timer_t
get_timer_id(abi_long arg
)
8294 target_timer_t timerid
= arg
;
8296 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8297 return -TARGET_EINVAL
;
8302 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8303 return -TARGET_EINVAL
;
8309 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8311 abi_ulong target_addr
,
8314 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8315 unsigned host_bits
= sizeof(*host_mask
) * 8;
8316 abi_ulong
*target_mask
;
8319 assert(host_size
>= target_size
);
8321 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8323 return -TARGET_EFAULT
;
8325 memset(host_mask
, 0, host_size
);
8327 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8328 unsigned bit
= i
* target_bits
;
8331 __get_user(val
, &target_mask
[i
]);
8332 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8333 if (val
& (1UL << j
)) {
8334 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8339 unlock_user(target_mask
, target_addr
, 0);
8343 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8345 abi_ulong target_addr
,
8348 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8349 unsigned host_bits
= sizeof(*host_mask
) * 8;
8350 abi_ulong
*target_mask
;
8353 assert(host_size
>= target_size
);
8355 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8357 return -TARGET_EFAULT
;
8360 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8361 unsigned bit
= i
* target_bits
;
8364 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8365 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8369 __put_user(val
, &target_mask
[i
]);
8372 unlock_user(target_mask
, target_addr
, target_size
);
#ifdef TARGET_NR_getdents
/*
 * Emulate getdents(2): fill a host buffer via the host getdents (or
 * getdents64) syscall, then repack each record into the guest's
 * struct target_dirent layout, byte-swapping the fields.  If the guest
 * buffer fills before the host records are exhausted, the directory
 * offset is rewound to the first unconsumed record.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): same repacking strategy as do_getdents(), but
 * both host and guest records use the 64-bit dirent layout, so d_type
 * is a proper field rather than a trailing padding byte.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* Direct syscall wrapper for pivot_root(2); glibc provides no wrapper. */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
8533 /* This is an internal helper for do_syscall so that it is easier
8534 * to have a single return point, so that actions, such as logging
8535 * of syscall results, can be performed.
8536 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8538 static abi_long
do_syscall1(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
8539 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8540 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8543 CPUState
*cpu
= env_cpu(cpu_env
);
8545 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8546 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8547 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8548 || defined(TARGET_NR_statx)
8551 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8552 || defined(TARGET_NR_fstatfs)
8558 case TARGET_NR_exit
:
8559 /* In old applications this may be used to implement _exit(2).
8560 However in threaded applications it is used for thread termination,
8561 and _exit_group is used for application termination.
8562 Do thread termination if we have more then one thread. */
8564 if (block_signals()) {
8565 return -QEMU_ERESTARTSYS
;
8568 pthread_mutex_lock(&clone_lock
);
8570 if (CPU_NEXT(first_cpu
)) {
8571 TaskState
*ts
= cpu
->opaque
;
8573 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8574 object_unref(OBJECT(cpu
));
8576 * At this point the CPU should be unrealized and removed
8577 * from cpu lists. We can clean-up the rest of the thread
8578 * data without the lock held.
8581 pthread_mutex_unlock(&clone_lock
);
8583 if (ts
->child_tidptr
) {
8584 put_user_u32(0, ts
->child_tidptr
);
8585 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8586 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8590 rcu_unregister_thread();
8594 pthread_mutex_unlock(&clone_lock
);
8595 preexit_cleanup(cpu_env
, arg1
);
8597 return 0; /* avoid warning */
8598 case TARGET_NR_read
:
8599 if (arg2
== 0 && arg3
== 0) {
8600 return get_errno(safe_read(arg1
, 0, 0));
8602 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8603 return -TARGET_EFAULT
;
8604 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8606 fd_trans_host_to_target_data(arg1
)) {
8607 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8609 unlock_user(p
, arg2
, ret
);
8612 case TARGET_NR_write
:
8613 if (arg2
== 0 && arg3
== 0) {
8614 return get_errno(safe_write(arg1
, 0, 0));
8616 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8617 return -TARGET_EFAULT
;
8618 if (fd_trans_target_to_host_data(arg1
)) {
8619 void *copy
= g_malloc(arg3
);
8620 memcpy(copy
, p
, arg3
);
8621 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8623 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8627 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8629 unlock_user(p
, arg2
, 0);
8632 #ifdef TARGET_NR_open
8633 case TARGET_NR_open
:
8634 if (!(p
= lock_user_string(arg1
)))
8635 return -TARGET_EFAULT
;
8636 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8637 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8639 fd_trans_unregister(ret
);
8640 unlock_user(p
, arg1
, 0);
8643 case TARGET_NR_openat
:
8644 if (!(p
= lock_user_string(arg2
)))
8645 return -TARGET_EFAULT
;
8646 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8647 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8649 fd_trans_unregister(ret
);
8650 unlock_user(p
, arg2
, 0);
8652 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8653 case TARGET_NR_name_to_handle_at
:
8654 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8657 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8658 case TARGET_NR_open_by_handle_at
:
8659 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8660 fd_trans_unregister(ret
);
8663 case TARGET_NR_close
:
8664 fd_trans_unregister(arg1
);
8665 return get_errno(close(arg1
));
8668 return do_brk(arg1
);
8669 #ifdef TARGET_NR_fork
8670 case TARGET_NR_fork
:
8671 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8673 #ifdef TARGET_NR_waitpid
8674 case TARGET_NR_waitpid
:
8677 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8678 if (!is_error(ret
) && arg2
&& ret
8679 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8680 return -TARGET_EFAULT
;
8684 #ifdef TARGET_NR_waitid
8685 case TARGET_NR_waitid
:
8689 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8690 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8691 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8692 return -TARGET_EFAULT
;
8693 host_to_target_siginfo(p
, &info
);
8694 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8699 #ifdef TARGET_NR_creat /* not on alpha */
8700 case TARGET_NR_creat
:
8701 if (!(p
= lock_user_string(arg1
)))
8702 return -TARGET_EFAULT
;
8703 ret
= get_errno(creat(p
, arg2
));
8704 fd_trans_unregister(ret
);
8705 unlock_user(p
, arg1
, 0);
8708 #ifdef TARGET_NR_link
8709 case TARGET_NR_link
:
8712 p
= lock_user_string(arg1
);
8713 p2
= lock_user_string(arg2
);
8715 ret
= -TARGET_EFAULT
;
8717 ret
= get_errno(link(p
, p2
));
8718 unlock_user(p2
, arg2
, 0);
8719 unlock_user(p
, arg1
, 0);
8723 #if defined(TARGET_NR_linkat)
8724 case TARGET_NR_linkat
:
8728 return -TARGET_EFAULT
;
8729 p
= lock_user_string(arg2
);
8730 p2
= lock_user_string(arg4
);
8732 ret
= -TARGET_EFAULT
;
8734 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8735 unlock_user(p
, arg2
, 0);
8736 unlock_user(p2
, arg4
, 0);
8740 #ifdef TARGET_NR_unlink
8741 case TARGET_NR_unlink
:
8742 if (!(p
= lock_user_string(arg1
)))
8743 return -TARGET_EFAULT
;
8744 ret
= get_errno(unlink(p
));
8745 unlock_user(p
, arg1
, 0);
8748 #if defined(TARGET_NR_unlinkat)
8749 case TARGET_NR_unlinkat
:
8750 if (!(p
= lock_user_string(arg2
)))
8751 return -TARGET_EFAULT
;
8752 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8753 unlock_user(p
, arg2
, 0);
8756 case TARGET_NR_execve
:
8758 char **argp
, **envp
;
8761 abi_ulong guest_argp
;
8762 abi_ulong guest_envp
;
8768 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8769 if (get_user_ual(addr
, gp
))
8770 return -TARGET_EFAULT
;
8777 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8778 if (get_user_ual(addr
, gp
))
8779 return -TARGET_EFAULT
;
8785 argp
= g_new0(char *, argc
+ 1);
8786 envp
= g_new0(char *, envc
+ 1);
8788 for (gp
= guest_argp
, q
= argp
; gp
;
8789 gp
+= sizeof(abi_ulong
), q
++) {
8790 if (get_user_ual(addr
, gp
))
8794 if (!(*q
= lock_user_string(addr
)))
8799 for (gp
= guest_envp
, q
= envp
; gp
;
8800 gp
+= sizeof(abi_ulong
), q
++) {
8801 if (get_user_ual(addr
, gp
))
8805 if (!(*q
= lock_user_string(addr
)))
8810 if (!(p
= lock_user_string(arg1
)))
8812 /* Although execve() is not an interruptible syscall it is
8813 * a special case where we must use the safe_syscall wrapper:
8814 * if we allow a signal to happen before we make the host
8815 * syscall then we will 'lose' it, because at the point of
8816 * execve the process leaves QEMU's control. So we use the
8817 * safe syscall wrapper to ensure that we either take the
8818 * signal as a guest signal, or else it does not happen
8819 * before the execve completes and makes it the other
8820 * program's problem.
8822 ret
= get_errno(safe_execve(p
, argp
, envp
));
8823 unlock_user(p
, arg1
, 0);
8828 ret
= -TARGET_EFAULT
;
8831 for (gp
= guest_argp
, q
= argp
; *q
;
8832 gp
+= sizeof(abi_ulong
), q
++) {
8833 if (get_user_ual(addr
, gp
)
8836 unlock_user(*q
, addr
, 0);
8838 for (gp
= guest_envp
, q
= envp
; *q
;
8839 gp
+= sizeof(abi_ulong
), q
++) {
8840 if (get_user_ual(addr
, gp
)
8843 unlock_user(*q
, addr
, 0);
8850 case TARGET_NR_chdir
:
8851 if (!(p
= lock_user_string(arg1
)))
8852 return -TARGET_EFAULT
;
8853 ret
= get_errno(chdir(p
));
8854 unlock_user(p
, arg1
, 0);
8856 #ifdef TARGET_NR_time
8857 case TARGET_NR_time
:
8860 ret
= get_errno(time(&host_time
));
8863 && put_user_sal(host_time
, arg1
))
8864 return -TARGET_EFAULT
;
8868 #ifdef TARGET_NR_mknod
8869 case TARGET_NR_mknod
:
8870 if (!(p
= lock_user_string(arg1
)))
8871 return -TARGET_EFAULT
;
8872 ret
= get_errno(mknod(p
, arg2
, arg3
));
8873 unlock_user(p
, arg1
, 0);
8876 #if defined(TARGET_NR_mknodat)
8877 case TARGET_NR_mknodat
:
8878 if (!(p
= lock_user_string(arg2
)))
8879 return -TARGET_EFAULT
;
8880 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8881 unlock_user(p
, arg2
, 0);
8884 #ifdef TARGET_NR_chmod
8885 case TARGET_NR_chmod
:
8886 if (!(p
= lock_user_string(arg1
)))
8887 return -TARGET_EFAULT
;
8888 ret
= get_errno(chmod(p
, arg2
));
8889 unlock_user(p
, arg1
, 0);
8892 #ifdef TARGET_NR_lseek
8893 case TARGET_NR_lseek
:
8894 return get_errno(lseek(arg1
, arg2
, arg3
));
8896 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8897 /* Alpha specific */
8898 case TARGET_NR_getxpid
:
8899 cpu_env
->ir
[IR_A4
] = getppid();
8900 return get_errno(getpid());
8902 #ifdef TARGET_NR_getpid
8903 case TARGET_NR_getpid
:
8904 return get_errno(getpid());
8906 case TARGET_NR_mount
:
8908 /* need to look at the data field */
8912 p
= lock_user_string(arg1
);
8914 return -TARGET_EFAULT
;
8920 p2
= lock_user_string(arg2
);
8923 unlock_user(p
, arg1
, 0);
8925 return -TARGET_EFAULT
;
8929 p3
= lock_user_string(arg3
);
8932 unlock_user(p
, arg1
, 0);
8934 unlock_user(p2
, arg2
, 0);
8935 return -TARGET_EFAULT
;
8941 /* FIXME - arg5 should be locked, but it isn't clear how to
8942 * do that since it's not guaranteed to be a NULL-terminated
8946 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8948 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8950 ret
= get_errno(ret
);
8953 unlock_user(p
, arg1
, 0);
8955 unlock_user(p2
, arg2
, 0);
8957 unlock_user(p3
, arg3
, 0);
8961 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8962 #if defined(TARGET_NR_umount)
8963 case TARGET_NR_umount
:
8965 #if defined(TARGET_NR_oldumount)
8966 case TARGET_NR_oldumount
:
8968 if (!(p
= lock_user_string(arg1
)))
8969 return -TARGET_EFAULT
;
8970 ret
= get_errno(umount(p
));
8971 unlock_user(p
, arg1
, 0);
8974 #ifdef TARGET_NR_stime /* not on alpha */
8975 case TARGET_NR_stime
:
8979 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8980 return -TARGET_EFAULT
;
8982 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8985 #ifdef TARGET_NR_alarm /* not on alpha */
8986 case TARGET_NR_alarm
:
8989 #ifdef TARGET_NR_pause /* not on alpha */
8990 case TARGET_NR_pause
:
8991 if (!block_signals()) {
8992 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8994 return -TARGET_EINTR
;
8996 #ifdef TARGET_NR_utime
8997 case TARGET_NR_utime
:
8999 struct utimbuf tbuf
, *host_tbuf
;
9000 struct target_utimbuf
*target_tbuf
;
9002 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
9003 return -TARGET_EFAULT
;
9004 tbuf
.actime
= tswapal(target_tbuf
->actime
);
9005 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
9006 unlock_user_struct(target_tbuf
, arg2
, 0);
9011 if (!(p
= lock_user_string(arg1
)))
9012 return -TARGET_EFAULT
;
9013 ret
= get_errno(utime(p
, host_tbuf
));
9014 unlock_user(p
, arg1
, 0);
9018 #ifdef TARGET_NR_utimes
9019 case TARGET_NR_utimes
:
9021 struct timeval
*tvp
, tv
[2];
9023 if (copy_from_user_timeval(&tv
[0], arg2
)
9024 || copy_from_user_timeval(&tv
[1],
9025 arg2
+ sizeof(struct target_timeval
)))
9026 return -TARGET_EFAULT
;
9031 if (!(p
= lock_user_string(arg1
)))
9032 return -TARGET_EFAULT
;
9033 ret
= get_errno(utimes(p
, tvp
));
9034 unlock_user(p
, arg1
, 0);
9038 #if defined(TARGET_NR_futimesat)
9039 case TARGET_NR_futimesat
:
9041 struct timeval
*tvp
, tv
[2];
9043 if (copy_from_user_timeval(&tv
[0], arg3
)
9044 || copy_from_user_timeval(&tv
[1],
9045 arg3
+ sizeof(struct target_timeval
)))
9046 return -TARGET_EFAULT
;
9051 if (!(p
= lock_user_string(arg2
))) {
9052 return -TARGET_EFAULT
;
9054 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
9055 unlock_user(p
, arg2
, 0);
9059 #ifdef TARGET_NR_access
9060 case TARGET_NR_access
:
9061 if (!(p
= lock_user_string(arg1
))) {
9062 return -TARGET_EFAULT
;
9064 ret
= get_errno(access(path(p
), arg2
));
9065 unlock_user(p
, arg1
, 0);
9068 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9069 case TARGET_NR_faccessat
:
9070 if (!(p
= lock_user_string(arg2
))) {
9071 return -TARGET_EFAULT
;
9073 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
9074 unlock_user(p
, arg2
, 0);
9077 #ifdef TARGET_NR_nice /* not on alpha */
9078 case TARGET_NR_nice
:
9079 return get_errno(nice(arg1
));
9081 case TARGET_NR_sync
:
9084 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9085 case TARGET_NR_syncfs
:
9086 return get_errno(syncfs(arg1
));
9088 case TARGET_NR_kill
:
9089 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
9090 #ifdef TARGET_NR_rename
9091 case TARGET_NR_rename
:
9094 p
= lock_user_string(arg1
);
9095 p2
= lock_user_string(arg2
);
9097 ret
= -TARGET_EFAULT
;
9099 ret
= get_errno(rename(p
, p2
));
9100 unlock_user(p2
, arg2
, 0);
9101 unlock_user(p
, arg1
, 0);
9105 #if defined(TARGET_NR_renameat)
9106 case TARGET_NR_renameat
:
9109 p
= lock_user_string(arg2
);
9110 p2
= lock_user_string(arg4
);
9112 ret
= -TARGET_EFAULT
;
9114 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
9115 unlock_user(p2
, arg4
, 0);
9116 unlock_user(p
, arg2
, 0);
9120 #if defined(TARGET_NR_renameat2)
9121 case TARGET_NR_renameat2
:
9124 p
= lock_user_string(arg2
);
9125 p2
= lock_user_string(arg4
);
9127 ret
= -TARGET_EFAULT
;
9129 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
9131 unlock_user(p2
, arg4
, 0);
9132 unlock_user(p
, arg2
, 0);
9136 #ifdef TARGET_NR_mkdir
9137 case TARGET_NR_mkdir
:
9138 if (!(p
= lock_user_string(arg1
)))
9139 return -TARGET_EFAULT
;
9140 ret
= get_errno(mkdir(p
, arg2
));
9141 unlock_user(p
, arg1
, 0);
9144 #if defined(TARGET_NR_mkdirat)
9145 case TARGET_NR_mkdirat
:
9146 if (!(p
= lock_user_string(arg2
)))
9147 return -TARGET_EFAULT
;
9148 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
9149 unlock_user(p
, arg2
, 0);
9152 #ifdef TARGET_NR_rmdir
9153 case TARGET_NR_rmdir
:
9154 if (!(p
= lock_user_string(arg1
)))
9155 return -TARGET_EFAULT
;
9156 ret
= get_errno(rmdir(p
));
9157 unlock_user(p
, arg1
, 0);
9161 ret
= get_errno(dup(arg1
));
9163 fd_trans_dup(arg1
, ret
);
9166 #ifdef TARGET_NR_pipe
9167 case TARGET_NR_pipe
:
9168 return do_pipe(cpu_env
, arg1
, 0, 0);
9170 #ifdef TARGET_NR_pipe2
9171 case TARGET_NR_pipe2
:
9172 return do_pipe(cpu_env
, arg1
,
9173 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
9175 case TARGET_NR_times
:
9177 struct target_tms
*tmsp
;
9179 ret
= get_errno(times(&tms
));
9181 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
9183 return -TARGET_EFAULT
;
9184 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
9185 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
9186 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
9187 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
9190 ret
= host_to_target_clock_t(ret
);
9193 case TARGET_NR_acct
:
9195 ret
= get_errno(acct(NULL
));
9197 if (!(p
= lock_user_string(arg1
))) {
9198 return -TARGET_EFAULT
;
9200 ret
= get_errno(acct(path(p
)));
9201 unlock_user(p
, arg1
, 0);
9204 #ifdef TARGET_NR_umount2
9205 case TARGET_NR_umount2
:
9206 if (!(p
= lock_user_string(arg1
)))
9207 return -TARGET_EFAULT
;
9208 ret
= get_errno(umount2(p
, arg2
));
9209 unlock_user(p
, arg1
, 0);
9212 case TARGET_NR_ioctl
:
9213 return do_ioctl(arg1
, arg2
, arg3
);
9214 #ifdef TARGET_NR_fcntl
9215 case TARGET_NR_fcntl
:
9216 return do_fcntl(arg1
, arg2
, arg3
);
9218 case TARGET_NR_setpgid
:
9219 return get_errno(setpgid(arg1
, arg2
));
9220 case TARGET_NR_umask
:
9221 return get_errno(umask(arg1
));
9222 case TARGET_NR_chroot
:
9223 if (!(p
= lock_user_string(arg1
)))
9224 return -TARGET_EFAULT
;
9225 ret
= get_errno(chroot(p
));
9226 unlock_user(p
, arg1
, 0);
9228 #ifdef TARGET_NR_dup2
9229 case TARGET_NR_dup2
:
9230 ret
= get_errno(dup2(arg1
, arg2
));
9232 fd_trans_dup(arg1
, arg2
);
9236 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9237 case TARGET_NR_dup3
:
9241 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9244 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9245 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9247 fd_trans_dup(arg1
, arg2
);
9252 #ifdef TARGET_NR_getppid /* not on alpha */
9253 case TARGET_NR_getppid
:
9254 return get_errno(getppid());
9256 #ifdef TARGET_NR_getpgrp
9257 case TARGET_NR_getpgrp
:
9258 return get_errno(getpgrp());
9260 case TARGET_NR_setsid
:
9261 return get_errno(setsid());
9262 #ifdef TARGET_NR_sigaction
9263 case TARGET_NR_sigaction
:
9265 #if defined(TARGET_MIPS)
9266 struct target_sigaction act
, oact
, *pact
, *old_act
;
9269 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9270 return -TARGET_EFAULT
;
9271 act
._sa_handler
= old_act
->_sa_handler
;
9272 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9273 act
.sa_flags
= old_act
->sa_flags
;
9274 unlock_user_struct(old_act
, arg2
, 0);
9280 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9282 if (!is_error(ret
) && arg3
) {
9283 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9284 return -TARGET_EFAULT
;
9285 old_act
->_sa_handler
= oact
._sa_handler
;
9286 old_act
->sa_flags
= oact
.sa_flags
;
9287 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9288 old_act
->sa_mask
.sig
[1] = 0;
9289 old_act
->sa_mask
.sig
[2] = 0;
9290 old_act
->sa_mask
.sig
[3] = 0;
9291 unlock_user_struct(old_act
, arg3
, 1);
9294 struct target_old_sigaction
*old_act
;
9295 struct target_sigaction act
, oact
, *pact
;
9297 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9298 return -TARGET_EFAULT
;
9299 act
._sa_handler
= old_act
->_sa_handler
;
9300 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9301 act
.sa_flags
= old_act
->sa_flags
;
9302 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9303 act
.sa_restorer
= old_act
->sa_restorer
;
9305 unlock_user_struct(old_act
, arg2
, 0);
9310 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9311 if (!is_error(ret
) && arg3
) {
9312 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9313 return -TARGET_EFAULT
;
9314 old_act
->_sa_handler
= oact
._sa_handler
;
9315 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9316 old_act
->sa_flags
= oact
.sa_flags
;
9317 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9318 old_act
->sa_restorer
= oact
.sa_restorer
;
9320 unlock_user_struct(old_act
, arg3
, 1);
9326 case TARGET_NR_rt_sigaction
:
9329 * For Alpha and SPARC this is a 5 argument syscall, with
9330 * a 'restorer' parameter which must be copied into the
9331 * sa_restorer field of the sigaction struct.
9332 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9333 * and arg5 is the sigsetsize.
9335 #if defined(TARGET_ALPHA)
9336 target_ulong sigsetsize
= arg4
;
9337 target_ulong restorer
= arg5
;
9338 #elif defined(TARGET_SPARC)
9339 target_ulong restorer
= arg4
;
9340 target_ulong sigsetsize
= arg5
;
9342 target_ulong sigsetsize
= arg4
;
9343 target_ulong restorer
= 0;
9345 struct target_sigaction
*act
= NULL
;
9346 struct target_sigaction
*oact
= NULL
;
9348 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9349 return -TARGET_EINVAL
;
9351 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9352 return -TARGET_EFAULT
;
9354 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9355 ret
= -TARGET_EFAULT
;
9357 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9359 unlock_user_struct(oact
, arg3
, 1);
9363 unlock_user_struct(act
, arg2
, 0);
9367 #ifdef TARGET_NR_sgetmask /* not on alpha */
9368 case TARGET_NR_sgetmask
:
9371 abi_ulong target_set
;
9372 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9374 host_to_target_old_sigset(&target_set
, &cur_set
);
9380 #ifdef TARGET_NR_ssetmask /* not on alpha */
9381 case TARGET_NR_ssetmask
:
9384 abi_ulong target_set
= arg1
;
9385 target_to_host_old_sigset(&set
, &target_set
);
9386 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9388 host_to_target_old_sigset(&target_set
, &oset
);
9394 #ifdef TARGET_NR_sigprocmask
9395 case TARGET_NR_sigprocmask
:
9397 #if defined(TARGET_ALPHA)
9398 sigset_t set
, oldset
;
9403 case TARGET_SIG_BLOCK
:
9406 case TARGET_SIG_UNBLOCK
:
9409 case TARGET_SIG_SETMASK
:
9413 return -TARGET_EINVAL
;
9416 target_to_host_old_sigset(&set
, &mask
);
9418 ret
= do_sigprocmask(how
, &set
, &oldset
);
9419 if (!is_error(ret
)) {
9420 host_to_target_old_sigset(&mask
, &oldset
);
9422 cpu_env
->ir
[IR_V0
] = 0; /* force no error */
9425 sigset_t set
, oldset
, *set_ptr
;
9429 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9431 return -TARGET_EFAULT
;
9433 target_to_host_old_sigset(&set
, p
);
9434 unlock_user(p
, arg2
, 0);
9437 case TARGET_SIG_BLOCK
:
9440 case TARGET_SIG_UNBLOCK
:
9443 case TARGET_SIG_SETMASK
:
9447 return -TARGET_EINVAL
;
9453 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9454 if (!is_error(ret
) && arg3
) {
9455 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9456 return -TARGET_EFAULT
;
9457 host_to_target_old_sigset(p
, &oldset
);
9458 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9464 case TARGET_NR_rt_sigprocmask
:
9467 sigset_t set
, oldset
, *set_ptr
;
9469 if (arg4
!= sizeof(target_sigset_t
)) {
9470 return -TARGET_EINVAL
;
9474 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9476 return -TARGET_EFAULT
;
9478 target_to_host_sigset(&set
, p
);
9479 unlock_user(p
, arg2
, 0);
9482 case TARGET_SIG_BLOCK
:
9485 case TARGET_SIG_UNBLOCK
:
9488 case TARGET_SIG_SETMASK
:
9492 return -TARGET_EINVAL
;
9498 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9499 if (!is_error(ret
) && arg3
) {
9500 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9501 return -TARGET_EFAULT
;
9502 host_to_target_sigset(p
, &oldset
);
9503 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9507 #ifdef TARGET_NR_sigpending
9508 case TARGET_NR_sigpending
:
9511 ret
= get_errno(sigpending(&set
));
9512 if (!is_error(ret
)) {
9513 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9514 return -TARGET_EFAULT
;
9515 host_to_target_old_sigset(p
, &set
);
9516 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9521 case TARGET_NR_rt_sigpending
:
9525 /* Yes, this check is >, not != like most. We follow the kernel's
9526 * logic and it does it like this because it implements
9527 * NR_sigpending through the same code path, and in that case
9528 * the old_sigset_t is smaller in size.
9530 if (arg2
> sizeof(target_sigset_t
)) {
9531 return -TARGET_EINVAL
;
9534 ret
= get_errno(sigpending(&set
));
9535 if (!is_error(ret
)) {
9536 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9537 return -TARGET_EFAULT
;
9538 host_to_target_sigset(p
, &set
);
9539 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9543 #ifdef TARGET_NR_sigsuspend
9544 case TARGET_NR_sigsuspend
:
9548 #if defined(TARGET_ALPHA)
9549 TaskState
*ts
= cpu
->opaque
;
9550 /* target_to_host_old_sigset will bswap back */
9551 abi_ulong mask
= tswapal(arg1
);
9552 set
= &ts
->sigsuspend_mask
;
9553 target_to_host_old_sigset(set
, &mask
);
9555 ret
= process_sigsuspend_mask(&set
, arg1
, sizeof(target_sigset_t
));
9560 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9561 finish_sigsuspend_mask(ret
);
9565 case TARGET_NR_rt_sigsuspend
:
9569 ret
= process_sigsuspend_mask(&set
, arg1
, arg2
);
9573 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9574 finish_sigsuspend_mask(ret
);
9577 #ifdef TARGET_NR_rt_sigtimedwait
9578 case TARGET_NR_rt_sigtimedwait
:
9581 struct timespec uts
, *puts
;
9584 if (arg4
!= sizeof(target_sigset_t
)) {
9585 return -TARGET_EINVAL
;
9588 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9589 return -TARGET_EFAULT
;
9590 target_to_host_sigset(&set
, p
);
9591 unlock_user(p
, arg1
, 0);
9594 if (target_to_host_timespec(puts
, arg3
)) {
9595 return -TARGET_EFAULT
;
9600 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9602 if (!is_error(ret
)) {
9604 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9607 return -TARGET_EFAULT
;
9609 host_to_target_siginfo(p
, &uinfo
);
9610 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9612 ret
= host_to_target_signal(ret
);
9617 #ifdef TARGET_NR_rt_sigtimedwait_time64
9618 case TARGET_NR_rt_sigtimedwait_time64
:
9621 struct timespec uts
, *puts
;
9624 if (arg4
!= sizeof(target_sigset_t
)) {
9625 return -TARGET_EINVAL
;
9628 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9630 return -TARGET_EFAULT
;
9632 target_to_host_sigset(&set
, p
);
9633 unlock_user(p
, arg1
, 0);
9636 if (target_to_host_timespec64(puts
, arg3
)) {
9637 return -TARGET_EFAULT
;
9642 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9644 if (!is_error(ret
)) {
9646 p
= lock_user(VERIFY_WRITE
, arg2
,
9647 sizeof(target_siginfo_t
), 0);
9649 return -TARGET_EFAULT
;
9651 host_to_target_siginfo(p
, &uinfo
);
9652 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9654 ret
= host_to_target_signal(ret
);
9659 case TARGET_NR_rt_sigqueueinfo
:
9663 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9665 return -TARGET_EFAULT
;
9667 target_to_host_siginfo(&uinfo
, p
);
9668 unlock_user(p
, arg3
, 0);
9669 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9672 case TARGET_NR_rt_tgsigqueueinfo
:
9676 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9678 return -TARGET_EFAULT
;
9680 target_to_host_siginfo(&uinfo
, p
);
9681 unlock_user(p
, arg4
, 0);
9682 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9685 #ifdef TARGET_NR_sigreturn
9686 case TARGET_NR_sigreturn
:
9687 if (block_signals()) {
9688 return -QEMU_ERESTARTSYS
;
9690 return do_sigreturn(cpu_env
);
9692 case TARGET_NR_rt_sigreturn
:
9693 if (block_signals()) {
9694 return -QEMU_ERESTARTSYS
;
9696 return do_rt_sigreturn(cpu_env
);
9697 case TARGET_NR_sethostname
:
9698 if (!(p
= lock_user_string(arg1
)))
9699 return -TARGET_EFAULT
;
9700 ret
= get_errno(sethostname(p
, arg2
));
9701 unlock_user(p
, arg1
, 0);
9703 #ifdef TARGET_NR_setrlimit
9704 case TARGET_NR_setrlimit
:
9706 int resource
= target_to_host_resource(arg1
);
9707 struct target_rlimit
*target_rlim
;
9709 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9710 return -TARGET_EFAULT
;
9711 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9712 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9713 unlock_user_struct(target_rlim
, arg2
, 0);
9715 * If we just passed through resource limit settings for memory then
9716 * they would also apply to QEMU's own allocations, and QEMU will
9717 * crash or hang or die if its allocations fail. Ideally we would
9718 * track the guest allocations in QEMU and apply the limits ourselves.
9719 * For now, just tell the guest the call succeeded but don't actually
9722 if (resource
!= RLIMIT_AS
&&
9723 resource
!= RLIMIT_DATA
&&
9724 resource
!= RLIMIT_STACK
) {
9725 return get_errno(setrlimit(resource
, &rlim
));
9731 #ifdef TARGET_NR_getrlimit
9732 case TARGET_NR_getrlimit
:
9734 int resource
= target_to_host_resource(arg1
);
9735 struct target_rlimit
*target_rlim
;
9738 ret
= get_errno(getrlimit(resource
, &rlim
));
9739 if (!is_error(ret
)) {
9740 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9741 return -TARGET_EFAULT
;
9742 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9743 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9744 unlock_user_struct(target_rlim
, arg2
, 1);
9749 case TARGET_NR_getrusage
:
9751 struct rusage rusage
;
9752 ret
= get_errno(getrusage(arg1
, &rusage
));
9753 if (!is_error(ret
)) {
9754 ret
= host_to_target_rusage(arg2
, &rusage
);
9758 #if defined(TARGET_NR_gettimeofday)
9759 case TARGET_NR_gettimeofday
:
9764 ret
= get_errno(gettimeofday(&tv
, &tz
));
9765 if (!is_error(ret
)) {
9766 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9767 return -TARGET_EFAULT
;
9769 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9770 return -TARGET_EFAULT
;
9776 #if defined(TARGET_NR_settimeofday)
9777 case TARGET_NR_settimeofday
:
9779 struct timeval tv
, *ptv
= NULL
;
9780 struct timezone tz
, *ptz
= NULL
;
9783 if (copy_from_user_timeval(&tv
, arg1
)) {
9784 return -TARGET_EFAULT
;
9790 if (copy_from_user_timezone(&tz
, arg2
)) {
9791 return -TARGET_EFAULT
;
9796 return get_errno(settimeofday(ptv
, ptz
));
9799 #if defined(TARGET_NR_select)
9800 case TARGET_NR_select
:
9801 #if defined(TARGET_WANT_NI_OLD_SELECT)
9802 /* some architectures used to have old_select here
9803 * but now ENOSYS it.
9805 ret
= -TARGET_ENOSYS
;
9806 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9807 ret
= do_old_select(arg1
);
9809 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9813 #ifdef TARGET_NR_pselect6
9814 case TARGET_NR_pselect6
:
9815 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9817 #ifdef TARGET_NR_pselect6_time64
9818 case TARGET_NR_pselect6_time64
:
9819 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9821 #ifdef TARGET_NR_symlink
9822 case TARGET_NR_symlink
:
9825 p
= lock_user_string(arg1
);
9826 p2
= lock_user_string(arg2
);
9828 ret
= -TARGET_EFAULT
;
9830 ret
= get_errno(symlink(p
, p2
));
9831 unlock_user(p2
, arg2
, 0);
9832 unlock_user(p
, arg1
, 0);
9836 #if defined(TARGET_NR_symlinkat)
9837 case TARGET_NR_symlinkat
:
9840 p
= lock_user_string(arg1
);
9841 p2
= lock_user_string(arg3
);
9843 ret
= -TARGET_EFAULT
;
9845 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9846 unlock_user(p2
, arg3
, 0);
9847 unlock_user(p
, arg1
, 0);
9851 #ifdef TARGET_NR_readlink
9852 case TARGET_NR_readlink
:
9855 p
= lock_user_string(arg1
);
9856 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9858 ret
= -TARGET_EFAULT
;
9860 /* Short circuit this for the magic exe check. */
9861 ret
= -TARGET_EINVAL
;
9862 } else if (is_proc_myself((const char *)p
, "exe")) {
9863 char real
[PATH_MAX
], *temp
;
9864 temp
= realpath(exec_path
, real
);
9865 /* Return value is # of bytes that we wrote to the buffer. */
9867 ret
= get_errno(-1);
9869 /* Don't worry about sign mismatch as earlier mapping
9870 * logic would have thrown a bad address error. */
9871 ret
= MIN(strlen(real
), arg3
);
9872 /* We cannot NUL terminate the string. */
9873 memcpy(p2
, real
, ret
);
9876 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9878 unlock_user(p2
, arg2
, ret
);
9879 unlock_user(p
, arg1
, 0);
9883 #if defined(TARGET_NR_readlinkat)
9884 case TARGET_NR_readlinkat
:
9887 p
= lock_user_string(arg2
);
9888 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9890 ret
= -TARGET_EFAULT
;
9891 } else if (is_proc_myself((const char *)p
, "exe")) {
9892 char real
[PATH_MAX
], *temp
;
9893 temp
= realpath(exec_path
, real
);
9894 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9895 snprintf((char *)p2
, arg4
, "%s", real
);
9897 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9899 unlock_user(p2
, arg3
, ret
);
9900 unlock_user(p
, arg2
, 0);
9904 #ifdef TARGET_NR_swapon
9905 case TARGET_NR_swapon
:
9906 if (!(p
= lock_user_string(arg1
)))
9907 return -TARGET_EFAULT
;
9908 ret
= get_errno(swapon(p
, arg2
));
9909 unlock_user(p
, arg1
, 0);
9912 case TARGET_NR_reboot
:
9913 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9914 /* arg4 must be ignored in all other cases */
9915 p
= lock_user_string(arg4
);
9917 return -TARGET_EFAULT
;
9919 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9920 unlock_user(p
, arg4
, 0);
9922 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9925 #ifdef TARGET_NR_mmap
9926 case TARGET_NR_mmap
:
9927 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9928 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9929 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9930 || defined(TARGET_S390X)
9933 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9934 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9935 return -TARGET_EFAULT
;
9942 unlock_user(v
, arg1
, 0);
9943 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9944 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9948 /* mmap pointers are always untagged */
9949 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9950 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9956 #ifdef TARGET_NR_mmap2
9957 case TARGET_NR_mmap2
:
9959 #define MMAP_SHIFT 12
9961 ret
= target_mmap(arg1
, arg2
, arg3
,
9962 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9963 arg5
, arg6
<< MMAP_SHIFT
);
9964 return get_errno(ret
);
9966 case TARGET_NR_munmap
:
9967 arg1
= cpu_untagged_addr(cpu
, arg1
);
9968 return get_errno(target_munmap(arg1
, arg2
));
9969 case TARGET_NR_mprotect
:
9970 arg1
= cpu_untagged_addr(cpu
, arg1
);
9972 TaskState
*ts
= cpu
->opaque
;
9973 /* Special hack to detect libc making the stack executable. */
9974 if ((arg3
& PROT_GROWSDOWN
)
9975 && arg1
>= ts
->info
->stack_limit
9976 && arg1
<= ts
->info
->start_stack
) {
9977 arg3
&= ~PROT_GROWSDOWN
;
9978 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9979 arg1
= ts
->info
->stack_limit
;
9982 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9983 #ifdef TARGET_NR_mremap
9984 case TARGET_NR_mremap
:
9985 arg1
= cpu_untagged_addr(cpu
, arg1
);
9986 /* mremap new_addr (arg5) is always untagged */
9987 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9989 /* ??? msync/mlock/munlock are broken for softmmu. */
9990 #ifdef TARGET_NR_msync
9991 case TARGET_NR_msync
:
9992 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
9994 #ifdef TARGET_NR_mlock
9995 case TARGET_NR_mlock
:
9996 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
9998 #ifdef TARGET_NR_munlock
9999 case TARGET_NR_munlock
:
10000 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
10002 #ifdef TARGET_NR_mlockall
10003 case TARGET_NR_mlockall
:
10004 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
10006 #ifdef TARGET_NR_munlockall
10007 case TARGET_NR_munlockall
:
10008 return get_errno(munlockall());
10010 #ifdef TARGET_NR_truncate
10011 case TARGET_NR_truncate
:
10012 if (!(p
= lock_user_string(arg1
)))
10013 return -TARGET_EFAULT
;
10014 ret
= get_errno(truncate(p
, arg2
));
10015 unlock_user(p
, arg1
, 0);
10018 #ifdef TARGET_NR_ftruncate
10019 case TARGET_NR_ftruncate
:
10020 return get_errno(ftruncate(arg1
, arg2
));
10022 case TARGET_NR_fchmod
:
10023 return get_errno(fchmod(arg1
, arg2
));
10024 #if defined(TARGET_NR_fchmodat)
10025 case TARGET_NR_fchmodat
:
10026 if (!(p
= lock_user_string(arg2
)))
10027 return -TARGET_EFAULT
;
10028 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
10029 unlock_user(p
, arg2
, 0);
10032 case TARGET_NR_getpriority
:
10033 /* Note that negative values are valid for getpriority, so we must
10034 differentiate based on errno settings. */
10036 ret
= getpriority(arg1
, arg2
);
10037 if (ret
== -1 && errno
!= 0) {
10038 return -host_to_target_errno(errno
);
10040 #ifdef TARGET_ALPHA
10041 /* Return value is the unbiased priority. Signal no error. */
10042 cpu_env
->ir
[IR_V0
] = 0;
10044 /* Return value is a biased priority to avoid negative numbers. */
10048 case TARGET_NR_setpriority
:
10049 return get_errno(setpriority(arg1
, arg2
, arg3
));
10050 #ifdef TARGET_NR_statfs
10051 case TARGET_NR_statfs
:
10052 if (!(p
= lock_user_string(arg1
))) {
10053 return -TARGET_EFAULT
;
10055 ret
= get_errno(statfs(path(p
), &stfs
));
10056 unlock_user(p
, arg1
, 0);
10058 if (!is_error(ret
)) {
10059 struct target_statfs
*target_stfs
;
10061 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
10062 return -TARGET_EFAULT
;
10063 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10064 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10065 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10066 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10067 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10068 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10069 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10070 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10071 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10072 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10073 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10074 #ifdef _STATFS_F_FLAGS
10075 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10077 __put_user(0, &target_stfs
->f_flags
);
10079 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10080 unlock_user_struct(target_stfs
, arg2
, 1);
10084 #ifdef TARGET_NR_fstatfs
10085 case TARGET_NR_fstatfs
:
10086 ret
= get_errno(fstatfs(arg1
, &stfs
));
10087 goto convert_statfs
;
10089 #ifdef TARGET_NR_statfs64
10090 case TARGET_NR_statfs64
:
10091 if (!(p
= lock_user_string(arg1
))) {
10092 return -TARGET_EFAULT
;
10094 ret
= get_errno(statfs(path(p
), &stfs
));
10095 unlock_user(p
, arg1
, 0);
10097 if (!is_error(ret
)) {
10098 struct target_statfs64
*target_stfs
;
10100 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
10101 return -TARGET_EFAULT
;
10102 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10103 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10104 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10105 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10106 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10107 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10108 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10109 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10110 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10111 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10112 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10113 #ifdef _STATFS_F_FLAGS
10114 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10116 __put_user(0, &target_stfs
->f_flags
);
10118 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10119 unlock_user_struct(target_stfs
, arg3
, 1);
10122 case TARGET_NR_fstatfs64
:
10123 ret
= get_errno(fstatfs(arg1
, &stfs
));
10124 goto convert_statfs64
;
10126 #ifdef TARGET_NR_socketcall
10127 case TARGET_NR_socketcall
:
10128 return do_socketcall(arg1
, arg2
);
10130 #ifdef TARGET_NR_accept
10131 case TARGET_NR_accept
:
10132 return do_accept4(arg1
, arg2
, arg3
, 0);
10134 #ifdef TARGET_NR_accept4
10135 case TARGET_NR_accept4
:
10136 return do_accept4(arg1
, arg2
, arg3
, arg4
);
10138 #ifdef TARGET_NR_bind
10139 case TARGET_NR_bind
:
10140 return do_bind(arg1
, arg2
, arg3
);
10142 #ifdef TARGET_NR_connect
10143 case TARGET_NR_connect
:
10144 return do_connect(arg1
, arg2
, arg3
);
10146 #ifdef TARGET_NR_getpeername
10147 case TARGET_NR_getpeername
:
10148 return do_getpeername(arg1
, arg2
, arg3
);
10150 #ifdef TARGET_NR_getsockname
10151 case TARGET_NR_getsockname
:
10152 return do_getsockname(arg1
, arg2
, arg3
);
10154 #ifdef TARGET_NR_getsockopt
10155 case TARGET_NR_getsockopt
:
10156 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
10158 #ifdef TARGET_NR_listen
10159 case TARGET_NR_listen
:
10160 return get_errno(listen(arg1
, arg2
));
10162 #ifdef TARGET_NR_recv
10163 case TARGET_NR_recv
:
10164 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
10166 #ifdef TARGET_NR_recvfrom
10167 case TARGET_NR_recvfrom
:
10168 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10170 #ifdef TARGET_NR_recvmsg
10171 case TARGET_NR_recvmsg
:
10172 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
10174 #ifdef TARGET_NR_send
10175 case TARGET_NR_send
:
10176 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
10178 #ifdef TARGET_NR_sendmsg
10179 case TARGET_NR_sendmsg
:
10180 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
10182 #ifdef TARGET_NR_sendmmsg
10183 case TARGET_NR_sendmmsg
:
10184 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
10186 #ifdef TARGET_NR_recvmmsg
10187 case TARGET_NR_recvmmsg
:
10188 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
10190 #ifdef TARGET_NR_sendto
10191 case TARGET_NR_sendto
:
10192 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10194 #ifdef TARGET_NR_shutdown
10195 case TARGET_NR_shutdown
:
10196 return get_errno(shutdown(arg1
, arg2
));
10198 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10199 case TARGET_NR_getrandom
:
10200 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10202 return -TARGET_EFAULT
;
10204 ret
= get_errno(getrandom(p
, arg2
, arg3
));
10205 unlock_user(p
, arg1
, ret
);
10208 #ifdef TARGET_NR_socket
10209 case TARGET_NR_socket
:
10210 return do_socket(arg1
, arg2
, arg3
);
10212 #ifdef TARGET_NR_socketpair
10213 case TARGET_NR_socketpair
:
10214 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
10216 #ifdef TARGET_NR_setsockopt
10217 case TARGET_NR_setsockopt
:
10218 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10220 #if defined(TARGET_NR_syslog)
10221 case TARGET_NR_syslog
:
10226 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10227 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10228 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10229 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10230 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10231 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10232 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10233 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10234 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10235 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10236 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10237 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10240 return -TARGET_EINVAL
;
10245 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10247 return -TARGET_EFAULT
;
10249 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10250 unlock_user(p
, arg2
, arg3
);
10254 return -TARGET_EINVAL
;
10259 case TARGET_NR_setitimer
:
10261 struct itimerval value
, ovalue
, *pvalue
;
10265 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10266 || copy_from_user_timeval(&pvalue
->it_value
,
10267 arg2
+ sizeof(struct target_timeval
)))
10268 return -TARGET_EFAULT
;
10272 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10273 if (!is_error(ret
) && arg3
) {
10274 if (copy_to_user_timeval(arg3
,
10275 &ovalue
.it_interval
)
10276 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10278 return -TARGET_EFAULT
;
10282 case TARGET_NR_getitimer
:
10284 struct itimerval value
;
10286 ret
= get_errno(getitimer(arg1
, &value
));
10287 if (!is_error(ret
) && arg2
) {
10288 if (copy_to_user_timeval(arg2
,
10289 &value
.it_interval
)
10290 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10292 return -TARGET_EFAULT
;
10296 #ifdef TARGET_NR_stat
10297 case TARGET_NR_stat
:
10298 if (!(p
= lock_user_string(arg1
))) {
10299 return -TARGET_EFAULT
;
10301 ret
= get_errno(stat(path(p
), &st
));
10302 unlock_user(p
, arg1
, 0);
10305 #ifdef TARGET_NR_lstat
10306 case TARGET_NR_lstat
:
10307 if (!(p
= lock_user_string(arg1
))) {
10308 return -TARGET_EFAULT
;
10310 ret
= get_errno(lstat(path(p
), &st
));
10311 unlock_user(p
, arg1
, 0);
10314 #ifdef TARGET_NR_fstat
10315 case TARGET_NR_fstat
:
10317 ret
= get_errno(fstat(arg1
, &st
));
10318 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10321 if (!is_error(ret
)) {
10322 struct target_stat
*target_st
;
10324 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10325 return -TARGET_EFAULT
;
10326 memset(target_st
, 0, sizeof(*target_st
));
10327 __put_user(st
.st_dev
, &target_st
->st_dev
);
10328 __put_user(st
.st_ino
, &target_st
->st_ino
);
10329 __put_user(st
.st_mode
, &target_st
->st_mode
);
10330 __put_user(st
.st_uid
, &target_st
->st_uid
);
10331 __put_user(st
.st_gid
, &target_st
->st_gid
);
10332 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10333 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10334 __put_user(st
.st_size
, &target_st
->st_size
);
10335 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10336 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10337 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10338 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10339 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10340 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10341 __put_user(st
.st_atim
.tv_nsec
,
10342 &target_st
->target_st_atime_nsec
);
10343 __put_user(st
.st_mtim
.tv_nsec
,
10344 &target_st
->target_st_mtime_nsec
);
10345 __put_user(st
.st_ctim
.tv_nsec
,
10346 &target_st
->target_st_ctime_nsec
);
10348 unlock_user_struct(target_st
, arg2
, 1);
10353 case TARGET_NR_vhangup
:
10354 return get_errno(vhangup());
10355 #ifdef TARGET_NR_syscall
10356 case TARGET_NR_syscall
:
10357 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10358 arg6
, arg7
, arg8
, 0);
10360 #if defined(TARGET_NR_wait4)
10361 case TARGET_NR_wait4
:
10364 abi_long status_ptr
= arg2
;
10365 struct rusage rusage
, *rusage_ptr
;
10366 abi_ulong target_rusage
= arg4
;
10367 abi_long rusage_err
;
10369 rusage_ptr
= &rusage
;
10372 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10373 if (!is_error(ret
)) {
10374 if (status_ptr
&& ret
) {
10375 status
= host_to_target_waitstatus(status
);
10376 if (put_user_s32(status
, status_ptr
))
10377 return -TARGET_EFAULT
;
10379 if (target_rusage
) {
10380 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10389 #ifdef TARGET_NR_swapoff
10390 case TARGET_NR_swapoff
:
10391 if (!(p
= lock_user_string(arg1
)))
10392 return -TARGET_EFAULT
;
10393 ret
= get_errno(swapoff(p
));
10394 unlock_user(p
, arg1
, 0);
10397 case TARGET_NR_sysinfo
:
10399 struct target_sysinfo
*target_value
;
10400 struct sysinfo value
;
10401 ret
= get_errno(sysinfo(&value
));
10402 if (!is_error(ret
) && arg1
)
10404 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10405 return -TARGET_EFAULT
;
10406 __put_user(value
.uptime
, &target_value
->uptime
);
10407 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10408 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10409 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10410 __put_user(value
.totalram
, &target_value
->totalram
);
10411 __put_user(value
.freeram
, &target_value
->freeram
);
10412 __put_user(value
.sharedram
, &target_value
->sharedram
);
10413 __put_user(value
.bufferram
, &target_value
->bufferram
);
10414 __put_user(value
.totalswap
, &target_value
->totalswap
);
10415 __put_user(value
.freeswap
, &target_value
->freeswap
);
10416 __put_user(value
.procs
, &target_value
->procs
);
10417 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10418 __put_user(value
.freehigh
, &target_value
->freehigh
);
10419 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10420 unlock_user_struct(target_value
, arg1
, 1);
10424 #ifdef TARGET_NR_ipc
10425 case TARGET_NR_ipc
:
10426 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10428 #ifdef TARGET_NR_semget
10429 case TARGET_NR_semget
:
10430 return get_errno(semget(arg1
, arg2
, arg3
));
10432 #ifdef TARGET_NR_semop
10433 case TARGET_NR_semop
:
10434 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10436 #ifdef TARGET_NR_semtimedop
10437 case TARGET_NR_semtimedop
:
10438 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10440 #ifdef TARGET_NR_semtimedop_time64
10441 case TARGET_NR_semtimedop_time64
:
10442 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10444 #ifdef TARGET_NR_semctl
10445 case TARGET_NR_semctl
:
10446 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10448 #ifdef TARGET_NR_msgctl
10449 case TARGET_NR_msgctl
:
10450 return do_msgctl(arg1
, arg2
, arg3
);
10452 #ifdef TARGET_NR_msgget
10453 case TARGET_NR_msgget
:
10454 return get_errno(msgget(arg1
, arg2
));
10456 #ifdef TARGET_NR_msgrcv
10457 case TARGET_NR_msgrcv
:
10458 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10460 #ifdef TARGET_NR_msgsnd
10461 case TARGET_NR_msgsnd
:
10462 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10464 #ifdef TARGET_NR_shmget
10465 case TARGET_NR_shmget
:
10466 return get_errno(shmget(arg1
, arg2
, arg3
));
10468 #ifdef TARGET_NR_shmctl
10469 case TARGET_NR_shmctl
:
10470 return do_shmctl(arg1
, arg2
, arg3
);
10472 #ifdef TARGET_NR_shmat
10473 case TARGET_NR_shmat
:
10474 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10476 #ifdef TARGET_NR_shmdt
10477 case TARGET_NR_shmdt
:
10478 return do_shmdt(arg1
);
10480 case TARGET_NR_fsync
:
10481 return get_errno(fsync(arg1
));
10482 case TARGET_NR_clone
:
10483 /* Linux manages to have three different orderings for its
10484 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10485 * match the kernel's CONFIG_CLONE_* settings.
10486 * Microblaze is further special in that it uses a sixth
10487 * implicit argument to clone for the TLS pointer.
10489 #if defined(TARGET_MICROBLAZE)
10490 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10491 #elif defined(TARGET_CLONE_BACKWARDS)
10492 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10493 #elif defined(TARGET_CLONE_BACKWARDS2)
10494 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10496 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10499 #ifdef __NR_exit_group
10500 /* new thread calls */
10501 case TARGET_NR_exit_group
:
10502 preexit_cleanup(cpu_env
, arg1
);
10503 return get_errno(exit_group(arg1
));
10505 case TARGET_NR_setdomainname
:
10506 if (!(p
= lock_user_string(arg1
)))
10507 return -TARGET_EFAULT
;
10508 ret
= get_errno(setdomainname(p
, arg2
));
10509 unlock_user(p
, arg1
, 0);
10511 case TARGET_NR_uname
:
10512 /* no need to transcode because we use the linux syscall */
10514 struct new_utsname
* buf
;
10516 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10517 return -TARGET_EFAULT
;
10518 ret
= get_errno(sys_uname(buf
));
10519 if (!is_error(ret
)) {
10520 /* Overwrite the native machine name with whatever is being
10522 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10523 sizeof(buf
->machine
));
10524 /* Allow the user to override the reported release. */
10525 if (qemu_uname_release
&& *qemu_uname_release
) {
10526 g_strlcpy(buf
->release
, qemu_uname_release
,
10527 sizeof(buf
->release
));
10530 unlock_user_struct(buf
, arg1
, 1);
10534 case TARGET_NR_modify_ldt
:
10535 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10536 #if !defined(TARGET_X86_64)
10537 case TARGET_NR_vm86
:
10538 return do_vm86(cpu_env
, arg1
, arg2
);
10541 #if defined(TARGET_NR_adjtimex)
10542 case TARGET_NR_adjtimex
:
10544 struct timex host_buf
;
10546 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10547 return -TARGET_EFAULT
;
10549 ret
= get_errno(adjtimex(&host_buf
));
10550 if (!is_error(ret
)) {
10551 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10552 return -TARGET_EFAULT
;
10558 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10559 case TARGET_NR_clock_adjtime
:
10561 struct timex htx
, *phtx
= &htx
;
10563 if (target_to_host_timex(phtx
, arg2
) != 0) {
10564 return -TARGET_EFAULT
;
10566 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10567 if (!is_error(ret
) && phtx
) {
10568 if (host_to_target_timex(arg2
, phtx
) != 0) {
10569 return -TARGET_EFAULT
;
10575 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10576 case TARGET_NR_clock_adjtime64
:
10580 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10581 return -TARGET_EFAULT
;
10583 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10584 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10585 return -TARGET_EFAULT
;
10590 case TARGET_NR_getpgid
:
10591 return get_errno(getpgid(arg1
));
10592 case TARGET_NR_fchdir
:
10593 return get_errno(fchdir(arg1
));
10594 case TARGET_NR_personality
:
10595 return get_errno(personality(arg1
));
10596 #ifdef TARGET_NR__llseek /* Not on alpha */
10597 case TARGET_NR__llseek
:
10600 #if !defined(__NR_llseek)
10601 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10603 ret
= get_errno(res
);
10608 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10610 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10611 return -TARGET_EFAULT
;
10616 #ifdef TARGET_NR_getdents
10617 case TARGET_NR_getdents
:
10618 return do_getdents(arg1
, arg2
, arg3
);
10619 #endif /* TARGET_NR_getdents */
10620 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10621 case TARGET_NR_getdents64
:
10622 return do_getdents64(arg1
, arg2
, arg3
);
10623 #endif /* TARGET_NR_getdents64 */
10624 #if defined(TARGET_NR__newselect)
10625 case TARGET_NR__newselect
:
10626 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10628 #ifdef TARGET_NR_poll
10629 case TARGET_NR_poll
:
10630 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10632 #ifdef TARGET_NR_ppoll
10633 case TARGET_NR_ppoll
:
10634 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10636 #ifdef TARGET_NR_ppoll_time64
10637 case TARGET_NR_ppoll_time64
:
10638 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10640 case TARGET_NR_flock
:
10641 /* NOTE: the flock constant seems to be the same for every
10643 return get_errno(safe_flock(arg1
, arg2
));
10644 case TARGET_NR_readv
:
10646 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10648 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10649 unlock_iovec(vec
, arg2
, arg3
, 1);
10651 ret
= -host_to_target_errno(errno
);
10655 case TARGET_NR_writev
:
10657 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10659 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10660 unlock_iovec(vec
, arg2
, arg3
, 0);
10662 ret
= -host_to_target_errno(errno
);
10666 #if defined(TARGET_NR_preadv)
10667 case TARGET_NR_preadv
:
10669 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10671 unsigned long low
, high
;
10673 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10674 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10675 unlock_iovec(vec
, arg2
, arg3
, 1);
10677 ret
= -host_to_target_errno(errno
);
10682 #if defined(TARGET_NR_pwritev)
10683 case TARGET_NR_pwritev
:
10685 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10687 unsigned long low
, high
;
10689 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10690 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10691 unlock_iovec(vec
, arg2
, arg3
, 0);
10693 ret
= -host_to_target_errno(errno
);
10698 case TARGET_NR_getsid
:
10699 return get_errno(getsid(arg1
));
10700 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10701 case TARGET_NR_fdatasync
:
10702 return get_errno(fdatasync(arg1
));
10704 case TARGET_NR_sched_getaffinity
:
10706 unsigned int mask_size
;
10707 unsigned long *mask
;
10710 * sched_getaffinity needs multiples of ulong, so need to take
10711 * care of mismatches between target ulong and host ulong sizes.
10713 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10714 return -TARGET_EINVAL
;
10716 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10718 mask
= alloca(mask_size
);
10719 memset(mask
, 0, mask_size
);
10720 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10722 if (!is_error(ret
)) {
10724 /* More data returned than the caller's buffer will fit.
10725 * This only happens if sizeof(abi_long) < sizeof(long)
10726 * and the caller passed us a buffer holding an odd number
10727 * of abi_longs. If the host kernel is actually using the
10728 * extra 4 bytes then fail EINVAL; otherwise we can just
10729 * ignore them and only copy the interesting part.
10731 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10732 if (numcpus
> arg2
* 8) {
10733 return -TARGET_EINVAL
;
10738 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10739 return -TARGET_EFAULT
;
10744 case TARGET_NR_sched_setaffinity
:
10746 unsigned int mask_size
;
10747 unsigned long *mask
;
10750 * sched_setaffinity needs multiples of ulong, so need to take
10751 * care of mismatches between target ulong and host ulong sizes.
10753 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10754 return -TARGET_EINVAL
;
10756 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10757 mask
= alloca(mask_size
);
10759 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10764 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10766 case TARGET_NR_getcpu
:
10768 unsigned cpu
, node
;
10769 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10770 arg2
? &node
: NULL
,
10772 if (is_error(ret
)) {
10775 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10776 return -TARGET_EFAULT
;
10778 if (arg2
&& put_user_u32(node
, arg2
)) {
10779 return -TARGET_EFAULT
;
10783 case TARGET_NR_sched_setparam
:
10785 struct target_sched_param
*target_schp
;
10786 struct sched_param schp
;
10789 return -TARGET_EINVAL
;
10791 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
10792 return -TARGET_EFAULT
;
10794 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10795 unlock_user_struct(target_schp
, arg2
, 0);
10796 return get_errno(sys_sched_setparam(arg1
, &schp
));
10798 case TARGET_NR_sched_getparam
:
10800 struct target_sched_param
*target_schp
;
10801 struct sched_param schp
;
10804 return -TARGET_EINVAL
;
10806 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
10807 if (!is_error(ret
)) {
10808 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
10809 return -TARGET_EFAULT
;
10811 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10812 unlock_user_struct(target_schp
, arg2
, 1);
10816 case TARGET_NR_sched_setscheduler
:
10818 struct target_sched_param
*target_schp
;
10819 struct sched_param schp
;
10821 return -TARGET_EINVAL
;
10823 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
10824 return -TARGET_EFAULT
;
10826 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10827 unlock_user_struct(target_schp
, arg3
, 0);
10828 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
10830 case TARGET_NR_sched_getscheduler
:
10831 return get_errno(sys_sched_getscheduler(arg1
));
10832 case TARGET_NR_sched_getattr
:
10834 struct target_sched_attr
*target_scha
;
10835 struct sched_attr scha
;
10837 return -TARGET_EINVAL
;
10839 if (arg3
> sizeof(scha
)) {
10840 arg3
= sizeof(scha
);
10842 ret
= get_errno(sys_sched_getattr(arg1
, &scha
, arg3
, arg4
));
10843 if (!is_error(ret
)) {
10844 target_scha
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10845 if (!target_scha
) {
10846 return -TARGET_EFAULT
;
10848 target_scha
->size
= tswap32(scha
.size
);
10849 target_scha
->sched_policy
= tswap32(scha
.sched_policy
);
10850 target_scha
->sched_flags
= tswap64(scha
.sched_flags
);
10851 target_scha
->sched_nice
= tswap32(scha
.sched_nice
);
10852 target_scha
->sched_priority
= tswap32(scha
.sched_priority
);
10853 target_scha
->sched_runtime
= tswap64(scha
.sched_runtime
);
10854 target_scha
->sched_deadline
= tswap64(scha
.sched_deadline
);
10855 target_scha
->sched_period
= tswap64(scha
.sched_period
);
10856 if (scha
.size
> offsetof(struct sched_attr
, sched_util_min
)) {
10857 target_scha
->sched_util_min
= tswap32(scha
.sched_util_min
);
10858 target_scha
->sched_util_max
= tswap32(scha
.sched_util_max
);
10860 unlock_user(target_scha
, arg2
, arg3
);
10864 case TARGET_NR_sched_setattr
:
10866 struct target_sched_attr
*target_scha
;
10867 struct sched_attr scha
;
10871 return -TARGET_EINVAL
;
10873 if (get_user_u32(size
, arg2
)) {
10874 return -TARGET_EFAULT
;
10877 size
= offsetof(struct target_sched_attr
, sched_util_min
);
10879 if (size
< offsetof(struct target_sched_attr
, sched_util_min
)) {
10880 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
10881 return -TARGET_EFAULT
;
10883 return -TARGET_E2BIG
;
10886 zeroed
= check_zeroed_user(arg2
, sizeof(struct target_sched_attr
), size
);
10889 } else if (zeroed
== 0) {
10890 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
10891 return -TARGET_EFAULT
;
10893 return -TARGET_E2BIG
;
10895 if (size
> sizeof(struct target_sched_attr
)) {
10896 size
= sizeof(struct target_sched_attr
);
10899 target_scha
= lock_user(VERIFY_READ
, arg2
, size
, 1);
10900 if (!target_scha
) {
10901 return -TARGET_EFAULT
;
10904 scha
.sched_policy
= tswap32(target_scha
->sched_policy
);
10905 scha
.sched_flags
= tswap64(target_scha
->sched_flags
);
10906 scha
.sched_nice
= tswap32(target_scha
->sched_nice
);
10907 scha
.sched_priority
= tswap32(target_scha
->sched_priority
);
10908 scha
.sched_runtime
= tswap64(target_scha
->sched_runtime
);
10909 scha
.sched_deadline
= tswap64(target_scha
->sched_deadline
);
10910 scha
.sched_period
= tswap64(target_scha
->sched_period
);
10911 if (size
> offsetof(struct target_sched_attr
, sched_util_min
)) {
10912 scha
.sched_util_min
= tswap32(target_scha
->sched_util_min
);
10913 scha
.sched_util_max
= tswap32(target_scha
->sched_util_max
);
10915 unlock_user(target_scha
, arg2
, 0);
10916 return get_errno(sys_sched_setattr(arg1
, &scha
, arg3
));
10918 case TARGET_NR_sched_yield
:
10919 return get_errno(sched_yield());
10920 case TARGET_NR_sched_get_priority_max
:
10921 return get_errno(sched_get_priority_max(arg1
));
10922 case TARGET_NR_sched_get_priority_min
:
10923 return get_errno(sched_get_priority_min(arg1
));
10924 #ifdef TARGET_NR_sched_rr_get_interval
10925 case TARGET_NR_sched_rr_get_interval
:
10927 struct timespec ts
;
10928 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10929 if (!is_error(ret
)) {
10930 ret
= host_to_target_timespec(arg2
, &ts
);
10935 #ifdef TARGET_NR_sched_rr_get_interval_time64
10936 case TARGET_NR_sched_rr_get_interval_time64
:
10938 struct timespec ts
;
10939 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10940 if (!is_error(ret
)) {
10941 ret
= host_to_target_timespec64(arg2
, &ts
);
10946 #if defined(TARGET_NR_nanosleep)
10947 case TARGET_NR_nanosleep
:
10949 struct timespec req
, rem
;
10950 target_to_host_timespec(&req
, arg1
);
10951 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10952 if (is_error(ret
) && arg2
) {
10953 host_to_target_timespec(arg2
, &rem
);
10958 case TARGET_NR_prctl
:
10959 return do_prctl(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
10961 #ifdef TARGET_NR_arch_prctl
10962 case TARGET_NR_arch_prctl
:
10963 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10965 #ifdef TARGET_NR_pread64
10966 case TARGET_NR_pread64
:
10967 if (regpairs_aligned(cpu_env
, num
)) {
10971 if (arg2
== 0 && arg3
== 0) {
10972 /* Special-case NULL buffer and zero length, which should succeed */
10975 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10977 return -TARGET_EFAULT
;
10980 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10981 unlock_user(p
, arg2
, ret
);
10983 case TARGET_NR_pwrite64
:
10984 if (regpairs_aligned(cpu_env
, num
)) {
10988 if (arg2
== 0 && arg3
== 0) {
10989 /* Special-case NULL buffer and zero length, which should succeed */
10992 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10994 return -TARGET_EFAULT
;
10997 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10998 unlock_user(p
, arg2
, 0);
11001 case TARGET_NR_getcwd
:
11002 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11003 return -TARGET_EFAULT
;
11004 ret
= get_errno(sys_getcwd1(p
, arg2
));
11005 unlock_user(p
, arg1
, ret
);
11007 case TARGET_NR_capget
:
11008 case TARGET_NR_capset
:
11010 struct target_user_cap_header
*target_header
;
11011 struct target_user_cap_data
*target_data
= NULL
;
11012 struct __user_cap_header_struct header
;
11013 struct __user_cap_data_struct data
[2];
11014 struct __user_cap_data_struct
*dataptr
= NULL
;
11015 int i
, target_datalen
;
11016 int data_items
= 1;
11018 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11019 return -TARGET_EFAULT
;
11021 header
.version
= tswap32(target_header
->version
);
11022 header
.pid
= tswap32(target_header
->pid
);
11024 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11025 /* Version 2 and up takes pointer to two user_data structs */
11029 target_datalen
= sizeof(*target_data
) * data_items
;
11032 if (num
== TARGET_NR_capget
) {
11033 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11035 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11037 if (!target_data
) {
11038 unlock_user_struct(target_header
, arg1
, 0);
11039 return -TARGET_EFAULT
;
11042 if (num
== TARGET_NR_capset
) {
11043 for (i
= 0; i
< data_items
; i
++) {
11044 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11045 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11046 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11053 if (num
== TARGET_NR_capget
) {
11054 ret
= get_errno(capget(&header
, dataptr
));
11056 ret
= get_errno(capset(&header
, dataptr
));
11059 /* The kernel always updates version for both capget and capset */
11060 target_header
->version
= tswap32(header
.version
);
11061 unlock_user_struct(target_header
, arg1
, 1);
11064 if (num
== TARGET_NR_capget
) {
11065 for (i
= 0; i
< data_items
; i
++) {
11066 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11067 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11068 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11070 unlock_user(target_data
, arg2
, target_datalen
);
11072 unlock_user(target_data
, arg2
, 0);
11077 case TARGET_NR_sigaltstack
:
11078 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11080 #ifdef CONFIG_SENDFILE
11081 #ifdef TARGET_NR_sendfile
11082 case TARGET_NR_sendfile
:
11084 off_t
*offp
= NULL
;
11087 ret
= get_user_sal(off
, arg3
);
11088 if (is_error(ret
)) {
11093 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11094 if (!is_error(ret
) && arg3
) {
11095 abi_long ret2
= put_user_sal(off
, arg3
);
11096 if (is_error(ret2
)) {
11103 #ifdef TARGET_NR_sendfile64
11104 case TARGET_NR_sendfile64
:
11106 off_t
*offp
= NULL
;
11109 ret
= get_user_s64(off
, arg3
);
11110 if (is_error(ret
)) {
11115 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11116 if (!is_error(ret
) && arg3
) {
11117 abi_long ret2
= put_user_s64(off
, arg3
);
11118 if (is_error(ret2
)) {
11126 #ifdef TARGET_NR_vfork
11127 case TARGET_NR_vfork
:
11128 return get_errno(do_fork(cpu_env
,
11129 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11132 #ifdef TARGET_NR_ugetrlimit
11133 case TARGET_NR_ugetrlimit
:
11135 struct rlimit rlim
;
11136 int resource
= target_to_host_resource(arg1
);
11137 ret
= get_errno(getrlimit(resource
, &rlim
));
11138 if (!is_error(ret
)) {
11139 struct target_rlimit
*target_rlim
;
11140 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11141 return -TARGET_EFAULT
;
11142 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11143 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11144 unlock_user_struct(target_rlim
, arg2
, 1);
11149 #ifdef TARGET_NR_truncate64
11150 case TARGET_NR_truncate64
:
11151 if (!(p
= lock_user_string(arg1
)))
11152 return -TARGET_EFAULT
;
11153 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11154 unlock_user(p
, arg1
, 0);
11157 #ifdef TARGET_NR_ftruncate64
11158 case TARGET_NR_ftruncate64
:
11159 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11161 #ifdef TARGET_NR_stat64
11162 case TARGET_NR_stat64
:
11163 if (!(p
= lock_user_string(arg1
))) {
11164 return -TARGET_EFAULT
;
11166 ret
= get_errno(stat(path(p
), &st
));
11167 unlock_user(p
, arg1
, 0);
11168 if (!is_error(ret
))
11169 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11172 #ifdef TARGET_NR_lstat64
11173 case TARGET_NR_lstat64
:
11174 if (!(p
= lock_user_string(arg1
))) {
11175 return -TARGET_EFAULT
;
11177 ret
= get_errno(lstat(path(p
), &st
));
11178 unlock_user(p
, arg1
, 0);
11179 if (!is_error(ret
))
11180 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11183 #ifdef TARGET_NR_fstat64
11184 case TARGET_NR_fstat64
:
11185 ret
= get_errno(fstat(arg1
, &st
));
11186 if (!is_error(ret
))
11187 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11190 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11191 #ifdef TARGET_NR_fstatat64
11192 case TARGET_NR_fstatat64
:
11194 #ifdef TARGET_NR_newfstatat
11195 case TARGET_NR_newfstatat
:
11197 if (!(p
= lock_user_string(arg2
))) {
11198 return -TARGET_EFAULT
;
11200 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11201 unlock_user(p
, arg2
, 0);
11202 if (!is_error(ret
))
11203 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11206 #if defined(TARGET_NR_statx)
11207 case TARGET_NR_statx
:
11209 struct target_statx
*target_stx
;
11213 p
= lock_user_string(arg2
);
11215 return -TARGET_EFAULT
;
11217 #if defined(__NR_statx)
11220 * It is assumed that struct statx is architecture independent.
11222 struct target_statx host_stx
;
11225 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11226 if (!is_error(ret
)) {
11227 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11228 unlock_user(p
, arg2
, 0);
11229 return -TARGET_EFAULT
;
11233 if (ret
!= -TARGET_ENOSYS
) {
11234 unlock_user(p
, arg2
, 0);
11239 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11240 unlock_user(p
, arg2
, 0);
11242 if (!is_error(ret
)) {
11243 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11244 return -TARGET_EFAULT
;
11246 memset(target_stx
, 0, sizeof(*target_stx
));
11247 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11248 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11249 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11250 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11251 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11252 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11253 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11254 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11255 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11256 __put_user(st
.st_size
, &target_stx
->stx_size
);
11257 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11258 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11259 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11260 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11261 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11262 unlock_user_struct(target_stx
, arg5
, 1);
11267 #ifdef TARGET_NR_lchown
11268 case TARGET_NR_lchown
:
11269 if (!(p
= lock_user_string(arg1
)))
11270 return -TARGET_EFAULT
;
11271 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11272 unlock_user(p
, arg1
, 0);
11275 #ifdef TARGET_NR_getuid
11276 case TARGET_NR_getuid
:
11277 return get_errno(high2lowuid(getuid()));
11279 #ifdef TARGET_NR_getgid
11280 case TARGET_NR_getgid
:
11281 return get_errno(high2lowgid(getgid()));
11283 #ifdef TARGET_NR_geteuid
11284 case TARGET_NR_geteuid
:
11285 return get_errno(high2lowuid(geteuid()));
11287 #ifdef TARGET_NR_getegid
11288 case TARGET_NR_getegid
:
11289 return get_errno(high2lowgid(getegid()));
11291 case TARGET_NR_setreuid
:
11292 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11293 case TARGET_NR_setregid
:
11294 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11295 case TARGET_NR_getgroups
:
11297 int gidsetsize
= arg1
;
11298 target_id
*target_grouplist
;
11302 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11303 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11304 if (gidsetsize
== 0)
11306 if (!is_error(ret
)) {
11307 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11308 if (!target_grouplist
)
11309 return -TARGET_EFAULT
;
11310 for(i
= 0;i
< ret
; i
++)
11311 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11312 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11316 case TARGET_NR_setgroups
:
11318 int gidsetsize
= arg1
;
11319 target_id
*target_grouplist
;
11320 gid_t
*grouplist
= NULL
;
11323 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11324 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11325 if (!target_grouplist
) {
11326 return -TARGET_EFAULT
;
11328 for (i
= 0; i
< gidsetsize
; i
++) {
11329 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11331 unlock_user(target_grouplist
, arg2
, 0);
11333 return get_errno(setgroups(gidsetsize
, grouplist
));
11335 case TARGET_NR_fchown
:
11336 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11337 #if defined(TARGET_NR_fchownat)
11338 case TARGET_NR_fchownat
:
11339 if (!(p
= lock_user_string(arg2
)))
11340 return -TARGET_EFAULT
;
11341 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11342 low2highgid(arg4
), arg5
));
11343 unlock_user(p
, arg2
, 0);
11346 #ifdef TARGET_NR_setresuid
11347 case TARGET_NR_setresuid
:
11348 return get_errno(sys_setresuid(low2highuid(arg1
),
11350 low2highuid(arg3
)));
11352 #ifdef TARGET_NR_getresuid
11353 case TARGET_NR_getresuid
:
11355 uid_t ruid
, euid
, suid
;
11356 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11357 if (!is_error(ret
)) {
11358 if (put_user_id(high2lowuid(ruid
), arg1
)
11359 || put_user_id(high2lowuid(euid
), arg2
)
11360 || put_user_id(high2lowuid(suid
), arg3
))
11361 return -TARGET_EFAULT
;
11366 #ifdef TARGET_NR_getresgid
11367 case TARGET_NR_setresgid
:
11368 return get_errno(sys_setresgid(low2highgid(arg1
),
11370 low2highgid(arg3
)));
11372 #ifdef TARGET_NR_getresgid
11373 case TARGET_NR_getresgid
:
11375 gid_t rgid
, egid
, sgid
;
11376 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11377 if (!is_error(ret
)) {
11378 if (put_user_id(high2lowgid(rgid
), arg1
)
11379 || put_user_id(high2lowgid(egid
), arg2
)
11380 || put_user_id(high2lowgid(sgid
), arg3
))
11381 return -TARGET_EFAULT
;
11386 #ifdef TARGET_NR_chown
11387 case TARGET_NR_chown
:
11388 if (!(p
= lock_user_string(arg1
)))
11389 return -TARGET_EFAULT
;
11390 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11391 unlock_user(p
, arg1
, 0);
11394 case TARGET_NR_setuid
:
11395 return get_errno(sys_setuid(low2highuid(arg1
)));
11396 case TARGET_NR_setgid
:
11397 return get_errno(sys_setgid(low2highgid(arg1
)));
11398 case TARGET_NR_setfsuid
:
11399 return get_errno(setfsuid(arg1
));
11400 case TARGET_NR_setfsgid
:
11401 return get_errno(setfsgid(arg1
));
11403 #ifdef TARGET_NR_lchown32
11404 case TARGET_NR_lchown32
:
11405 if (!(p
= lock_user_string(arg1
)))
11406 return -TARGET_EFAULT
;
11407 ret
= get_errno(lchown(p
, arg2
, arg3
));
11408 unlock_user(p
, arg1
, 0);
11411 #ifdef TARGET_NR_getuid32
11412 case TARGET_NR_getuid32
:
11413 return get_errno(getuid());
11416 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11417 /* Alpha specific */
11418 case TARGET_NR_getxuid
:
11422 cpu_env
->ir
[IR_A4
]=euid
;
11424 return get_errno(getuid());
11426 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11427 /* Alpha specific */
11428 case TARGET_NR_getxgid
:
11432 cpu_env
->ir
[IR_A4
]=egid
;
11434 return get_errno(getgid());
11436 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11437 /* Alpha specific */
11438 case TARGET_NR_osf_getsysinfo
:
11439 ret
= -TARGET_EOPNOTSUPP
;
11441 case TARGET_GSI_IEEE_FP_CONTROL
:
11443 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11444 uint64_t swcr
= cpu_env
->swcr
;
11446 swcr
&= ~SWCR_STATUS_MASK
;
11447 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11449 if (put_user_u64 (swcr
, arg2
))
11450 return -TARGET_EFAULT
;
11455 /* case GSI_IEEE_STATE_AT_SIGNAL:
11456 -- Not implemented in linux kernel.
11458 -- Retrieves current unaligned access state; not much used.
11459 case GSI_PROC_TYPE:
11460 -- Retrieves implver information; surely not used.
11461 case GSI_GET_HWRPB:
11462 -- Grabs a copy of the HWRPB; surely not used.
11467 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11468 /* Alpha specific */
11469 case TARGET_NR_osf_setsysinfo
:
11470 ret
= -TARGET_EOPNOTSUPP
;
11472 case TARGET_SSI_IEEE_FP_CONTROL
:
11474 uint64_t swcr
, fpcr
;
11476 if (get_user_u64 (swcr
, arg2
)) {
11477 return -TARGET_EFAULT
;
11481 * The kernel calls swcr_update_status to update the
11482 * status bits from the fpcr at every point that it
11483 * could be queried. Therefore, we store the status
11484 * bits only in FPCR.
11486 cpu_env
->swcr
= swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11488 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11489 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11490 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11491 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11496 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11498 uint64_t exc
, fpcr
, fex
;
11500 if (get_user_u64(exc
, arg2
)) {
11501 return -TARGET_EFAULT
;
11503 exc
&= SWCR_STATUS_MASK
;
11504 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11506 /* Old exceptions are not signaled. */
11507 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11509 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11510 fex
&= (cpu_env
)->swcr
;
11512 /* Update the hardware fpcr. */
11513 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11514 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11517 int si_code
= TARGET_FPE_FLTUNK
;
11518 target_siginfo_t info
;
11520 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11521 si_code
= TARGET_FPE_FLTUND
;
11523 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11524 si_code
= TARGET_FPE_FLTRES
;
11526 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11527 si_code
= TARGET_FPE_FLTUND
;
11529 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11530 si_code
= TARGET_FPE_FLTOVF
;
11532 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11533 si_code
= TARGET_FPE_FLTDIV
;
11535 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11536 si_code
= TARGET_FPE_FLTINV
;
11539 info
.si_signo
= SIGFPE
;
11541 info
.si_code
= si_code
;
11542 info
._sifields
._sigfault
._addr
= (cpu_env
)->pc
;
11543 queue_signal(cpu_env
, info
.si_signo
,
11544 QEMU_SI_FAULT
, &info
);
11550 /* case SSI_NVPAIRS:
11551 -- Used with SSIN_UACPROC to enable unaligned accesses.
11552 case SSI_IEEE_STATE_AT_SIGNAL:
11553 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11554 -- Not implemented in linux kernel
11559 #ifdef TARGET_NR_osf_sigprocmask
11560 /* Alpha specific. */
11561 case TARGET_NR_osf_sigprocmask
:
11565 sigset_t set
, oldset
;
11568 case TARGET_SIG_BLOCK
:
11571 case TARGET_SIG_UNBLOCK
:
11574 case TARGET_SIG_SETMASK
:
11578 return -TARGET_EINVAL
;
11581 target_to_host_old_sigset(&set
, &mask
);
11582 ret
= do_sigprocmask(how
, &set
, &oldset
);
11584 host_to_target_old_sigset(&mask
, &oldset
);
11591 #ifdef TARGET_NR_getgid32
11592 case TARGET_NR_getgid32
:
11593 return get_errno(getgid());
11595 #ifdef TARGET_NR_geteuid32
11596 case TARGET_NR_geteuid32
:
11597 return get_errno(geteuid());
11599 #ifdef TARGET_NR_getegid32
11600 case TARGET_NR_getegid32
:
11601 return get_errno(getegid());
11603 #ifdef TARGET_NR_setreuid32
11604 case TARGET_NR_setreuid32
:
11605 return get_errno(setreuid(arg1
, arg2
));
11607 #ifdef TARGET_NR_setregid32
11608 case TARGET_NR_setregid32
:
11609 return get_errno(setregid(arg1
, arg2
));
11611 #ifdef TARGET_NR_getgroups32
11612 case TARGET_NR_getgroups32
:
11614 int gidsetsize
= arg1
;
11615 uint32_t *target_grouplist
;
11619 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11620 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11621 if (gidsetsize
== 0)
11623 if (!is_error(ret
)) {
11624 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11625 if (!target_grouplist
) {
11626 return -TARGET_EFAULT
;
11628 for(i
= 0;i
< ret
; i
++)
11629 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11630 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11635 #ifdef TARGET_NR_setgroups32
11636 case TARGET_NR_setgroups32
:
11638 int gidsetsize
= arg1
;
11639 uint32_t *target_grouplist
;
11643 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11644 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11645 if (!target_grouplist
) {
11646 return -TARGET_EFAULT
;
11648 for(i
= 0;i
< gidsetsize
; i
++)
11649 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11650 unlock_user(target_grouplist
, arg2
, 0);
11651 return get_errno(setgroups(gidsetsize
, grouplist
));
11654 #ifdef TARGET_NR_fchown32
11655 case TARGET_NR_fchown32
:
11656 return get_errno(fchown(arg1
, arg2
, arg3
));
11658 #ifdef TARGET_NR_setresuid32
11659 case TARGET_NR_setresuid32
:
11660 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11662 #ifdef TARGET_NR_getresuid32
11663 case TARGET_NR_getresuid32
:
11665 uid_t ruid
, euid
, suid
;
11666 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11667 if (!is_error(ret
)) {
11668 if (put_user_u32(ruid
, arg1
)
11669 || put_user_u32(euid
, arg2
)
11670 || put_user_u32(suid
, arg3
))
11671 return -TARGET_EFAULT
;
11676 #ifdef TARGET_NR_setresgid32
11677 case TARGET_NR_setresgid32
:
11678 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11680 #ifdef TARGET_NR_getresgid32
11681 case TARGET_NR_getresgid32
:
11683 gid_t rgid
, egid
, sgid
;
11684 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11685 if (!is_error(ret
)) {
11686 if (put_user_u32(rgid
, arg1
)
11687 || put_user_u32(egid
, arg2
)
11688 || put_user_u32(sgid
, arg3
))
11689 return -TARGET_EFAULT
;
11694 #ifdef TARGET_NR_chown32
11695 case TARGET_NR_chown32
:
11696 if (!(p
= lock_user_string(arg1
)))
11697 return -TARGET_EFAULT
;
11698 ret
= get_errno(chown(p
, arg2
, arg3
));
11699 unlock_user(p
, arg1
, 0);
11702 #ifdef TARGET_NR_setuid32
11703 case TARGET_NR_setuid32
:
11704 return get_errno(sys_setuid(arg1
));
11706 #ifdef TARGET_NR_setgid32
11707 case TARGET_NR_setgid32
:
11708 return get_errno(sys_setgid(arg1
));
11710 #ifdef TARGET_NR_setfsuid32
11711 case TARGET_NR_setfsuid32
:
11712 return get_errno(setfsuid(arg1
));
11714 #ifdef TARGET_NR_setfsgid32
11715 case TARGET_NR_setfsgid32
:
11716 return get_errno(setfsgid(arg1
));
11718 #ifdef TARGET_NR_mincore
11719 case TARGET_NR_mincore
:
11721 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11723 return -TARGET_ENOMEM
;
11725 p
= lock_user_string(arg3
);
11727 ret
= -TARGET_EFAULT
;
11729 ret
= get_errno(mincore(a
, arg2
, p
));
11730 unlock_user(p
, arg3
, ret
);
11732 unlock_user(a
, arg1
, 0);
11736 #ifdef TARGET_NR_arm_fadvise64_64
11737 case TARGET_NR_arm_fadvise64_64
:
11738 /* arm_fadvise64_64 looks like fadvise64_64 but
11739 * with different argument order: fd, advice, offset, len
11740 * rather than the usual fd, offset, len, advice.
11741 * Note that offset and len are both 64-bit so appear as
11742 * pairs of 32-bit registers.
11744 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11745 target_offset64(arg5
, arg6
), arg2
);
11746 return -host_to_target_errno(ret
);
11749 #if TARGET_ABI_BITS == 32
11751 #ifdef TARGET_NR_fadvise64_64
11752 case TARGET_NR_fadvise64_64
:
11753 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11754 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11762 /* 6 args: fd, offset (high, low), len (high, low), advice */
11763 if (regpairs_aligned(cpu_env
, num
)) {
11764 /* offset is in (3,4), len in (5,6) and advice in 7 */
11772 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11773 target_offset64(arg4
, arg5
), arg6
);
11774 return -host_to_target_errno(ret
);
11777 #ifdef TARGET_NR_fadvise64
11778 case TARGET_NR_fadvise64
:
11779 /* 5 args: fd, offset (high, low), len, advice */
11780 if (regpairs_aligned(cpu_env
, num
)) {
11781 /* offset is in (3,4), len in 5 and advice in 6 */
11787 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11788 return -host_to_target_errno(ret
);
11791 #else /* not a 32-bit ABI */
11792 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11793 #ifdef TARGET_NR_fadvise64_64
11794 case TARGET_NR_fadvise64_64
:
11796 #ifdef TARGET_NR_fadvise64
11797 case TARGET_NR_fadvise64
:
11799 #ifdef TARGET_S390X
11801 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11802 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11803 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11804 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11808 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11810 #endif /* end of 64-bit ABI fadvise handling */
11812 #ifdef TARGET_NR_madvise
11813 case TARGET_NR_madvise
:
11814 return target_madvise(arg1
, arg2
, arg3
);
11816 #ifdef TARGET_NR_fcntl64
11817 case TARGET_NR_fcntl64
:
11821 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11822 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11825 if (!cpu_env
->eabi
) {
11826 copyfrom
= copy_from_user_oabi_flock64
;
11827 copyto
= copy_to_user_oabi_flock64
;
11831 cmd
= target_to_host_fcntl_cmd(arg2
);
11832 if (cmd
== -TARGET_EINVAL
) {
11837 case TARGET_F_GETLK64
:
11838 ret
= copyfrom(&fl
, arg3
);
11842 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11844 ret
= copyto(arg3
, &fl
);
11848 case TARGET_F_SETLK64
:
11849 case TARGET_F_SETLKW64
:
11850 ret
= copyfrom(&fl
, arg3
);
11854 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11857 ret
= do_fcntl(arg1
, arg2
, arg3
);
11863 #ifdef TARGET_NR_cacheflush
11864 case TARGET_NR_cacheflush
:
11865 /* self-modifying code is handled automatically, so nothing needed */
11868 #ifdef TARGET_NR_getpagesize
11869 case TARGET_NR_getpagesize
:
11870 return TARGET_PAGE_SIZE
;
11872 case TARGET_NR_gettid
:
11873 return get_errno(sys_gettid());
11874 #ifdef TARGET_NR_readahead
11875 case TARGET_NR_readahead
:
11876 #if TARGET_ABI_BITS == 32
11877 if (regpairs_aligned(cpu_env
, num
)) {
11882 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11884 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11889 #ifdef TARGET_NR_setxattr
11890 case TARGET_NR_listxattr
:
11891 case TARGET_NR_llistxattr
:
11895 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11897 return -TARGET_EFAULT
;
11900 p
= lock_user_string(arg1
);
11902 if (num
== TARGET_NR_listxattr
) {
11903 ret
= get_errno(listxattr(p
, b
, arg3
));
11905 ret
= get_errno(llistxattr(p
, b
, arg3
));
11908 ret
= -TARGET_EFAULT
;
11910 unlock_user(p
, arg1
, 0);
11911 unlock_user(b
, arg2
, arg3
);
11914 case TARGET_NR_flistxattr
:
11918 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11920 return -TARGET_EFAULT
;
11923 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11924 unlock_user(b
, arg2
, arg3
);
11927 case TARGET_NR_setxattr
:
11928 case TARGET_NR_lsetxattr
:
11930 void *p
, *n
, *v
= 0;
11932 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11934 return -TARGET_EFAULT
;
11937 p
= lock_user_string(arg1
);
11938 n
= lock_user_string(arg2
);
11940 if (num
== TARGET_NR_setxattr
) {
11941 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11943 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11946 ret
= -TARGET_EFAULT
;
11948 unlock_user(p
, arg1
, 0);
11949 unlock_user(n
, arg2
, 0);
11950 unlock_user(v
, arg3
, 0);
11953 case TARGET_NR_fsetxattr
:
11957 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11959 return -TARGET_EFAULT
;
11962 n
= lock_user_string(arg2
);
11964 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11966 ret
= -TARGET_EFAULT
;
11968 unlock_user(n
, arg2
, 0);
11969 unlock_user(v
, arg3
, 0);
11972 case TARGET_NR_getxattr
:
11973 case TARGET_NR_lgetxattr
:
11975 void *p
, *n
, *v
= 0;
11977 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11979 return -TARGET_EFAULT
;
11982 p
= lock_user_string(arg1
);
11983 n
= lock_user_string(arg2
);
11985 if (num
== TARGET_NR_getxattr
) {
11986 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11988 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11991 ret
= -TARGET_EFAULT
;
11993 unlock_user(p
, arg1
, 0);
11994 unlock_user(n
, arg2
, 0);
11995 unlock_user(v
, arg3
, arg4
);
11998 case TARGET_NR_fgetxattr
:
12002 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12004 return -TARGET_EFAULT
;
12007 n
= lock_user_string(arg2
);
12009 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12011 ret
= -TARGET_EFAULT
;
12013 unlock_user(n
, arg2
, 0);
12014 unlock_user(v
, arg3
, arg4
);
12017 case TARGET_NR_removexattr
:
12018 case TARGET_NR_lremovexattr
:
12021 p
= lock_user_string(arg1
);
12022 n
= lock_user_string(arg2
);
12024 if (num
== TARGET_NR_removexattr
) {
12025 ret
= get_errno(removexattr(p
, n
));
12027 ret
= get_errno(lremovexattr(p
, n
));
12030 ret
= -TARGET_EFAULT
;
12032 unlock_user(p
, arg1
, 0);
12033 unlock_user(n
, arg2
, 0);
12036 case TARGET_NR_fremovexattr
:
12039 n
= lock_user_string(arg2
);
12041 ret
= get_errno(fremovexattr(arg1
, n
));
12043 ret
= -TARGET_EFAULT
;
12045 unlock_user(n
, arg2
, 0);
12049 #endif /* CONFIG_ATTR */
12050 #ifdef TARGET_NR_set_thread_area
12051 case TARGET_NR_set_thread_area
:
12052 #if defined(TARGET_MIPS)
12053 cpu_env
->active_tc
.CP0_UserLocal
= arg1
;
12055 #elif defined(TARGET_CRIS)
12057 ret
= -TARGET_EINVAL
;
12059 cpu_env
->pregs
[PR_PID
] = arg1
;
12063 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12064 return do_set_thread_area(cpu_env
, arg1
);
12065 #elif defined(TARGET_M68K)
12067 TaskState
*ts
= cpu
->opaque
;
12068 ts
->tp_value
= arg1
;
12072 return -TARGET_ENOSYS
;
12075 #ifdef TARGET_NR_get_thread_area
12076 case TARGET_NR_get_thread_area
:
12077 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12078 return do_get_thread_area(cpu_env
, arg1
);
12079 #elif defined(TARGET_M68K)
12081 TaskState
*ts
= cpu
->opaque
;
12082 return ts
->tp_value
;
12085 return -TARGET_ENOSYS
;
12088 #ifdef TARGET_NR_getdomainname
12089 case TARGET_NR_getdomainname
:
12090 return -TARGET_ENOSYS
;
12093 #ifdef TARGET_NR_clock_settime
12094 case TARGET_NR_clock_settime
:
12096 struct timespec ts
;
12098 ret
= target_to_host_timespec(&ts
, arg2
);
12099 if (!is_error(ret
)) {
12100 ret
= get_errno(clock_settime(arg1
, &ts
));
12105 #ifdef TARGET_NR_clock_settime64
12106 case TARGET_NR_clock_settime64
:
12108 struct timespec ts
;
12110 ret
= target_to_host_timespec64(&ts
, arg2
);
12111 if (!is_error(ret
)) {
12112 ret
= get_errno(clock_settime(arg1
, &ts
));
12117 #ifdef TARGET_NR_clock_gettime
12118 case TARGET_NR_clock_gettime
:
12120 struct timespec ts
;
12121 ret
= get_errno(clock_gettime(arg1
, &ts
));
12122 if (!is_error(ret
)) {
12123 ret
= host_to_target_timespec(arg2
, &ts
);
12128 #ifdef TARGET_NR_clock_gettime64
12129 case TARGET_NR_clock_gettime64
:
12131 struct timespec ts
;
12132 ret
= get_errno(clock_gettime(arg1
, &ts
));
12133 if (!is_error(ret
)) {
12134 ret
= host_to_target_timespec64(arg2
, &ts
);
12139 #ifdef TARGET_NR_clock_getres
12140 case TARGET_NR_clock_getres
:
12142 struct timespec ts
;
12143 ret
= get_errno(clock_getres(arg1
, &ts
));
12144 if (!is_error(ret
)) {
12145 host_to_target_timespec(arg2
, &ts
);
12150 #ifdef TARGET_NR_clock_getres_time64
12151 case TARGET_NR_clock_getres_time64
:
12153 struct timespec ts
;
12154 ret
= get_errno(clock_getres(arg1
, &ts
));
12155 if (!is_error(ret
)) {
12156 host_to_target_timespec64(arg2
, &ts
);
12161 #ifdef TARGET_NR_clock_nanosleep
12162 case TARGET_NR_clock_nanosleep
:
12164 struct timespec ts
;
12165 if (target_to_host_timespec(&ts
, arg3
)) {
12166 return -TARGET_EFAULT
;
12168 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12169 &ts
, arg4
? &ts
: NULL
));
12171 * if the call is interrupted by a signal handler, it fails
12172 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12173 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12175 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12176 host_to_target_timespec(arg4
, &ts
)) {
12177 return -TARGET_EFAULT
;
12183 #ifdef TARGET_NR_clock_nanosleep_time64
12184 case TARGET_NR_clock_nanosleep_time64
:
12186 struct timespec ts
;
12188 if (target_to_host_timespec64(&ts
, arg3
)) {
12189 return -TARGET_EFAULT
;
12192 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12193 &ts
, arg4
? &ts
: NULL
));
12195 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12196 host_to_target_timespec64(arg4
, &ts
)) {
12197 return -TARGET_EFAULT
;
12203 #if defined(TARGET_NR_set_tid_address)
12204 case TARGET_NR_set_tid_address
:
12206 TaskState
*ts
= cpu
->opaque
;
12207 ts
->child_tidptr
= arg1
;
12208 /* do not call host set_tid_address() syscall, instead return tid() */
12209 return get_errno(sys_gettid());
12213 case TARGET_NR_tkill
:
12214 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12216 case TARGET_NR_tgkill
:
12217 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12218 target_to_host_signal(arg3
)));
12220 #ifdef TARGET_NR_set_robust_list
12221 case TARGET_NR_set_robust_list
:
12222 case TARGET_NR_get_robust_list
:
12223 /* The ABI for supporting robust futexes has userspace pass
12224 * the kernel a pointer to a linked list which is updated by
12225 * userspace after the syscall; the list is walked by the kernel
12226 * when the thread exits. Since the linked list in QEMU guest
12227 * memory isn't a valid linked list for the host and we have
12228 * no way to reliably intercept the thread-death event, we can't
12229 * support these. Silently return ENOSYS so that guest userspace
12230 * falls back to a non-robust futex implementation (which should
12231 * be OK except in the corner case of the guest crashing while
12232 * holding a mutex that is shared with another process via
12235 return -TARGET_ENOSYS
;
12238 #if defined(TARGET_NR_utimensat)
12239 case TARGET_NR_utimensat
:
12241 struct timespec
*tsp
, ts
[2];
12245 if (target_to_host_timespec(ts
, arg3
)) {
12246 return -TARGET_EFAULT
;
12248 if (target_to_host_timespec(ts
+ 1, arg3
+
12249 sizeof(struct target_timespec
))) {
12250 return -TARGET_EFAULT
;
12255 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12257 if (!(p
= lock_user_string(arg2
))) {
12258 return -TARGET_EFAULT
;
12260 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12261 unlock_user(p
, arg2
, 0);
12266 #ifdef TARGET_NR_utimensat_time64
12267 case TARGET_NR_utimensat_time64
:
12269 struct timespec
*tsp
, ts
[2];
12273 if (target_to_host_timespec64(ts
, arg3
)) {
12274 return -TARGET_EFAULT
;
12276 if (target_to_host_timespec64(ts
+ 1, arg3
+
12277 sizeof(struct target__kernel_timespec
))) {
12278 return -TARGET_EFAULT
;
12283 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12285 p
= lock_user_string(arg2
);
12287 return -TARGET_EFAULT
;
12289 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12290 unlock_user(p
, arg2
, 0);
12295 #ifdef TARGET_NR_futex
12296 case TARGET_NR_futex
:
12297 return do_futex(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12299 #ifdef TARGET_NR_futex_time64
12300 case TARGET_NR_futex_time64
:
12301 return do_futex_time64(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12303 #ifdef CONFIG_INOTIFY
12304 #if defined(TARGET_NR_inotify_init)
12305 case TARGET_NR_inotify_init
:
12306 ret
= get_errno(inotify_init());
12308 fd_trans_register(ret
, &target_inotify_trans
);
12312 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12313 case TARGET_NR_inotify_init1
:
12314 ret
= get_errno(inotify_init1(target_to_host_bitmask(arg1
,
12315 fcntl_flags_tbl
)));
12317 fd_trans_register(ret
, &target_inotify_trans
);
12321 #if defined(TARGET_NR_inotify_add_watch)
12322 case TARGET_NR_inotify_add_watch
:
12323 p
= lock_user_string(arg2
);
12324 ret
= get_errno(inotify_add_watch(arg1
, path(p
), arg3
));
12325 unlock_user(p
, arg2
, 0);
12328 #if defined(TARGET_NR_inotify_rm_watch)
12329 case TARGET_NR_inotify_rm_watch
:
12330 return get_errno(inotify_rm_watch(arg1
, arg2
));
12334 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12335 case TARGET_NR_mq_open
:
12337 struct mq_attr posix_mq_attr
;
12338 struct mq_attr
*pposix_mq_attr
;
12341 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12342 pposix_mq_attr
= NULL
;
12344 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12345 return -TARGET_EFAULT
;
12347 pposix_mq_attr
= &posix_mq_attr
;
12349 p
= lock_user_string(arg1
- 1);
12351 return -TARGET_EFAULT
;
12353 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12354 unlock_user (p
, arg1
, 0);
12358 case TARGET_NR_mq_unlink
:
12359 p
= lock_user_string(arg1
- 1);
12361 return -TARGET_EFAULT
;
12363 ret
= get_errno(mq_unlink(p
));
12364 unlock_user (p
, arg1
, 0);
12367 #ifdef TARGET_NR_mq_timedsend
12368 case TARGET_NR_mq_timedsend
:
12370 struct timespec ts
;
12372 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12374 if (target_to_host_timespec(&ts
, arg5
)) {
12375 return -TARGET_EFAULT
;
12377 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12378 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12379 return -TARGET_EFAULT
;
12382 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12384 unlock_user (p
, arg2
, arg3
);
12388 #ifdef TARGET_NR_mq_timedsend_time64
12389 case TARGET_NR_mq_timedsend_time64
:
12391 struct timespec ts
;
12393 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12395 if (target_to_host_timespec64(&ts
, arg5
)) {
12396 return -TARGET_EFAULT
;
12398 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12399 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12400 return -TARGET_EFAULT
;
12403 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12405 unlock_user(p
, arg2
, arg3
);
12410 #ifdef TARGET_NR_mq_timedreceive
12411 case TARGET_NR_mq_timedreceive
:
12413 struct timespec ts
;
12416 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12418 if (target_to_host_timespec(&ts
, arg5
)) {
12419 return -TARGET_EFAULT
;
12421 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12423 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12424 return -TARGET_EFAULT
;
12427 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12430 unlock_user (p
, arg2
, arg3
);
12432 put_user_u32(prio
, arg4
);
12436 #ifdef TARGET_NR_mq_timedreceive_time64
12437 case TARGET_NR_mq_timedreceive_time64
:
12439 struct timespec ts
;
12442 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12444 if (target_to_host_timespec64(&ts
, arg5
)) {
12445 return -TARGET_EFAULT
;
12447 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12449 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12450 return -TARGET_EFAULT
;
12453 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12456 unlock_user(p
, arg2
, arg3
);
12458 put_user_u32(prio
, arg4
);
12464 /* Not implemented for now... */
12465 /* case TARGET_NR_mq_notify: */
12468 case TARGET_NR_mq_getsetattr
:
12470 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12473 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12474 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12475 &posix_mq_attr_out
));
12476 } else if (arg3
!= 0) {
12477 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12479 if (ret
== 0 && arg3
!= 0) {
12480 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12486 #ifdef CONFIG_SPLICE
12487 #ifdef TARGET_NR_tee
12488 case TARGET_NR_tee
:
12490 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12494 #ifdef TARGET_NR_splice
12495 case TARGET_NR_splice
:
12497 loff_t loff_in
, loff_out
;
12498 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12500 if (get_user_u64(loff_in
, arg2
)) {
12501 return -TARGET_EFAULT
;
12503 ploff_in
= &loff_in
;
12506 if (get_user_u64(loff_out
, arg4
)) {
12507 return -TARGET_EFAULT
;
12509 ploff_out
= &loff_out
;
12511 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12513 if (put_user_u64(loff_in
, arg2
)) {
12514 return -TARGET_EFAULT
;
12518 if (put_user_u64(loff_out
, arg4
)) {
12519 return -TARGET_EFAULT
;
12525 #ifdef TARGET_NR_vmsplice
12526 case TARGET_NR_vmsplice
:
12528 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12530 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12531 unlock_iovec(vec
, arg2
, arg3
, 0);
12533 ret
= -host_to_target_errno(errno
);
12538 #endif /* CONFIG_SPLICE */
12539 #ifdef CONFIG_EVENTFD
12540 #if defined(TARGET_NR_eventfd)
12541 case TARGET_NR_eventfd
:
12542 ret
= get_errno(eventfd(arg1
, 0));
12544 fd_trans_register(ret
, &target_eventfd_trans
);
12548 #if defined(TARGET_NR_eventfd2)
12549 case TARGET_NR_eventfd2
:
12551 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12552 if (arg2
& TARGET_O_NONBLOCK
) {
12553 host_flags
|= O_NONBLOCK
;
12555 if (arg2
& TARGET_O_CLOEXEC
) {
12556 host_flags
|= O_CLOEXEC
;
12558 ret
= get_errno(eventfd(arg1
, host_flags
));
12560 fd_trans_register(ret
, &target_eventfd_trans
);
12565 #endif /* CONFIG_EVENTFD */
12566 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12567 case TARGET_NR_fallocate
:
12568 #if TARGET_ABI_BITS == 32
12569 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12570 target_offset64(arg5
, arg6
)));
12572 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12576 #if defined(CONFIG_SYNC_FILE_RANGE)
12577 #if defined(TARGET_NR_sync_file_range)
12578 case TARGET_NR_sync_file_range
:
12579 #if TARGET_ABI_BITS == 32
12580 #if defined(TARGET_MIPS)
12581 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12582 target_offset64(arg5
, arg6
), arg7
));
12584 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12585 target_offset64(arg4
, arg5
), arg6
));
12586 #endif /* !TARGET_MIPS */
12588 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12592 #if defined(TARGET_NR_sync_file_range2) || \
12593 defined(TARGET_NR_arm_sync_file_range)
12594 #if defined(TARGET_NR_sync_file_range2)
12595 case TARGET_NR_sync_file_range2
:
12597 #if defined(TARGET_NR_arm_sync_file_range)
12598 case TARGET_NR_arm_sync_file_range
:
12600 /* This is like sync_file_range but the arguments are reordered */
12601 #if TARGET_ABI_BITS == 32
12602 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12603 target_offset64(arg5
, arg6
), arg2
));
12605 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12610 #if defined(TARGET_NR_signalfd4)
12611 case TARGET_NR_signalfd4
:
12612 return do_signalfd4(arg1
, arg2
, arg4
);
12614 #if defined(TARGET_NR_signalfd)
12615 case TARGET_NR_signalfd
:
12616 return do_signalfd4(arg1
, arg2
, 0);
12618 #if defined(CONFIG_EPOLL)
12619 #if defined(TARGET_NR_epoll_create)
12620 case TARGET_NR_epoll_create
:
12621 return get_errno(epoll_create(arg1
));
12623 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12624 case TARGET_NR_epoll_create1
:
12625 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12627 #if defined(TARGET_NR_epoll_ctl)
12628 case TARGET_NR_epoll_ctl
:
12630 struct epoll_event ep
;
12631 struct epoll_event
*epp
= 0;
12633 if (arg2
!= EPOLL_CTL_DEL
) {
12634 struct target_epoll_event
*target_ep
;
12635 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12636 return -TARGET_EFAULT
;
12638 ep
.events
= tswap32(target_ep
->events
);
12640 * The epoll_data_t union is just opaque data to the kernel,
12641 * so we transfer all 64 bits across and need not worry what
12642 * actual data type it is.
12644 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12645 unlock_user_struct(target_ep
, arg4
, 0);
12648 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12649 * non-null pointer, even though this argument is ignored.
12654 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12658 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12659 #if defined(TARGET_NR_epoll_wait)
12660 case TARGET_NR_epoll_wait
:
12662 #if defined(TARGET_NR_epoll_pwait)
12663 case TARGET_NR_epoll_pwait
:
12666 struct target_epoll_event
*target_ep
;
12667 struct epoll_event
*ep
;
12669 int maxevents
= arg3
;
12670 int timeout
= arg4
;
12672 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12673 return -TARGET_EINVAL
;
12676 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12677 maxevents
* sizeof(struct target_epoll_event
), 1);
12679 return -TARGET_EFAULT
;
12682 ep
= g_try_new(struct epoll_event
, maxevents
);
12684 unlock_user(target_ep
, arg2
, 0);
12685 return -TARGET_ENOMEM
;
12689 #if defined(TARGET_NR_epoll_pwait)
12690 case TARGET_NR_epoll_pwait
:
12692 sigset_t
*set
= NULL
;
12695 ret
= process_sigsuspend_mask(&set
, arg5
, arg6
);
12701 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12702 set
, SIGSET_T_SIZE
));
12705 finish_sigsuspend_mask(ret
);
12710 #if defined(TARGET_NR_epoll_wait)
12711 case TARGET_NR_epoll_wait
:
12712 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12717 ret
= -TARGET_ENOSYS
;
12719 if (!is_error(ret
)) {
12721 for (i
= 0; i
< ret
; i
++) {
12722 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12723 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12725 unlock_user(target_ep
, arg2
,
12726 ret
* sizeof(struct target_epoll_event
));
12728 unlock_user(target_ep
, arg2
, 0);
12735 #ifdef TARGET_NR_prlimit64
12736 case TARGET_NR_prlimit64
:
12738 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12739 struct target_rlimit64
*target_rnew
, *target_rold
;
12740 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12741 int resource
= target_to_host_resource(arg2
);
12743 if (arg3
&& (resource
!= RLIMIT_AS
&&
12744 resource
!= RLIMIT_DATA
&&
12745 resource
!= RLIMIT_STACK
)) {
12746 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12747 return -TARGET_EFAULT
;
12749 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12750 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12751 unlock_user_struct(target_rnew
, arg3
, 0);
12755 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12756 if (!is_error(ret
) && arg4
) {
12757 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12758 return -TARGET_EFAULT
;
12760 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12761 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12762 unlock_user_struct(target_rold
, arg4
, 1);
12767 #ifdef TARGET_NR_gethostname
12768 case TARGET_NR_gethostname
:
12770 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12772 ret
= get_errno(gethostname(name
, arg2
));
12773 unlock_user(name
, arg1
, arg2
);
12775 ret
= -TARGET_EFAULT
;
12780 #ifdef TARGET_NR_atomic_cmpxchg_32
12781 case TARGET_NR_atomic_cmpxchg_32
:
12783 /* should use start_exclusive from main.c */
12784 abi_ulong mem_value
;
12785 if (get_user_u32(mem_value
, arg6
)) {
12786 target_siginfo_t info
;
12787 info
.si_signo
= SIGSEGV
;
12789 info
.si_code
= TARGET_SEGV_MAPERR
;
12790 info
._sifields
._sigfault
._addr
= arg6
;
12791 queue_signal(cpu_env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
12795 if (mem_value
== arg2
)
12796 put_user_u32(arg1
, arg6
);
12800 #ifdef TARGET_NR_atomic_barrier
12801 case TARGET_NR_atomic_barrier
:
12802 /* Like the kernel implementation and the
12803 qemu arm barrier, no-op this? */
12807 #ifdef TARGET_NR_timer_create
12808 case TARGET_NR_timer_create
:
12810 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12812 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12815 int timer_index
= next_free_host_timer();
12817 if (timer_index
< 0) {
12818 ret
= -TARGET_EAGAIN
;
12820 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12823 phost_sevp
= &host_sevp
;
12824 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12830 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12834 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12835 return -TARGET_EFAULT
;
12843 #ifdef TARGET_NR_timer_settime
12844 case TARGET_NR_timer_settime
:
12846 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12847 * struct itimerspec * old_value */
12848 target_timer_t timerid
= get_timer_id(arg1
);
12852 } else if (arg3
== 0) {
12853 ret
= -TARGET_EINVAL
;
12855 timer_t htimer
= g_posix_timers
[timerid
];
12856 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12858 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12859 return -TARGET_EFAULT
;
12862 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12863 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12864 return -TARGET_EFAULT
;
12871 #ifdef TARGET_NR_timer_settime64
12872 case TARGET_NR_timer_settime64
:
12874 target_timer_t timerid
= get_timer_id(arg1
);
12878 } else if (arg3
== 0) {
12879 ret
= -TARGET_EINVAL
;
12881 timer_t htimer
= g_posix_timers
[timerid
];
12882 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12884 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12885 return -TARGET_EFAULT
;
12888 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12889 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12890 return -TARGET_EFAULT
;
12897 #ifdef TARGET_NR_timer_gettime
12898 case TARGET_NR_timer_gettime
:
12900 /* args: timer_t timerid, struct itimerspec *curr_value */
12901 target_timer_t timerid
= get_timer_id(arg1
);
12905 } else if (!arg2
) {
12906 ret
= -TARGET_EFAULT
;
12908 timer_t htimer
= g_posix_timers
[timerid
];
12909 struct itimerspec hspec
;
12910 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12912 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12913 ret
= -TARGET_EFAULT
;
12920 #ifdef TARGET_NR_timer_gettime64
12921 case TARGET_NR_timer_gettime64
:
12923 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12924 target_timer_t timerid
= get_timer_id(arg1
);
12928 } else if (!arg2
) {
12929 ret
= -TARGET_EFAULT
;
12931 timer_t htimer
= g_posix_timers
[timerid
];
12932 struct itimerspec hspec
;
12933 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12935 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12936 ret
= -TARGET_EFAULT
;
12943 #ifdef TARGET_NR_timer_getoverrun
12944 case TARGET_NR_timer_getoverrun
:
12946 /* args: timer_t timerid */
12947 target_timer_t timerid
= get_timer_id(arg1
);
12952 timer_t htimer
= g_posix_timers
[timerid
];
12953 ret
= get_errno(timer_getoverrun(htimer
));
12959 #ifdef TARGET_NR_timer_delete
12960 case TARGET_NR_timer_delete
:
12962 /* args: timer_t timerid */
12963 target_timer_t timerid
= get_timer_id(arg1
);
12968 timer_t htimer
= g_posix_timers
[timerid
];
12969 ret
= get_errno(timer_delete(htimer
));
12970 g_posix_timers
[timerid
] = 0;
12976 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12977 case TARGET_NR_timerfd_create
:
12978 return get_errno(timerfd_create(arg1
,
12979 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12982 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12983 case TARGET_NR_timerfd_gettime
:
12985 struct itimerspec its_curr
;
12987 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12989 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12990 return -TARGET_EFAULT
;
12996 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12997 case TARGET_NR_timerfd_gettime64
:
12999 struct itimerspec its_curr
;
13001 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13003 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13004 return -TARGET_EFAULT
;
13010 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13011 case TARGET_NR_timerfd_settime
:
13013 struct itimerspec its_new
, its_old
, *p_new
;
13016 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13017 return -TARGET_EFAULT
;
13024 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13026 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13027 return -TARGET_EFAULT
;
13033 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13034 case TARGET_NR_timerfd_settime64
:
13036 struct itimerspec its_new
, its_old
, *p_new
;
13039 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13040 return -TARGET_EFAULT
;
13047 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13049 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13050 return -TARGET_EFAULT
;
13056 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13057 case TARGET_NR_ioprio_get
:
13058 return get_errno(ioprio_get(arg1
, arg2
));
13061 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13062 case TARGET_NR_ioprio_set
:
13063 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13066 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13067 case TARGET_NR_setns
:
13068 return get_errno(setns(arg1
, arg2
));
13070 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13071 case TARGET_NR_unshare
:
13072 return get_errno(unshare(arg1
));
13074 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13075 case TARGET_NR_kcmp
:
13076 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13078 #ifdef TARGET_NR_swapcontext
13079 case TARGET_NR_swapcontext
:
13080 /* PowerPC specific. */
13081 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13083 #ifdef TARGET_NR_memfd_create
13084 case TARGET_NR_memfd_create
:
13085 p
= lock_user_string(arg1
);
13087 return -TARGET_EFAULT
;
13089 ret
= get_errno(memfd_create(p
, arg2
));
13090 fd_trans_unregister(ret
);
13091 unlock_user(p
, arg1
, 0);
13094 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13095 case TARGET_NR_membarrier
:
13096 return get_errno(membarrier(arg1
, arg2
));
13099 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13100 case TARGET_NR_copy_file_range
:
13102 loff_t inoff
, outoff
;
13103 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13106 if (get_user_u64(inoff
, arg2
)) {
13107 return -TARGET_EFAULT
;
13112 if (get_user_u64(outoff
, arg4
)) {
13113 return -TARGET_EFAULT
;
13117 /* Do not sign-extend the count parameter. */
13118 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13119 (abi_ulong
)arg5
, arg6
));
13120 if (!is_error(ret
) && ret
> 0) {
13122 if (put_user_u64(inoff
, arg2
)) {
13123 return -TARGET_EFAULT
;
13127 if (put_user_u64(outoff
, arg4
)) {
13128 return -TARGET_EFAULT
;
13136 #if defined(TARGET_NR_pivot_root)
13137 case TARGET_NR_pivot_root
:
13140 p
= lock_user_string(arg1
); /* new_root */
13141 p2
= lock_user_string(arg2
); /* put_old */
13143 ret
= -TARGET_EFAULT
;
13145 ret
= get_errno(pivot_root(p
, p2
));
13147 unlock_user(p2
, arg2
, 0);
13148 unlock_user(p
, arg1
, 0);
13154 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13155 return -TARGET_ENOSYS
;
13160 abi_long
do_syscall(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
13161 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13162 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13165 CPUState
*cpu
= env_cpu(cpu_env
);
13168 #ifdef DEBUG_ERESTARTSYS
13169 /* Debug-only code for exercising the syscall-restart code paths
13170 * in the per-architecture cpu main loops: restart every syscall
13171 * the guest makes once before letting it through.
13177 return -QEMU_ERESTARTSYS
;
13182 record_syscall_start(cpu
, num
, arg1
,
13183 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13185 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13186 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13189 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13190 arg5
, arg6
, arg7
, arg8
);
13192 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13193 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13194 arg3
, arg4
, arg5
, arg6
);
13197 record_syscall_return(cpu
, num
, ret
);