4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
101 * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
102 * which in turn prevents use of linux/fs.h. So we have to
103 * define the constants ourselves for now.
105 #define FS_IOC_GETFLAGS _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION _IOW('v', 2, int)
115 #define BLKGETSIZE64 _IOR(0x12,114,size_t)
116 #define BLKDISCARD _IO(0x12,119)
117 #define BLKIOMIN _IO(0x12,120)
118 #define BLKIOOPT _IO(0x12,121)
119 #define BLKALIGNOFF _IO(0x12,122)
120 #define BLKPBSZGET _IO(0x12,123)
121 #define BLKDISCARDZEROES _IO(0x12,124)
122 #define BLKSECDISCARD _IO(0x12,125)
123 #define BLKROTATIONAL _IO(0x12,126)
124 #define BLKZEROOUT _IO(0x12,127)
126 #define FIBMAP _IO(0x00,1)
127 #define FIGETBSZ _IO(0x00,2)
129 struct file_clone_range
{
136 #define FICLONE _IOW(0x94, 9, int)
137 #define FICLONERANGE _IOW(0x94, 13, struct file_clone_range)
140 #include <linux/fs.h>
142 #include <linux/fd.h>
143 #if defined(CONFIG_FIEMAP)
144 #include <linux/fiemap.h>
146 #include <linux/fb.h>
147 #if defined(CONFIG_USBFS)
148 #include <linux/usbdevice_fs.h>
149 #include <linux/usb/ch9.h>
151 #include <linux/vt.h>
152 #include <linux/dm-ioctl.h>
153 #include <linux/reboot.h>
154 #include <linux/route.h>
155 #include <linux/filter.h>
156 #include <linux/blkpg.h>
157 #include <netpacket/packet.h>
158 #include <linux/netlink.h>
159 #include <linux/if_alg.h>
160 #include <linux/rtc.h>
161 #include <sound/asound.h>
163 #include <linux/btrfs.h>
166 #include <libdrm/drm.h>
167 #include <libdrm/i915_drm.h>
169 #include "linux_loop.h"
173 #include "user-internals.h"
175 #include "signal-common.h"
177 #include "user-mmap.h"
178 #include "user/safe-syscall.h"
179 #include "qemu/guest-random.h"
180 #include "qemu/selfmap.h"
181 #include "user/syscall-trace.h"
182 #include "special-errno.h"
183 #include "qapi/error.h"
184 #include "fd-trans.h"
186 #include "cpu_loop-common.h"
189 #define CLONE_IO 0x80000000 /* Clone io context */
192 /* We can't directly call the host clone syscall, because this will
193 * badly confuse libc (breaking mutexes, for example). So we must
194 * divide clone flags into:
195 * * flag combinations that look like pthread_create()
196 * * flag combinations that look like fork()
197 * * flags we can implement within QEMU itself
198 * * flags we can't support and will return an error for
200 /* For thread creation, all these flags must be present; for
201 * fork, none must be present.
203 #define CLONE_THREAD_FLAGS \
204 (CLONE_VM | CLONE_FS | CLONE_FILES | \
205 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
207 /* These flags are ignored:
208 * CLONE_DETACHED is now ignored by the kernel;
209 * CLONE_IO is just an optimisation hint to the I/O scheduler
211 #define CLONE_IGNORED_FLAGS \
212 (CLONE_DETACHED | CLONE_IO)
214 /* Flags for fork which we can implement within QEMU itself */
215 #define CLONE_OPTIONAL_FORK_FLAGS \
216 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
217 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
219 /* Flags for thread creation which we can implement within QEMU itself */
220 #define CLONE_OPTIONAL_THREAD_FLAGS \
221 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
222 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
224 #define CLONE_INVALID_FORK_FLAGS \
225 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
227 #define CLONE_INVALID_THREAD_FLAGS \
228 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
229 CLONE_IGNORED_FLAGS))
231 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
232 * have almost all been allocated. We cannot support any of
233 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
234 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
235 * The checks against the invalid thread masks above will catch these.
236 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
239 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
240 * once. This exercises the codepaths for restart.
242 //#define DEBUG_ERESTARTSYS
244 //#include <linux/msdos_fs.h>
245 #define VFAT_IOCTL_READDIR_BOTH \
246 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
247 #define VFAT_IOCTL_READDIR_SHORT \
248 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
258 #define _syscall0(type,name) \
259 static type name (void) \
261 return syscall(__NR_##name); \
264 #define _syscall1(type,name,type1,arg1) \
265 static type name (type1 arg1) \
267 return syscall(__NR_##name, arg1); \
270 #define _syscall2(type,name,type1,arg1,type2,arg2) \
271 static type name (type1 arg1,type2 arg2) \
273 return syscall(__NR_##name, arg1, arg2); \
276 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
277 static type name (type1 arg1,type2 arg2,type3 arg3) \
279 return syscall(__NR_##name, arg1, arg2, arg3); \
282 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
283 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
285 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
288 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
290 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
292 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
296 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
297 type5,arg5,type6,arg6) \
298 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
301 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
305 #define __NR_sys_uname __NR_uname
306 #define __NR_sys_getcwd1 __NR_getcwd
307 #define __NR_sys_getdents __NR_getdents
308 #define __NR_sys_getdents64 __NR_getdents64
309 #define __NR_sys_getpriority __NR_getpriority
310 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
311 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
312 #define __NR_sys_syslog __NR_syslog
313 #if defined(__NR_futex)
314 # define __NR_sys_futex __NR_futex
316 #if defined(__NR_futex_time64)
317 # define __NR_sys_futex_time64 __NR_futex_time64
319 #define __NR_sys_statx __NR_statx
321 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
322 #define __NR__llseek __NR_lseek
325 /* Newer kernel ports have llseek() instead of _llseek() */
326 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
327 #define TARGET_NR__llseek TARGET_NR_llseek
330 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
331 #ifndef TARGET_O_NONBLOCK_MASK
332 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
335 #define __NR_sys_gettid __NR_gettid
336 _syscall0(int, sys_gettid
)
338 /* For the 64-bit guest on 32-bit host case we must emulate
339 * getdents using getdents64, because otherwise the host
340 * might hand us back more dirent records than we can fit
341 * into the guest buffer after structure format conversion.
342 * Otherwise we emulate getdents with getdents if the host has it.
344 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
345 #define EMULATE_GETDENTS_WITH_GETDENTS
348 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
349 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
351 #if (defined(TARGET_NR_getdents) && \
352 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
353 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
354 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
356 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
357 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
358 loff_t
*, res
, uint
, wh
);
360 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
361 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
363 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
364 #ifdef __NR_exit_group
365 _syscall1(int,exit_group
,int,error_code
)
367 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
368 #define __NR_sys_close_range __NR_close_range
369 _syscall3(int,sys_close_range
,int,first
,int,last
,int,flags
)
370 #ifndef CLOSE_RANGE_CLOEXEC
371 #define CLOSE_RANGE_CLOEXEC (1U << 2)
374 #if defined(__NR_futex)
375 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
376 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
378 #if defined(__NR_futex_time64)
379 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
380 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
382 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
383 _syscall2(int, pidfd_open
, pid_t
, pid
, unsigned int, flags
);
385 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
386 _syscall4(int, pidfd_send_signal
, int, pidfd
, int, sig
, siginfo_t
*, info
,
387 unsigned int, flags
);
389 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
390 _syscall3(int, pidfd_getfd
, int, pidfd
, int, targetfd
, unsigned int, flags
);
392 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
393 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
394 unsigned long *, user_mask_ptr
);
395 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
396 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
397 unsigned long *, user_mask_ptr
);
398 /* sched_attr is not defined in glibc */
401 uint32_t sched_policy
;
402 uint64_t sched_flags
;
404 uint32_t sched_priority
;
405 uint64_t sched_runtime
;
406 uint64_t sched_deadline
;
407 uint64_t sched_period
;
408 uint32_t sched_util_min
;
409 uint32_t sched_util_max
;
411 #define __NR_sys_sched_getattr __NR_sched_getattr
412 _syscall4(int, sys_sched_getattr
, pid_t
, pid
, struct sched_attr
*, attr
,
413 unsigned int, size
, unsigned int, flags
);
414 #define __NR_sys_sched_setattr __NR_sched_setattr
415 _syscall3(int, sys_sched_setattr
, pid_t
, pid
, struct sched_attr
*, attr
,
416 unsigned int, flags
);
417 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
418 _syscall1(int, sys_sched_getscheduler
, pid_t
, pid
);
419 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
420 _syscall3(int, sys_sched_setscheduler
, pid_t
, pid
, int, policy
,
421 const struct sched_param
*, param
);
422 #define __NR_sys_sched_getparam __NR_sched_getparam
423 _syscall2(int, sys_sched_getparam
, pid_t
, pid
,
424 struct sched_param
*, param
);
425 #define __NR_sys_sched_setparam __NR_sched_setparam
426 _syscall2(int, sys_sched_setparam
, pid_t
, pid
,
427 const struct sched_param
*, param
);
428 #define __NR_sys_getcpu __NR_getcpu
429 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
430 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
432 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
433 struct __user_cap_data_struct
*, data
);
434 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
435 struct __user_cap_data_struct
*, data
);
436 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
437 _syscall2(int, ioprio_get
, int, which
, int, who
)
439 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
440 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
442 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
443 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
446 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
447 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
448 unsigned long, idx1
, unsigned long, idx2
)
452 * It is assumed that struct statx is architecture independent.
454 #if defined(TARGET_NR_statx) && defined(__NR_statx)
455 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
456 unsigned int, mask
, struct target_statx
*, statxbuf
)
458 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
459 _syscall2(int, membarrier
, int, cmd
, int, flags
)
462 static const bitmask_transtbl fcntl_flags_tbl
[] = {
463 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
464 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
465 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
466 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
467 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
468 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
469 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
470 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
471 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
472 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
473 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
474 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
475 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
476 #if defined(O_DIRECT)
477 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
479 #if defined(O_NOATIME)
480 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
482 #if defined(O_CLOEXEC)
483 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
486 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
488 #if defined(O_TMPFILE)
489 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
491 /* Don't terminate the list prematurely on 64-bit host+guest. */
492 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
493 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
498 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
500 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
501 #if defined(__NR_utimensat)
502 #define __NR_sys_utimensat __NR_utimensat
503 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
504 const struct timespec
*,tsp
,int,flags
)
506 static int sys_utimensat(int dirfd
, const char *pathname
,
507 const struct timespec times
[2], int flags
)
513 #endif /* TARGET_NR_utimensat */
515 #ifdef TARGET_NR_renameat2
516 #if defined(__NR_renameat2)
517 #define __NR_sys_renameat2 __NR_renameat2
518 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
519 const char *, new, unsigned int, flags
)
521 static int sys_renameat2(int oldfd
, const char *old
,
522 int newfd
, const char *new, int flags
)
525 return renameat(oldfd
, old
, newfd
, new);
531 #endif /* TARGET_NR_renameat2 */
533 #ifdef CONFIG_INOTIFY
534 #include <sys/inotify.h>
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
543 #if defined(TARGET_NR_prlimit64)
544 #ifndef __NR_prlimit64
545 # define __NR_prlimit64 -1
547 #define __NR_sys_prlimit64 __NR_prlimit64
548 /* The glibc rlimit structure may not be that used by the underlying syscall */
549 struct host_rlimit64
{
553 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
554 const struct host_rlimit64
*, new_limit
,
555 struct host_rlimit64
*, old_limit
)
559 #if defined(TARGET_NR_timer_create)
560 /* Maximum of 32 active POSIX timers allowed at any one time. */
561 #define GUEST_TIMER_MAX 32
562 static timer_t g_posix_timers
[GUEST_TIMER_MAX
];
563 static int g_posix_timer_allocated
[GUEST_TIMER_MAX
];
565 static inline int next_free_host_timer(void)
568 for (k
= 0; k
< ARRAY_SIZE(g_posix_timer_allocated
); k
++) {
569 if (qatomic_xchg(g_posix_timer_allocated
+ k
, 1) == 0) {
576 static inline void free_host_timer_slot(int id
)
578 qatomic_store_release(g_posix_timer_allocated
+ id
, 0);
582 static inline int host_to_target_errno(int host_errno
)
584 switch (host_errno
) {
585 #define E(X) case X: return TARGET_##X;
586 #include "errnos.c.inc"
593 static inline int target_to_host_errno(int target_errno
)
595 switch (target_errno
) {
596 #define E(X) case TARGET_##X: return X;
597 #include "errnos.c.inc"
604 abi_long
get_errno(abi_long ret
)
607 return -host_to_target_errno(errno
);
612 const char *target_strerror(int err
)
614 if (err
== QEMU_ERESTARTSYS
) {
615 return "To be restarted";
617 if (err
== QEMU_ESIGRETURN
) {
618 return "Successful exit from sigreturn";
621 return strerror(target_to_host_errno(err
));
624 static int check_zeroed_user(abi_long addr
, size_t ksize
, size_t usize
)
628 if (usize
<= ksize
) {
631 for (i
= ksize
; i
< usize
; i
++) {
632 if (get_user_u8(b
, addr
+ i
)) {
633 return -TARGET_EFAULT
;
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
645 return safe_syscall(__NR_##name); \
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
651 return safe_syscall(__NR_##name, arg1); \
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
657 return safe_syscall(__NR_##name, arg1, arg2); \
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
663 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
670 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674 type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
678 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682 type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684 type5 arg5, type6 arg6) \
686 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
689 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
690 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
691 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
692 int, flags
, mode_t
, mode
)
693 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
694 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
695 struct rusage
*, rusage
)
697 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
698 int, options
, struct rusage
*, rusage
)
699 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
700 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
701 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
702 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
703 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
705 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
706 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
707 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
710 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
711 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
713 #if defined(__NR_futex)
714 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
715 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
717 #if defined(__NR_futex_time64)
718 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
719 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
721 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
722 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
723 safe_syscall2(int, tkill
, int, tid
, int, sig
)
724 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
725 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
726 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
727 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
728 unsigned long, pos_l
, unsigned long, pos_h
)
729 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
730 unsigned long, pos_l
, unsigned long, pos_h
)
731 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
733 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
734 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
735 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
736 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
737 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
738 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
739 safe_syscall2(int, flock
, int, fd
, int, operation
)
740 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
741 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
742 const struct timespec
*, uts
, size_t, sigsetsize
)
744 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
746 #if defined(TARGET_NR_nanosleep)
747 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
748 struct timespec
*, rem
)
750 #if defined(TARGET_NR_clock_nanosleep) || \
751 defined(TARGET_NR_clock_nanosleep_time64)
752 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
753 const struct timespec
*, req
, struct timespec
*, rem
)
757 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
760 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
761 void *, ptr
, long, fifth
)
765 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
769 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
770 long, msgtype
, int, flags
)
772 #ifdef __NR_semtimedop
773 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
774 unsigned, nsops
, const struct timespec
*, timeout
)
776 #if defined(TARGET_NR_mq_timedsend) || \
777 defined(TARGET_NR_mq_timedsend_time64)
778 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
779 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
781 #if defined(TARGET_NR_mq_timedreceive) || \
782 defined(TARGET_NR_mq_timedreceive_time64)
783 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
784 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
786 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
787 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
788 int, outfd
, loff_t
*, poutoff
, size_t, length
,
792 /* We do ioctl like this rather than via safe_syscall3 to preserve the
793 * "third argument might be integer or pointer or not present" behaviour of
796 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
797 /* Similarly for fcntl. Note that callers must always:
798 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
799 * use the flock64 struct rather than unsuffixed flock
800 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
803 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
805 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
808 static inline int host_to_target_sock_type(int host_type
)
812 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
814 target_type
= TARGET_SOCK_DGRAM
;
817 target_type
= TARGET_SOCK_STREAM
;
820 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
824 #if defined(SOCK_CLOEXEC)
825 if (host_type
& SOCK_CLOEXEC
) {
826 target_type
|= TARGET_SOCK_CLOEXEC
;
830 #if defined(SOCK_NONBLOCK)
831 if (host_type
& SOCK_NONBLOCK
) {
832 target_type
|= TARGET_SOCK_NONBLOCK
;
839 static abi_ulong target_brk
;
840 static abi_ulong target_original_brk
;
841 static abi_ulong brk_page
;
843 void target_set_brk(abi_ulong new_brk
)
845 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
846 brk_page
= HOST_PAGE_ALIGN(target_brk
);
849 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
850 #define DEBUGF_BRK(message, args...)
852 /* do_brk() must return target values and target errnos. */
853 abi_long
do_brk(abi_ulong new_brk
)
855 abi_long mapped_addr
;
856 abi_ulong new_alloc_size
;
858 /* brk pointers are always untagged */
860 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
863 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
866 if (new_brk
< target_original_brk
) {
867 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
872 /* If the new brk is less than the highest page reserved to the
873 * target heap allocation, set it and we're almost done... */
874 if (new_brk
<= brk_page
) {
875 /* Heap contents are initialized to zero, as for anonymous
877 if (new_brk
> target_brk
) {
878 memset(g2h_untagged(target_brk
), 0, new_brk
- target_brk
);
880 target_brk
= new_brk
;
881 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
885 /* We need to allocate more memory after the brk... Note that
886 * we don't use MAP_FIXED because that will map over the top of
887 * any existing mapping (like the one with the host libc or qemu
888 * itself); instead we treat "mapped but at wrong address" as
889 * a failure and unmap again.
891 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
892 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
893 PROT_READ
|PROT_WRITE
,
894 MAP_ANON
|MAP_PRIVATE
, 0, 0));
896 if (mapped_addr
== brk_page
) {
897 /* Heap contents are initialized to zero, as for anonymous
898 * mapped pages. Technically the new pages are already
899 * initialized to zero since they *are* anonymous mapped
900 * pages, however we have to take care with the contents that
901 * come from the remaining part of the previous page: it may
902 * contains garbage data due to a previous heap usage (grown
904 memset(g2h_untagged(target_brk
), 0, brk_page
- target_brk
);
906 target_brk
= new_brk
;
907 brk_page
= HOST_PAGE_ALIGN(target_brk
);
908 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
911 } else if (mapped_addr
!= -1) {
912 /* Mapped but at wrong address, meaning there wasn't actually
913 * enough space for this brk.
915 target_munmap(mapped_addr
, new_alloc_size
);
917 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
920 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
923 #if defined(TARGET_ALPHA)
924 /* We (partially) emulate OSF/1 on Alpha, which requires we
925 return a proper errno, not an unchanged brk value. */
926 return -TARGET_ENOMEM
;
928 /* For everything else, return the previous break. */
932 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
933 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
934 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
935 abi_ulong target_fds_addr
,
939 abi_ulong b
, *target_fds
;
941 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
942 if (!(target_fds
= lock_user(VERIFY_READ
,
944 sizeof(abi_ulong
) * nw
,
946 return -TARGET_EFAULT
;
950 for (i
= 0; i
< nw
; i
++) {
951 /* grab the abi_ulong */
952 __get_user(b
, &target_fds
[i
]);
953 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
954 /* check the bit inside the abi_ulong */
961 unlock_user(target_fds
, target_fds_addr
, 0);
966 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
967 abi_ulong target_fds_addr
,
970 if (target_fds_addr
) {
971 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
972 return -TARGET_EFAULT
;
980 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
986 abi_ulong
*target_fds
;
988 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
989 if (!(target_fds
= lock_user(VERIFY_WRITE
,
991 sizeof(abi_ulong
) * nw
,
993 return -TARGET_EFAULT
;
996 for (i
= 0; i
< nw
; i
++) {
998 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
999 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1002 __put_user(v
, &target_fds
[i
]);
1005 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1011 #if defined(__alpha__)
1012 #define HOST_HZ 1024
1017 static inline abi_long
host_to_target_clock_t(long ticks
)
1019 #if HOST_HZ == TARGET_HZ
1022 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1026 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1027 const struct rusage
*rusage
)
1029 struct target_rusage
*target_rusage
;
1031 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1032 return -TARGET_EFAULT
;
1033 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1034 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1035 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1036 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1037 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1038 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1039 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1040 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1041 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1042 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1043 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1044 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1045 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1046 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1047 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1048 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1049 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1050 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1051 unlock_user_struct(target_rusage
, target_addr
, 1);
#ifdef TARGET_NR_setrlimit
/*
 * Convert a guest rlimit value to the host rlim_t.
 * The target's RLIM_INFINITY maps to the host's; a value that does not
 * survive the round-trip into rlim_t (i.e. overflows it) is also treated
 * as infinity, matching kernel behaviour for oversized limits.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to the guest representation (byte-swapped).
 * Host infinity, and any value too large for the target's abi_long,
 * becomes TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
1090 static inline int target_to_host_resource(int code
)
1093 case TARGET_RLIMIT_AS
:
1095 case TARGET_RLIMIT_CORE
:
1097 case TARGET_RLIMIT_CPU
:
1099 case TARGET_RLIMIT_DATA
:
1101 case TARGET_RLIMIT_FSIZE
:
1102 return RLIMIT_FSIZE
;
1103 case TARGET_RLIMIT_LOCKS
:
1104 return RLIMIT_LOCKS
;
1105 case TARGET_RLIMIT_MEMLOCK
:
1106 return RLIMIT_MEMLOCK
;
1107 case TARGET_RLIMIT_MSGQUEUE
:
1108 return RLIMIT_MSGQUEUE
;
1109 case TARGET_RLIMIT_NICE
:
1111 case TARGET_RLIMIT_NOFILE
:
1112 return RLIMIT_NOFILE
;
1113 case TARGET_RLIMIT_NPROC
:
1114 return RLIMIT_NPROC
;
1115 case TARGET_RLIMIT_RSS
:
1117 case TARGET_RLIMIT_RTPRIO
:
1118 return RLIMIT_RTPRIO
;
1119 #ifdef RLIMIT_RTTIME
1120 case TARGET_RLIMIT_RTTIME
:
1121 return RLIMIT_RTTIME
;
1123 case TARGET_RLIMIT_SIGPENDING
:
1124 return RLIMIT_SIGPENDING
;
1125 case TARGET_RLIMIT_STACK
:
1126 return RLIMIT_STACK
;
1132 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1133 abi_ulong target_tv_addr
)
1135 struct target_timeval
*target_tv
;
1137 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1138 return -TARGET_EFAULT
;
1141 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1142 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1144 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1149 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1150 const struct timeval
*tv
)
1152 struct target_timeval
*target_tv
;
1154 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1155 return -TARGET_EFAULT
;
1158 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1159 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1161 unlock_user_struct(target_tv
, target_tv_addr
, 1);
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a 64-bit target__kernel_sock_timeval from guest memory into a host
 * struct timeval. Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif
1185 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1186 const struct timeval
*tv
)
1188 struct target__kernel_sock_timeval
*target_tv
;
1190 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1191 return -TARGET_EFAULT
;
1194 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1195 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1197 unlock_user_struct(target_tv
, target_tv_addr
, 1);
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a (32-bit layout) target struct timespec from guest memory into a
 * host struct timespec. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a 64-bit target__kernel_timespec from guest memory into a host
 * struct timespec. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1254 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1255 struct timespec
*host_ts
)
1257 struct target_timespec
*target_ts
;
1259 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1260 return -TARGET_EFAULT
;
1262 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1263 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1264 unlock_user_struct(target_ts
, target_addr
, 1);
1268 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1269 struct timespec
*host_ts
)
1271 struct target__kernel_timespec
*target_ts
;
1273 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1274 return -TARGET_EFAULT
;
1276 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1277 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1278 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_gettimeofday)
/*
 * Write a host struct timezone to guest memory as a target struct timezone.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
/*
 * Read a target struct timezone from guest memory into a host struct
 * timezone. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/*
 * Read a target struct mq_attr from guest memory into a host struct mq_attr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/*
 * Write a host struct mq_attr to guest memory as a target struct mq_attr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select() takes a timeval; convert it to the timespec that
     * safe_pselect6 expects. A NULL address means "wait forever". */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the remaining timeout, as Linux select() does. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Handle the old-style select syscall, where all five arguments are packed
 * into a single guest struct whose address is arg1. Unpacks the struct and
 * forwards to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6/pselect6_time64. time64 selects the 64-bit timespec
 * layout. Returns target errnos.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/*
 * Implement poll/ppoll/ppoll_time64. 'ppoll' selects ppoll semantics
 * (timespec + sigmask); 'time64' selects the 64-bit timespec layout.
 * Returns target errnos.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll,
                         bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
1655 static abi_long
do_pipe(CPUArchState
*cpu_env
, abi_ulong pipedes
,
1656 int flags
, int is_pipe2
)
1660 ret
= pipe2(host_pipe
, flags
);
1663 return get_errno(ret
);
1665 /* Several targets have special calling conventions for the original
1666 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1668 #if defined(TARGET_ALPHA)
1669 cpu_env
->ir
[IR_A4
] = host_pipe
[1];
1670 return host_pipe
[0];
1671 #elif defined(TARGET_MIPS)
1672 cpu_env
->active_tc
.gpr
[3] = host_pipe
[1];
1673 return host_pipe
[0];
1674 #elif defined(TARGET_SH4)
1675 cpu_env
->gregs
[1] = host_pipe
[1];
1676 return host_pipe
[0];
1677 #elif defined(TARGET_SPARC)
1678 cpu_env
->regwptr
[1] = host_pipe
[1];
1679 return host_pipe
[0];
1683 if (put_user_s32(host_pipe
[0], pipedes
)
1684 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(abi_int
)))
1685 return -TARGET_EFAULT
;
1686 return get_errno(ret
);
1689 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1690 abi_ulong target_addr
,
1693 struct target_ip_mreqn
*target_smreqn
;
1695 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1697 return -TARGET_EFAULT
;
1698 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1699 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1700 if (len
== sizeof(struct target_ip_mreqn
))
1701 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1702 unlock_user(target_smreqn
, target_addr
, 0);
1707 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1708 abi_ulong target_addr
,
1711 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1712 sa_family_t sa_family
;
1713 struct target_sockaddr
*target_saddr
;
1715 if (fd_trans_target_to_host_addr(fd
)) {
1716 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1719 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1721 return -TARGET_EFAULT
;
1723 sa_family
= tswap16(target_saddr
->sa_family
);
1725 /* Oops. The caller might send a incomplete sun_path; sun_path
1726 * must be terminated by \0 (see the manual page), but
1727 * unfortunately it is quite common to specify sockaddr_un
1728 * length as "strlen(x->sun_path)" while it should be
1729 * "strlen(...) + 1". We'll fix that here if needed.
1730 * Linux kernel has a similar feature.
1733 if (sa_family
== AF_UNIX
) {
1734 if (len
< unix_maxlen
&& len
> 0) {
1735 char *cp
= (char*)target_saddr
;
1737 if ( cp
[len
-1] && !cp
[len
] )
1740 if (len
> unix_maxlen
)
1744 memcpy(addr
, target_saddr
, len
);
1745 addr
->sa_family
= sa_family
;
1746 if (sa_family
== AF_NETLINK
) {
1747 struct sockaddr_nl
*nladdr
;
1749 nladdr
= (struct sockaddr_nl
*)addr
;
1750 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1751 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1752 } else if (sa_family
== AF_PACKET
) {
1753 struct target_sockaddr_ll
*lladdr
;
1755 lladdr
= (struct target_sockaddr_ll
*)addr
;
1756 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1757 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1759 unlock_user(target_saddr
, target_addr
, 0);
1764 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1765 struct sockaddr
*addr
,
1768 struct target_sockaddr
*target_saddr
;
1775 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1777 return -TARGET_EFAULT
;
1778 memcpy(target_saddr
, addr
, len
);
1779 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1780 sizeof(target_saddr
->sa_family
)) {
1781 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1783 if (addr
->sa_family
== AF_NETLINK
&&
1784 len
>= sizeof(struct target_sockaddr_nl
)) {
1785 struct target_sockaddr_nl
*target_nl
=
1786 (struct target_sockaddr_nl
*)target_saddr
;
1787 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1788 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1789 } else if (addr
->sa_family
== AF_PACKET
) {
1790 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1791 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1792 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1793 } else if (addr
->sa_family
== AF_INET6
&&
1794 len
>= sizeof(struct target_sockaddr_in6
)) {
1795 struct target_sockaddr_in6
*target_in6
=
1796 (struct target_sockaddr_in6
*)target_saddr
;
1797 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1799 unlock_user(target_saddr
, target_addr
, len
);
1804 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1805 struct target_msghdr
*target_msgh
)
1807 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1808 abi_long msg_controllen
;
1809 abi_ulong target_cmsg_addr
;
1810 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1811 socklen_t space
= 0;
1813 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1814 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1816 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1817 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1818 target_cmsg_start
= target_cmsg
;
1820 return -TARGET_EFAULT
;
1822 while (cmsg
&& target_cmsg
) {
1823 void *data
= CMSG_DATA(cmsg
);
1824 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1826 int len
= tswapal(target_cmsg
->cmsg_len
)
1827 - sizeof(struct target_cmsghdr
);
1829 space
+= CMSG_SPACE(len
);
1830 if (space
> msgh
->msg_controllen
) {
1831 space
-= CMSG_SPACE(len
);
1832 /* This is a QEMU bug, since we allocated the payload
1833 * area ourselves (unlike overflow in host-to-target
1834 * conversion, which is just the guest giving us a buffer
1835 * that's too small). It can't happen for the payload types
1836 * we currently support; if it becomes an issue in future
1837 * we would need to improve our allocation strategy to
1838 * something more intelligent than "twice the size of the
1839 * target buffer we're reading from".
1841 qemu_log_mask(LOG_UNIMP
,
1842 ("Unsupported ancillary data %d/%d: "
1843 "unhandled msg size\n"),
1844 tswap32(target_cmsg
->cmsg_level
),
1845 tswap32(target_cmsg
->cmsg_type
));
1849 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1850 cmsg
->cmsg_level
= SOL_SOCKET
;
1852 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1854 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1855 cmsg
->cmsg_len
= CMSG_LEN(len
);
1857 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1858 int *fd
= (int *)data
;
1859 int *target_fd
= (int *)target_data
;
1860 int i
, numfds
= len
/ sizeof(int);
1862 for (i
= 0; i
< numfds
; i
++) {
1863 __get_user(fd
[i
], target_fd
+ i
);
1865 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1866 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1867 struct ucred
*cred
= (struct ucred
*)data
;
1868 struct target_ucred
*target_cred
=
1869 (struct target_ucred
*)target_data
;
1871 __get_user(cred
->pid
, &target_cred
->pid
);
1872 __get_user(cred
->uid
, &target_cred
->uid
);
1873 __get_user(cred
->gid
, &target_cred
->gid
);
1875 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1876 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1877 memcpy(data
, target_data
, len
);
1880 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1881 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1884 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1886 msgh
->msg_controllen
= space
;
1890 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1891 struct msghdr
*msgh
)
1893 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1894 abi_long msg_controllen
;
1895 abi_ulong target_cmsg_addr
;
1896 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1897 socklen_t space
= 0;
1899 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1900 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1902 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1903 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1904 target_cmsg_start
= target_cmsg
;
1906 return -TARGET_EFAULT
;
1908 while (cmsg
&& target_cmsg
) {
1909 void *data
= CMSG_DATA(cmsg
);
1910 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1912 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1913 int tgt_len
, tgt_space
;
1915 /* We never copy a half-header but may copy half-data;
1916 * this is Linux's behaviour in put_cmsg(). Note that
1917 * truncation here is a guest problem (which we report
1918 * to the guest via the CTRUNC bit), unlike truncation
1919 * in target_to_host_cmsg, which is a QEMU bug.
1921 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1922 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1926 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1927 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1929 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1931 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1933 /* Payload types which need a different size of payload on
1934 * the target must adjust tgt_len here.
1937 switch (cmsg
->cmsg_level
) {
1939 switch (cmsg
->cmsg_type
) {
1941 tgt_len
= sizeof(struct target_timeval
);
1951 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1952 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1953 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1956 /* We must now copy-and-convert len bytes of payload
1957 * into tgt_len bytes of destination space. Bear in mind
1958 * that in both source and destination we may be dealing
1959 * with a truncated value!
1961 switch (cmsg
->cmsg_level
) {
1963 switch (cmsg
->cmsg_type
) {
1966 int *fd
= (int *)data
;
1967 int *target_fd
= (int *)target_data
;
1968 int i
, numfds
= tgt_len
/ sizeof(int);
1970 for (i
= 0; i
< numfds
; i
++) {
1971 __put_user(fd
[i
], target_fd
+ i
);
1977 struct timeval
*tv
= (struct timeval
*)data
;
1978 struct target_timeval
*target_tv
=
1979 (struct target_timeval
*)target_data
;
1981 if (len
!= sizeof(struct timeval
) ||
1982 tgt_len
!= sizeof(struct target_timeval
)) {
1986 /* copy struct timeval to target */
1987 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1988 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1991 case SCM_CREDENTIALS
:
1993 struct ucred
*cred
= (struct ucred
*)data
;
1994 struct target_ucred
*target_cred
=
1995 (struct target_ucred
*)target_data
;
1997 __put_user(cred
->pid
, &target_cred
->pid
);
1998 __put_user(cred
->uid
, &target_cred
->uid
);
1999 __put_user(cred
->gid
, &target_cred
->gid
);
2008 switch (cmsg
->cmsg_type
) {
2011 uint32_t *v
= (uint32_t *)data
;
2012 uint32_t *t_int
= (uint32_t *)target_data
;
2014 if (len
!= sizeof(uint32_t) ||
2015 tgt_len
!= sizeof(uint32_t)) {
2018 __put_user(*v
, t_int
);
2024 struct sock_extended_err ee
;
2025 struct sockaddr_in offender
;
2027 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
2028 struct errhdr_t
*target_errh
=
2029 (struct errhdr_t
*)target_data
;
2031 if (len
!= sizeof(struct errhdr_t
) ||
2032 tgt_len
!= sizeof(struct errhdr_t
)) {
2035 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2036 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2037 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2038 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2039 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2040 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2041 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2042 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2043 (void *) &errh
->offender
, sizeof(errh
->offender
));
2052 switch (cmsg
->cmsg_type
) {
2055 uint32_t *v
= (uint32_t *)data
;
2056 uint32_t *t_int
= (uint32_t *)target_data
;
2058 if (len
!= sizeof(uint32_t) ||
2059 tgt_len
!= sizeof(uint32_t)) {
2062 __put_user(*v
, t_int
);
2068 struct sock_extended_err ee
;
2069 struct sockaddr_in6 offender
;
2071 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2072 struct errhdr6_t
*target_errh
=
2073 (struct errhdr6_t
*)target_data
;
2075 if (len
!= sizeof(struct errhdr6_t
) ||
2076 tgt_len
!= sizeof(struct errhdr6_t
)) {
2079 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2080 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2081 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2082 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2083 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2084 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2085 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2086 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2087 (void *) &errh
->offender
, sizeof(errh
->offender
));
2097 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2098 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2099 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2100 if (tgt_len
> len
) {
2101 memset(target_data
+ len
, 0, tgt_len
- len
);
2105 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2106 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2107 if (msg_controllen
< tgt_space
) {
2108 tgt_space
= msg_controllen
;
2110 msg_controllen
-= tgt_space
;
2112 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2113 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2116 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2118 target_msgh
->msg_controllen
= tswapal(space
);
2122 /* do_setsockopt() Must return target values and target errnos. */
2123 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2124 abi_ulong optval_addr
, socklen_t optlen
)
2128 struct ip_mreqn
*ip_mreq
;
2129 struct ip_mreq_source
*ip_mreq_source
;
2134 /* TCP and UDP options all take an 'int' value. */
2135 if (optlen
< sizeof(uint32_t))
2136 return -TARGET_EINVAL
;
2138 if (get_user_u32(val
, optval_addr
))
2139 return -TARGET_EFAULT
;
2140 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2147 case IP_ROUTER_ALERT
:
2151 case IP_MTU_DISCOVER
:
2158 case IP_MULTICAST_TTL
:
2159 case IP_MULTICAST_LOOP
:
2161 if (optlen
>= sizeof(uint32_t)) {
2162 if (get_user_u32(val
, optval_addr
))
2163 return -TARGET_EFAULT
;
2164 } else if (optlen
>= 1) {
2165 if (get_user_u8(val
, optval_addr
))
2166 return -TARGET_EFAULT
;
2168 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2170 case IP_ADD_MEMBERSHIP
:
2171 case IP_DROP_MEMBERSHIP
:
2172 if (optlen
< sizeof (struct target_ip_mreq
) ||
2173 optlen
> sizeof (struct target_ip_mreqn
))
2174 return -TARGET_EINVAL
;
2176 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2177 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2178 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2181 case IP_BLOCK_SOURCE
:
2182 case IP_UNBLOCK_SOURCE
:
2183 case IP_ADD_SOURCE_MEMBERSHIP
:
2184 case IP_DROP_SOURCE_MEMBERSHIP
:
2185 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2186 return -TARGET_EINVAL
;
2188 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2189 if (!ip_mreq_source
) {
2190 return -TARGET_EFAULT
;
2192 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2193 unlock_user (ip_mreq_source
, optval_addr
, 0);
2202 case IPV6_MTU_DISCOVER
:
2205 case IPV6_RECVPKTINFO
:
2206 case IPV6_UNICAST_HOPS
:
2207 case IPV6_MULTICAST_HOPS
:
2208 case IPV6_MULTICAST_LOOP
:
2210 case IPV6_RECVHOPLIMIT
:
2211 case IPV6_2292HOPLIMIT
:
2214 case IPV6_2292PKTINFO
:
2215 case IPV6_RECVTCLASS
:
2216 case IPV6_RECVRTHDR
:
2217 case IPV6_2292RTHDR
:
2218 case IPV6_RECVHOPOPTS
:
2219 case IPV6_2292HOPOPTS
:
2220 case IPV6_RECVDSTOPTS
:
2221 case IPV6_2292DSTOPTS
:
2223 case IPV6_ADDR_PREFERENCES
:
2224 #ifdef IPV6_RECVPATHMTU
2225 case IPV6_RECVPATHMTU
:
2227 #ifdef IPV6_TRANSPARENT
2228 case IPV6_TRANSPARENT
:
2230 #ifdef IPV6_FREEBIND
2233 #ifdef IPV6_RECVORIGDSTADDR
2234 case IPV6_RECVORIGDSTADDR
:
2237 if (optlen
< sizeof(uint32_t)) {
2238 return -TARGET_EINVAL
;
2240 if (get_user_u32(val
, optval_addr
)) {
2241 return -TARGET_EFAULT
;
2243 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2244 &val
, sizeof(val
)));
2248 struct in6_pktinfo pki
;
2250 if (optlen
< sizeof(pki
)) {
2251 return -TARGET_EINVAL
;
2254 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2255 return -TARGET_EFAULT
;
2258 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2260 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2261 &pki
, sizeof(pki
)));
2264 case IPV6_ADD_MEMBERSHIP
:
2265 case IPV6_DROP_MEMBERSHIP
:
2267 struct ipv6_mreq ipv6mreq
;
2269 if (optlen
< sizeof(ipv6mreq
)) {
2270 return -TARGET_EINVAL
;
2273 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2274 return -TARGET_EFAULT
;
2277 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2279 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2280 &ipv6mreq
, sizeof(ipv6mreq
)));
2291 struct icmp6_filter icmp6f
;
2293 if (optlen
> sizeof(icmp6f
)) {
2294 optlen
= sizeof(icmp6f
);
2297 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2298 return -TARGET_EFAULT
;
2301 for (val
= 0; val
< 8; val
++) {
2302 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2305 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2317 /* those take an u32 value */
2318 if (optlen
< sizeof(uint32_t)) {
2319 return -TARGET_EINVAL
;
2322 if (get_user_u32(val
, optval_addr
)) {
2323 return -TARGET_EFAULT
;
2325 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2326 &val
, sizeof(val
)));
2333 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2338 char *alg_key
= g_malloc(optlen
);
2341 return -TARGET_ENOMEM
;
2343 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2345 return -TARGET_EFAULT
;
2347 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2352 case ALG_SET_AEAD_AUTHSIZE
:
2354 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2363 case TARGET_SOL_SOCKET
:
2365 case TARGET_SO_RCVTIMEO
:
2369 optname
= SO_RCVTIMEO
;
2372 if (optlen
!= sizeof(struct target_timeval
)) {
2373 return -TARGET_EINVAL
;
2376 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2377 return -TARGET_EFAULT
;
2380 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2384 case TARGET_SO_SNDTIMEO
:
2385 optname
= SO_SNDTIMEO
;
2387 case TARGET_SO_ATTACH_FILTER
:
2389 struct target_sock_fprog
*tfprog
;
2390 struct target_sock_filter
*tfilter
;
2391 struct sock_fprog fprog
;
2392 struct sock_filter
*filter
;
2395 if (optlen
!= sizeof(*tfprog
)) {
2396 return -TARGET_EINVAL
;
2398 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2399 return -TARGET_EFAULT
;
2401 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2402 tswapal(tfprog
->filter
), 0)) {
2403 unlock_user_struct(tfprog
, optval_addr
, 1);
2404 return -TARGET_EFAULT
;
2407 fprog
.len
= tswap16(tfprog
->len
);
2408 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2409 if (filter
== NULL
) {
2410 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2411 unlock_user_struct(tfprog
, optval_addr
, 1);
2412 return -TARGET_ENOMEM
;
2414 for (i
= 0; i
< fprog
.len
; i
++) {
2415 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2416 filter
[i
].jt
= tfilter
[i
].jt
;
2417 filter
[i
].jf
= tfilter
[i
].jf
;
2418 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2420 fprog
.filter
= filter
;
2422 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2423 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2426 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2427 unlock_user_struct(tfprog
, optval_addr
, 1);
2430 case TARGET_SO_BINDTODEVICE
:
2432 char *dev_ifname
, *addr_ifname
;
2434 if (optlen
> IFNAMSIZ
- 1) {
2435 optlen
= IFNAMSIZ
- 1;
2437 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2439 return -TARGET_EFAULT
;
2441 optname
= SO_BINDTODEVICE
;
2442 addr_ifname
= alloca(IFNAMSIZ
);
2443 memcpy(addr_ifname
, dev_ifname
, optlen
);
2444 addr_ifname
[optlen
] = 0;
2445 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2446 addr_ifname
, optlen
));
2447 unlock_user (dev_ifname
, optval_addr
, 0);
2450 case TARGET_SO_LINGER
:
2453 struct target_linger
*tlg
;
2455 if (optlen
!= sizeof(struct target_linger
)) {
2456 return -TARGET_EINVAL
;
2458 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2459 return -TARGET_EFAULT
;
2461 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2462 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2463 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2465 unlock_user_struct(tlg
, optval_addr
, 0);
2468 /* Options with 'int' argument. */
2469 case TARGET_SO_DEBUG
:
2472 case TARGET_SO_REUSEADDR
:
2473 optname
= SO_REUSEADDR
;
2476 case TARGET_SO_REUSEPORT
:
2477 optname
= SO_REUSEPORT
;
2480 case TARGET_SO_TYPE
:
2483 case TARGET_SO_ERROR
:
2486 case TARGET_SO_DONTROUTE
:
2487 optname
= SO_DONTROUTE
;
2489 case TARGET_SO_BROADCAST
:
2490 optname
= SO_BROADCAST
;
2492 case TARGET_SO_SNDBUF
:
2493 optname
= SO_SNDBUF
;
2495 case TARGET_SO_SNDBUFFORCE
:
2496 optname
= SO_SNDBUFFORCE
;
2498 case TARGET_SO_RCVBUF
:
2499 optname
= SO_RCVBUF
;
2501 case TARGET_SO_RCVBUFFORCE
:
2502 optname
= SO_RCVBUFFORCE
;
2504 case TARGET_SO_KEEPALIVE
:
2505 optname
= SO_KEEPALIVE
;
2507 case TARGET_SO_OOBINLINE
:
2508 optname
= SO_OOBINLINE
;
2510 case TARGET_SO_NO_CHECK
:
2511 optname
= SO_NO_CHECK
;
2513 case TARGET_SO_PRIORITY
:
2514 optname
= SO_PRIORITY
;
2517 case TARGET_SO_BSDCOMPAT
:
2518 optname
= SO_BSDCOMPAT
;
2521 case TARGET_SO_PASSCRED
:
2522 optname
= SO_PASSCRED
;
2524 case TARGET_SO_PASSSEC
:
2525 optname
= SO_PASSSEC
;
2527 case TARGET_SO_TIMESTAMP
:
2528 optname
= SO_TIMESTAMP
;
2530 case TARGET_SO_RCVLOWAT
:
2531 optname
= SO_RCVLOWAT
;
2536 if (optlen
< sizeof(uint32_t))
2537 return -TARGET_EINVAL
;
2539 if (get_user_u32(val
, optval_addr
))
2540 return -TARGET_EFAULT
;
2541 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2546 case NETLINK_PKTINFO
:
2547 case NETLINK_ADD_MEMBERSHIP
:
2548 case NETLINK_DROP_MEMBERSHIP
:
2549 case NETLINK_BROADCAST_ERROR
:
2550 case NETLINK_NO_ENOBUFS
:
2551 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2552 case NETLINK_LISTEN_ALL_NSID
:
2553 case NETLINK_CAP_ACK
:
2554 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2555 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2556 case NETLINK_EXT_ACK
:
2557 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2558 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2559 case NETLINK_GET_STRICT_CHK
:
2560 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2566 if (optlen
< sizeof(uint32_t)) {
2567 return -TARGET_EINVAL
;
2569 if (get_user_u32(val
, optval_addr
)) {
2570 return -TARGET_EFAULT
;
2572 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2575 #endif /* SOL_NETLINK */
2578 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2580 ret
= -TARGET_ENOPROTOOPT
;
2585 /* do_getsockopt() Must return target values and target errnos. */
2586 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2587 abi_ulong optval_addr
, abi_ulong optlen
)
2594 case TARGET_SOL_SOCKET
:
2597 /* These don't just return a single integer */
2598 case TARGET_SO_PEERNAME
:
2600 case TARGET_SO_RCVTIMEO
: {
2604 optname
= SO_RCVTIMEO
;
2607 if (get_user_u32(len
, optlen
)) {
2608 return -TARGET_EFAULT
;
2611 return -TARGET_EINVAL
;
2615 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2620 if (len
> sizeof(struct target_timeval
)) {
2621 len
= sizeof(struct target_timeval
);
2623 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2624 return -TARGET_EFAULT
;
2626 if (put_user_u32(len
, optlen
)) {
2627 return -TARGET_EFAULT
;
2631 case TARGET_SO_SNDTIMEO
:
2632 optname
= SO_SNDTIMEO
;
2634 case TARGET_SO_PEERCRED
: {
2637 struct target_ucred
*tcr
;
2639 if (get_user_u32(len
, optlen
)) {
2640 return -TARGET_EFAULT
;
2643 return -TARGET_EINVAL
;
2647 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2655 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2656 return -TARGET_EFAULT
;
2658 __put_user(cr
.pid
, &tcr
->pid
);
2659 __put_user(cr
.uid
, &tcr
->uid
);
2660 __put_user(cr
.gid
, &tcr
->gid
);
2661 unlock_user_struct(tcr
, optval_addr
, 1);
2662 if (put_user_u32(len
, optlen
)) {
2663 return -TARGET_EFAULT
;
2667 case TARGET_SO_PEERSEC
: {
2670 if (get_user_u32(len
, optlen
)) {
2671 return -TARGET_EFAULT
;
2674 return -TARGET_EINVAL
;
2676 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2678 return -TARGET_EFAULT
;
2681 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2683 if (put_user_u32(lv
, optlen
)) {
2684 ret
= -TARGET_EFAULT
;
2686 unlock_user(name
, optval_addr
, lv
);
2689 case TARGET_SO_LINGER
:
2693 struct target_linger
*tlg
;
2695 if (get_user_u32(len
, optlen
)) {
2696 return -TARGET_EFAULT
;
2699 return -TARGET_EINVAL
;
2703 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2711 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2712 return -TARGET_EFAULT
;
2714 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2715 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2716 unlock_user_struct(tlg
, optval_addr
, 1);
2717 if (put_user_u32(len
, optlen
)) {
2718 return -TARGET_EFAULT
;
2722 /* Options with 'int' argument. */
2723 case TARGET_SO_DEBUG
:
2726 case TARGET_SO_REUSEADDR
:
2727 optname
= SO_REUSEADDR
;
2730 case TARGET_SO_REUSEPORT
:
2731 optname
= SO_REUSEPORT
;
2734 case TARGET_SO_TYPE
:
2737 case TARGET_SO_ERROR
:
2740 case TARGET_SO_DONTROUTE
:
2741 optname
= SO_DONTROUTE
;
2743 case TARGET_SO_BROADCAST
:
2744 optname
= SO_BROADCAST
;
2746 case TARGET_SO_SNDBUF
:
2747 optname
= SO_SNDBUF
;
2749 case TARGET_SO_RCVBUF
:
2750 optname
= SO_RCVBUF
;
2752 case TARGET_SO_KEEPALIVE
:
2753 optname
= SO_KEEPALIVE
;
2755 case TARGET_SO_OOBINLINE
:
2756 optname
= SO_OOBINLINE
;
2758 case TARGET_SO_NO_CHECK
:
2759 optname
= SO_NO_CHECK
;
2761 case TARGET_SO_PRIORITY
:
2762 optname
= SO_PRIORITY
;
2765 case TARGET_SO_BSDCOMPAT
:
2766 optname
= SO_BSDCOMPAT
;
2769 case TARGET_SO_PASSCRED
:
2770 optname
= SO_PASSCRED
;
2772 case TARGET_SO_TIMESTAMP
:
2773 optname
= SO_TIMESTAMP
;
2775 case TARGET_SO_RCVLOWAT
:
2776 optname
= SO_RCVLOWAT
;
2778 case TARGET_SO_ACCEPTCONN
:
2779 optname
= SO_ACCEPTCONN
;
2781 case TARGET_SO_PROTOCOL
:
2782 optname
= SO_PROTOCOL
;
2784 case TARGET_SO_DOMAIN
:
2785 optname
= SO_DOMAIN
;
2793 /* TCP and UDP options all take an 'int' value. */
2795 if (get_user_u32(len
, optlen
))
2796 return -TARGET_EFAULT
;
2798 return -TARGET_EINVAL
;
2800 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2803 if (optname
== SO_TYPE
) {
2804 val
= host_to_target_sock_type(val
);
2809 if (put_user_u32(val
, optval_addr
))
2810 return -TARGET_EFAULT
;
2812 if (put_user_u8(val
, optval_addr
))
2813 return -TARGET_EFAULT
;
2815 if (put_user_u32(len
, optlen
))
2816 return -TARGET_EFAULT
;
2823 case IP_ROUTER_ALERT
:
2827 case IP_MTU_DISCOVER
:
2833 case IP_MULTICAST_TTL
:
2834 case IP_MULTICAST_LOOP
:
2835 if (get_user_u32(len
, optlen
))
2836 return -TARGET_EFAULT
;
2838 return -TARGET_EINVAL
;
2840 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2843 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2845 if (put_user_u32(len
, optlen
)
2846 || put_user_u8(val
, optval_addr
))
2847 return -TARGET_EFAULT
;
2849 if (len
> sizeof(int))
2851 if (put_user_u32(len
, optlen
)
2852 || put_user_u32(val
, optval_addr
))
2853 return -TARGET_EFAULT
;
2857 ret
= -TARGET_ENOPROTOOPT
;
2863 case IPV6_MTU_DISCOVER
:
2866 case IPV6_RECVPKTINFO
:
2867 case IPV6_UNICAST_HOPS
:
2868 case IPV6_MULTICAST_HOPS
:
2869 case IPV6_MULTICAST_LOOP
:
2871 case IPV6_RECVHOPLIMIT
:
2872 case IPV6_2292HOPLIMIT
:
2875 case IPV6_2292PKTINFO
:
2876 case IPV6_RECVTCLASS
:
2877 case IPV6_RECVRTHDR
:
2878 case IPV6_2292RTHDR
:
2879 case IPV6_RECVHOPOPTS
:
2880 case IPV6_2292HOPOPTS
:
2881 case IPV6_RECVDSTOPTS
:
2882 case IPV6_2292DSTOPTS
:
2884 case IPV6_ADDR_PREFERENCES
:
2885 #ifdef IPV6_RECVPATHMTU
2886 case IPV6_RECVPATHMTU
:
2888 #ifdef IPV6_TRANSPARENT
2889 case IPV6_TRANSPARENT
:
2891 #ifdef IPV6_FREEBIND
2894 #ifdef IPV6_RECVORIGDSTADDR
2895 case IPV6_RECVORIGDSTADDR
:
2897 if (get_user_u32(len
, optlen
))
2898 return -TARGET_EFAULT
;
2900 return -TARGET_EINVAL
;
2902 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2905 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2907 if (put_user_u32(len
, optlen
)
2908 || put_user_u8(val
, optval_addr
))
2909 return -TARGET_EFAULT
;
2911 if (len
> sizeof(int))
2913 if (put_user_u32(len
, optlen
)
2914 || put_user_u32(val
, optval_addr
))
2915 return -TARGET_EFAULT
;
2919 ret
= -TARGET_ENOPROTOOPT
;
2926 case NETLINK_PKTINFO
:
2927 case NETLINK_BROADCAST_ERROR
:
2928 case NETLINK_NO_ENOBUFS
:
2929 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2930 case NETLINK_LISTEN_ALL_NSID
:
2931 case NETLINK_CAP_ACK
:
2932 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2933 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2934 case NETLINK_EXT_ACK
:
2935 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2936 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2937 case NETLINK_GET_STRICT_CHK
:
2938 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2939 if (get_user_u32(len
, optlen
)) {
2940 return -TARGET_EFAULT
;
2942 if (len
!= sizeof(val
)) {
2943 return -TARGET_EINVAL
;
2946 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2950 if (put_user_u32(lv
, optlen
)
2951 || put_user_u32(val
, optval_addr
)) {
2952 return -TARGET_EFAULT
;
2955 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2956 case NETLINK_LIST_MEMBERSHIPS
:
2960 if (get_user_u32(len
, optlen
)) {
2961 return -TARGET_EFAULT
;
2964 return -TARGET_EINVAL
;
2966 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2967 if (!results
&& len
> 0) {
2968 return -TARGET_EFAULT
;
2971 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2973 unlock_user(results
, optval_addr
, 0);
2976 /* swap host endianess to target endianess. */
2977 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2978 results
[i
] = tswap32(results
[i
]);
2980 if (put_user_u32(lv
, optlen
)) {
2981 return -TARGET_EFAULT
;
2983 unlock_user(results
, optval_addr
, 0);
2986 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2991 #endif /* SOL_NETLINK */
2994 qemu_log_mask(LOG_UNIMP
,
2995 "getsockopt level=%d optname=%d not yet supported\n",
2997 ret
= -TARGET_EOPNOTSUPP
;
3003 /* Convert target low/high pair representing file offset into the host
3004 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3005 * as the kernel doesn't handle them either.
3007 static void target_to_host_low_high(abi_ulong tlow
,
3009 unsigned long *hlow
,
3010 unsigned long *hhigh
)
3012 uint64_t off
= tlow
|
3013 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3014 TARGET_LONG_BITS
/ 2;
3017 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3020 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3021 abi_ulong count
, int copy
)
3023 struct target_iovec
*target_vec
;
3025 abi_ulong total_len
, max_len
;
3028 bool bad_address
= false;
3034 if (count
> IOV_MAX
) {
3039 vec
= g_try_new0(struct iovec
, count
);
3045 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3046 count
* sizeof(struct target_iovec
), 1);
3047 if (target_vec
== NULL
) {
3052 /* ??? If host page size > target page size, this will result in a
3053 value larger than what we can actually support. */
3054 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3057 for (i
= 0; i
< count
; i
++) {
3058 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3059 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3064 } else if (len
== 0) {
3065 /* Zero length pointer is ignored. */
3066 vec
[i
].iov_base
= 0;
3068 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3069 /* If the first buffer pointer is bad, this is a fault. But
3070 * subsequent bad buffers will result in a partial write; this
3071 * is realized by filling the vector with null pointers and
3073 if (!vec
[i
].iov_base
) {
3084 if (len
> max_len
- total_len
) {
3085 len
= max_len
- total_len
;
3088 vec
[i
].iov_len
= len
;
3092 unlock_user(target_vec
, target_addr
, 0);
3097 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3098 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3101 unlock_user(target_vec
, target_addr
, 0);
3108 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3109 abi_ulong count
, int copy
)
3111 struct target_iovec
*target_vec
;
3114 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3115 count
* sizeof(struct target_iovec
), 1);
3117 for (i
= 0; i
< count
; i
++) {
3118 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3119 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3123 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3125 unlock_user(target_vec
, target_addr
, 0);
3131 static inline int target_to_host_sock_type(int *type
)
3134 int target_type
= *type
;
3136 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3137 case TARGET_SOCK_DGRAM
:
3138 host_type
= SOCK_DGRAM
;
3140 case TARGET_SOCK_STREAM
:
3141 host_type
= SOCK_STREAM
;
3144 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3147 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3148 #if defined(SOCK_CLOEXEC)
3149 host_type
|= SOCK_CLOEXEC
;
3151 return -TARGET_EINVAL
;
3154 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3155 #if defined(SOCK_NONBLOCK)
3156 host_type
|= SOCK_NONBLOCK
;
3157 #elif !defined(O_NONBLOCK)
3158 return -TARGET_EINVAL
;
3165 /* Try to emulate socket type flags after socket creation. */
3166 static int sock_flags_fixup(int fd
, int target_type
)
3168 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3169 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3170 int flags
= fcntl(fd
, F_GETFL
);
3171 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3173 return -TARGET_EINVAL
;
3180 /* do_socket() Must return target values and target errnos. */
3181 static abi_long
do_socket(int domain
, int type
, int protocol
)
3183 int target_type
= type
;
3186 ret
= target_to_host_sock_type(&type
);
3191 if (domain
== PF_NETLINK
&& !(
3192 #ifdef CONFIG_RTNETLINK
3193 protocol
== NETLINK_ROUTE
||
3195 protocol
== NETLINK_KOBJECT_UEVENT
||
3196 protocol
== NETLINK_AUDIT
)) {
3197 return -TARGET_EPROTONOSUPPORT
;
3200 if (domain
== AF_PACKET
||
3201 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3202 protocol
= tswap16(protocol
);
3205 ret
= get_errno(socket(domain
, type
, protocol
));
3207 ret
= sock_flags_fixup(ret
, target_type
);
3208 if (type
== SOCK_PACKET
) {
3209 /* Manage an obsolete case :
3210 * if socket type is SOCK_PACKET, bind by name
3212 fd_trans_register(ret
, &target_packet_trans
);
3213 } else if (domain
== PF_NETLINK
) {
3215 #ifdef CONFIG_RTNETLINK
3217 fd_trans_register(ret
, &target_netlink_route_trans
);
3220 case NETLINK_KOBJECT_UEVENT
:
3221 /* nothing to do: messages are strings */
3224 fd_trans_register(ret
, &target_netlink_audit_trans
);
3227 g_assert_not_reached();
3234 /* do_bind() Must return target values and target errnos. */
3235 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3241 if ((int)addrlen
< 0) {
3242 return -TARGET_EINVAL
;
3245 addr
= alloca(addrlen
+1);
3247 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3251 return get_errno(bind(sockfd
, addr
, addrlen
));
3254 /* do_connect() Must return target values and target errnos. */
3255 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3261 if ((int)addrlen
< 0) {
3262 return -TARGET_EINVAL
;
3265 addr
= alloca(addrlen
+1);
3267 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3271 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3274 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3275 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3276 int flags
, int send
)
3282 abi_ulong target_vec
;
3284 if (msgp
->msg_name
) {
3285 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3286 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3287 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3288 tswapal(msgp
->msg_name
),
3290 if (ret
== -TARGET_EFAULT
) {
3291 /* For connected sockets msg_name and msg_namelen must
3292 * be ignored, so returning EFAULT immediately is wrong.
3293 * Instead, pass a bad msg_name to the host kernel, and
3294 * let it decide whether to return EFAULT or not.
3296 msg
.msg_name
= (void *)-1;
3301 msg
.msg_name
= NULL
;
3302 msg
.msg_namelen
= 0;
3304 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3305 msg
.msg_control
= alloca(msg
.msg_controllen
);
3306 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3308 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3310 count
= tswapal(msgp
->msg_iovlen
);
3311 target_vec
= tswapal(msgp
->msg_iov
);
3313 if (count
> IOV_MAX
) {
3314 /* sendrcvmsg returns a different errno for this condition than
3315 * readv/writev, so we must catch it here before lock_iovec() does.
3317 ret
= -TARGET_EMSGSIZE
;
3321 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3322 target_vec
, count
, send
);
3324 ret
= -host_to_target_errno(errno
);
3327 msg
.msg_iovlen
= count
;
3331 if (fd_trans_target_to_host_data(fd
)) {
3334 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3335 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3336 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3337 msg
.msg_iov
->iov_len
);
3339 msg
.msg_iov
->iov_base
= host_msg
;
3340 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3344 ret
= target_to_host_cmsg(&msg
, msgp
);
3346 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3350 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3351 if (!is_error(ret
)) {
3353 if (fd_trans_host_to_target_data(fd
)) {
3354 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3355 MIN(msg
.msg_iov
->iov_len
, len
));
3357 ret
= host_to_target_cmsg(msgp
, &msg
);
3359 if (!is_error(ret
)) {
3360 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3361 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3362 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3363 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3364 msg
.msg_name
, msg
.msg_namelen
);
3376 unlock_iovec(vec
, target_vec
, count
, !send
);
3381 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3382 int flags
, int send
)
3385 struct target_msghdr
*msgp
;
3387 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3391 return -TARGET_EFAULT
;
3393 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3394 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3398 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3399 * so it might not have this *mmsg-specific flag either.
3401 #ifndef MSG_WAITFORONE
3402 #define MSG_WAITFORONE 0x10000
3405 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3406 unsigned int vlen
, unsigned int flags
,
3409 struct target_mmsghdr
*mmsgp
;
3413 if (vlen
> UIO_MAXIOV
) {
3417 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3419 return -TARGET_EFAULT
;
3422 for (i
= 0; i
< vlen
; i
++) {
3423 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3424 if (is_error(ret
)) {
3427 mmsgp
[i
].msg_len
= tswap32(ret
);
3428 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3429 if (flags
& MSG_WAITFORONE
) {
3430 flags
|= MSG_DONTWAIT
;
3434 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3436 /* Return number of datagrams sent if we sent any at all;
3437 * otherwise return the error.
3445 /* do_accept4() Must return target values and target errnos. */
3446 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3447 abi_ulong target_addrlen_addr
, int flags
)
3449 socklen_t addrlen
, ret_addrlen
;
3454 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3456 if (target_addr
== 0) {
3457 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3460 /* linux returns EFAULT if addrlen pointer is invalid */
3461 if (get_user_u32(addrlen
, target_addrlen_addr
))
3462 return -TARGET_EFAULT
;
3464 if ((int)addrlen
< 0) {
3465 return -TARGET_EINVAL
;
3468 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3469 return -TARGET_EFAULT
;
3472 addr
= alloca(addrlen
);
3474 ret_addrlen
= addrlen
;
3475 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3476 if (!is_error(ret
)) {
3477 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3478 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3479 ret
= -TARGET_EFAULT
;
3485 /* do_getpeername() Must return target values and target errnos. */
3486 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3487 abi_ulong target_addrlen_addr
)
3489 socklen_t addrlen
, ret_addrlen
;
3493 if (get_user_u32(addrlen
, target_addrlen_addr
))
3494 return -TARGET_EFAULT
;
3496 if ((int)addrlen
< 0) {
3497 return -TARGET_EINVAL
;
3500 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3501 return -TARGET_EFAULT
;
3504 addr
= alloca(addrlen
);
3506 ret_addrlen
= addrlen
;
3507 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3508 if (!is_error(ret
)) {
3509 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3510 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3511 ret
= -TARGET_EFAULT
;
3517 /* do_getsockname() Must return target values and target errnos. */
3518 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3519 abi_ulong target_addrlen_addr
)
3521 socklen_t addrlen
, ret_addrlen
;
3525 if (get_user_u32(addrlen
, target_addrlen_addr
))
3526 return -TARGET_EFAULT
;
3528 if ((int)addrlen
< 0) {
3529 return -TARGET_EINVAL
;
3532 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3533 return -TARGET_EFAULT
;
3536 addr
= alloca(addrlen
);
3538 ret_addrlen
= addrlen
;
3539 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3540 if (!is_error(ret
)) {
3541 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3542 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3543 ret
= -TARGET_EFAULT
;
3549 /* do_socketpair() Must return target values and target errnos. */
3550 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3551 abi_ulong target_tab_addr
)
3556 target_to_host_sock_type(&type
);
3558 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3559 if (!is_error(ret
)) {
3560 if (put_user_s32(tab
[0], target_tab_addr
)
3561 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3562 ret
= -TARGET_EFAULT
;
3567 /* do_sendto() Must return target values and target errnos. */
3568 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3569 abi_ulong target_addr
, socklen_t addrlen
)
3573 void *copy_msg
= NULL
;
3576 if ((int)addrlen
< 0) {
3577 return -TARGET_EINVAL
;
3580 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3582 return -TARGET_EFAULT
;
3583 if (fd_trans_target_to_host_data(fd
)) {
3584 copy_msg
= host_msg
;
3585 host_msg
= g_malloc(len
);
3586 memcpy(host_msg
, copy_msg
, len
);
3587 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3593 addr
= alloca(addrlen
+1);
3594 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3598 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3600 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3605 host_msg
= copy_msg
;
3607 unlock_user(host_msg
, msg
, 0);
3611 /* do_recvfrom() Must return target values and target errnos. */
3612 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3613 abi_ulong target_addr
,
3614 abi_ulong target_addrlen
)
3616 socklen_t addrlen
, ret_addrlen
;
3624 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3626 return -TARGET_EFAULT
;
3630 if (get_user_u32(addrlen
, target_addrlen
)) {
3631 ret
= -TARGET_EFAULT
;
3634 if ((int)addrlen
< 0) {
3635 ret
= -TARGET_EINVAL
;
3638 addr
= alloca(addrlen
);
3639 ret_addrlen
= addrlen
;
3640 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3641 addr
, &ret_addrlen
));
3643 addr
= NULL
; /* To keep compiler quiet. */
3644 addrlen
= 0; /* To keep compiler quiet. */
3645 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3647 if (!is_error(ret
)) {
3648 if (fd_trans_host_to_target_data(fd
)) {
3650 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3651 if (is_error(trans
)) {
3657 host_to_target_sockaddr(target_addr
, addr
,
3658 MIN(addrlen
, ret_addrlen
));
3659 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3660 ret
= -TARGET_EFAULT
;
3664 unlock_user(host_msg
, msg
, len
);
3667 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3765 #define N_SHM_REGIONS 32
3767 static struct shm_region
{
3771 } shm_regions
[N_SHM_REGIONS
];
3773 #ifndef TARGET_SEMID64_DS
3774 /* asm-generic version of this struct */
3775 struct target_semid64_ds
3777 struct target_ipc_perm sem_perm
;
3778 abi_ulong sem_otime
;
3779 #if TARGET_ABI_BITS == 32
3780 abi_ulong __unused1
;
3782 abi_ulong sem_ctime
;
3783 #if TARGET_ABI_BITS == 32
3784 abi_ulong __unused2
;
3786 abi_ulong sem_nsems
;
3787 abi_ulong __unused3
;
3788 abi_ulong __unused4
;
3792 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3793 abi_ulong target_addr
)
3795 struct target_ipc_perm
*target_ip
;
3796 struct target_semid64_ds
*target_sd
;
3798 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3799 return -TARGET_EFAULT
;
3800 target_ip
= &(target_sd
->sem_perm
);
3801 host_ip
->__key
= tswap32(target_ip
->__key
);
3802 host_ip
->uid
= tswap32(target_ip
->uid
);
3803 host_ip
->gid
= tswap32(target_ip
->gid
);
3804 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3805 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3806 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3807 host_ip
->mode
= tswap32(target_ip
->mode
);
3809 host_ip
->mode
= tswap16(target_ip
->mode
);
3811 #if defined(TARGET_PPC)
3812 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3814 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3816 unlock_user_struct(target_sd
, target_addr
, 0);
3820 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3821 struct ipc_perm
*host_ip
)
3823 struct target_ipc_perm
*target_ip
;
3824 struct target_semid64_ds
*target_sd
;
3826 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3827 return -TARGET_EFAULT
;
3828 target_ip
= &(target_sd
->sem_perm
);
3829 target_ip
->__key
= tswap32(host_ip
->__key
);
3830 target_ip
->uid
= tswap32(host_ip
->uid
);
3831 target_ip
->gid
= tswap32(host_ip
->gid
);
3832 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3833 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3834 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3835 target_ip
->mode
= tswap32(host_ip
->mode
);
3837 target_ip
->mode
= tswap16(host_ip
->mode
);
3839 #if defined(TARGET_PPC)
3840 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3842 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3844 unlock_user_struct(target_sd
, target_addr
, 1);
3848 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3849 abi_ulong target_addr
)
3851 struct target_semid64_ds
*target_sd
;
3853 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3854 return -TARGET_EFAULT
;
3855 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3856 return -TARGET_EFAULT
;
3857 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3858 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3859 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3860 unlock_user_struct(target_sd
, target_addr
, 0);
3864 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3865 struct semid_ds
*host_sd
)
3867 struct target_semid64_ds
*target_sd
;
3869 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3870 return -TARGET_EFAULT
;
3871 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3872 return -TARGET_EFAULT
;
3873 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3874 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3875 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3876 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (IPC_INFO/SEM_INFO result).  Field set
 * is grounded in host_to_target_seminfo() below. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3893 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3894 struct seminfo
*host_seminfo
)
3896 struct target_seminfo
*target_seminfo
;
3897 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3898 return -TARGET_EFAULT
;
3899 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3900 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3901 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3902 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3903 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3904 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3905 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3906 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3907 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3908 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3909 unlock_user_struct(target_seminfo
, target_addr
, 1);
3915 struct semid_ds
*buf
;
3916 unsigned short *array
;
3917 struct seminfo
*__buf
;
3920 union target_semun
{
3927 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3928 abi_ulong target_addr
)
3931 unsigned short *array
;
3933 struct semid_ds semid_ds
;
3936 semun
.buf
= &semid_ds
;
3938 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3940 return get_errno(ret
);
3942 nsems
= semid_ds
.sem_nsems
;
3944 *host_array
= g_try_new(unsigned short, nsems
);
3946 return -TARGET_ENOMEM
;
3948 array
= lock_user(VERIFY_READ
, target_addr
,
3949 nsems
*sizeof(unsigned short), 1);
3951 g_free(*host_array
);
3952 return -TARGET_EFAULT
;
3955 for(i
=0; i
<nsems
; i
++) {
3956 __get_user((*host_array
)[i
], &array
[i
]);
3958 unlock_user(array
, target_addr
, 0);
3963 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3964 unsigned short **host_array
)
3967 unsigned short *array
;
3969 struct semid_ds semid_ds
;
3972 semun
.buf
= &semid_ds
;
3974 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3976 return get_errno(ret
);
3978 nsems
= semid_ds
.sem_nsems
;
3980 array
= lock_user(VERIFY_WRITE
, target_addr
,
3981 nsems
*sizeof(unsigned short), 0);
3983 return -TARGET_EFAULT
;
3985 for(i
=0; i
<nsems
; i
++) {
3986 __put_user((*host_array
)[i
], &array
[i
]);
3988 g_free(*host_array
);
3989 unlock_user(array
, target_addr
, 1);
3994 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3995 abi_ulong target_arg
)
3997 union target_semun target_su
= { .buf
= target_arg
};
3999 struct semid_ds dsarg
;
4000 unsigned short *array
= NULL
;
4001 struct seminfo seminfo
;
4002 abi_long ret
= -TARGET_EINVAL
;
4009 /* In 64 bit cross-endian situations, we will erroneously pick up
4010 * the wrong half of the union for the "val" element. To rectify
4011 * this, the entire 8-byte structure is byteswapped, followed by
4012 * a swap of the 4 byte val field. In other cases, the data is
4013 * already in proper host byte order. */
4014 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4015 target_su
.buf
= tswapal(target_su
.buf
);
4016 arg
.val
= tswap32(target_su
.val
);
4018 arg
.val
= target_su
.val
;
4020 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4024 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4028 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4029 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4036 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4040 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4041 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4047 arg
.__buf
= &seminfo
;
4048 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4049 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4057 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf (semop operation descriptor).  Field set
 * is grounded in target_to_host_sembuf() below. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4070 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4071 abi_ulong target_addr
,
4074 struct target_sembuf
*target_sembuf
;
4077 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4078 nsops
*sizeof(struct target_sembuf
), 1);
4080 return -TARGET_EFAULT
;
4082 for(i
=0; i
<nsops
; i
++) {
4083 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4084 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4085 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4088 unlock_user(target_sembuf
, target_addr
, 0);
4093 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4094 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4097 * This macro is required to handle the s390 variants, which passes the
4098 * arguments in a different order than default.
4101 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4102 (__nsops), (__timeout), (__sops)
4104 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4105 (__nsops), 0, (__sops), (__timeout)
4108 static inline abi_long
do_semtimedop(int semid
,
4111 abi_long timeout
, bool time64
)
4113 struct sembuf
*sops
;
4114 struct timespec ts
, *pts
= NULL
;
4120 if (target_to_host_timespec64(pts
, timeout
)) {
4121 return -TARGET_EFAULT
;
4124 if (target_to_host_timespec(pts
, timeout
)) {
4125 return -TARGET_EFAULT
;
4130 if (nsops
> TARGET_SEMOPM
) {
4131 return -TARGET_E2BIG
;
4134 sops
= g_new(struct sembuf
, nsops
);
4136 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4138 return -TARGET_EFAULT
;
4141 ret
= -TARGET_ENOSYS
;
4142 #ifdef __NR_semtimedop
4143 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4146 if (ret
== -TARGET_ENOSYS
) {
4147 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4148 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4156 struct target_msqid_ds
4158 struct target_ipc_perm msg_perm
;
4159 abi_ulong msg_stime
;
4160 #if TARGET_ABI_BITS == 32
4161 abi_ulong __unused1
;
4163 abi_ulong msg_rtime
;
4164 #if TARGET_ABI_BITS == 32
4165 abi_ulong __unused2
;
4167 abi_ulong msg_ctime
;
4168 #if TARGET_ABI_BITS == 32
4169 abi_ulong __unused3
;
4171 abi_ulong __msg_cbytes
;
4173 abi_ulong msg_qbytes
;
4174 abi_ulong msg_lspid
;
4175 abi_ulong msg_lrpid
;
4176 abi_ulong __unused4
;
4177 abi_ulong __unused5
;
4180 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4181 abi_ulong target_addr
)
4183 struct target_msqid_ds
*target_md
;
4185 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4186 return -TARGET_EFAULT
;
4187 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4188 return -TARGET_EFAULT
;
4189 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4190 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4191 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4192 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4193 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4194 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4195 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4196 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4197 unlock_user_struct(target_md
, target_addr
, 0);
4201 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4202 struct msqid_ds
*host_md
)
4204 struct target_msqid_ds
*target_md
;
4206 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4207 return -TARGET_EFAULT
;
4208 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4209 return -TARGET_EFAULT
;
4210 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4211 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4212 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4213 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4214 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4215 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4216 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4217 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4218 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of struct msginfo (msgctl IPC_INFO/MSG_INFO result).
 * Field set is grounded in host_to_target_msginfo() below. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4233 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4234 struct msginfo
*host_msginfo
)
4236 struct target_msginfo
*target_msginfo
;
4237 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4238 return -TARGET_EFAULT
;
4239 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4240 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4241 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4242 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4243 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4244 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4245 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4246 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4247 unlock_user_struct(target_msginfo
, target_addr
, 1);
4251 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4253 struct msqid_ds dsarg
;
4254 struct msginfo msginfo
;
4255 abi_long ret
= -TARGET_EINVAL
;
4263 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4264 return -TARGET_EFAULT
;
4265 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4266 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4267 return -TARGET_EFAULT
;
4270 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4274 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4275 if (host_to_target_msginfo(ptr
, &msginfo
))
4276 return -TARGET_EFAULT
;
4283 struct target_msgbuf
{
4288 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4289 ssize_t msgsz
, int msgflg
)
4291 struct target_msgbuf
*target_mb
;
4292 struct msgbuf
*host_mb
;
4296 return -TARGET_EINVAL
;
4299 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4300 return -TARGET_EFAULT
;
4301 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4303 unlock_user_struct(target_mb
, msgp
, 0);
4304 return -TARGET_ENOMEM
;
4306 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4307 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4308 ret
= -TARGET_ENOSYS
;
4310 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4313 if (ret
== -TARGET_ENOSYS
) {
4315 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4318 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4324 unlock_user_struct(target_mb
, msgp
, 0);
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
4343 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4344 ssize_t msgsz
, abi_long msgtyp
,
4347 struct target_msgbuf
*target_mb
;
4349 struct msgbuf
*host_mb
;
4353 return -TARGET_EINVAL
;
4356 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4357 return -TARGET_EFAULT
;
4359 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4361 ret
= -TARGET_ENOMEM
;
4364 ret
= -TARGET_ENOSYS
;
4366 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4369 if (ret
== -TARGET_ENOSYS
) {
4370 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4371 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4376 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4377 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4378 if (!target_mtext
) {
4379 ret
= -TARGET_EFAULT
;
4382 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4383 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4386 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4390 unlock_user_struct(target_mb
, msgp
, 1);
4395 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4396 abi_ulong target_addr
)
4398 struct target_shmid_ds
*target_sd
;
4400 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4401 return -TARGET_EFAULT
;
4402 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4403 return -TARGET_EFAULT
;
4404 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4405 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4406 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4407 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4408 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4409 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4410 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4411 unlock_user_struct(target_sd
, target_addr
, 0);
4415 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4416 struct shmid_ds
*host_sd
)
4418 struct target_shmid_ds
*target_sd
;
4420 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4421 return -TARGET_EFAULT
;
4422 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4423 return -TARGET_EFAULT
;
4424 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4425 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4426 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4427 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4428 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4429 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4430 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4431 unlock_user_struct(target_sd
, target_addr
, 1);
4435 struct target_shminfo
{
4443 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4444 struct shminfo
*host_shminfo
)
4446 struct target_shminfo
*target_shminfo
;
4447 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4448 return -TARGET_EFAULT
;
4449 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4450 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4451 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4452 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4453 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4454 unlock_user_struct(target_shminfo
, target_addr
, 1);
4458 struct target_shm_info
{
4463 abi_ulong swap_attempts
;
4464 abi_ulong swap_successes
;
4467 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4468 struct shm_info
*host_shm_info
)
4470 struct target_shm_info
*target_shm_info
;
4471 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4472 return -TARGET_EFAULT
;
4473 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4474 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4475 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4476 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4477 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4478 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4479 unlock_user_struct(target_shm_info
, target_addr
, 1);
4483 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4485 struct shmid_ds dsarg
;
4486 struct shminfo shminfo
;
4487 struct shm_info shm_info
;
4488 abi_long ret
= -TARGET_EINVAL
;
4496 if (target_to_host_shmid_ds(&dsarg
, buf
))
4497 return -TARGET_EFAULT
;
4498 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4499 if (host_to_target_shmid_ds(buf
, &dsarg
))
4500 return -TARGET_EFAULT
;
4503 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4504 if (host_to_target_shminfo(buf
, &shminfo
))
4505 return -TARGET_EFAULT
;
4508 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4509 if (host_to_target_shm_info(buf
, &shm_info
))
4510 return -TARGET_EFAULT
;
4515 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4522 #ifndef TARGET_FORCE_SHMLBA
4523 /* For most architectures, SHMLBA is the same as the page size;
4524 * some architectures have larger values, in which case they should
4525 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4526 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4527 * and defining its own value for SHMLBA.
4529 * The kernel also permits SHMLBA to be set by the architecture to a
4530 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4531 * this means that addresses are rounded to the large size if
4532 * SHM_RND is set but addresses not aligned to that size are not rejected
4533 * as long as they are at least page-aligned. Since the only architecture
4534 * which uses this is ia64 this code doesn't provide for that oddity.
4536 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4538 return TARGET_PAGE_SIZE
;
4542 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4543 int shmid
, abi_ulong shmaddr
, int shmflg
)
4545 CPUState
*cpu
= env_cpu(cpu_env
);
4548 struct shmid_ds shm_info
;
4552 /* shmat pointers are always untagged */
4554 /* find out the length of the shared memory segment */
4555 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4556 if (is_error(ret
)) {
4557 /* can't get length, bail out */
4561 shmlba
= target_shmlba(cpu_env
);
4563 if (shmaddr
& (shmlba
- 1)) {
4564 if (shmflg
& SHM_RND
) {
4565 shmaddr
&= ~(shmlba
- 1);
4567 return -TARGET_EINVAL
;
4570 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4571 return -TARGET_EINVAL
;
4577 * We're mapping shared memory, so ensure we generate code for parallel
4578 * execution and flush old translations. This will work up to the level
4579 * supported by the host -- anything that requires EXCP_ATOMIC will not
4580 * be atomic with respect to an external process.
4582 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4583 cpu
->tcg_cflags
|= CF_PARALLEL
;
4588 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4590 abi_ulong mmap_start
;
4592 /* In order to use the host shmat, we need to honor host SHMLBA. */
4593 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4595 if (mmap_start
== -1) {
4597 host_raddr
= (void *)-1;
4599 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4600 shmflg
| SHM_REMAP
);
4603 if (host_raddr
== (void *)-1) {
4605 return get_errno((long)host_raddr
);
4607 raddr
=h2g((unsigned long)host_raddr
);
4609 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4610 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4611 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4613 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4614 if (!shm_regions
[i
].in_use
) {
4615 shm_regions
[i
].in_use
= true;
4616 shm_regions
[i
].start
= raddr
;
4617 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4627 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4632 /* shmdt pointers are always untagged */
4636 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4637 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4638 shm_regions
[i
].in_use
= false;
4639 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4643 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
4650 #ifdef TARGET_NR_ipc
4651 /* ??? This only works with linear mappings. */
4652 /* do_ipc() must return target values and target errnos. */
4653 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4654 unsigned int call
, abi_long first
,
4655 abi_long second
, abi_long third
,
4656 abi_long ptr
, abi_long fifth
)
4661 version
= call
>> 16;
4666 ret
= do_semtimedop(first
, ptr
, second
, 0, false);
4668 case IPCOP_semtimedop
:
4670 * The s390 sys_ipc variant has only five parameters instead of six
4671 * (as for default variant) and the only difference is the handling of
4672 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4673 * to a struct timespec where the generic variant uses fifth parameter.
4675 #if defined(TARGET_S390X)
4676 ret
= do_semtimedop(first
, ptr
, second
, third
, TARGET_ABI_BITS
== 64);
4678 ret
= do_semtimedop(first
, ptr
, second
, fifth
, TARGET_ABI_BITS
== 64);
4683 ret
= get_errno(semget(first
, second
, third
));
4686 case IPCOP_semctl
: {
4687 /* The semun argument to semctl is passed by value, so dereference the
4690 get_user_ual(atptr
, ptr
);
4691 ret
= do_semctl(first
, second
, third
, atptr
);
4696 ret
= get_errno(msgget(first
, second
));
4700 ret
= do_msgsnd(first
, ptr
, second
, third
);
4704 ret
= do_msgctl(first
, second
, ptr
);
4711 struct target_ipc_kludge
{
4716 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4717 ret
= -TARGET_EFAULT
;
4721 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4723 unlock_user_struct(tmp
, ptr
, 0);
4727 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4736 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4737 if (is_error(raddr
))
4738 return get_errno(raddr
);
4739 if (put_user_ual(raddr
, third
))
4740 return -TARGET_EFAULT
;
4744 ret
= -TARGET_EINVAL
;
4749 ret
= do_shmdt(ptr
);
4753 /* IPC_* flag values are the same on all linux platforms */
4754 ret
= get_errno(shmget(first
, second
, third
));
4757 /* IPC_* and SHM_* command values are the same on all linux platforms */
4759 ret
= do_shmctl(first
, second
, ptr
);
4762 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4764 ret
= -TARGET_ENOSYS
;
4771 /* kernel structure types definitions */
4773 #define STRUCT(name, ...) STRUCT_ ## name,
4774 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4776 #include "syscall_types.h"
4780 #undef STRUCT_SPECIAL
4782 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4783 #define STRUCT_SPECIAL(name)
4784 #include "syscall_types.h"
4786 #undef STRUCT_SPECIAL
4788 #define MAX_STRUCT_SIZE 4096
4790 #ifdef CONFIG_FIEMAP
4791 /* So fiemap access checks don't overflow on 32 bit systems.
4792 * This is very slightly smaller than the limit imposed by
4793 * the underlying kernel.
4795 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4796 / sizeof(struct fiemap_extent))
4798 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4799 int fd
, int cmd
, abi_long arg
)
4801 /* The parameter for this ioctl is a struct fiemap followed
4802 * by an array of struct fiemap_extent whose size is set
4803 * in fiemap->fm_extent_count. The array is filled in by the
4806 int target_size_in
, target_size_out
;
4808 const argtype
*arg_type
= ie
->arg_type
;
4809 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4812 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4816 assert(arg_type
[0] == TYPE_PTR
);
4817 assert(ie
->access
== IOC_RW
);
4819 target_size_in
= thunk_type_size(arg_type
, 0);
4820 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4822 return -TARGET_EFAULT
;
4824 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4825 unlock_user(argptr
, arg
, 0);
4826 fm
= (struct fiemap
*)buf_temp
;
4827 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4828 return -TARGET_EINVAL
;
4831 outbufsz
= sizeof (*fm
) +
4832 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4834 if (outbufsz
> MAX_STRUCT_SIZE
) {
4835 /* We can't fit all the extents into the fixed size buffer.
4836 * Allocate one that is large enough and use it instead.
4838 fm
= g_try_malloc(outbufsz
);
4840 return -TARGET_ENOMEM
;
4842 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4845 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4846 if (!is_error(ret
)) {
4847 target_size_out
= target_size_in
;
4848 /* An extent_count of 0 means we were only counting the extents
4849 * so there are no structs to copy
4851 if (fm
->fm_extent_count
!= 0) {
4852 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4854 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4856 ret
= -TARGET_EFAULT
;
4858 /* Convert the struct fiemap */
4859 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4860 if (fm
->fm_extent_count
!= 0) {
4861 p
= argptr
+ target_size_in
;
4862 /* ...and then all the struct fiemap_extents */
4863 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4864 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4869 unlock_user(argptr
, arg
, target_size_out
);
4879 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4880 int fd
, int cmd
, abi_long arg
)
4882 const argtype
*arg_type
= ie
->arg_type
;
4886 struct ifconf
*host_ifconf
;
4888 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4889 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4890 int target_ifreq_size
;
4895 abi_long target_ifc_buf
;
4899 assert(arg_type
[0] == TYPE_PTR
);
4900 assert(ie
->access
== IOC_RW
);
4903 target_size
= thunk_type_size(arg_type
, 0);
4905 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4907 return -TARGET_EFAULT
;
4908 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4909 unlock_user(argptr
, arg
, 0);
4911 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4912 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4913 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4915 if (target_ifc_buf
!= 0) {
4916 target_ifc_len
= host_ifconf
->ifc_len
;
4917 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4918 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4920 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4921 if (outbufsz
> MAX_STRUCT_SIZE
) {
4923 * We can't fit all the extents into the fixed size buffer.
4924 * Allocate one that is large enough and use it instead.
4926 host_ifconf
= g_try_malloc(outbufsz
);
4928 return -TARGET_ENOMEM
;
4930 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4933 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4935 host_ifconf
->ifc_len
= host_ifc_len
;
4937 host_ifc_buf
= NULL
;
4939 host_ifconf
->ifc_buf
= host_ifc_buf
;
4941 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4942 if (!is_error(ret
)) {
4943 /* convert host ifc_len to target ifc_len */
4945 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4946 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4947 host_ifconf
->ifc_len
= target_ifc_len
;
4949 /* restore target ifc_buf */
4951 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4953 /* copy struct ifconf to target user */
4955 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4957 return -TARGET_EFAULT
;
4958 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4959 unlock_user(argptr
, arg
, target_size
);
4961 if (target_ifc_buf
!= 0) {
4962 /* copy ifreq[] to target user */
4963 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4964 for (i
= 0; i
< nb_ifreq
; i
++) {
4965 thunk_convert(argptr
+ i
* target_ifreq_size
,
4966 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4967 ifreq_arg_type
, THUNK_TARGET
);
4969 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4974 g_free(host_ifconf
);
4980 #if defined(CONFIG_USBFS)
4981 #if HOST_LONG_BITS > 64
4982 #error USBDEVFS thunks do not support >64 bit hosts yet.
4985 uint64_t target_urb_adr
;
4986 uint64_t target_buf_adr
;
4987 char *target_buf_ptr
;
4988 struct usbdevfs_urb host_urb
;
4991 static GHashTable
*usbdevfs_urb_hashtable(void)
4993 static GHashTable
*urb_hashtable
;
4995 if (!urb_hashtable
) {
4996 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4998 return urb_hashtable
;
5001 static void urb_hashtable_insert(struct live_urb
*urb
)
5003 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5004 g_hash_table_insert(urb_hashtable
, urb
, urb
);
5007 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
5009 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5010 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
5013 static void urb_hashtable_remove(struct live_urb
*urb
)
5015 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5016 g_hash_table_remove(urb_hashtable
, urb
);
5020 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5021 int fd
, int cmd
, abi_long arg
)
5023 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
5024 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
5025 struct live_urb
*lurb
;
5029 uintptr_t target_urb_adr
;
5032 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
5034 memset(buf_temp
, 0, sizeof(uint64_t));
5035 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5036 if (is_error(ret
)) {
5040 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
5041 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
5042 if (!lurb
->target_urb_adr
) {
5043 return -TARGET_EFAULT
;
5045 urb_hashtable_remove(lurb
);
5046 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5047 lurb
->host_urb
.buffer_length
);
5048 lurb
->target_buf_ptr
= NULL
;
5050 /* restore the guest buffer pointer */
5051 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5053 /* update the guest urb struct */
5054 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5057 return -TARGET_EFAULT
;
5059 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5060 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5062 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5063 /* write back the urb handle */
5064 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5067 return -TARGET_EFAULT
;
5070 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5071 target_urb_adr
= lurb
->target_urb_adr
;
5072 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5073 unlock_user(argptr
, arg
, target_size
);
5080 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5081 uint8_t *buf_temp
__attribute__((unused
)),
5082 int fd
, int cmd
, abi_long arg
)
5084 struct live_urb
*lurb
;
5086 /* map target address back to host URB with metadata. */
5087 lurb
= urb_hashtable_lookup(arg
);
5089 return -TARGET_EFAULT
;
5091 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5095 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5096 int fd
, int cmd
, abi_long arg
)
5098 const argtype
*arg_type
= ie
->arg_type
;
5103 struct live_urb
*lurb
;
5106 * each submitted URB needs to map to a unique ID for the
5107 * kernel, and that unique ID needs to be a pointer to
5108 * host memory. hence, we need to malloc for each URB.
5109 * isochronous transfers have a variable length struct.
5112 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5114 /* construct host copy of urb and metadata */
5115 lurb
= g_try_new0(struct live_urb
, 1);
5117 return -TARGET_ENOMEM
;
5120 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5123 return -TARGET_EFAULT
;
5125 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5126 unlock_user(argptr
, arg
, 0);
5128 lurb
->target_urb_adr
= arg
;
5129 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5131 /* buffer space used depends on endpoint type so lock the entire buffer */
5132 /* control type urbs should check the buffer contents for true direction */
5133 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5134 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5135 lurb
->host_urb
.buffer_length
, 1);
5136 if (lurb
->target_buf_ptr
== NULL
) {
5138 return -TARGET_EFAULT
;
5141 /* update buffer pointer in host copy */
5142 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5144 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5145 if (is_error(ret
)) {
5146 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5149 urb_hashtable_insert(lurb
);
5154 #endif /* CONFIG_USBFS */
5156 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5157 int cmd
, abi_long arg
)
5160 struct dm_ioctl
*host_dm
;
5161 abi_long guest_data
;
5162 uint32_t guest_data_size
;
5164 const argtype
*arg_type
= ie
->arg_type
;
5166 void *big_buf
= NULL
;
5170 target_size
= thunk_type_size(arg_type
, 0);
5171 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5173 ret
= -TARGET_EFAULT
;
5176 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5177 unlock_user(argptr
, arg
, 0);
5179 /* buf_temp is too small, so fetch things into a bigger buffer */
5180 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5181 memcpy(big_buf
, buf_temp
, target_size
);
5185 guest_data
= arg
+ host_dm
->data_start
;
5186 if ((guest_data
- arg
) < 0) {
5187 ret
= -TARGET_EINVAL
;
5190 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5191 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5193 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5195 ret
= -TARGET_EFAULT
;
5199 switch (ie
->host_cmd
) {
5201 case DM_LIST_DEVICES
:
5204 case DM_DEV_SUSPEND
:
5207 case DM_TABLE_STATUS
:
5208 case DM_TABLE_CLEAR
:
5210 case DM_LIST_VERSIONS
:
5214 case DM_DEV_SET_GEOMETRY
:
5215 /* data contains only strings */
5216 memcpy(host_data
, argptr
, guest_data_size
);
5219 memcpy(host_data
, argptr
, guest_data_size
);
5220 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5224 void *gspec
= argptr
;
5225 void *cur_data
= host_data
;
5226 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5227 int spec_size
= thunk_type_size(arg_type
, 0);
5230 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5231 struct dm_target_spec
*spec
= cur_data
;
5235 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5236 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5238 spec
->next
= sizeof(*spec
) + slen
;
5239 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5241 cur_data
+= spec
->next
;
5246 ret
= -TARGET_EINVAL
;
5247 unlock_user(argptr
, guest_data
, 0);
5250 unlock_user(argptr
, guest_data
, 0);
5252 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5253 if (!is_error(ret
)) {
5254 guest_data
= arg
+ host_dm
->data_start
;
5255 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5256 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5257 switch (ie
->host_cmd
) {
5262 case DM_DEV_SUSPEND
:
5265 case DM_TABLE_CLEAR
:
5267 case DM_DEV_SET_GEOMETRY
:
5268 /* no return data */
5270 case DM_LIST_DEVICES
:
5272 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5273 uint32_t remaining_data
= guest_data_size
;
5274 void *cur_data
= argptr
;
5275 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5276 int nl_size
= 12; /* can't use thunk_size due to alignment */
5279 uint32_t next
= nl
->next
;
5281 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5283 if (remaining_data
< nl
->next
) {
5284 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5287 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5288 strcpy(cur_data
+ nl_size
, nl
->name
);
5289 cur_data
+= nl
->next
;
5290 remaining_data
-= nl
->next
;
5294 nl
= (void*)nl
+ next
;
5299 case DM_TABLE_STATUS
:
5301 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5302 void *cur_data
= argptr
;
5303 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5304 int spec_size
= thunk_type_size(arg_type
, 0);
5307 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5308 uint32_t next
= spec
->next
;
5309 int slen
= strlen((char*)&spec
[1]) + 1;
5310 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5311 if (guest_data_size
< spec
->next
) {
5312 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5315 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5316 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5317 cur_data
= argptr
+ spec
->next
;
5318 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5324 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5325 int count
= *(uint32_t*)hdata
;
5326 uint64_t *hdev
= hdata
+ 8;
5327 uint64_t *gdev
= argptr
+ 8;
5330 *(uint32_t*)argptr
= tswap32(count
);
5331 for (i
= 0; i
< count
; i
++) {
5332 *gdev
= tswap64(*hdev
);
5338 case DM_LIST_VERSIONS
:
5340 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5341 uint32_t remaining_data
= guest_data_size
;
5342 void *cur_data
= argptr
;
5343 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5344 int vers_size
= thunk_type_size(arg_type
, 0);
5347 uint32_t next
= vers
->next
;
5349 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5351 if (remaining_data
< vers
->next
) {
5352 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5355 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5356 strcpy(cur_data
+ vers_size
, vers
->name
);
5357 cur_data
+= vers
->next
;
5358 remaining_data
-= vers
->next
;
5362 vers
= (void*)vers
+ next
;
5367 unlock_user(argptr
, guest_data
, 0);
5368 ret
= -TARGET_EINVAL
;
5371 unlock_user(argptr
, guest_data
, guest_data_size
);
5373 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5375 ret
= -TARGET_EFAULT
;
5378 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5379 unlock_user(argptr
, arg
, target_size
);
5386 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5387 int cmd
, abi_long arg
)
5391 const argtype
*arg_type
= ie
->arg_type
;
5392 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5395 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5396 struct blkpg_partition host_part
;
5398 /* Read and convert blkpg */
5400 target_size
= thunk_type_size(arg_type
, 0);
5401 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5403 ret
= -TARGET_EFAULT
;
5406 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5407 unlock_user(argptr
, arg
, 0);
5409 switch (host_blkpg
->op
) {
5410 case BLKPG_ADD_PARTITION
:
5411 case BLKPG_DEL_PARTITION
:
5412 /* payload is struct blkpg_partition */
5415 /* Unknown opcode */
5416 ret
= -TARGET_EINVAL
;
5420 /* Read and convert blkpg->data */
5421 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5422 target_size
= thunk_type_size(part_arg_type
, 0);
5423 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5425 ret
= -TARGET_EFAULT
;
5428 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5429 unlock_user(argptr
, arg
, 0);
5431 /* Swizzle the data pointer to our local copy and call! */
5432 host_blkpg
->data
= &host_part
;
5433 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5439 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5440 int fd
, int cmd
, abi_long arg
)
5442 const argtype
*arg_type
= ie
->arg_type
;
5443 const StructEntry
*se
;
5444 const argtype
*field_types
;
5445 const int *dst_offsets
, *src_offsets
;
5448 abi_ulong
*target_rt_dev_ptr
= NULL
;
5449 unsigned long *host_rt_dev_ptr
= NULL
;
5453 assert(ie
->access
== IOC_W
);
5454 assert(*arg_type
== TYPE_PTR
);
5456 assert(*arg_type
== TYPE_STRUCT
);
5457 target_size
= thunk_type_size(arg_type
, 0);
5458 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5460 return -TARGET_EFAULT
;
5463 assert(*arg_type
== (int)STRUCT_rtentry
);
5464 se
= struct_entries
+ *arg_type
++;
5465 assert(se
->convert
[0] == NULL
);
5466 /* convert struct here to be able to catch rt_dev string */
5467 field_types
= se
->field_types
;
5468 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5469 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5470 for (i
= 0; i
< se
->nb_fields
; i
++) {
5471 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5472 assert(*field_types
== TYPE_PTRVOID
);
5473 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5474 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5475 if (*target_rt_dev_ptr
!= 0) {
5476 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5477 tswapal(*target_rt_dev_ptr
));
5478 if (!*host_rt_dev_ptr
) {
5479 unlock_user(argptr
, arg
, 0);
5480 return -TARGET_EFAULT
;
5483 *host_rt_dev_ptr
= 0;
5488 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5489 argptr
+ src_offsets
[i
],
5490 field_types
, THUNK_HOST
);
5492 unlock_user(argptr
, arg
, 0);
5494 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5496 assert(host_rt_dev_ptr
!= NULL
);
5497 assert(target_rt_dev_ptr
!= NULL
);
5498 if (*host_rt_dev_ptr
!= 0) {
5499 unlock_user((void *)*host_rt_dev_ptr
,
5500 *target_rt_dev_ptr
, 0);
5505 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5506 int fd
, int cmd
, abi_long arg
)
5508 int sig
= target_to_host_signal(arg
);
5509 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5512 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5513 int fd
, int cmd
, abi_long arg
)
5518 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5519 if (is_error(ret
)) {
5523 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5524 if (copy_to_user_timeval(arg
, &tv
)) {
5525 return -TARGET_EFAULT
;
5528 if (copy_to_user_timeval64(arg
, &tv
)) {
5529 return -TARGET_EFAULT
;
5536 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5537 int fd
, int cmd
, abi_long arg
)
5542 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5543 if (is_error(ret
)) {
5547 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5548 if (host_to_target_timespec(arg
, &ts
)) {
5549 return -TARGET_EFAULT
;
5552 if (host_to_target_timespec64(arg
, &ts
)) {
5553 return -TARGET_EFAULT
;
5561 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5562 int fd
, int cmd
, abi_long arg
)
5564 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5565 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5571 static void unlock_drm_version(struct drm_version
*host_ver
,
5572 struct target_drm_version
*target_ver
,
5575 unlock_user(host_ver
->name
, target_ver
->name
,
5576 copy
? host_ver
->name_len
: 0);
5577 unlock_user(host_ver
->date
, target_ver
->date
,
5578 copy
? host_ver
->date_len
: 0);
5579 unlock_user(host_ver
->desc
, target_ver
->desc
,
5580 copy
? host_ver
->desc_len
: 0);
5583 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5584 struct target_drm_version
*target_ver
)
5586 memset(host_ver
, 0, sizeof(*host_ver
));
5588 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5589 if (host_ver
->name_len
) {
5590 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5591 target_ver
->name_len
, 0);
5592 if (!host_ver
->name
) {
5597 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5598 if (host_ver
->date_len
) {
5599 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5600 target_ver
->date_len
, 0);
5601 if (!host_ver
->date
) {
5606 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5607 if (host_ver
->desc_len
) {
5608 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5609 target_ver
->desc_len
, 0);
5610 if (!host_ver
->desc
) {
5617 unlock_drm_version(host_ver
, target_ver
, false);
5621 static inline void host_to_target_drmversion(
5622 struct target_drm_version
*target_ver
,
5623 struct drm_version
*host_ver
)
5625 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5626 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5627 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5628 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5629 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5630 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5631 unlock_drm_version(host_ver
, target_ver
, true);
5634 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5635 int fd
, int cmd
, abi_long arg
)
5637 struct drm_version
*ver
;
5638 struct target_drm_version
*target_ver
;
5641 switch (ie
->host_cmd
) {
5642 case DRM_IOCTL_VERSION
:
5643 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5644 return -TARGET_EFAULT
;
5646 ver
= (struct drm_version
*)buf_temp
;
5647 ret
= target_to_host_drmversion(ver
, target_ver
);
5648 if (!is_error(ret
)) {
5649 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5650 if (is_error(ret
)) {
5651 unlock_drm_version(ver
, target_ver
, false);
5653 host_to_target_drmversion(target_ver
, ver
);
5656 unlock_user_struct(target_ver
, arg
, 0);
5659 return -TARGET_ENOSYS
;
5662 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5663 struct drm_i915_getparam
*gparam
,
5664 int fd
, abi_long arg
)
5668 struct target_drm_i915_getparam
*target_gparam
;
5670 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5671 return -TARGET_EFAULT
;
5674 __get_user(gparam
->param
, &target_gparam
->param
);
5675 gparam
->value
= &value
;
5676 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5677 put_user_s32(value
, target_gparam
->value
);
5679 unlock_user_struct(target_gparam
, arg
, 0);
5683 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5684 int fd
, int cmd
, abi_long arg
)
5686 switch (ie
->host_cmd
) {
5687 case DRM_IOCTL_I915_GETPARAM
:
5688 return do_ioctl_drm_i915_getparam(ie
,
5689 (struct drm_i915_getparam
*)buf_temp
,
5692 return -TARGET_ENOSYS
;
5698 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5699 int fd
, int cmd
, abi_long arg
)
5701 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5702 struct tun_filter
*target_filter
;
5705 assert(ie
->access
== IOC_W
);
5707 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5708 if (!target_filter
) {
5709 return -TARGET_EFAULT
;
5711 filter
->flags
= tswap16(target_filter
->flags
);
5712 filter
->count
= tswap16(target_filter
->count
);
5713 unlock_user(target_filter
, arg
, 0);
5715 if (filter
->count
) {
5716 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5718 return -TARGET_EFAULT
;
5721 target_addr
= lock_user(VERIFY_READ
,
5722 arg
+ offsetof(struct tun_filter
, addr
),
5723 filter
->count
* ETH_ALEN
, 1);
5725 return -TARGET_EFAULT
;
5727 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5728 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5731 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5734 IOCTLEntry ioctl_entries
[] = {
5735 #define IOCTL(cmd, access, ...) \
5736 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5737 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5738 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5739 #define IOCTL_IGNORE(cmd) \
5740 { TARGET_ ## cmd, 0, #cmd },
5745 /* ??? Implement proper locking for ioctls. */
5746 /* do_ioctl() Must return target values and target errnos. */
5747 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5749 const IOCTLEntry
*ie
;
5750 const argtype
*arg_type
;
5752 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5758 if (ie
->target_cmd
== 0) {
5760 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5761 return -TARGET_ENOSYS
;
5763 if (ie
->target_cmd
== cmd
)
5767 arg_type
= ie
->arg_type
;
5769 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5770 } else if (!ie
->host_cmd
) {
5771 /* Some architectures define BSD ioctls in their headers
5772 that are not implemented in Linux. */
5773 return -TARGET_ENOSYS
;
5776 switch(arg_type
[0]) {
5779 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5785 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5789 target_size
= thunk_type_size(arg_type
, 0);
5790 switch(ie
->access
) {
5792 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5793 if (!is_error(ret
)) {
5794 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5796 return -TARGET_EFAULT
;
5797 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5798 unlock_user(argptr
, arg
, target_size
);
5802 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5804 return -TARGET_EFAULT
;
5805 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5806 unlock_user(argptr
, arg
, 0);
5807 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5811 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5813 return -TARGET_EFAULT
;
5814 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5815 unlock_user(argptr
, arg
, 0);
5816 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5817 if (!is_error(ret
)) {
5818 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5820 return -TARGET_EFAULT
;
5821 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5822 unlock_user(argptr
, arg
, target_size
);
5828 qemu_log_mask(LOG_UNIMP
,
5829 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5830 (long)cmd
, arg_type
[0]);
5831 ret
= -TARGET_ENOSYS
;
5837 static const bitmask_transtbl iflag_tbl
[] = {
5838 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5839 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5840 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5841 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5842 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5843 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5844 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5845 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5846 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5847 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5848 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5849 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5850 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5851 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5852 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5856 static const bitmask_transtbl oflag_tbl
[] = {
5857 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5858 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5859 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5860 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5861 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5862 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5863 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5864 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5865 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5866 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5867 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5868 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5869 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5870 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5871 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5872 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5873 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5874 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5875 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5876 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5877 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5878 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5879 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5880 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5884 static const bitmask_transtbl cflag_tbl
[] = {
5885 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5886 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5887 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5888 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5889 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5890 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5891 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5892 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5893 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5894 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5895 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5896 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5897 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5898 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5899 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5900 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5901 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5902 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5903 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5904 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5905 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5906 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5907 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5908 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5909 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5910 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5911 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5912 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5913 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5914 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5915 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5919 static const bitmask_transtbl lflag_tbl
[] = {
5920 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5921 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5922 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5923 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5924 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5925 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5926 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5927 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5928 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5929 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5930 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5931 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5932 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5933 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5934 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5935 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5939 static void target_to_host_termios (void *dst
, const void *src
)
5941 struct host_termios
*host
= dst
;
5942 const struct target_termios
*target
= src
;
5945 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5947 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5949 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5951 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5952 host
->c_line
= target
->c_line
;
5954 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5955 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5956 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5957 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5958 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5959 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5960 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5961 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5962 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5963 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5964 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5965 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5966 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5967 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5968 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5969 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5970 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5971 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5974 static void host_to_target_termios (void *dst
, const void *src
)
5976 struct target_termios
*target
= dst
;
5977 const struct host_termios
*host
= src
;
5980 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5982 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5984 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5986 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5987 target
->c_line
= host
->c_line
;
5989 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5990 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5991 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5992 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5993 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5994 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5995 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5996 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5997 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5998 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5999 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6000 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6001 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6002 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6003 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6004 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6005 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6006 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6009 static const StructEntry struct_termios_def
= {
6010 .convert
= { host_to_target_termios
, target_to_host_termios
},
6011 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6012 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6013 .print
= print_termios
,
6016 static const bitmask_transtbl mmap_flags_tbl
[] = {
6017 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6018 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6019 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6020 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6021 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6022 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6023 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6024 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6025 MAP_DENYWRITE
, MAP_DENYWRITE
},
6026 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6027 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6028 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6029 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6030 MAP_NORESERVE
, MAP_NORESERVE
},
6031 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6032 /* MAP_STACK had been ignored by the kernel for quite some time.
6033 Recognize it for the target insofar as we do not want to pass
6034 it through to the host. */
6035 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6040 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6041 * TARGET_I386 is defined if TARGET_X86_64 is defined
6043 #if defined(TARGET_I386)
6045 /* NOTE: there is really one LDT for all the threads */
6046 static uint8_t *ldt_table
;
6048 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6055 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6056 if (size
> bytecount
)
6058 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6060 return -TARGET_EFAULT
;
6061 /* ??? Should this by byteswapped? */
6062 memcpy(p
, ldt_table
, size
);
6063 unlock_user(p
, ptr
, size
);
6067 /* XXX: add locking support */
6068 static abi_long
write_ldt(CPUX86State
*env
,
6069 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6071 struct target_modify_ldt_ldt_s ldt_info
;
6072 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6073 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6074 int seg_not_present
, useable
, lm
;
6075 uint32_t *lp
, entry_1
, entry_2
;
6077 if (bytecount
!= sizeof(ldt_info
))
6078 return -TARGET_EINVAL
;
6079 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6080 return -TARGET_EFAULT
;
6081 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6082 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6083 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6084 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6085 unlock_user_struct(target_ldt_info
, ptr
, 0);
6087 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6088 return -TARGET_EINVAL
;
6089 seg_32bit
= ldt_info
.flags
& 1;
6090 contents
= (ldt_info
.flags
>> 1) & 3;
6091 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6092 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6093 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6094 useable
= (ldt_info
.flags
>> 6) & 1;
6098 lm
= (ldt_info
.flags
>> 7) & 1;
6100 if (contents
== 3) {
6102 return -TARGET_EINVAL
;
6103 if (seg_not_present
== 0)
6104 return -TARGET_EINVAL
;
6106 /* allocate the LDT */
6108 env
->ldt
.base
= target_mmap(0,
6109 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6110 PROT_READ
|PROT_WRITE
,
6111 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6112 if (env
->ldt
.base
== -1)
6113 return -TARGET_ENOMEM
;
6114 memset(g2h_untagged(env
->ldt
.base
), 0,
6115 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6116 env
->ldt
.limit
= 0xffff;
6117 ldt_table
= g2h_untagged(env
->ldt
.base
);
6120 /* NOTE: same code as Linux kernel */
6121 /* Allow LDTs to be cleared by the user. */
6122 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6125 read_exec_only
== 1 &&
6127 limit_in_pages
== 0 &&
6128 seg_not_present
== 1 &&
6136 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6137 (ldt_info
.limit
& 0x0ffff);
6138 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6139 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6140 (ldt_info
.limit
& 0xf0000) |
6141 ((read_exec_only
^ 1) << 9) |
6143 ((seg_not_present
^ 1) << 15) |
6145 (limit_in_pages
<< 23) |
6149 entry_2
|= (useable
<< 20);
6151 /* Install the new entry ... */
6153 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6154 lp
[0] = tswap32(entry_1
);
6155 lp
[1] = tswap32(entry_2
);
6159 /* specific and weird i386 syscalls */
/*
 * do_modify_ldt: dispatcher for the i386 modify_ldt() syscall.
 * Routes to read_ldt() for the read function and write_ldt() for the
 * two write variants (old-mode vs new-mode descriptor format), else
 * fails with -TARGET_ENOSYS.
 * NOTE(review): the switch statement's case labels are elided in this
 * excerpt; the 'func' values selecting each branch cannot be confirmed here.
 */
6160 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6161 unsigned long bytecount
)
/* Read the current LDT into guest memory. */
6167 ret
= read_ldt(ptr
, bytecount
);
/* Write an LDT entry, old-mode descriptor layout (oldmode=1). */
6170 ret
= write_ldt(env
, ptr
, bytecount
, 1);
/* Write an LDT entry, new-mode descriptor layout (oldmode=0). */
6173 ret
= write_ldt(env
, ptr
, bytecount
, 0);
/* Unknown function code. */
6176 ret
= -TARGET_ENOSYS
;
6182 #if defined(TARGET_ABI32)
6183 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6185 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6186 struct target_modify_ldt_ldt_s ldt_info
;
6187 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6188 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6189 int seg_not_present
, useable
, lm
;
6190 uint32_t *lp
, entry_1
, entry_2
;
6193 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6194 if (!target_ldt_info
)
6195 return -TARGET_EFAULT
;
6196 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6197 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6198 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6199 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6200 if (ldt_info
.entry_number
== -1) {
6201 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6202 if (gdt_table
[i
] == 0) {
6203 ldt_info
.entry_number
= i
;
6204 target_ldt_info
->entry_number
= tswap32(i
);
6209 unlock_user_struct(target_ldt_info
, ptr
, 1);
6211 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6212 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6213 return -TARGET_EINVAL
;
6214 seg_32bit
= ldt_info
.flags
& 1;
6215 contents
= (ldt_info
.flags
>> 1) & 3;
6216 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6217 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6218 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6219 useable
= (ldt_info
.flags
>> 6) & 1;
6223 lm
= (ldt_info
.flags
>> 7) & 1;
6226 if (contents
== 3) {
6227 if (seg_not_present
== 0)
6228 return -TARGET_EINVAL
;
6231 /* NOTE: same code as Linux kernel */
6232 /* Allow LDTs to be cleared by the user. */
6233 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6234 if ((contents
== 0 &&
6235 read_exec_only
== 1 &&
6237 limit_in_pages
== 0 &&
6238 seg_not_present
== 1 &&
6246 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6247 (ldt_info
.limit
& 0x0ffff);
6248 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6249 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6250 (ldt_info
.limit
& 0xf0000) |
6251 ((read_exec_only
^ 1) << 9) |
6253 ((seg_not_present
^ 1) << 15) |
6255 (limit_in_pages
<< 23) |
6260 /* Install the new entry ... */
6262 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6263 lp
[0] = tswap32(entry_1
);
6264 lp
[1] = tswap32(entry_2
);
6268 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6270 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6271 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6272 uint32_t base_addr
, limit
, flags
;
6273 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6274 int seg_not_present
, useable
, lm
;
6275 uint32_t *lp
, entry_1
, entry_2
;
6277 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6278 if (!target_ldt_info
)
6279 return -TARGET_EFAULT
;
6280 idx
= tswap32(target_ldt_info
->entry_number
);
6281 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6282 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6283 unlock_user_struct(target_ldt_info
, ptr
, 1);
6284 return -TARGET_EINVAL
;
6286 lp
= (uint32_t *)(gdt_table
+ idx
);
6287 entry_1
= tswap32(lp
[0]);
6288 entry_2
= tswap32(lp
[1]);
6290 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6291 contents
= (entry_2
>> 10) & 3;
6292 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6293 seg_32bit
= (entry_2
>> 22) & 1;
6294 limit_in_pages
= (entry_2
>> 23) & 1;
6295 useable
= (entry_2
>> 20) & 1;
6299 lm
= (entry_2
>> 21) & 1;
6301 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6302 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6303 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6304 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6305 base_addr
= (entry_1
>> 16) |
6306 (entry_2
& 0xff000000) |
6307 ((entry_2
& 0xff) << 16);
6308 target_ldt_info
->base_addr
= tswapal(base_addr
);
6309 target_ldt_info
->limit
= tswap32(limit
);
6310 target_ldt_info
->flags
= tswap32(flags
);
6311 unlock_user_struct(target_ldt_info
, ptr
, 1);
/*
 * do_arch_prctl (stub variant): used when the full implementation is
 * compiled out; reject all arch_prctl() requests with -TARGET_ENOSYS.
 * NOTE(review): the surrounding #if/#else selecting this stub vs the
 * real implementation is elided in this excerpt.
 */
6315 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6317 return -TARGET_ENOSYS
;
6320 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6327 case TARGET_ARCH_SET_GS
:
6328 case TARGET_ARCH_SET_FS
:
6329 if (code
== TARGET_ARCH_SET_GS
)
6333 cpu_x86_load_seg(env
, idx
, 0);
6334 env
->segs
[idx
].base
= addr
;
6336 case TARGET_ARCH_GET_GS
:
6337 case TARGET_ARCH_GET_FS
:
6338 if (code
== TARGET_ARCH_GET_GS
)
6342 val
= env
->segs
[idx
].base
;
6343 if (put_user(val
, addr
, abi_ulong
))
6344 ret
= -TARGET_EFAULT
;
6347 ret
= -TARGET_EINVAL
;
6352 #endif /* defined(TARGET_ABI32) */
6353 #endif /* defined(TARGET_I386) */
6356 * These constants are generic. Supply any that are missing from the host.
6359 # define PR_SET_NAME 15
6360 # define PR_GET_NAME 16
6362 #ifndef PR_SET_FP_MODE
6363 # define PR_SET_FP_MODE 45
6364 # define PR_GET_FP_MODE 46
6365 # define PR_FP_MODE_FR (1 << 0)
6366 # define PR_FP_MODE_FRE (1 << 1)
6368 #ifndef PR_SVE_SET_VL
6369 # define PR_SVE_SET_VL 50
6370 # define PR_SVE_GET_VL 51
6371 # define PR_SVE_VL_LEN_MASK 0xffff
6372 # define PR_SVE_VL_INHERIT (1 << 17)
6374 #ifndef PR_PAC_RESET_KEYS
6375 # define PR_PAC_RESET_KEYS 54
6376 # define PR_PAC_APIAKEY (1 << 0)
6377 # define PR_PAC_APIBKEY (1 << 1)
6378 # define PR_PAC_APDAKEY (1 << 2)
6379 # define PR_PAC_APDBKEY (1 << 3)
6380 # define PR_PAC_APGAKEY (1 << 4)
6382 #ifndef PR_SET_TAGGED_ADDR_CTRL
6383 # define PR_SET_TAGGED_ADDR_CTRL 55
6384 # define PR_GET_TAGGED_ADDR_CTRL 56
6385 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6387 #ifndef PR_MTE_TCF_SHIFT
6388 # define PR_MTE_TCF_SHIFT 1
6389 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6390 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6391 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6392 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6393 # define PR_MTE_TAG_SHIFT 3
6394 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6396 #ifndef PR_SET_IO_FLUSHER
6397 # define PR_SET_IO_FLUSHER 57
6398 # define PR_GET_IO_FLUSHER 58
6400 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6401 # define PR_SET_SYSCALL_USER_DISPATCH 59
6403 #ifndef PR_SME_SET_VL
6404 # define PR_SME_SET_VL 63
6405 # define PR_SME_GET_VL 64
6406 # define PR_SME_VL_LEN_MASK 0xffff
6407 # define PR_SME_VL_INHERIT (1 << 17)
6410 #include "target_prctl.h"
6412 static abi_long
do_prctl_inval0(CPUArchState
*env
)
6414 return -TARGET_EINVAL
;
6417 static abi_long
do_prctl_inval1(CPUArchState
*env
, abi_long arg2
)
6419 return -TARGET_EINVAL
;
6422 #ifndef do_prctl_get_fp_mode
6423 #define do_prctl_get_fp_mode do_prctl_inval0
6425 #ifndef do_prctl_set_fp_mode
6426 #define do_prctl_set_fp_mode do_prctl_inval1
6428 #ifndef do_prctl_sve_get_vl
6429 #define do_prctl_sve_get_vl do_prctl_inval0
6431 #ifndef do_prctl_sve_set_vl
6432 #define do_prctl_sve_set_vl do_prctl_inval1
6434 #ifndef do_prctl_reset_keys
6435 #define do_prctl_reset_keys do_prctl_inval1
6437 #ifndef do_prctl_set_tagged_addr_ctrl
6438 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6440 #ifndef do_prctl_get_tagged_addr_ctrl
6441 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6443 #ifndef do_prctl_get_unalign
6444 #define do_prctl_get_unalign do_prctl_inval1
6446 #ifndef do_prctl_set_unalign
6447 #define do_prctl_set_unalign do_prctl_inval1
6449 #ifndef do_prctl_sme_get_vl
6450 #define do_prctl_sme_get_vl do_prctl_inval0
6452 #ifndef do_prctl_sme_set_vl
6453 #define do_prctl_sme_set_vl do_prctl_inval1
6456 static abi_long
do_prctl(CPUArchState
*env
, abi_long option
, abi_long arg2
,
6457 abi_long arg3
, abi_long arg4
, abi_long arg5
)
6462 case PR_GET_PDEATHSIG
:
6465 ret
= get_errno(prctl(PR_GET_PDEATHSIG
, &deathsig
,
6467 if (!is_error(ret
) &&
6468 put_user_s32(host_to_target_signal(deathsig
), arg2
)) {
6469 return -TARGET_EFAULT
;
6473 case PR_SET_PDEATHSIG
:
6474 return get_errno(prctl(PR_SET_PDEATHSIG
, target_to_host_signal(arg2
),
6478 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
6480 return -TARGET_EFAULT
;
6482 ret
= get_errno(prctl(PR_GET_NAME
, (uintptr_t)name
,
6484 unlock_user(name
, arg2
, 16);
6489 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
6491 return -TARGET_EFAULT
;
6493 ret
= get_errno(prctl(PR_SET_NAME
, (uintptr_t)name
,
6495 unlock_user(name
, arg2
, 0);
6498 case PR_GET_FP_MODE
:
6499 return do_prctl_get_fp_mode(env
);
6500 case PR_SET_FP_MODE
:
6501 return do_prctl_set_fp_mode(env
, arg2
);
6503 return do_prctl_sve_get_vl(env
);
6505 return do_prctl_sve_set_vl(env
, arg2
);
6507 return do_prctl_sme_get_vl(env
);
6509 return do_prctl_sme_set_vl(env
, arg2
);
6510 case PR_PAC_RESET_KEYS
:
6511 if (arg3
|| arg4
|| arg5
) {
6512 return -TARGET_EINVAL
;
6514 return do_prctl_reset_keys(env
, arg2
);
6515 case PR_SET_TAGGED_ADDR_CTRL
:
6516 if (arg3
|| arg4
|| arg5
) {
6517 return -TARGET_EINVAL
;
6519 return do_prctl_set_tagged_addr_ctrl(env
, arg2
);
6520 case PR_GET_TAGGED_ADDR_CTRL
:
6521 if (arg2
|| arg3
|| arg4
|| arg5
) {
6522 return -TARGET_EINVAL
;
6524 return do_prctl_get_tagged_addr_ctrl(env
);
6526 case PR_GET_UNALIGN
:
6527 return do_prctl_get_unalign(env
, arg2
);
6528 case PR_SET_UNALIGN
:
6529 return do_prctl_set_unalign(env
, arg2
);
6531 case PR_CAP_AMBIENT
:
6532 case PR_CAPBSET_READ
:
6533 case PR_CAPBSET_DROP
:
6534 case PR_GET_DUMPABLE
:
6535 case PR_SET_DUMPABLE
:
6536 case PR_GET_KEEPCAPS
:
6537 case PR_SET_KEEPCAPS
:
6538 case PR_GET_SECUREBITS
:
6539 case PR_SET_SECUREBITS
:
6542 case PR_GET_TIMERSLACK
:
6543 case PR_SET_TIMERSLACK
:
6545 case PR_MCE_KILL_GET
:
6546 case PR_GET_NO_NEW_PRIVS
:
6547 case PR_SET_NO_NEW_PRIVS
:
6548 case PR_GET_IO_FLUSHER
:
6549 case PR_SET_IO_FLUSHER
:
6550 /* Some prctl options have no pointer arguments and we can pass on. */
6551 return get_errno(prctl(option
, arg2
, arg3
, arg4
, arg5
));
6553 case PR_GET_CHILD_SUBREAPER
:
6554 case PR_SET_CHILD_SUBREAPER
:
6555 case PR_GET_SPECULATION_CTRL
:
6556 case PR_SET_SPECULATION_CTRL
:
6557 case PR_GET_TID_ADDRESS
:
6559 return -TARGET_EINVAL
;
6563 /* Was used for SPE on PowerPC. */
6564 return -TARGET_EINVAL
;
6571 case PR_GET_SECCOMP
:
6572 case PR_SET_SECCOMP
:
6573 case PR_SET_SYSCALL_USER_DISPATCH
:
6574 case PR_GET_THP_DISABLE
:
6575 case PR_SET_THP_DISABLE
:
6578 /* Disable to prevent the target disabling stuff we need. */
6579 return -TARGET_EINVAL
;
6582 qemu_log_mask(LOG_UNIMP
, "Unsupported prctl: " TARGET_ABI_FMT_ld
"\n",
6584 return -TARGET_EINVAL
;
6588 #define NEW_STACK_SIZE 0x40000
6591 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6594 pthread_mutex_t mutex
;
6595 pthread_cond_t cond
;
6598 abi_ulong child_tidptr
;
6599 abi_ulong parent_tidptr
;
/*
 * clone_func: entry point of a new guest thread created by do_fork()
 * with CLONE_VM.  Registers the new host thread with RCU/TCG, publishes
 * its TID to the requested guest addresses, re-enables signals, signals
 * readiness to the parent, then waits for the parent to finish TLS
 * setup before entering the guest CPU loop.
 * NOTE(review): several lines (locals, cpu initialization, the final
 * cpu_loop call) are elided in this excerpt.
 */
6603 static void *clone_func(void *arg
)
6605 new_thread_info
*info
= arg
;
/* Register this host thread with QEMU's RCU and TCG subsystems. */
6610 rcu_register_thread();
6611 tcg_register_thread();
6615 ts
= (TaskState
*)cpu
->opaque
;
6616 info
->tid
= sys_gettid();
/* CLONE_CHILD_SETTID: store the child TID at the guest address. */
6618 if (info
->child_tidptr
)
6619 put_user_u32(info
->tid
, info
->child_tidptr
);
/* CLONE_PARENT_SETTID: likewise for the parent's address. */
6620 if (info
->parent_tidptr
)
6621 put_user_u32(info
->tid
, info
->parent_tidptr
);
6622 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6623 /* Enable signals. */
6624 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6625 /* Signal to the parent that we're ready. */
6626 pthread_mutex_lock(&info
->mutex
);
6627 pthread_cond_broadcast(&info
->cond
);
6628 pthread_mutex_unlock(&info
->mutex
);
6629 /* Wait until the parent has finished initializing the tls state. */
6630 pthread_mutex_lock(&clone_lock
);
6631 pthread_mutex_unlock(&clone_lock
);
6637 /* do_fork() Must return host values and target errnos (unlike most
6638 do_*() functions). */
6639 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6640 abi_ulong parent_tidptr
, target_ulong newtls
,
6641 abi_ulong child_tidptr
)
6643 CPUState
*cpu
= env_cpu(env
);
6647 CPUArchState
*new_env
;
6650 flags
&= ~CLONE_IGNORED_FLAGS
;
6652 /* Emulate vfork() with fork() */
6653 if (flags
& CLONE_VFORK
)
6654 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6656 if (flags
& CLONE_VM
) {
6657 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6658 new_thread_info info
;
6659 pthread_attr_t attr
;
6661 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6662 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6663 return -TARGET_EINVAL
;
6666 ts
= g_new0(TaskState
, 1);
6667 init_task_state(ts
);
6669 /* Grab a mutex so that thread setup appears atomic. */
6670 pthread_mutex_lock(&clone_lock
);
6673 * If this is our first additional thread, we need to ensure we
6674 * generate code for parallel execution and flush old translations.
6675 * Do this now so that the copy gets CF_PARALLEL too.
6677 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6678 cpu
->tcg_cflags
|= CF_PARALLEL
;
6682 /* we create a new CPU instance. */
6683 new_env
= cpu_copy(env
);
6684 /* Init regs that differ from the parent. */
6685 cpu_clone_regs_child(new_env
, newsp
, flags
);
6686 cpu_clone_regs_parent(env
, flags
);
6687 new_cpu
= env_cpu(new_env
);
6688 new_cpu
->opaque
= ts
;
6689 ts
->bprm
= parent_ts
->bprm
;
6690 ts
->info
= parent_ts
->info
;
6691 ts
->signal_mask
= parent_ts
->signal_mask
;
6693 if (flags
& CLONE_CHILD_CLEARTID
) {
6694 ts
->child_tidptr
= child_tidptr
;
6697 if (flags
& CLONE_SETTLS
) {
6698 cpu_set_tls (new_env
, newtls
);
6701 memset(&info
, 0, sizeof(info
));
6702 pthread_mutex_init(&info
.mutex
, NULL
);
6703 pthread_mutex_lock(&info
.mutex
);
6704 pthread_cond_init(&info
.cond
, NULL
);
6706 if (flags
& CLONE_CHILD_SETTID
) {
6707 info
.child_tidptr
= child_tidptr
;
6709 if (flags
& CLONE_PARENT_SETTID
) {
6710 info
.parent_tidptr
= parent_tidptr
;
6713 ret
= pthread_attr_init(&attr
);
6714 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6715 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6716 /* It is not safe to deliver signals until the child has finished
6717 initializing, so temporarily block all signals. */
6718 sigfillset(&sigmask
);
6719 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6720 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6722 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6723 /* TODO: Free new CPU state if thread creation failed. */
6725 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6726 pthread_attr_destroy(&attr
);
6728 /* Wait for the child to initialize. */
6729 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6734 pthread_mutex_unlock(&info
.mutex
);
6735 pthread_cond_destroy(&info
.cond
);
6736 pthread_mutex_destroy(&info
.mutex
);
6737 pthread_mutex_unlock(&clone_lock
);
6739 /* if no CLONE_VM, we consider it is a fork */
6740 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6741 return -TARGET_EINVAL
;
6744 /* We can't support custom termination signals */
6745 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6746 return -TARGET_EINVAL
;
6749 if (block_signals()) {
6750 return -QEMU_ERESTARTSYS
;
6756 /* Child Process. */
6757 cpu_clone_regs_child(env
, newsp
, flags
);
6759 /* There is a race condition here. The parent process could
6760 theoretically read the TID in the child process before the child
6761 tid is set. This would require using either ptrace
6762 (not implemented) or having *_tidptr to point at a shared memory
6763 mapping. We can't repeat the spinlock hack used above because
6764 the child process gets its own copy of the lock. */
6765 if (flags
& CLONE_CHILD_SETTID
)
6766 put_user_u32(sys_gettid(), child_tidptr
);
6767 if (flags
& CLONE_PARENT_SETTID
)
6768 put_user_u32(sys_gettid(), parent_tidptr
);
6769 ts
= (TaskState
*)cpu
->opaque
;
6770 if (flags
& CLONE_SETTLS
)
6771 cpu_set_tls (env
, newtls
);
6772 if (flags
& CLONE_CHILD_CLEARTID
)
6773 ts
->child_tidptr
= child_tidptr
;
6775 cpu_clone_regs_parent(env
, flags
);
6782 /* warning : doesn't handle linux specific flags... */
6783 static int target_to_host_fcntl_cmd(int cmd
)
6788 case TARGET_F_DUPFD
:
6789 case TARGET_F_GETFD
:
6790 case TARGET_F_SETFD
:
6791 case TARGET_F_GETFL
:
6792 case TARGET_F_SETFL
:
6793 case TARGET_F_OFD_GETLK
:
6794 case TARGET_F_OFD_SETLK
:
6795 case TARGET_F_OFD_SETLKW
:
6798 case TARGET_F_GETLK
:
6801 case TARGET_F_SETLK
:
6804 case TARGET_F_SETLKW
:
6807 case TARGET_F_GETOWN
:
6810 case TARGET_F_SETOWN
:
6813 case TARGET_F_GETSIG
:
6816 case TARGET_F_SETSIG
:
6819 #if TARGET_ABI_BITS == 32
6820 case TARGET_F_GETLK64
:
6823 case TARGET_F_SETLK64
:
6826 case TARGET_F_SETLKW64
:
6830 case TARGET_F_SETLEASE
:
6833 case TARGET_F_GETLEASE
:
6836 #ifdef F_DUPFD_CLOEXEC
6837 case TARGET_F_DUPFD_CLOEXEC
:
6838 ret
= F_DUPFD_CLOEXEC
;
6841 case TARGET_F_NOTIFY
:
6845 case TARGET_F_GETOWN_EX
:
6850 case TARGET_F_SETOWN_EX
:
6855 case TARGET_F_SETPIPE_SZ
:
6858 case TARGET_F_GETPIPE_SZ
:
6863 case TARGET_F_ADD_SEALS
:
6866 case TARGET_F_GET_SEALS
:
6871 ret
= -TARGET_EINVAL
;
6875 #if defined(__powerpc64__)
6876 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6877 * is not supported by kernel. The glibc fcntl call actually adjusts
6878 * them to 5, 6 and 7 before making the syscall(). Since we make the
6879 * syscall directly, adjust to what is supported by the kernel.
6881 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6882 ret
-= F_GETLK64
- 5;
/*
 * Table of flock lock types shared by the two converters below; each
 * direction instantiates TRANSTBL_CONVERT to build its switch cases.
 */
6889 #define FLOCK_TRANSTBL \
6891 TRANSTBL_CONVERT(F_RDLCK); \
6892 TRANSTBL_CONVERT(F_WRLCK); \
6893 TRANSTBL_CONVERT(F_UNLCK); \
/* Map a guest flock l_type to the host value; unknown types -> -TARGET_EINVAL. */
6896 static int target_to_host_flock(int type
)
6898 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6900 #undef TRANSTBL_CONVERT
6901 return -TARGET_EINVAL
;
/* Map a host flock l_type back to the guest value. */
6904 static int host_to_target_flock(int type
)
6906 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6908 #undef TRANSTBL_CONVERT
6909 /* if we don't know how to convert the value coming
6910 * from the host we copy to the target field as-is
/*
 * copy_from_user_flock: convert a guest 'struct target_flock' at
 * target_flock_addr into a host 'struct flock64'.  The l_type field is
 * translated via target_to_host_flock(); the remaining fields are
 * byte-swapped straight across with __get_user().
 * Returns -TARGET_EFAULT if the guest struct cannot be locked.
 * NOTE(review): the error check on the translated l_type and the final
 * 'return 0' are elided in this excerpt.
 */
6915 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6916 abi_ulong target_flock_addr
)
6918 struct target_flock
*target_fl
;
6921 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6922 return -TARGET_EFAULT
;
/* Translate the lock type to the host encoding. */
6925 __get_user(l_type
, &target_fl
->l_type
);
6926 l_type
= target_to_host_flock(l_type
);
6930 fl
->l_type
= l_type
;
6931 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6932 __get_user(fl
->l_start
, &target_fl
->l_start
);
6933 __get_user(fl
->l_len
, &target_fl
->l_len
);
6934 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6935 unlock_user_struct(target_fl
, target_flock_addr
, 0);
/*
 * copy_to_user_flock: inverse of copy_from_user_flock — write a host
 * 'struct flock64' out to a guest 'struct target_flock' at
 * target_flock_addr, translating l_type via host_to_target_flock().
 * Returns -TARGET_EFAULT if the guest struct cannot be locked.
 * NOTE(review): the final 'return 0' is elided in this excerpt.
 */
6939 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6940 const struct flock64
*fl
)
6942 struct target_flock
*target_fl
;
6945 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6946 return -TARGET_EFAULT
;
/* Translate the lock type to the guest encoding. */
6949 l_type
= host_to_target_flock(fl
->l_type
);
6950 __put_user(l_type
, &target_fl
->l_type
);
6951 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6952 __put_user(fl
->l_start
, &target_fl
->l_start
);
6953 __put_user(fl
->l_len
, &target_fl
->l_len
);
6954 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6955 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6959 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6960 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6962 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6963 struct target_oabi_flock64
{
6971 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6972 abi_ulong target_flock_addr
)
6974 struct target_oabi_flock64
*target_fl
;
6977 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6978 return -TARGET_EFAULT
;
6981 __get_user(l_type
, &target_fl
->l_type
);
6982 l_type
= target_to_host_flock(l_type
);
6986 fl
->l_type
= l_type
;
6987 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6988 __get_user(fl
->l_start
, &target_fl
->l_start
);
6989 __get_user(fl
->l_len
, &target_fl
->l_len
);
6990 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6991 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6995 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6996 const struct flock64
*fl
)
6998 struct target_oabi_flock64
*target_fl
;
7001 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
7002 return -TARGET_EFAULT
;
7005 l_type
= host_to_target_flock(fl
->l_type
);
7006 __put_user(l_type
, &target_fl
->l_type
);
7007 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
7008 __put_user(fl
->l_start
, &target_fl
->l_start
);
7009 __put_user(fl
->l_len
, &target_fl
->l_len
);
7010 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
7011 unlock_user_struct(target_fl
, target_flock_addr
, 1);
7016 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
7017 abi_ulong target_flock_addr
)
7019 struct target_flock64
*target_fl
;
7022 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
7023 return -TARGET_EFAULT
;
7026 __get_user(l_type
, &target_fl
->l_type
);
7027 l_type
= target_to_host_flock(l_type
);
7031 fl
->l_type
= l_type
;
7032 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
7033 __get_user(fl
->l_start
, &target_fl
->l_start
);
7034 __get_user(fl
->l_len
, &target_fl
->l_len
);
7035 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
7036 unlock_user_struct(target_fl
, target_flock_addr
, 0);
7040 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
7041 const struct flock64
*fl
)
7043 struct target_flock64
*target_fl
;
7046 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
7047 return -TARGET_EFAULT
;
7050 l_type
= host_to_target_flock(fl
->l_type
);
7051 __put_user(l_type
, &target_fl
->l_type
);
7052 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
7053 __put_user(fl
->l_start
, &target_fl
->l_start
);
7054 __put_user(fl
->l_len
, &target_fl
->l_len
);
7055 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
7056 unlock_user_struct(target_fl
, target_flock_addr
, 1);
7060 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
7062 struct flock64 fl64
;
7064 struct f_owner_ex fox
;
7065 struct target_f_owner_ex
*target_fox
;
7068 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
7070 if (host_cmd
== -TARGET_EINVAL
)
7074 case TARGET_F_GETLK
:
7075 ret
= copy_from_user_flock(&fl64
, arg
);
7079 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7081 ret
= copy_to_user_flock(arg
, &fl64
);
7085 case TARGET_F_SETLK
:
7086 case TARGET_F_SETLKW
:
7087 ret
= copy_from_user_flock(&fl64
, arg
);
7091 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7094 case TARGET_F_GETLK64
:
7095 case TARGET_F_OFD_GETLK
:
7096 ret
= copy_from_user_flock64(&fl64
, arg
);
7100 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7102 ret
= copy_to_user_flock64(arg
, &fl64
);
7105 case TARGET_F_SETLK64
:
7106 case TARGET_F_SETLKW64
:
7107 case TARGET_F_OFD_SETLK
:
7108 case TARGET_F_OFD_SETLKW
:
7109 ret
= copy_from_user_flock64(&fl64
, arg
);
7113 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7116 case TARGET_F_GETFL
:
7117 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7119 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
7123 case TARGET_F_SETFL
:
7124 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
7125 target_to_host_bitmask(arg
,
7130 case TARGET_F_GETOWN_EX
:
7131 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7133 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
7134 return -TARGET_EFAULT
;
7135 target_fox
->type
= tswap32(fox
.type
);
7136 target_fox
->pid
= tswap32(fox
.pid
);
7137 unlock_user_struct(target_fox
, arg
, 1);
7143 case TARGET_F_SETOWN_EX
:
7144 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
7145 return -TARGET_EFAULT
;
7146 fox
.type
= tswap32(target_fox
->type
);
7147 fox
.pid
= tswap32(target_fox
->pid
);
7148 unlock_user_struct(target_fox
, arg
, 0);
7149 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7153 case TARGET_F_SETSIG
:
7154 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
7157 case TARGET_F_GETSIG
:
7158 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
7161 case TARGET_F_SETOWN
:
7162 case TARGET_F_GETOWN
:
7163 case TARGET_F_SETLEASE
:
7164 case TARGET_F_GETLEASE
:
7165 case TARGET_F_SETPIPE_SZ
:
7166 case TARGET_F_GETPIPE_SZ
:
7167 case TARGET_F_ADD_SEALS
:
7168 case TARGET_F_GET_SEALS
:
7169 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7173 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
/*
 * UID/GID width conversion helpers.  When the target uses 16-bit IDs
 * (USE_UID16), values must be narrowed/widened between the 32-bit host
 * representation and the 16-bit guest one, with -1 ("no change")
 * preserved across the sign extension; otherwise all of these are
 * pass-throughs.  put_user_id() stores an ID at guest width.
 * NOTE(review): the function bodies' return statements and the
 * enclosing #if USE_UID16 are elided in this excerpt.
 */
7181 static inline int high2lowuid(int uid
)
7189 static inline int high2lowgid(int gid
)
7197 static inline int low2highuid(int uid
)
/* -1 as a 16-bit value must stay -1 when widened. */
7199 if ((int16_t)uid
== -1)
7205 static inline int low2highgid(int gid
)
7207 if ((int16_t)gid
== -1)
7212 static inline int tswapid(int id
)
7217 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7219 #else /* !USE_UID16 */
/* 32-bit ID targets: identity conversions. */
7220 static inline int high2lowuid(int uid
)
7224 static inline int high2lowgid(int gid
)
7228 static inline int low2highuid(int uid
)
7232 static inline int low2highgid(int gid
)
7236 static inline int tswapid(int id
)
7241 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7243 #endif /* USE_UID16 */
7245 /* We must do direct syscalls for setting UID/GID, because we want to
7246 * implement the Linux system call semantics of "change only for this thread",
7247 * not the libc/POSIX semantics of "change for all threads in process".
7248 * (See http://ewontfix.com/17/ for more details.)
7249 * We use the 32-bit version of the syscalls if present; if it is not
7250 * then either the host architecture supports 32-bit UIDs natively with
7251 * the standard syscall, or the 16-bit UID is the best we can do.
7253 #ifdef __NR_setuid32
7254 #define __NR_sys_setuid __NR_setuid32
7256 #define __NR_sys_setuid __NR_setuid
7258 #ifdef __NR_setgid32
7259 #define __NR_sys_setgid __NR_setgid32
7261 #define __NR_sys_setgid __NR_setgid
7263 #ifdef __NR_setresuid32
7264 #define __NR_sys_setresuid __NR_setresuid32
7266 #define __NR_sys_setresuid __NR_setresuid
7268 #ifdef __NR_setresgid32
7269 #define __NR_sys_setresgid __NR_setresgid32
7271 #define __NR_sys_setresgid __NR_setresgid
7274 _syscall1(int, sys_setuid
, uid_t
, uid
)
7275 _syscall1(int, sys_setgid
, gid_t
, gid
)
7276 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7277 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7279 void syscall_init(void)
7282 const argtype
*arg_type
;
7285 thunk_init(STRUCT_MAX
);
7287 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7288 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7289 #include "syscall_types.h"
7291 #undef STRUCT_SPECIAL
7293 /* we patch the ioctl size if necessary. We rely on the fact that
7294 no ioctl has all the bits at '1' in the size field */
7296 while (ie
->target_cmd
!= 0) {
7297 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7298 TARGET_IOC_SIZEMASK
) {
7299 arg_type
= ie
->arg_type
;
7300 if (arg_type
[0] != TYPE_PTR
) {
7301 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7306 size
= thunk_type_size(arg_type
, 0);
7307 ie
->target_cmd
= (ie
->target_cmd
&
7308 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7309 (size
<< TARGET_IOC_SIZESHIFT
);
7312 /* automatic consistency check if same arch */
7313 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7314 (defined(__x86_64__) && defined(TARGET_X86_64))
7315 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7316 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7317 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
7324 #ifdef TARGET_NR_truncate64
/*
 * target_truncate64: implement truncate64 for 32-bit guests, where the
 * 64-bit length arrives split across two registers (arg2/arg3).  On
 * ABIs that align 64-bit register pairs, shift the argument registers
 * before recombining.
 * NOTE(review): the register-shuffling statements inside the
 * regpairs_aligned() branch are elided in this excerpt.
 */
7325 static inline abi_long
target_truncate64(CPUArchState
*cpu_env
, const char *arg1
,
7330 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
/* Recombine the two 32-bit halves into the host 64-bit offset. */
7334 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7338 #ifdef TARGET_NR_ftruncate64
/*
 * target_ftruncate64: ftruncate64 counterpart of target_truncate64 —
 * same 64-bit length reassembly from a 32-bit register pair, applied to
 * an open file descriptor instead of a pathname.
 * NOTE(review): the register-shuffling statements inside the
 * regpairs_aligned() branch are elided in this excerpt.
 */
7339 static inline abi_long
target_ftruncate64(CPUArchState
*cpu_env
, abi_long arg1
,
7344 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
/* Recombine the two 32-bit halves into the host 64-bit offset. */
7348 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7352 #if defined(TARGET_NR_timer_settime) || \
7353 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * target_to_host_itimerspec: read a guest 'struct target_itimerspec'
 * at target_addr and fill in the host 'struct itimerspec', converting
 * the it_interval and it_value timespecs individually.
 * Returns -TARGET_EFAULT if either conversion faults.
 * NOTE(review): the offsetof member-name arguments and the final
 * 'return 0' are elided in this excerpt.
 */
7354 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_its
,
7355 abi_ulong target_addr
)
7357 if (target_to_host_timespec(&host_its
->it_interval
, target_addr
+
7358 offsetof(struct target_itimerspec
,
7360 target_to_host_timespec(&host_its
->it_value
, target_addr
+
7361 offsetof(struct target_itimerspec
,
7363 return -TARGET_EFAULT
;
7370 #if defined(TARGET_NR_timer_settime64) || \
7371 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7372 static inline abi_long
target_to_host_itimerspec64(struct itimerspec
*host_its
,
7373 abi_ulong target_addr
)
7375 if (target_to_host_timespec64(&host_its
->it_interval
, target_addr
+
7376 offsetof(struct target__kernel_itimerspec
,
7378 target_to_host_timespec64(&host_its
->it_value
, target_addr
+
7379 offsetof(struct target__kernel_itimerspec
,
7381 return -TARGET_EFAULT
;
7388 #if ((defined(TARGET_NR_timerfd_gettime) || \
7389 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7390 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * host_to_target_itimerspec: inverse of target_to_host_itimerspec —
 * write a host 'struct itimerspec' out to the guest structure at
 * target_addr, converting it_interval and it_value individually.
 * Returns -TARGET_EFAULT if either conversion faults.
 * NOTE(review): the offsetof member-name arguments and the final
 * 'return 0' are elided in this excerpt.
 */
7391 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7392 struct itimerspec
*host_its
)
7394 if (host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7396 &host_its
->it_interval
) ||
7397 host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7399 &host_its
->it_value
)) {
7400 return -TARGET_EFAULT
;
7406 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7407 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7408 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7409 static inline abi_long
host_to_target_itimerspec64(abi_ulong target_addr
,
7410 struct itimerspec
*host_its
)
7412 if (host_to_target_timespec64(target_addr
+
7413 offsetof(struct target__kernel_itimerspec
,
7415 &host_its
->it_interval
) ||
7416 host_to_target_timespec64(target_addr
+
7417 offsetof(struct target__kernel_itimerspec
,
7419 &host_its
->it_value
)) {
7420 return -TARGET_EFAULT
;
7426 #if defined(TARGET_NR_adjtimex) || \
7427 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7428 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7429 abi_long target_addr
)
7431 struct target_timex
*target_tx
;
7433 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7434 return -TARGET_EFAULT
;
7437 __get_user(host_tx
->modes
, &target_tx
->modes
);
7438 __get_user(host_tx
->offset
, &target_tx
->offset
);
7439 __get_user(host_tx
->freq
, &target_tx
->freq
);
7440 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7441 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7442 __get_user(host_tx
->status
, &target_tx
->status
);
7443 __get_user(host_tx
->constant
, &target_tx
->constant
);
7444 __get_user(host_tx
->precision
, &target_tx
->precision
);
7445 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7446 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7447 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7448 __get_user(host_tx
->tick
, &target_tx
->tick
);
7449 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7450 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7451 __get_user(host_tx
->shift
, &target_tx
->shift
);
7452 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7453 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7454 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7455 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7456 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7457 __get_user(host_tx
->tai
, &target_tx
->tai
);
7459 unlock_user_struct(target_tx
, target_addr
, 0);
7463 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7464 struct timex
*host_tx
)
7466 struct target_timex
*target_tx
;
7468 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7469 return -TARGET_EFAULT
;
7472 __put_user(host_tx
->modes
, &target_tx
->modes
);
7473 __put_user(host_tx
->offset
, &target_tx
->offset
);
7474 __put_user(host_tx
->freq
, &target_tx
->freq
);
7475 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7476 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7477 __put_user(host_tx
->status
, &target_tx
->status
);
7478 __put_user(host_tx
->constant
, &target_tx
->constant
);
7479 __put_user(host_tx
->precision
, &target_tx
->precision
);
7480 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7481 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7482 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7483 __put_user(host_tx
->tick
, &target_tx
->tick
);
7484 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7485 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7486 __put_user(host_tx
->shift
, &target_tx
->shift
);
7487 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7488 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7489 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7490 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7491 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7492 __put_user(host_tx
->tai
, &target_tx
->tai
);
7494 unlock_user_struct(target_tx
, target_addr
, 1);
7500 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7501 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7502 abi_long target_addr
)
7504 struct target__kernel_timex
*target_tx
;
7506 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7507 offsetof(struct target__kernel_timex
,
7509 return -TARGET_EFAULT
;
7512 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7513 return -TARGET_EFAULT
;
7516 __get_user(host_tx
->modes
, &target_tx
->modes
);
7517 __get_user(host_tx
->offset
, &target_tx
->offset
);
7518 __get_user(host_tx
->freq
, &target_tx
->freq
);
7519 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7520 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7521 __get_user(host_tx
->status
, &target_tx
->status
);
7522 __get_user(host_tx
->constant
, &target_tx
->constant
);
7523 __get_user(host_tx
->precision
, &target_tx
->precision
);
7524 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7525 __get_user(host_tx
->tick
, &target_tx
->tick
);
7526 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7527 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7528 __get_user(host_tx
->shift
, &target_tx
->shift
);
7529 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7530 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7531 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7532 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7533 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7534 __get_user(host_tx
->tai
, &target_tx
->tai
);
7536 unlock_user_struct(target_tx
, target_addr
, 0);
7540 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7541 struct timex
*host_tx
)
7543 struct target__kernel_timex
*target_tx
;
7545 if (copy_to_user_timeval64(target_addr
+
7546 offsetof(struct target__kernel_timex
, time
),
7548 return -TARGET_EFAULT
;
7551 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7552 return -TARGET_EFAULT
;
7555 __put_user(host_tx
->modes
, &target_tx
->modes
);
7556 __put_user(host_tx
->offset
, &target_tx
->offset
);
7557 __put_user(host_tx
->freq
, &target_tx
->freq
);
7558 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7559 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7560 __put_user(host_tx
->status
, &target_tx
->status
);
7561 __put_user(host_tx
->constant
, &target_tx
->constant
);
7562 __put_user(host_tx
->precision
, &target_tx
->precision
);
7563 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7564 __put_user(host_tx
->tick
, &target_tx
->tick
);
7565 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7566 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7567 __put_user(host_tx
->shift
, &target_tx
->shift
);
7568 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7569 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7570 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7571 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7572 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7573 __put_user(host_tx
->tai
, &target_tx
->tai
);
7575 unlock_user_struct(target_tx
, target_addr
, 1);
7580 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7581 #define sigev_notify_thread_id _sigev_un._tid
7584 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7585 abi_ulong target_addr
)
7587 struct target_sigevent
*target_sevp
;
7589 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7590 return -TARGET_EFAULT
;
7593 /* This union is awkward on 64 bit systems because it has a 32 bit
7594 * integer and a pointer in it; we follow the conversion approach
7595 * used for handling sigval types in signal.c so the guest should get
7596 * the correct value back even if we did a 64 bit byteswap and it's
7597 * using the 32 bit integer.
7599 host_sevp
->sigev_value
.sival_ptr
=
7600 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7601 host_sevp
->sigev_signo
=
7602 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7603 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7604 host_sevp
->sigev_notify_thread_id
= tswap32(target_sevp
->_sigev_un
._tid
);
7606 unlock_user_struct(target_sevp
, target_addr
, 1);
7610 #if defined(TARGET_NR_mlockall)
7611 static inline int target_to_host_mlockall_arg(int arg
)
7615 if (arg
& TARGET_MCL_CURRENT
) {
7616 result
|= MCL_CURRENT
;
7618 if (arg
& TARGET_MCL_FUTURE
) {
7619 result
|= MCL_FUTURE
;
7622 if (arg
& TARGET_MCL_ONFAULT
) {
7623 result
|= MCL_ONFAULT
;
7631 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7632 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7633 defined(TARGET_NR_newfstatat))
7634 static inline abi_long
host_to_target_stat64(CPUArchState
*cpu_env
,
7635 abi_ulong target_addr
,
7636 struct stat
*host_st
)
7638 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7639 if (cpu_env
->eabi
) {
7640 struct target_eabi_stat64
*target_st
;
7642 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7643 return -TARGET_EFAULT
;
7644 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7645 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7646 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7647 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7648 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7650 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7651 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7652 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7653 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7654 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7655 __put_user(host_st
->st_size
, &target_st
->st_size
);
7656 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7657 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7658 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7659 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7660 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7661 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7662 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7663 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7664 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7666 unlock_user_struct(target_st
, target_addr
, 1);
7670 #if defined(TARGET_HAS_STRUCT_STAT64)
7671 struct target_stat64
*target_st
;
7673 struct target_stat
*target_st
;
7676 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7677 return -TARGET_EFAULT
;
7678 memset(target_st
, 0, sizeof(*target_st
));
7679 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7680 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7681 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7682 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7684 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7685 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7686 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7687 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7688 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7689 /* XXX: better use of kernel struct */
7690 __put_user(host_st
->st_size
, &target_st
->st_size
);
7691 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7692 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7693 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7694 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7695 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7696 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7697 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7698 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7699 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7701 unlock_user_struct(target_st
, target_addr
, 1);
7708 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7709 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7710 abi_ulong target_addr
)
7712 struct target_statx
*target_stx
;
7714 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7715 return -TARGET_EFAULT
;
7717 memset(target_stx
, 0, sizeof(*target_stx
));
7719 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7720 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7721 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7722 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7723 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7724 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7725 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7726 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7727 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7728 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7729 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7730 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7731 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7732 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7733 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7734 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7735 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7736 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7737 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7738 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7739 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7740 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7741 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7743 unlock_user_struct(target_stx
, target_addr
, 1);
/* Dispatch a raw futex syscall, picking __NR_futex or __NR_futex_time64
 * depending on the host's time_t width. */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7774 static int do_safe_futex(int *uaddr
, int op
, int val
,
7775 const struct timespec
*timeout
, int *uaddr2
,
7778 #if HOST_LONG_BITS == 64
7779 #if defined(__NR_futex)
7780 /* always a 64-bit time_t, it doesn't define _time64 version */
7781 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7783 #else /* HOST_LONG_BITS == 64 */
7784 #if defined(__NR_futex_time64)
7785 if (sizeof(timeout
->tv_sec
) == 8) {
7786 /* _time64 function on 32bit arch */
7787 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7791 #if defined(__NR_futex)
7792 /* old function on 32bit arch */
7793 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7795 #endif /* HOST_LONG_BITS == 64 */
7796 return -TARGET_ENOSYS
;
7799 /* ??? Using host futex calls even when target atomic operations
7800 are not really atomic probably breaks things. However implementing
7801 futexes locally would make futexes shared between multiple processes
7802 tricky. However they're probably useless because guest atomic
7803 operations won't work either. */
7804 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7805 static int do_futex(CPUState
*cpu
, bool time64
, target_ulong uaddr
,
7806 int op
, int val
, target_ulong timeout
,
7807 target_ulong uaddr2
, int val3
)
7809 struct timespec ts
, *pts
= NULL
;
7810 void *haddr2
= NULL
;
7813 /* We assume FUTEX_* constants are the same on both host and target. */
7814 #ifdef FUTEX_CMD_MASK
7815 base_op
= op
& FUTEX_CMD_MASK
;
7821 case FUTEX_WAIT_BITSET
:
7824 case FUTEX_WAIT_REQUEUE_PI
:
7826 haddr2
= g2h(cpu
, uaddr2
);
7829 case FUTEX_LOCK_PI2
:
7832 case FUTEX_WAKE_BITSET
:
7833 case FUTEX_TRYLOCK_PI
:
7834 case FUTEX_UNLOCK_PI
:
7838 val
= target_to_host_signal(val
);
7841 case FUTEX_CMP_REQUEUE
:
7842 case FUTEX_CMP_REQUEUE_PI
:
7843 val3
= tswap32(val3
);
7848 * For these, the 4th argument is not TIMEOUT, but VAL2.
7849 * But the prototype of do_safe_futex takes a pointer, so
7850 * insert casts to satisfy the compiler. We do not need
7851 * to tswap VAL2 since it's not compared to guest memory.
7853 pts
= (struct timespec
*)(uintptr_t)timeout
;
7855 haddr2
= g2h(cpu
, uaddr2
);
7858 return -TARGET_ENOSYS
;
7863 ? target_to_host_timespec64(pts
, timeout
)
7864 : target_to_host_timespec(pts
, timeout
)) {
7865 return -TARGET_EFAULT
;
7868 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, haddr2
, val3
);
7872 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7873 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7874 abi_long handle
, abi_long mount_id
,
7877 struct file_handle
*target_fh
;
7878 struct file_handle
*fh
;
7882 unsigned int size
, total_size
;
7884 if (get_user_s32(size
, handle
)) {
7885 return -TARGET_EFAULT
;
7888 name
= lock_user_string(pathname
);
7890 return -TARGET_EFAULT
;
7893 total_size
= sizeof(struct file_handle
) + size
;
7894 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7896 unlock_user(name
, pathname
, 0);
7897 return -TARGET_EFAULT
;
7900 fh
= g_malloc0(total_size
);
7901 fh
->handle_bytes
= size
;
7903 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7904 unlock_user(name
, pathname
, 0);
7906 /* man name_to_handle_at(2):
7907 * Other than the use of the handle_bytes field, the caller should treat
7908 * the file_handle structure as an opaque data type
7911 memcpy(target_fh
, fh
, total_size
);
7912 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7913 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7915 unlock_user(target_fh
, handle
, total_size
);
7917 if (put_user_s32(mid
, mount_id
)) {
7918 return -TARGET_EFAULT
;
7926 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7927 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7930 struct file_handle
*target_fh
;
7931 struct file_handle
*fh
;
7932 unsigned int size
, total_size
;
7935 if (get_user_s32(size
, handle
)) {
7936 return -TARGET_EFAULT
;
7939 total_size
= sizeof(struct file_handle
) + size
;
7940 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7942 return -TARGET_EFAULT
;
7945 fh
= g_memdup(target_fh
, total_size
);
7946 fh
->handle_bytes
= size
;
7947 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7949 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7950 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7954 unlock_user(target_fh
, handle
, total_size
);
7960 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7962 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7965 target_sigset_t
*target_mask
;
7969 if (flags
& ~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
)) {
7970 return -TARGET_EINVAL
;
7972 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7973 return -TARGET_EFAULT
;
7976 target_to_host_sigset(&host_mask
, target_mask
);
7978 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7980 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7982 fd_trans_register(ret
, &target_signalfd_trans
);
7985 unlock_user_struct(target_mask
, mask
, 0);
7991 /* Map host to target signal numbers for the wait family of syscalls.
7992 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
8005 static int open_self_cmdline(CPUArchState
*cpu_env
, int fd
)
8007 CPUState
*cpu
= env_cpu(cpu_env
);
8008 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
8011 for (i
= 0; i
< bprm
->argc
; i
++) {
8012 size_t len
= strlen(bprm
->argv
[i
]) + 1;
8014 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
8022 static int open_self_maps(CPUArchState
*cpu_env
, int fd
)
8024 CPUState
*cpu
= env_cpu(cpu_env
);
8025 TaskState
*ts
= cpu
->opaque
;
8026 GSList
*map_info
= read_self_maps();
8030 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
8031 MapInfo
*e
= (MapInfo
*) s
->data
;
8033 if (h2g_valid(e
->start
)) {
8034 unsigned long min
= e
->start
;
8035 unsigned long max
= e
->end
;
8036 int flags
= page_get_flags(h2g(min
));
8039 max
= h2g_valid(max
- 1) ?
8040 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
8042 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
8047 if (h2g(max
) == ts
->info
->stack_limit
) {
8049 if (h2g(min
) == ts
->info
->stack_limit
) {
8056 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
8057 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
8058 h2g(min
), h2g(max
- 1) + 1,
8059 (flags
& PAGE_READ
) ? 'r' : '-',
8060 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
8061 (flags
& PAGE_EXEC
) ? 'x' : '-',
8062 e
->is_priv
? 'p' : 's',
8063 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
8065 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
8072 free_self_maps(map_info
);
8074 #ifdef TARGET_VSYSCALL_PAGE
8076 * We only support execution from the vsyscall page.
8077 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8079 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
8080 " --xp 00000000 00:00 0",
8081 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
8082 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
8088 static int open_self_stat(CPUArchState
*cpu_env
, int fd
)
8090 CPUState
*cpu
= env_cpu(cpu_env
);
8091 TaskState
*ts
= cpu
->opaque
;
8092 g_autoptr(GString
) buf
= g_string_new(NULL
);
8095 for (i
= 0; i
< 44; i
++) {
8098 g_string_printf(buf
, FMT_pid
" ", getpid());
8099 } else if (i
== 1) {
8101 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
8102 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
8103 g_string_printf(buf
, "(%.15s) ", bin
);
8104 } else if (i
== 3) {
8106 g_string_printf(buf
, FMT_pid
" ", getppid());
8107 } else if (i
== 21) {
8109 g_string_printf(buf
, "%" PRIu64
" ", ts
->start_boottime
);
8110 } else if (i
== 27) {
8112 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
8114 /* for the rest, there is MasterCard */
8115 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
8118 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
8126 static int open_self_auxv(CPUArchState
*cpu_env
, int fd
)
8128 CPUState
*cpu
= env_cpu(cpu_env
);
8129 TaskState
*ts
= cpu
->opaque
;
8130 abi_ulong auxv
= ts
->info
->saved_auxv
;
8131 abi_ulong len
= ts
->info
->auxv_len
;
8135 * Auxiliary vector is stored in target process stack.
8136 * read in whole auxv vector and copy it to file
8138 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
8142 r
= write(fd
, ptr
, len
);
8149 lseek(fd
, 0, SEEK_SET
);
8150 unlock_user(ptr
, auxv
, len
);
/* Return 1 when FILENAME names ENTRY under this process's own /proc
 * directory — either "/proc/self/ENTRY" or "/proc/<own-pid>/ENTRY" —
 * and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
8180 static void excp_dump_file(FILE *logfile
, CPUArchState
*env
,
8181 const char *fmt
, int code
)
8184 CPUState
*cs
= env_cpu(env
);
8186 fprintf(logfile
, fmt
, code
);
8187 fprintf(logfile
, "Failing executable: %s\n", exec_path
);
8188 cpu_dump_state(cs
, logfile
, 0);
8189 open_self_maps(env
, fileno(logfile
));
8193 void target_exception_dump(CPUArchState
*env
, const char *fmt
, int code
)
8195 /* dump to console */
8196 excp_dump_file(stderr
, env
, fmt
, code
);
8198 /* dump to log file */
8199 if (qemu_log_separate()) {
8200 FILE *logfile
= qemu_log_trylock();
8202 excp_dump_file(logfile
, env
, fmt
, code
);
8203 qemu_log_unlock(logfile
);
8207 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8208 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake-/proc table in do_openat(). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
8215 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8216 static int open_net_route(CPUArchState
*cpu_env
, int fd
)
8223 fp
= fopen("/proc/net/route", "r");
8230 read
= getline(&line
, &len
, fp
);
8231 dprintf(fd
, "%s", line
);
8235 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8237 uint32_t dest
, gw
, mask
;
8238 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8241 fields
= sscanf(line
,
8242 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8243 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8244 &mask
, &mtu
, &window
, &irtt
);
8248 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8249 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8250 metric
, tswap32(mask
), mtu
, window
, irtt
);
8260 #if defined(TARGET_SPARC)
8261 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8263 dprintf(fd
, "type\t\t: sun4u\n");
8268 #if defined(TARGET_HPPA)
8269 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8271 dprintf(fd
, "cpu family\t: PA-RISC 1.1e\n");
8272 dprintf(fd
, "cpu\t\t: PA7300LC (PCX-L2)\n");
8273 dprintf(fd
, "capabilities\t: os32\n");
8274 dprintf(fd
, "model\t\t: 9000/778/B160L\n");
8275 dprintf(fd
, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8280 #if defined(TARGET_M68K)
8281 static int open_hardware(CPUArchState
*cpu_env
, int fd
)
8283 dprintf(fd
, "Model:\t\tqemu-m68k\n");
8288 static int do_openat(CPUArchState
*cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8291 const char *filename
;
8292 int (*fill
)(CPUArchState
*cpu_env
, int fd
);
8293 int (*cmp
)(const char *s1
, const char *s2
);
8295 const struct fake_open
*fake_open
;
8296 static const struct fake_open fakes
[] = {
8297 { "maps", open_self_maps
, is_proc_myself
},
8298 { "stat", open_self_stat
, is_proc_myself
},
8299 { "auxv", open_self_auxv
, is_proc_myself
},
8300 { "cmdline", open_self_cmdline
, is_proc_myself
},
8301 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8302 { "/proc/net/route", open_net_route
, is_proc
},
8304 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8305 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8307 #if defined(TARGET_M68K)
8308 { "/proc/hardware", open_hardware
, is_proc
},
8310 { NULL
, NULL
, NULL
}
8313 if (is_proc_myself(pathname
, "exe")) {
8314 return safe_openat(dirfd
, exec_path
, flags
, mode
);
8317 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8318 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8323 if (fake_open
->filename
) {
8325 char filename
[PATH_MAX
];
8328 fd
= memfd_create("qemu-open", 0);
8330 if (errno
!= ENOSYS
) {
8333 /* create temporary file to map stat to */
8334 tmpdir
= getenv("TMPDIR");
8337 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8338 fd
= mkstemp(filename
);
8345 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8351 lseek(fd
, 0, SEEK_SET
);
8356 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8359 #define TIMER_MAGIC 0x0caf0000
8360 #define TIMER_MAGIC_MASK 0xffff0000
8362 /* Convert QEMU provided timer ID back to internal 16bit index format */
8363 static target_timer_t
get_timer_id(abi_long arg
)
8365 target_timer_t timerid
= arg
;
8367 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8368 return -TARGET_EINVAL
;
8373 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8374 return -TARGET_EINVAL
;
8380 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8382 abi_ulong target_addr
,
8385 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8386 unsigned host_bits
= sizeof(*host_mask
) * 8;
8387 abi_ulong
*target_mask
;
8390 assert(host_size
>= target_size
);
8392 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8394 return -TARGET_EFAULT
;
8396 memset(host_mask
, 0, host_size
);
8398 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8399 unsigned bit
= i
* target_bits
;
8402 __get_user(val
, &target_mask
[i
]);
8403 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8404 if (val
& (1UL << j
)) {
8405 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8410 unlock_user(target_mask
, target_addr
, 0);
8414 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8416 abi_ulong target_addr
,
8419 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8420 unsigned host_bits
= sizeof(*host_mask
) * 8;
8421 abi_ulong
*target_mask
;
8424 assert(host_size
>= target_size
);
8426 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8428 return -TARGET_EFAULT
;
8431 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8432 unsigned bit
= i
* target_bits
;
8435 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8436 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8440 __put_user(val
, &target_mask
[i
]);
8443 unlock_user(target_mask
, target_addr
, target_size
);
8447 #ifdef TARGET_NR_getdents
8448 static int do_getdents(abi_long dirfd
, abi_long arg2
, abi_long count
)
8450 g_autofree
void *hdirp
= NULL
;
8452 int hlen
, hoff
, toff
;
8453 int hreclen
, treclen
;
8454 off64_t prev_diroff
= 0;
8456 hdirp
= g_try_malloc(count
);
8458 return -TARGET_ENOMEM
;
8461 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8462 hlen
= sys_getdents(dirfd
, hdirp
, count
);
8464 hlen
= sys_getdents64(dirfd
, hdirp
, count
);
8467 hlen
= get_errno(hlen
);
8468 if (is_error(hlen
)) {
8472 tdirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8474 return -TARGET_EFAULT
;
8477 for (hoff
= toff
= 0; hoff
< hlen
; hoff
+= hreclen
, toff
+= treclen
) {
8478 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8479 struct linux_dirent
*hde
= hdirp
+ hoff
;
8481 struct linux_dirent64
*hde
= hdirp
+ hoff
;
8483 struct target_dirent
*tde
= tdirp
+ toff
;
8487 namelen
= strlen(hde
->d_name
);
8488 hreclen
= hde
->d_reclen
;
8489 treclen
= offsetof(struct target_dirent
, d_name
) + namelen
+ 2;
8490 treclen
= QEMU_ALIGN_UP(treclen
, __alignof(struct target_dirent
));
8492 if (toff
+ treclen
> count
) {
8494 * If the host struct is smaller than the target struct, or
8495 * requires less alignment and thus packs into less space,
8496 * then the host can return more entries than we can pass
8500 toff
= -TARGET_EINVAL
; /* result buffer is too small */
8504 * Return what we have, resetting the file pointer to the
8505 * location of the first record not returned.
8507 lseek64(dirfd
, prev_diroff
, SEEK_SET
);
8511 prev_diroff
= hde
->d_off
;
8512 tde
->d_ino
= tswapal(hde
->d_ino
);
8513 tde
->d_off
= tswapal(hde
->d_off
);
8514 tde
->d_reclen
= tswap16(treclen
);
8515 memcpy(tde
->d_name
, hde
->d_name
, namelen
+ 1);
8518 * The getdents type is in what was formerly a padding byte at the
8519 * end of the structure.
8521 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8522 type
= *((uint8_t *)hde
+ hreclen
- 1);
8526 *((uint8_t *)tde
+ treclen
- 1) = type
;
8529 unlock_user(tdirp
, arg2
, toff
);
8532 #endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Read directory entries from host fd 'dirfd' and repack them into the
 * guest's struct target_dirent64 layout at guest address arg2 (buffer of
 * 'count' bytes).
 *
 * Returns the number of bytes stored in the guest buffer, 0 at
 * end-of-directory, or a negative -TARGET_* errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    /*
     * Plain off_t/lseek instead of the LFS64 off64_t/lseek64 aliases:
     * the *64 interfaces are unavailable on musl and deprecated in
     * modern glibc; with 64-bit file offsets (which QEMU builds with)
     * they are identical.  NOTE(review): confirm _FILE_OFFSET_BITS=64
     * is set by osdep.h for all supported hosts.
     */
    off_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen includes the trailing NUL byte */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/*
 * Direct host-syscall wrapper for pivot_root(2); the host libc does not
 * necessarily provide a wrapper for this call.
 */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
8609 static abi_long
do_syscall1(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
8610 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8611 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8614 CPUState
*cpu
= env_cpu(cpu_env
);
8616 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8617 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8618 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8619 || defined(TARGET_NR_statx)
8622 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8623 || defined(TARGET_NR_fstatfs)
8629 case TARGET_NR_exit
:
8630 /* In old applications this may be used to implement _exit(2).
8631 However in threaded applications it is used for thread termination,
8632 and _exit_group is used for application termination.
8633 Do thread termination if we have more then one thread. */
8635 if (block_signals()) {
8636 return -QEMU_ERESTARTSYS
;
8639 pthread_mutex_lock(&clone_lock
);
8641 if (CPU_NEXT(first_cpu
)) {
8642 TaskState
*ts
= cpu
->opaque
;
8644 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8645 object_unref(OBJECT(cpu
));
8647 * At this point the CPU should be unrealized and removed
8648 * from cpu lists. We can clean-up the rest of the thread
8649 * data without the lock held.
8652 pthread_mutex_unlock(&clone_lock
);
8654 if (ts
->child_tidptr
) {
8655 put_user_u32(0, ts
->child_tidptr
);
8656 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8657 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8661 rcu_unregister_thread();
8665 pthread_mutex_unlock(&clone_lock
);
8666 preexit_cleanup(cpu_env
, arg1
);
8668 return 0; /* avoid warning */
8669 case TARGET_NR_read
:
8670 if (arg2
== 0 && arg3
== 0) {
8671 return get_errno(safe_read(arg1
, 0, 0));
8673 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8674 return -TARGET_EFAULT
;
8675 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8677 fd_trans_host_to_target_data(arg1
)) {
8678 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8680 unlock_user(p
, arg2
, ret
);
8683 case TARGET_NR_write
:
8684 if (arg2
== 0 && arg3
== 0) {
8685 return get_errno(safe_write(arg1
, 0, 0));
8687 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8688 return -TARGET_EFAULT
;
8689 if (fd_trans_target_to_host_data(arg1
)) {
8690 void *copy
= g_malloc(arg3
);
8691 memcpy(copy
, p
, arg3
);
8692 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8694 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8698 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8700 unlock_user(p
, arg2
, 0);
8703 #ifdef TARGET_NR_open
8704 case TARGET_NR_open
:
8705 if (!(p
= lock_user_string(arg1
)))
8706 return -TARGET_EFAULT
;
8707 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8708 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8710 fd_trans_unregister(ret
);
8711 unlock_user(p
, arg1
, 0);
8714 case TARGET_NR_openat
:
8715 if (!(p
= lock_user_string(arg2
)))
8716 return -TARGET_EFAULT
;
8717 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8718 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8720 fd_trans_unregister(ret
);
8721 unlock_user(p
, arg2
, 0);
8723 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8724 case TARGET_NR_name_to_handle_at
:
8725 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8728 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8729 case TARGET_NR_open_by_handle_at
:
8730 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8731 fd_trans_unregister(ret
);
8734 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8735 case TARGET_NR_pidfd_open
:
8736 return get_errno(pidfd_open(arg1
, arg2
));
8738 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8739 case TARGET_NR_pidfd_send_signal
:
8741 siginfo_t uinfo
, *puinfo
;
8744 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8746 return -TARGET_EFAULT
;
8748 target_to_host_siginfo(&uinfo
, p
);
8749 unlock_user(p
, arg3
, 0);
8754 ret
= get_errno(pidfd_send_signal(arg1
, target_to_host_signal(arg2
),
8759 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8760 case TARGET_NR_pidfd_getfd
:
8761 return get_errno(pidfd_getfd(arg1
, arg2
, arg3
));
8763 case TARGET_NR_close
:
8764 fd_trans_unregister(arg1
);
8765 return get_errno(close(arg1
));
8766 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8767 case TARGET_NR_close_range
:
8768 ret
= get_errno(sys_close_range(arg1
, arg2
, arg3
));
8769 if (ret
== 0 && !(arg3
& CLOSE_RANGE_CLOEXEC
)) {
8771 maxfd
= MIN(arg2
, target_fd_max
);
8772 for (fd
= arg1
; fd
< maxfd
; fd
++) {
8773 fd_trans_unregister(fd
);
8780 return do_brk(arg1
);
8781 #ifdef TARGET_NR_fork
8782 case TARGET_NR_fork
:
8783 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8785 #ifdef TARGET_NR_waitpid
8786 case TARGET_NR_waitpid
:
8789 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8790 if (!is_error(ret
) && arg2
&& ret
8791 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8792 return -TARGET_EFAULT
;
8796 #ifdef TARGET_NR_waitid
8797 case TARGET_NR_waitid
:
8801 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8802 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8803 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8804 return -TARGET_EFAULT
;
8805 host_to_target_siginfo(p
, &info
);
8806 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8811 #ifdef TARGET_NR_creat /* not on alpha */
8812 case TARGET_NR_creat
:
8813 if (!(p
= lock_user_string(arg1
)))
8814 return -TARGET_EFAULT
;
8815 ret
= get_errno(creat(p
, arg2
));
8816 fd_trans_unregister(ret
);
8817 unlock_user(p
, arg1
, 0);
8820 #ifdef TARGET_NR_link
8821 case TARGET_NR_link
:
8824 p
= lock_user_string(arg1
);
8825 p2
= lock_user_string(arg2
);
8827 ret
= -TARGET_EFAULT
;
8829 ret
= get_errno(link(p
, p2
));
8830 unlock_user(p2
, arg2
, 0);
8831 unlock_user(p
, arg1
, 0);
8835 #if defined(TARGET_NR_linkat)
8836 case TARGET_NR_linkat
:
8840 return -TARGET_EFAULT
;
8841 p
= lock_user_string(arg2
);
8842 p2
= lock_user_string(arg4
);
8844 ret
= -TARGET_EFAULT
;
8846 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8847 unlock_user(p
, arg2
, 0);
8848 unlock_user(p2
, arg4
, 0);
8852 #ifdef TARGET_NR_unlink
8853 case TARGET_NR_unlink
:
8854 if (!(p
= lock_user_string(arg1
)))
8855 return -TARGET_EFAULT
;
8856 ret
= get_errno(unlink(p
));
8857 unlock_user(p
, arg1
, 0);
8860 #if defined(TARGET_NR_unlinkat)
8861 case TARGET_NR_unlinkat
:
8862 if (!(p
= lock_user_string(arg2
)))
8863 return -TARGET_EFAULT
;
8864 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8865 unlock_user(p
, arg2
, 0);
8868 case TARGET_NR_execve
:
8870 char **argp
, **envp
;
8873 abi_ulong guest_argp
;
8874 abi_ulong guest_envp
;
8880 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8881 if (get_user_ual(addr
, gp
))
8882 return -TARGET_EFAULT
;
8889 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8890 if (get_user_ual(addr
, gp
))
8891 return -TARGET_EFAULT
;
8897 argp
= g_new0(char *, argc
+ 1);
8898 envp
= g_new0(char *, envc
+ 1);
8900 for (gp
= guest_argp
, q
= argp
; gp
;
8901 gp
+= sizeof(abi_ulong
), q
++) {
8902 if (get_user_ual(addr
, gp
))
8906 if (!(*q
= lock_user_string(addr
)))
8911 for (gp
= guest_envp
, q
= envp
; gp
;
8912 gp
+= sizeof(abi_ulong
), q
++) {
8913 if (get_user_ual(addr
, gp
))
8917 if (!(*q
= lock_user_string(addr
)))
8922 if (!(p
= lock_user_string(arg1
)))
8924 /* Although execve() is not an interruptible syscall it is
8925 * a special case where we must use the safe_syscall wrapper:
8926 * if we allow a signal to happen before we make the host
8927 * syscall then we will 'lose' it, because at the point of
8928 * execve the process leaves QEMU's control. So we use the
8929 * safe syscall wrapper to ensure that we either take the
8930 * signal as a guest signal, or else it does not happen
8931 * before the execve completes and makes it the other
8932 * program's problem.
8934 if (is_proc_myself(p
, "exe")) {
8935 ret
= get_errno(safe_execve(exec_path
, argp
, envp
));
8937 ret
= get_errno(safe_execve(p
, argp
, envp
));
8939 unlock_user(p
, arg1
, 0);
8944 ret
= -TARGET_EFAULT
;
8947 for (gp
= guest_argp
, q
= argp
; *q
;
8948 gp
+= sizeof(abi_ulong
), q
++) {
8949 if (get_user_ual(addr
, gp
)
8952 unlock_user(*q
, addr
, 0);
8954 for (gp
= guest_envp
, q
= envp
; *q
;
8955 gp
+= sizeof(abi_ulong
), q
++) {
8956 if (get_user_ual(addr
, gp
)
8959 unlock_user(*q
, addr
, 0);
8966 case TARGET_NR_chdir
:
8967 if (!(p
= lock_user_string(arg1
)))
8968 return -TARGET_EFAULT
;
8969 ret
= get_errno(chdir(p
));
8970 unlock_user(p
, arg1
, 0);
8972 #ifdef TARGET_NR_time
8973 case TARGET_NR_time
:
8976 ret
= get_errno(time(&host_time
));
8979 && put_user_sal(host_time
, arg1
))
8980 return -TARGET_EFAULT
;
8984 #ifdef TARGET_NR_mknod
8985 case TARGET_NR_mknod
:
8986 if (!(p
= lock_user_string(arg1
)))
8987 return -TARGET_EFAULT
;
8988 ret
= get_errno(mknod(p
, arg2
, arg3
));
8989 unlock_user(p
, arg1
, 0);
8992 #if defined(TARGET_NR_mknodat)
8993 case TARGET_NR_mknodat
:
8994 if (!(p
= lock_user_string(arg2
)))
8995 return -TARGET_EFAULT
;
8996 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8997 unlock_user(p
, arg2
, 0);
9000 #ifdef TARGET_NR_chmod
9001 case TARGET_NR_chmod
:
9002 if (!(p
= lock_user_string(arg1
)))
9003 return -TARGET_EFAULT
;
9004 ret
= get_errno(chmod(p
, arg2
));
9005 unlock_user(p
, arg1
, 0);
9008 #ifdef TARGET_NR_lseek
9009 case TARGET_NR_lseek
:
9010 return get_errno(lseek(arg1
, arg2
, arg3
));
9012 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9013 /* Alpha specific */
9014 case TARGET_NR_getxpid
:
9015 cpu_env
->ir
[IR_A4
] = getppid();
9016 return get_errno(getpid());
9018 #ifdef TARGET_NR_getpid
9019 case TARGET_NR_getpid
:
9020 return get_errno(getpid());
9022 case TARGET_NR_mount
:
9024 /* need to look at the data field */
9028 p
= lock_user_string(arg1
);
9030 return -TARGET_EFAULT
;
9036 p2
= lock_user_string(arg2
);
9039 unlock_user(p
, arg1
, 0);
9041 return -TARGET_EFAULT
;
9045 p3
= lock_user_string(arg3
);
9048 unlock_user(p
, arg1
, 0);
9050 unlock_user(p2
, arg2
, 0);
9051 return -TARGET_EFAULT
;
9057 /* FIXME - arg5 should be locked, but it isn't clear how to
9058 * do that since it's not guaranteed to be a NULL-terminated
9062 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
9064 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
9066 ret
= get_errno(ret
);
9069 unlock_user(p
, arg1
, 0);
9071 unlock_user(p2
, arg2
, 0);
9073 unlock_user(p3
, arg3
, 0);
9077 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9078 #if defined(TARGET_NR_umount)
9079 case TARGET_NR_umount
:
9081 #if defined(TARGET_NR_oldumount)
9082 case TARGET_NR_oldumount
:
9084 if (!(p
= lock_user_string(arg1
)))
9085 return -TARGET_EFAULT
;
9086 ret
= get_errno(umount(p
));
9087 unlock_user(p
, arg1
, 0);
9090 #ifdef TARGET_NR_stime /* not on alpha */
9091 case TARGET_NR_stime
:
9095 if (get_user_sal(ts
.tv_sec
, arg1
)) {
9096 return -TARGET_EFAULT
;
9098 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
9101 #ifdef TARGET_NR_alarm /* not on alpha */
9102 case TARGET_NR_alarm
:
9105 #ifdef TARGET_NR_pause /* not on alpha */
9106 case TARGET_NR_pause
:
9107 if (!block_signals()) {
9108 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
9110 return -TARGET_EINTR
;
9112 #ifdef TARGET_NR_utime
9113 case TARGET_NR_utime
:
9115 struct utimbuf tbuf
, *host_tbuf
;
9116 struct target_utimbuf
*target_tbuf
;
9118 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
9119 return -TARGET_EFAULT
;
9120 tbuf
.actime
= tswapal(target_tbuf
->actime
);
9121 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
9122 unlock_user_struct(target_tbuf
, arg2
, 0);
9127 if (!(p
= lock_user_string(arg1
)))
9128 return -TARGET_EFAULT
;
9129 ret
= get_errno(utime(p
, host_tbuf
));
9130 unlock_user(p
, arg1
, 0);
9134 #ifdef TARGET_NR_utimes
9135 case TARGET_NR_utimes
:
9137 struct timeval
*tvp
, tv
[2];
9139 if (copy_from_user_timeval(&tv
[0], arg2
)
9140 || copy_from_user_timeval(&tv
[1],
9141 arg2
+ sizeof(struct target_timeval
)))
9142 return -TARGET_EFAULT
;
9147 if (!(p
= lock_user_string(arg1
)))
9148 return -TARGET_EFAULT
;
9149 ret
= get_errno(utimes(p
, tvp
));
9150 unlock_user(p
, arg1
, 0);
9154 #if defined(TARGET_NR_futimesat)
9155 case TARGET_NR_futimesat
:
9157 struct timeval
*tvp
, tv
[2];
9159 if (copy_from_user_timeval(&tv
[0], arg3
)
9160 || copy_from_user_timeval(&tv
[1],
9161 arg3
+ sizeof(struct target_timeval
)))
9162 return -TARGET_EFAULT
;
9167 if (!(p
= lock_user_string(arg2
))) {
9168 return -TARGET_EFAULT
;
9170 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
9171 unlock_user(p
, arg2
, 0);
9175 #ifdef TARGET_NR_access
9176 case TARGET_NR_access
:
9177 if (!(p
= lock_user_string(arg1
))) {
9178 return -TARGET_EFAULT
;
9180 ret
= get_errno(access(path(p
), arg2
));
9181 unlock_user(p
, arg1
, 0);
9184 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9185 case TARGET_NR_faccessat
:
9186 if (!(p
= lock_user_string(arg2
))) {
9187 return -TARGET_EFAULT
;
9189 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
9190 unlock_user(p
, arg2
, 0);
9193 #if defined(TARGET_NR_faccessat2)
9194 case TARGET_NR_faccessat2
:
9195 if (!(p
= lock_user_string(arg2
))) {
9196 return -TARGET_EFAULT
;
9198 ret
= get_errno(faccessat(arg1
, p
, arg3
, arg4
));
9199 unlock_user(p
, arg2
, 0);
9202 #ifdef TARGET_NR_nice /* not on alpha */
9203 case TARGET_NR_nice
:
9204 return get_errno(nice(arg1
));
9206 case TARGET_NR_sync
:
9209 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9210 case TARGET_NR_syncfs
:
9211 return get_errno(syncfs(arg1
));
9213 case TARGET_NR_kill
:
9214 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
9215 #ifdef TARGET_NR_rename
9216 case TARGET_NR_rename
:
9219 p
= lock_user_string(arg1
);
9220 p2
= lock_user_string(arg2
);
9222 ret
= -TARGET_EFAULT
;
9224 ret
= get_errno(rename(p
, p2
));
9225 unlock_user(p2
, arg2
, 0);
9226 unlock_user(p
, arg1
, 0);
9230 #if defined(TARGET_NR_renameat)
9231 case TARGET_NR_renameat
:
9234 p
= lock_user_string(arg2
);
9235 p2
= lock_user_string(arg4
);
9237 ret
= -TARGET_EFAULT
;
9239 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
9240 unlock_user(p2
, arg4
, 0);
9241 unlock_user(p
, arg2
, 0);
9245 #if defined(TARGET_NR_renameat2)
9246 case TARGET_NR_renameat2
:
9249 p
= lock_user_string(arg2
);
9250 p2
= lock_user_string(arg4
);
9252 ret
= -TARGET_EFAULT
;
9254 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
9256 unlock_user(p2
, arg4
, 0);
9257 unlock_user(p
, arg2
, 0);
9261 #ifdef TARGET_NR_mkdir
9262 case TARGET_NR_mkdir
:
9263 if (!(p
= lock_user_string(arg1
)))
9264 return -TARGET_EFAULT
;
9265 ret
= get_errno(mkdir(p
, arg2
));
9266 unlock_user(p
, arg1
, 0);
9269 #if defined(TARGET_NR_mkdirat)
9270 case TARGET_NR_mkdirat
:
9271 if (!(p
= lock_user_string(arg2
)))
9272 return -TARGET_EFAULT
;
9273 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
9274 unlock_user(p
, arg2
, 0);
9277 #ifdef TARGET_NR_rmdir
9278 case TARGET_NR_rmdir
:
9279 if (!(p
= lock_user_string(arg1
)))
9280 return -TARGET_EFAULT
;
9281 ret
= get_errno(rmdir(p
));
9282 unlock_user(p
, arg1
, 0);
9286 ret
= get_errno(dup(arg1
));
9288 fd_trans_dup(arg1
, ret
);
9291 #ifdef TARGET_NR_pipe
9292 case TARGET_NR_pipe
:
9293 return do_pipe(cpu_env
, arg1
, 0, 0);
9295 #ifdef TARGET_NR_pipe2
9296 case TARGET_NR_pipe2
:
9297 return do_pipe(cpu_env
, arg1
,
9298 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
9300 case TARGET_NR_times
:
9302 struct target_tms
*tmsp
;
9304 ret
= get_errno(times(&tms
));
9306 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
9308 return -TARGET_EFAULT
;
9309 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
9310 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
9311 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
9312 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
9315 ret
= host_to_target_clock_t(ret
);
9318 case TARGET_NR_acct
:
9320 ret
= get_errno(acct(NULL
));
9322 if (!(p
= lock_user_string(arg1
))) {
9323 return -TARGET_EFAULT
;
9325 ret
= get_errno(acct(path(p
)));
9326 unlock_user(p
, arg1
, 0);
9329 #ifdef TARGET_NR_umount2
9330 case TARGET_NR_umount2
:
9331 if (!(p
= lock_user_string(arg1
)))
9332 return -TARGET_EFAULT
;
9333 ret
= get_errno(umount2(p
, arg2
));
9334 unlock_user(p
, arg1
, 0);
9337 case TARGET_NR_ioctl
:
9338 return do_ioctl(arg1
, arg2
, arg3
);
9339 #ifdef TARGET_NR_fcntl
9340 case TARGET_NR_fcntl
:
9341 return do_fcntl(arg1
, arg2
, arg3
);
9343 case TARGET_NR_setpgid
:
9344 return get_errno(setpgid(arg1
, arg2
));
9345 case TARGET_NR_umask
:
9346 return get_errno(umask(arg1
));
9347 case TARGET_NR_chroot
:
9348 if (!(p
= lock_user_string(arg1
)))
9349 return -TARGET_EFAULT
;
9350 ret
= get_errno(chroot(p
));
9351 unlock_user(p
, arg1
, 0);
9353 #ifdef TARGET_NR_dup2
9354 case TARGET_NR_dup2
:
9355 ret
= get_errno(dup2(arg1
, arg2
));
9357 fd_trans_dup(arg1
, arg2
);
9361 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9362 case TARGET_NR_dup3
:
9366 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9369 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9370 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9372 fd_trans_dup(arg1
, arg2
);
9377 #ifdef TARGET_NR_getppid /* not on alpha */
9378 case TARGET_NR_getppid
:
9379 return get_errno(getppid());
9381 #ifdef TARGET_NR_getpgrp
9382 case TARGET_NR_getpgrp
:
9383 return get_errno(getpgrp());
9385 case TARGET_NR_setsid
:
9386 return get_errno(setsid());
9387 #ifdef TARGET_NR_sigaction
9388 case TARGET_NR_sigaction
:
9390 #if defined(TARGET_MIPS)
9391 struct target_sigaction act
, oact
, *pact
, *old_act
;
9394 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9395 return -TARGET_EFAULT
;
9396 act
._sa_handler
= old_act
->_sa_handler
;
9397 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9398 act
.sa_flags
= old_act
->sa_flags
;
9399 unlock_user_struct(old_act
, arg2
, 0);
9405 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9407 if (!is_error(ret
) && arg3
) {
9408 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9409 return -TARGET_EFAULT
;
9410 old_act
->_sa_handler
= oact
._sa_handler
;
9411 old_act
->sa_flags
= oact
.sa_flags
;
9412 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9413 old_act
->sa_mask
.sig
[1] = 0;
9414 old_act
->sa_mask
.sig
[2] = 0;
9415 old_act
->sa_mask
.sig
[3] = 0;
9416 unlock_user_struct(old_act
, arg3
, 1);
9419 struct target_old_sigaction
*old_act
;
9420 struct target_sigaction act
, oact
, *pact
;
9422 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9423 return -TARGET_EFAULT
;
9424 act
._sa_handler
= old_act
->_sa_handler
;
9425 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9426 act
.sa_flags
= old_act
->sa_flags
;
9427 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9428 act
.sa_restorer
= old_act
->sa_restorer
;
9430 unlock_user_struct(old_act
, arg2
, 0);
9435 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9436 if (!is_error(ret
) && arg3
) {
9437 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9438 return -TARGET_EFAULT
;
9439 old_act
->_sa_handler
= oact
._sa_handler
;
9440 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9441 old_act
->sa_flags
= oact
.sa_flags
;
9442 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9443 old_act
->sa_restorer
= oact
.sa_restorer
;
9445 unlock_user_struct(old_act
, arg3
, 1);
9451 case TARGET_NR_rt_sigaction
:
9454 * For Alpha and SPARC this is a 5 argument syscall, with
9455 * a 'restorer' parameter which must be copied into the
9456 * sa_restorer field of the sigaction struct.
9457 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9458 * and arg5 is the sigsetsize.
9460 #if defined(TARGET_ALPHA)
9461 target_ulong sigsetsize
= arg4
;
9462 target_ulong restorer
= arg5
;
9463 #elif defined(TARGET_SPARC)
9464 target_ulong restorer
= arg4
;
9465 target_ulong sigsetsize
= arg5
;
9467 target_ulong sigsetsize
= arg4
;
9468 target_ulong restorer
= 0;
9470 struct target_sigaction
*act
= NULL
;
9471 struct target_sigaction
*oact
= NULL
;
9473 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9474 return -TARGET_EINVAL
;
9476 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9477 return -TARGET_EFAULT
;
9479 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9480 ret
= -TARGET_EFAULT
;
9482 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9484 unlock_user_struct(oact
, arg3
, 1);
9488 unlock_user_struct(act
, arg2
, 0);
9492 #ifdef TARGET_NR_sgetmask /* not on alpha */
9493 case TARGET_NR_sgetmask
:
9496 abi_ulong target_set
;
9497 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9499 host_to_target_old_sigset(&target_set
, &cur_set
);
9505 #ifdef TARGET_NR_ssetmask /* not on alpha */
9506 case TARGET_NR_ssetmask
:
9509 abi_ulong target_set
= arg1
;
9510 target_to_host_old_sigset(&set
, &target_set
);
9511 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9513 host_to_target_old_sigset(&target_set
, &oset
);
9519 #ifdef TARGET_NR_sigprocmask
9520 case TARGET_NR_sigprocmask
:
9522 #if defined(TARGET_ALPHA)
9523 sigset_t set
, oldset
;
9528 case TARGET_SIG_BLOCK
:
9531 case TARGET_SIG_UNBLOCK
:
9534 case TARGET_SIG_SETMASK
:
9538 return -TARGET_EINVAL
;
9541 target_to_host_old_sigset(&set
, &mask
);
9543 ret
= do_sigprocmask(how
, &set
, &oldset
);
9544 if (!is_error(ret
)) {
9545 host_to_target_old_sigset(&mask
, &oldset
);
9547 cpu_env
->ir
[IR_V0
] = 0; /* force no error */
9550 sigset_t set
, oldset
, *set_ptr
;
9554 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9556 return -TARGET_EFAULT
;
9558 target_to_host_old_sigset(&set
, p
);
9559 unlock_user(p
, arg2
, 0);
9562 case TARGET_SIG_BLOCK
:
9565 case TARGET_SIG_UNBLOCK
:
9568 case TARGET_SIG_SETMASK
:
9572 return -TARGET_EINVAL
;
9578 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9579 if (!is_error(ret
) && arg3
) {
9580 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9581 return -TARGET_EFAULT
;
9582 host_to_target_old_sigset(p
, &oldset
);
9583 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9589 case TARGET_NR_rt_sigprocmask
:
9592 sigset_t set
, oldset
, *set_ptr
;
9594 if (arg4
!= sizeof(target_sigset_t
)) {
9595 return -TARGET_EINVAL
;
9599 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9601 return -TARGET_EFAULT
;
9603 target_to_host_sigset(&set
, p
);
9604 unlock_user(p
, arg2
, 0);
9607 case TARGET_SIG_BLOCK
:
9610 case TARGET_SIG_UNBLOCK
:
9613 case TARGET_SIG_SETMASK
:
9617 return -TARGET_EINVAL
;
9623 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9624 if (!is_error(ret
) && arg3
) {
9625 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9626 return -TARGET_EFAULT
;
9627 host_to_target_sigset(p
, &oldset
);
9628 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9632 #ifdef TARGET_NR_sigpending
9633 case TARGET_NR_sigpending
:
9636 ret
= get_errno(sigpending(&set
));
9637 if (!is_error(ret
)) {
9638 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9639 return -TARGET_EFAULT
;
9640 host_to_target_old_sigset(p
, &set
);
9641 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9646 case TARGET_NR_rt_sigpending
:
9650 /* Yes, this check is >, not != like most. We follow the kernel's
9651 * logic and it does it like this because it implements
9652 * NR_sigpending through the same code path, and in that case
9653 * the old_sigset_t is smaller in size.
9655 if (arg2
> sizeof(target_sigset_t
)) {
9656 return -TARGET_EINVAL
;
9659 ret
= get_errno(sigpending(&set
));
9660 if (!is_error(ret
)) {
9661 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9662 return -TARGET_EFAULT
;
9663 host_to_target_sigset(p
, &set
);
9664 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9668 #ifdef TARGET_NR_sigsuspend
9669 case TARGET_NR_sigsuspend
:
9673 #if defined(TARGET_ALPHA)
9674 TaskState
*ts
= cpu
->opaque
;
9675 /* target_to_host_old_sigset will bswap back */
9676 abi_ulong mask
= tswapal(arg1
);
9677 set
= &ts
->sigsuspend_mask
;
9678 target_to_host_old_sigset(set
, &mask
);
9680 ret
= process_sigsuspend_mask(&set
, arg1
, sizeof(target_sigset_t
));
9685 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9686 finish_sigsuspend_mask(ret
);
9690 case TARGET_NR_rt_sigsuspend
:
9694 ret
= process_sigsuspend_mask(&set
, arg1
, arg2
);
9698 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9699 finish_sigsuspend_mask(ret
);
9702 #ifdef TARGET_NR_rt_sigtimedwait
9703 case TARGET_NR_rt_sigtimedwait
:
9706 struct timespec uts
, *puts
;
9709 if (arg4
!= sizeof(target_sigset_t
)) {
9710 return -TARGET_EINVAL
;
9713 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9714 return -TARGET_EFAULT
;
9715 target_to_host_sigset(&set
, p
);
9716 unlock_user(p
, arg1
, 0);
9719 if (target_to_host_timespec(puts
, arg3
)) {
9720 return -TARGET_EFAULT
;
9725 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9727 if (!is_error(ret
)) {
9729 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9732 return -TARGET_EFAULT
;
9734 host_to_target_siginfo(p
, &uinfo
);
9735 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9737 ret
= host_to_target_signal(ret
);
9742 #ifdef TARGET_NR_rt_sigtimedwait_time64
9743 case TARGET_NR_rt_sigtimedwait_time64
:
9746 struct timespec uts
, *puts
;
9749 if (arg4
!= sizeof(target_sigset_t
)) {
9750 return -TARGET_EINVAL
;
9753 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9755 return -TARGET_EFAULT
;
9757 target_to_host_sigset(&set
, p
);
9758 unlock_user(p
, arg1
, 0);
9761 if (target_to_host_timespec64(puts
, arg3
)) {
9762 return -TARGET_EFAULT
;
9767 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9769 if (!is_error(ret
)) {
9771 p
= lock_user(VERIFY_WRITE
, arg2
,
9772 sizeof(target_siginfo_t
), 0);
9774 return -TARGET_EFAULT
;
9776 host_to_target_siginfo(p
, &uinfo
);
9777 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9779 ret
= host_to_target_signal(ret
);
9784 case TARGET_NR_rt_sigqueueinfo
:
9788 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9790 return -TARGET_EFAULT
;
9792 target_to_host_siginfo(&uinfo
, p
);
9793 unlock_user(p
, arg3
, 0);
9794 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, target_to_host_signal(arg2
), &uinfo
));
9797 case TARGET_NR_rt_tgsigqueueinfo
:
9801 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9803 return -TARGET_EFAULT
;
9805 target_to_host_siginfo(&uinfo
, p
);
9806 unlock_user(p
, arg4
, 0);
9807 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, target_to_host_signal(arg3
), &uinfo
));
9810 #ifdef TARGET_NR_sigreturn
9811 case TARGET_NR_sigreturn
:
9812 if (block_signals()) {
9813 return -QEMU_ERESTARTSYS
;
9815 return do_sigreturn(cpu_env
);
9817 case TARGET_NR_rt_sigreturn
:
9818 if (block_signals()) {
9819 return -QEMU_ERESTARTSYS
;
9821 return do_rt_sigreturn(cpu_env
);
9822 case TARGET_NR_sethostname
:
9823 if (!(p
= lock_user_string(arg1
)))
9824 return -TARGET_EFAULT
;
9825 ret
= get_errno(sethostname(p
, arg2
));
9826 unlock_user(p
, arg1
, 0);
9828 #ifdef TARGET_NR_setrlimit
9829 case TARGET_NR_setrlimit
:
9831 int resource
= target_to_host_resource(arg1
);
9832 struct target_rlimit
*target_rlim
;
9834 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9835 return -TARGET_EFAULT
;
9836 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9837 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9838 unlock_user_struct(target_rlim
, arg2
, 0);
9840 * If we just passed through resource limit settings for memory then
9841 * they would also apply to QEMU's own allocations, and QEMU will
9842 * crash or hang or die if its allocations fail. Ideally we would
9843 * track the guest allocations in QEMU and apply the limits ourselves.
9844 * For now, just tell the guest the call succeeded but don't actually
9847 if (resource
!= RLIMIT_AS
&&
9848 resource
!= RLIMIT_DATA
&&
9849 resource
!= RLIMIT_STACK
) {
9850 return get_errno(setrlimit(resource
, &rlim
));
9856 #ifdef TARGET_NR_getrlimit
9857 case TARGET_NR_getrlimit
:
9859 int resource
= target_to_host_resource(arg1
);
9860 struct target_rlimit
*target_rlim
;
9863 ret
= get_errno(getrlimit(resource
, &rlim
));
9864 if (!is_error(ret
)) {
9865 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9866 return -TARGET_EFAULT
;
9867 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9868 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9869 unlock_user_struct(target_rlim
, arg2
, 1);
9874 case TARGET_NR_getrusage
:
9876 struct rusage rusage
;
9877 ret
= get_errno(getrusage(arg1
, &rusage
));
9878 if (!is_error(ret
)) {
9879 ret
= host_to_target_rusage(arg2
, &rusage
);
9883 #if defined(TARGET_NR_gettimeofday)
9884 case TARGET_NR_gettimeofday
:
9889 ret
= get_errno(gettimeofday(&tv
, &tz
));
9890 if (!is_error(ret
)) {
9891 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9892 return -TARGET_EFAULT
;
9894 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9895 return -TARGET_EFAULT
;
9901 #if defined(TARGET_NR_settimeofday)
9902 case TARGET_NR_settimeofday
:
9904 struct timeval tv
, *ptv
= NULL
;
9905 struct timezone tz
, *ptz
= NULL
;
9908 if (copy_from_user_timeval(&tv
, arg1
)) {
9909 return -TARGET_EFAULT
;
9915 if (copy_from_user_timezone(&tz
, arg2
)) {
9916 return -TARGET_EFAULT
;
9921 return get_errno(settimeofday(ptv
, ptz
));
9924 #if defined(TARGET_NR_select)
9925 case TARGET_NR_select
:
9926 #if defined(TARGET_WANT_NI_OLD_SELECT)
9927 /* some architectures used to have old_select here
9928 * but now ENOSYS it.
9930 ret
= -TARGET_ENOSYS
;
9931 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9932 ret
= do_old_select(arg1
);
9934 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9938 #ifdef TARGET_NR_pselect6
9939 case TARGET_NR_pselect6
:
9940 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9942 #ifdef TARGET_NR_pselect6_time64
9943 case TARGET_NR_pselect6_time64
:
9944 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9946 #ifdef TARGET_NR_symlink
9947 case TARGET_NR_symlink
:
9950 p
= lock_user_string(arg1
);
9951 p2
= lock_user_string(arg2
);
9953 ret
= -TARGET_EFAULT
;
9955 ret
= get_errno(symlink(p
, p2
));
9956 unlock_user(p2
, arg2
, 0);
9957 unlock_user(p
, arg1
, 0);
9961 #if defined(TARGET_NR_symlinkat)
9962 case TARGET_NR_symlinkat
:
9965 p
= lock_user_string(arg1
);
9966 p2
= lock_user_string(arg3
);
9968 ret
= -TARGET_EFAULT
;
9970 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9971 unlock_user(p2
, arg3
, 0);
9972 unlock_user(p
, arg1
, 0);
9976 #ifdef TARGET_NR_readlink
9977 case TARGET_NR_readlink
:
9980 p
= lock_user_string(arg1
);
9981 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9983 ret
= -TARGET_EFAULT
;
9985 /* Short circuit this for the magic exe check. */
9986 ret
= -TARGET_EINVAL
;
9987 } else if (is_proc_myself((const char *)p
, "exe")) {
9988 char real
[PATH_MAX
], *temp
;
9989 temp
= realpath(exec_path
, real
);
9990 /* Return value is # of bytes that we wrote to the buffer. */
9992 ret
= get_errno(-1);
9994 /* Don't worry about sign mismatch as earlier mapping
9995 * logic would have thrown a bad address error. */
9996 ret
= MIN(strlen(real
), arg3
);
9997 /* We cannot NUL terminate the string. */
9998 memcpy(p2
, real
, ret
);
10001 ret
= get_errno(readlink(path(p
), p2
, arg3
));
10003 unlock_user(p2
, arg2
, ret
);
10004 unlock_user(p
, arg1
, 0);
10008 #if defined(TARGET_NR_readlinkat)
10009 case TARGET_NR_readlinkat
:
10012 p
= lock_user_string(arg2
);
10013 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10015 ret
= -TARGET_EFAULT
;
10016 } else if (!arg4
) {
10017 /* Short circuit this for the magic exe check. */
10018 ret
= -TARGET_EINVAL
;
10019 } else if (is_proc_myself((const char *)p
, "exe")) {
10020 char real
[PATH_MAX
], *temp
;
10021 temp
= realpath(exec_path
, real
);
10022 /* Return value is # of bytes that we wrote to the buffer. */
10023 if (temp
== NULL
) {
10024 ret
= get_errno(-1);
10026 /* Don't worry about sign mismatch as earlier mapping
10027 * logic would have thrown a bad address error. */
10028 ret
= MIN(strlen(real
), arg4
);
10029 /* We cannot NUL terminate the string. */
10030 memcpy(p2
, real
, ret
);
10033 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
10035 unlock_user(p2
, arg3
, ret
);
10036 unlock_user(p
, arg2
, 0);
10040 #ifdef TARGET_NR_swapon
10041 case TARGET_NR_swapon
:
10042 if (!(p
= lock_user_string(arg1
)))
10043 return -TARGET_EFAULT
;
10044 ret
= get_errno(swapon(p
, arg2
));
10045 unlock_user(p
, arg1
, 0);
10048 case TARGET_NR_reboot
:
10049 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
10050 /* arg4 must be ignored in all other cases */
10051 p
= lock_user_string(arg4
);
10053 return -TARGET_EFAULT
;
10055 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
10056 unlock_user(p
, arg4
, 0);
10058 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
10061 #ifdef TARGET_NR_mmap
10062 case TARGET_NR_mmap
:
10063 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10064 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10065 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10066 || defined(TARGET_S390X)
10069 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
10070 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
10071 return -TARGET_EFAULT
;
10072 v1
= tswapal(v
[0]);
10073 v2
= tswapal(v
[1]);
10074 v3
= tswapal(v
[2]);
10075 v4
= tswapal(v
[3]);
10076 v5
= tswapal(v
[4]);
10077 v6
= tswapal(v
[5]);
10078 unlock_user(v
, arg1
, 0);
10079 ret
= get_errno(target_mmap(v1
, v2
, v3
,
10080 target_to_host_bitmask(v4
, mmap_flags_tbl
),
10084 /* mmap pointers are always untagged */
10085 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
10086 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10092 #ifdef TARGET_NR_mmap2
10093 case TARGET_NR_mmap2
:
10095 #define MMAP_SHIFT 12
10097 ret
= target_mmap(arg1
, arg2
, arg3
,
10098 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10099 arg5
, arg6
<< MMAP_SHIFT
);
10100 return get_errno(ret
);
10102 case TARGET_NR_munmap
:
10103 arg1
= cpu_untagged_addr(cpu
, arg1
);
10104 return get_errno(target_munmap(arg1
, arg2
));
10105 case TARGET_NR_mprotect
:
10106 arg1
= cpu_untagged_addr(cpu
, arg1
);
10108 TaskState
*ts
= cpu
->opaque
;
10109 /* Special hack to detect libc making the stack executable. */
10110 if ((arg3
& PROT_GROWSDOWN
)
10111 && arg1
>= ts
->info
->stack_limit
10112 && arg1
<= ts
->info
->start_stack
) {
10113 arg3
&= ~PROT_GROWSDOWN
;
10114 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
10115 arg1
= ts
->info
->stack_limit
;
10118 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
10119 #ifdef TARGET_NR_mremap
10120 case TARGET_NR_mremap
:
10121 arg1
= cpu_untagged_addr(cpu
, arg1
);
10122 /* mremap new_addr (arg5) is always untagged */
10123 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
10125 /* ??? msync/mlock/munlock are broken for softmmu. */
10126 #ifdef TARGET_NR_msync
10127 case TARGET_NR_msync
:
10128 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
10130 #ifdef TARGET_NR_mlock
10131 case TARGET_NR_mlock
:
10132 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
10134 #ifdef TARGET_NR_munlock
10135 case TARGET_NR_munlock
:
10136 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
10138 #ifdef TARGET_NR_mlockall
10139 case TARGET_NR_mlockall
:
10140 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
10142 #ifdef TARGET_NR_munlockall
10143 case TARGET_NR_munlockall
:
10144 return get_errno(munlockall());
10146 #ifdef TARGET_NR_truncate
10147 case TARGET_NR_truncate
:
10148 if (!(p
= lock_user_string(arg1
)))
10149 return -TARGET_EFAULT
;
10150 ret
= get_errno(truncate(p
, arg2
));
10151 unlock_user(p
, arg1
, 0);
10154 #ifdef TARGET_NR_ftruncate
10155 case TARGET_NR_ftruncate
:
10156 return get_errno(ftruncate(arg1
, arg2
));
10158 case TARGET_NR_fchmod
:
10159 return get_errno(fchmod(arg1
, arg2
));
10160 #if defined(TARGET_NR_fchmodat)
10161 case TARGET_NR_fchmodat
:
10162 if (!(p
= lock_user_string(arg2
)))
10163 return -TARGET_EFAULT
;
10164 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
10165 unlock_user(p
, arg2
, 0);
10168 case TARGET_NR_getpriority
:
10169 /* Note that negative values are valid for getpriority, so we must
10170 differentiate based on errno settings. */
10172 ret
= getpriority(arg1
, arg2
);
10173 if (ret
== -1 && errno
!= 0) {
10174 return -host_to_target_errno(errno
);
10176 #ifdef TARGET_ALPHA
10177 /* Return value is the unbiased priority. Signal no error. */
10178 cpu_env
->ir
[IR_V0
] = 0;
10180 /* Return value is a biased priority to avoid negative numbers. */
10184 case TARGET_NR_setpriority
:
10185 return get_errno(setpriority(arg1
, arg2
, arg3
));
10186 #ifdef TARGET_NR_statfs
10187 case TARGET_NR_statfs
:
10188 if (!(p
= lock_user_string(arg1
))) {
10189 return -TARGET_EFAULT
;
10191 ret
= get_errno(statfs(path(p
), &stfs
));
10192 unlock_user(p
, arg1
, 0);
10194 if (!is_error(ret
)) {
10195 struct target_statfs
*target_stfs
;
10197 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
10198 return -TARGET_EFAULT
;
10199 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10200 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10201 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10202 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10203 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10204 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10205 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10206 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10207 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10208 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10209 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10210 #ifdef _STATFS_F_FLAGS
10211 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10213 __put_user(0, &target_stfs
->f_flags
);
10215 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10216 unlock_user_struct(target_stfs
, arg2
, 1);
10220 #ifdef TARGET_NR_fstatfs
10221 case TARGET_NR_fstatfs
:
10222 ret
= get_errno(fstatfs(arg1
, &stfs
));
10223 goto convert_statfs
;
10225 #ifdef TARGET_NR_statfs64
10226 case TARGET_NR_statfs64
:
10227 if (!(p
= lock_user_string(arg1
))) {
10228 return -TARGET_EFAULT
;
10230 ret
= get_errno(statfs(path(p
), &stfs
));
10231 unlock_user(p
, arg1
, 0);
10233 if (!is_error(ret
)) {
10234 struct target_statfs64
*target_stfs
;
10236 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
10237 return -TARGET_EFAULT
;
10238 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10239 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10240 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10241 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10242 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10243 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10244 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10245 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10246 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10247 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10248 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10249 #ifdef _STATFS_F_FLAGS
10250 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10252 __put_user(0, &target_stfs
->f_flags
);
10254 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10255 unlock_user_struct(target_stfs
, arg3
, 1);
10258 case TARGET_NR_fstatfs64
:
10259 ret
= get_errno(fstatfs(arg1
, &stfs
));
10260 goto convert_statfs64
;
10262 #ifdef TARGET_NR_socketcall
10263 case TARGET_NR_socketcall
:
10264 return do_socketcall(arg1
, arg2
);
10266 #ifdef TARGET_NR_accept
10267 case TARGET_NR_accept
:
10268 return do_accept4(arg1
, arg2
, arg3
, 0);
10270 #ifdef TARGET_NR_accept4
10271 case TARGET_NR_accept4
:
10272 return do_accept4(arg1
, arg2
, arg3
, arg4
);
10274 #ifdef TARGET_NR_bind
10275 case TARGET_NR_bind
:
10276 return do_bind(arg1
, arg2
, arg3
);
10278 #ifdef TARGET_NR_connect
10279 case TARGET_NR_connect
:
10280 return do_connect(arg1
, arg2
, arg3
);
10282 #ifdef TARGET_NR_getpeername
10283 case TARGET_NR_getpeername
:
10284 return do_getpeername(arg1
, arg2
, arg3
);
10286 #ifdef TARGET_NR_getsockname
10287 case TARGET_NR_getsockname
:
10288 return do_getsockname(arg1
, arg2
, arg3
);
10290 #ifdef TARGET_NR_getsockopt
10291 case TARGET_NR_getsockopt
:
10292 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
10294 #ifdef TARGET_NR_listen
10295 case TARGET_NR_listen
:
10296 return get_errno(listen(arg1
, arg2
));
10298 #ifdef TARGET_NR_recv
10299 case TARGET_NR_recv
:
10300 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
10302 #ifdef TARGET_NR_recvfrom
10303 case TARGET_NR_recvfrom
:
10304 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10306 #ifdef TARGET_NR_recvmsg
10307 case TARGET_NR_recvmsg
:
10308 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
10310 #ifdef TARGET_NR_send
10311 case TARGET_NR_send
:
10312 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
10314 #ifdef TARGET_NR_sendmsg
10315 case TARGET_NR_sendmsg
:
10316 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
10318 #ifdef TARGET_NR_sendmmsg
10319 case TARGET_NR_sendmmsg
:
10320 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
10322 #ifdef TARGET_NR_recvmmsg
10323 case TARGET_NR_recvmmsg
:
10324 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
10326 #ifdef TARGET_NR_sendto
10327 case TARGET_NR_sendto
:
10328 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10330 #ifdef TARGET_NR_shutdown
10331 case TARGET_NR_shutdown
:
10332 return get_errno(shutdown(arg1
, arg2
));
10334 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10335 case TARGET_NR_getrandom
:
10336 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10338 return -TARGET_EFAULT
;
10340 ret
= get_errno(getrandom(p
, arg2
, arg3
));
10341 unlock_user(p
, arg1
, ret
);
10344 #ifdef TARGET_NR_socket
10345 case TARGET_NR_socket
:
10346 return do_socket(arg1
, arg2
, arg3
);
10348 #ifdef TARGET_NR_socketpair
10349 case TARGET_NR_socketpair
:
10350 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
10352 #ifdef TARGET_NR_setsockopt
10353 case TARGET_NR_setsockopt
:
10354 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10356 #if defined(TARGET_NR_syslog)
10357 case TARGET_NR_syslog
:
10362 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10363 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10364 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10365 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10366 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10367 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10368 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10369 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10370 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10371 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10372 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10373 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10376 return -TARGET_EINVAL
;
10381 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10383 return -TARGET_EFAULT
;
10385 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10386 unlock_user(p
, arg2
, arg3
);
10390 return -TARGET_EINVAL
;
10395 case TARGET_NR_setitimer
:
10397 struct itimerval value
, ovalue
, *pvalue
;
10401 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10402 || copy_from_user_timeval(&pvalue
->it_value
,
10403 arg2
+ sizeof(struct target_timeval
)))
10404 return -TARGET_EFAULT
;
10408 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10409 if (!is_error(ret
) && arg3
) {
10410 if (copy_to_user_timeval(arg3
,
10411 &ovalue
.it_interval
)
10412 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10414 return -TARGET_EFAULT
;
10418 case TARGET_NR_getitimer
:
10420 struct itimerval value
;
10422 ret
= get_errno(getitimer(arg1
, &value
));
10423 if (!is_error(ret
) && arg2
) {
10424 if (copy_to_user_timeval(arg2
,
10425 &value
.it_interval
)
10426 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10428 return -TARGET_EFAULT
;
10432 #ifdef TARGET_NR_stat
10433 case TARGET_NR_stat
:
10434 if (!(p
= lock_user_string(arg1
))) {
10435 return -TARGET_EFAULT
;
10437 ret
= get_errno(stat(path(p
), &st
));
10438 unlock_user(p
, arg1
, 0);
10441 #ifdef TARGET_NR_lstat
10442 case TARGET_NR_lstat
:
10443 if (!(p
= lock_user_string(arg1
))) {
10444 return -TARGET_EFAULT
;
10446 ret
= get_errno(lstat(path(p
), &st
));
10447 unlock_user(p
, arg1
, 0);
10450 #ifdef TARGET_NR_fstat
10451 case TARGET_NR_fstat
:
10453 ret
= get_errno(fstat(arg1
, &st
));
10454 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10457 if (!is_error(ret
)) {
10458 struct target_stat
*target_st
;
10460 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10461 return -TARGET_EFAULT
;
10462 memset(target_st
, 0, sizeof(*target_st
));
10463 __put_user(st
.st_dev
, &target_st
->st_dev
);
10464 __put_user(st
.st_ino
, &target_st
->st_ino
);
10465 __put_user(st
.st_mode
, &target_st
->st_mode
);
10466 __put_user(st
.st_uid
, &target_st
->st_uid
);
10467 __put_user(st
.st_gid
, &target_st
->st_gid
);
10468 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10469 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10470 __put_user(st
.st_size
, &target_st
->st_size
);
10471 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10472 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10473 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10474 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10475 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10476 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10477 __put_user(st
.st_atim
.tv_nsec
,
10478 &target_st
->target_st_atime_nsec
);
10479 __put_user(st
.st_mtim
.tv_nsec
,
10480 &target_st
->target_st_mtime_nsec
);
10481 __put_user(st
.st_ctim
.tv_nsec
,
10482 &target_st
->target_st_ctime_nsec
);
10484 unlock_user_struct(target_st
, arg2
, 1);
10489 case TARGET_NR_vhangup
:
10490 return get_errno(vhangup());
10491 #ifdef TARGET_NR_syscall
10492 case TARGET_NR_syscall
:
10493 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10494 arg6
, arg7
, arg8
, 0);
10496 #if defined(TARGET_NR_wait4)
10497 case TARGET_NR_wait4
:
10500 abi_long status_ptr
= arg2
;
10501 struct rusage rusage
, *rusage_ptr
;
10502 abi_ulong target_rusage
= arg4
;
10503 abi_long rusage_err
;
10505 rusage_ptr
= &rusage
;
10508 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10509 if (!is_error(ret
)) {
10510 if (status_ptr
&& ret
) {
10511 status
= host_to_target_waitstatus(status
);
10512 if (put_user_s32(status
, status_ptr
))
10513 return -TARGET_EFAULT
;
10515 if (target_rusage
) {
10516 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10525 #ifdef TARGET_NR_swapoff
10526 case TARGET_NR_swapoff
:
10527 if (!(p
= lock_user_string(arg1
)))
10528 return -TARGET_EFAULT
;
10529 ret
= get_errno(swapoff(p
));
10530 unlock_user(p
, arg1
, 0);
10533 case TARGET_NR_sysinfo
:
10535 struct target_sysinfo
*target_value
;
10536 struct sysinfo value
;
10537 ret
= get_errno(sysinfo(&value
));
10538 if (!is_error(ret
) && arg1
)
10540 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10541 return -TARGET_EFAULT
;
10542 __put_user(value
.uptime
, &target_value
->uptime
);
10543 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10544 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10545 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10546 __put_user(value
.totalram
, &target_value
->totalram
);
10547 __put_user(value
.freeram
, &target_value
->freeram
);
10548 __put_user(value
.sharedram
, &target_value
->sharedram
);
10549 __put_user(value
.bufferram
, &target_value
->bufferram
);
10550 __put_user(value
.totalswap
, &target_value
->totalswap
);
10551 __put_user(value
.freeswap
, &target_value
->freeswap
);
10552 __put_user(value
.procs
, &target_value
->procs
);
10553 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10554 __put_user(value
.freehigh
, &target_value
->freehigh
);
10555 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10556 unlock_user_struct(target_value
, arg1
, 1);
10560 #ifdef TARGET_NR_ipc
10561 case TARGET_NR_ipc
:
10562 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10564 #ifdef TARGET_NR_semget
10565 case TARGET_NR_semget
:
10566 return get_errno(semget(arg1
, arg2
, arg3
));
10568 #ifdef TARGET_NR_semop
10569 case TARGET_NR_semop
:
10570 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10572 #ifdef TARGET_NR_semtimedop
10573 case TARGET_NR_semtimedop
:
10574 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10576 #ifdef TARGET_NR_semtimedop_time64
10577 case TARGET_NR_semtimedop_time64
:
10578 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10580 #ifdef TARGET_NR_semctl
10581 case TARGET_NR_semctl
:
10582 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10584 #ifdef TARGET_NR_msgctl
10585 case TARGET_NR_msgctl
:
10586 return do_msgctl(arg1
, arg2
, arg3
);
10588 #ifdef TARGET_NR_msgget
10589 case TARGET_NR_msgget
:
10590 return get_errno(msgget(arg1
, arg2
));
10592 #ifdef TARGET_NR_msgrcv
10593 case TARGET_NR_msgrcv
:
10594 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10596 #ifdef TARGET_NR_msgsnd
10597 case TARGET_NR_msgsnd
:
10598 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10600 #ifdef TARGET_NR_shmget
10601 case TARGET_NR_shmget
:
10602 return get_errno(shmget(arg1
, arg2
, arg3
));
10604 #ifdef TARGET_NR_shmctl
10605 case TARGET_NR_shmctl
:
10606 return do_shmctl(arg1
, arg2
, arg3
);
10608 #ifdef TARGET_NR_shmat
10609 case TARGET_NR_shmat
:
10610 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10612 #ifdef TARGET_NR_shmdt
10613 case TARGET_NR_shmdt
:
10614 return do_shmdt(arg1
);
10616 case TARGET_NR_fsync
:
10617 return get_errno(fsync(arg1
));
10618 case TARGET_NR_clone
:
10619 /* Linux manages to have three different orderings for its
10620 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10621 * match the kernel's CONFIG_CLONE_* settings.
10622 * Microblaze is further special in that it uses a sixth
10623 * implicit argument to clone for the TLS pointer.
10625 #if defined(TARGET_MICROBLAZE)
10626 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10627 #elif defined(TARGET_CLONE_BACKWARDS)
10628 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10629 #elif defined(TARGET_CLONE_BACKWARDS2)
10630 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10632 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10635 #ifdef __NR_exit_group
10636 /* new thread calls */
10637 case TARGET_NR_exit_group
:
10638 preexit_cleanup(cpu_env
, arg1
);
10639 return get_errno(exit_group(arg1
));
10641 case TARGET_NR_setdomainname
:
10642 if (!(p
= lock_user_string(arg1
)))
10643 return -TARGET_EFAULT
;
10644 ret
= get_errno(setdomainname(p
, arg2
));
10645 unlock_user(p
, arg1
, 0);
10647 case TARGET_NR_uname
:
10648 /* no need to transcode because we use the linux syscall */
10650 struct new_utsname
* buf
;
10652 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10653 return -TARGET_EFAULT
;
10654 ret
= get_errno(sys_uname(buf
));
10655 if (!is_error(ret
)) {
10656 /* Overwrite the native machine name with whatever is being
10658 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10659 sizeof(buf
->machine
));
10660 /* Allow the user to override the reported release. */
10661 if (qemu_uname_release
&& *qemu_uname_release
) {
10662 g_strlcpy(buf
->release
, qemu_uname_release
,
10663 sizeof(buf
->release
));
10666 unlock_user_struct(buf
, arg1
, 1);
10670 case TARGET_NR_modify_ldt
:
10671 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10672 #if !defined(TARGET_X86_64)
10673 case TARGET_NR_vm86
:
10674 return do_vm86(cpu_env
, arg1
, arg2
);
10677 #if defined(TARGET_NR_adjtimex)
10678 case TARGET_NR_adjtimex
:
10680 struct timex host_buf
;
10682 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10683 return -TARGET_EFAULT
;
10685 ret
= get_errno(adjtimex(&host_buf
));
10686 if (!is_error(ret
)) {
10687 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10688 return -TARGET_EFAULT
;
10694 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10695 case TARGET_NR_clock_adjtime
:
10697 struct timex htx
, *phtx
= &htx
;
10699 if (target_to_host_timex(phtx
, arg2
) != 0) {
10700 return -TARGET_EFAULT
;
10702 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10703 if (!is_error(ret
) && phtx
) {
10704 if (host_to_target_timex(arg2
, phtx
) != 0) {
10705 return -TARGET_EFAULT
;
10711 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10712 case TARGET_NR_clock_adjtime64
:
10716 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10717 return -TARGET_EFAULT
;
10719 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10720 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10721 return -TARGET_EFAULT
;
10726 case TARGET_NR_getpgid
:
10727 return get_errno(getpgid(arg1
));
10728 case TARGET_NR_fchdir
:
10729 return get_errno(fchdir(arg1
));
10730 case TARGET_NR_personality
:
10731 return get_errno(personality(arg1
));
10732 #ifdef TARGET_NR__llseek /* Not on alpha */
10733 case TARGET_NR__llseek
:
10736 #if !defined(__NR_llseek)
10737 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10739 ret
= get_errno(res
);
10744 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10746 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10747 return -TARGET_EFAULT
;
10752 #ifdef TARGET_NR_getdents
10753 case TARGET_NR_getdents
:
10754 return do_getdents(arg1
, arg2
, arg3
);
10755 #endif /* TARGET_NR_getdents */
10756 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10757 case TARGET_NR_getdents64
:
10758 return do_getdents64(arg1
, arg2
, arg3
);
10759 #endif /* TARGET_NR_getdents64 */
10760 #if defined(TARGET_NR__newselect)
10761 case TARGET_NR__newselect
:
10762 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10764 #ifdef TARGET_NR_poll
10765 case TARGET_NR_poll
:
10766 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10768 #ifdef TARGET_NR_ppoll
10769 case TARGET_NR_ppoll
:
10770 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10772 #ifdef TARGET_NR_ppoll_time64
10773 case TARGET_NR_ppoll_time64
:
10774 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10776 case TARGET_NR_flock
:
10777 /* NOTE: the flock constant seems to be the same for every
10779 return get_errno(safe_flock(arg1
, arg2
));
10780 case TARGET_NR_readv
:
10782 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10784 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10785 unlock_iovec(vec
, arg2
, arg3
, 1);
10787 ret
= -host_to_target_errno(errno
);
10791 case TARGET_NR_writev
:
10793 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10795 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10796 unlock_iovec(vec
, arg2
, arg3
, 0);
10798 ret
= -host_to_target_errno(errno
);
10802 #if defined(TARGET_NR_preadv)
10803 case TARGET_NR_preadv
:
10805 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10807 unsigned long low
, high
;
10809 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10810 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10811 unlock_iovec(vec
, arg2
, arg3
, 1);
10813 ret
= -host_to_target_errno(errno
);
10818 #if defined(TARGET_NR_pwritev)
10819 case TARGET_NR_pwritev
:
10821 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10823 unsigned long low
, high
;
10825 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10826 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10827 unlock_iovec(vec
, arg2
, arg3
, 0);
10829 ret
= -host_to_target_errno(errno
);
10834 case TARGET_NR_getsid
:
10835 return get_errno(getsid(arg1
));
10836 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10837 case TARGET_NR_fdatasync
:
10838 return get_errno(fdatasync(arg1
));
10840 case TARGET_NR_sched_getaffinity
:
10842 unsigned int mask_size
;
10843 unsigned long *mask
;
10846 * sched_getaffinity needs multiples of ulong, so need to take
10847 * care of mismatches between target ulong and host ulong sizes.
10849 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10850 return -TARGET_EINVAL
;
10852 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10854 mask
= alloca(mask_size
);
10855 memset(mask
, 0, mask_size
);
10856 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10858 if (!is_error(ret
)) {
10860 /* More data returned than the caller's buffer will fit.
10861 * This only happens if sizeof(abi_long) < sizeof(long)
10862 * and the caller passed us a buffer holding an odd number
10863 * of abi_longs. If the host kernel is actually using the
10864 * extra 4 bytes then fail EINVAL; otherwise we can just
10865 * ignore them and only copy the interesting part.
10867 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10868 if (numcpus
> arg2
* 8) {
10869 return -TARGET_EINVAL
;
10874 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10875 return -TARGET_EFAULT
;
10880 case TARGET_NR_sched_setaffinity
:
10882 unsigned int mask_size
;
10883 unsigned long *mask
;
10886 * sched_setaffinity needs multiples of ulong, so need to take
10887 * care of mismatches between target ulong and host ulong sizes.
10889 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10890 return -TARGET_EINVAL
;
10892 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10893 mask
= alloca(mask_size
);
10895 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10900 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10902 case TARGET_NR_getcpu
:
10904 unsigned cpu
, node
;
10905 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10906 arg2
? &node
: NULL
,
10908 if (is_error(ret
)) {
10911 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10912 return -TARGET_EFAULT
;
10914 if (arg2
&& put_user_u32(node
, arg2
)) {
10915 return -TARGET_EFAULT
;
10919 case TARGET_NR_sched_setparam
:
10921 struct target_sched_param
*target_schp
;
10922 struct sched_param schp
;
10925 return -TARGET_EINVAL
;
10927 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
10928 return -TARGET_EFAULT
;
10930 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10931 unlock_user_struct(target_schp
, arg2
, 0);
10932 return get_errno(sys_sched_setparam(arg1
, &schp
));
10934 case TARGET_NR_sched_getparam
:
10936 struct target_sched_param
*target_schp
;
10937 struct sched_param schp
;
10940 return -TARGET_EINVAL
;
10942 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
10943 if (!is_error(ret
)) {
10944 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
10945 return -TARGET_EFAULT
;
10947 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10948 unlock_user_struct(target_schp
, arg2
, 1);
10952 case TARGET_NR_sched_setscheduler
:
10954 struct target_sched_param
*target_schp
;
10955 struct sched_param schp
;
10957 return -TARGET_EINVAL
;
10959 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
10960 return -TARGET_EFAULT
;
10962 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10963 unlock_user_struct(target_schp
, arg3
, 0);
10964 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
10966 case TARGET_NR_sched_getscheduler
:
10967 return get_errno(sys_sched_getscheduler(arg1
));
10968 case TARGET_NR_sched_getattr
:
10970 struct target_sched_attr
*target_scha
;
10971 struct sched_attr scha
;
10973 return -TARGET_EINVAL
;
10975 if (arg3
> sizeof(scha
)) {
10976 arg3
= sizeof(scha
);
10978 ret
= get_errno(sys_sched_getattr(arg1
, &scha
, arg3
, arg4
));
10979 if (!is_error(ret
)) {
10980 target_scha
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10981 if (!target_scha
) {
10982 return -TARGET_EFAULT
;
10984 target_scha
->size
= tswap32(scha
.size
);
10985 target_scha
->sched_policy
= tswap32(scha
.sched_policy
);
10986 target_scha
->sched_flags
= tswap64(scha
.sched_flags
);
10987 target_scha
->sched_nice
= tswap32(scha
.sched_nice
);
10988 target_scha
->sched_priority
= tswap32(scha
.sched_priority
);
10989 target_scha
->sched_runtime
= tswap64(scha
.sched_runtime
);
10990 target_scha
->sched_deadline
= tswap64(scha
.sched_deadline
);
10991 target_scha
->sched_period
= tswap64(scha
.sched_period
);
10992 if (scha
.size
> offsetof(struct sched_attr
, sched_util_min
)) {
10993 target_scha
->sched_util_min
= tswap32(scha
.sched_util_min
);
10994 target_scha
->sched_util_max
= tswap32(scha
.sched_util_max
);
10996 unlock_user(target_scha
, arg2
, arg3
);
11000 case TARGET_NR_sched_setattr
:
11002 struct target_sched_attr
*target_scha
;
11003 struct sched_attr scha
;
11007 return -TARGET_EINVAL
;
11009 if (get_user_u32(size
, arg2
)) {
11010 return -TARGET_EFAULT
;
11013 size
= offsetof(struct target_sched_attr
, sched_util_min
);
11015 if (size
< offsetof(struct target_sched_attr
, sched_util_min
)) {
11016 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
11017 return -TARGET_EFAULT
;
11019 return -TARGET_E2BIG
;
11022 zeroed
= check_zeroed_user(arg2
, sizeof(struct target_sched_attr
), size
);
11025 } else if (zeroed
== 0) {
11026 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
11027 return -TARGET_EFAULT
;
11029 return -TARGET_E2BIG
;
11031 if (size
> sizeof(struct target_sched_attr
)) {
11032 size
= sizeof(struct target_sched_attr
);
11035 target_scha
= lock_user(VERIFY_READ
, arg2
, size
, 1);
11036 if (!target_scha
) {
11037 return -TARGET_EFAULT
;
11040 scha
.sched_policy
= tswap32(target_scha
->sched_policy
);
11041 scha
.sched_flags
= tswap64(target_scha
->sched_flags
);
11042 scha
.sched_nice
= tswap32(target_scha
->sched_nice
);
11043 scha
.sched_priority
= tswap32(target_scha
->sched_priority
);
11044 scha
.sched_runtime
= tswap64(target_scha
->sched_runtime
);
11045 scha
.sched_deadline
= tswap64(target_scha
->sched_deadline
);
11046 scha
.sched_period
= tswap64(target_scha
->sched_period
);
11047 if (size
> offsetof(struct target_sched_attr
, sched_util_min
)) {
11048 scha
.sched_util_min
= tswap32(target_scha
->sched_util_min
);
11049 scha
.sched_util_max
= tswap32(target_scha
->sched_util_max
);
11051 unlock_user(target_scha
, arg2
, 0);
11052 return get_errno(sys_sched_setattr(arg1
, &scha
, arg3
));
11054 case TARGET_NR_sched_yield
:
11055 return get_errno(sched_yield());
11056 case TARGET_NR_sched_get_priority_max
:
11057 return get_errno(sched_get_priority_max(arg1
));
11058 case TARGET_NR_sched_get_priority_min
:
11059 return get_errno(sched_get_priority_min(arg1
));
11060 #ifdef TARGET_NR_sched_rr_get_interval
11061 case TARGET_NR_sched_rr_get_interval
:
11063 struct timespec ts
;
11064 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
11065 if (!is_error(ret
)) {
11066 ret
= host_to_target_timespec(arg2
, &ts
);
11071 #ifdef TARGET_NR_sched_rr_get_interval_time64
11072 case TARGET_NR_sched_rr_get_interval_time64
:
11074 struct timespec ts
;
11075 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
11076 if (!is_error(ret
)) {
11077 ret
= host_to_target_timespec64(arg2
, &ts
);
11082 #if defined(TARGET_NR_nanosleep)
11083 case TARGET_NR_nanosleep
:
11085 struct timespec req
, rem
;
11086 target_to_host_timespec(&req
, arg1
);
11087 ret
= get_errno(safe_nanosleep(&req
, &rem
));
11088 if (is_error(ret
) && arg2
) {
11089 host_to_target_timespec(arg2
, &rem
);
11094 case TARGET_NR_prctl
:
11095 return do_prctl(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
11097 #ifdef TARGET_NR_arch_prctl
11098 case TARGET_NR_arch_prctl
:
11099 return do_arch_prctl(cpu_env
, arg1
, arg2
);
11101 #ifdef TARGET_NR_pread64
11102 case TARGET_NR_pread64
:
11103 if (regpairs_aligned(cpu_env
, num
)) {
11107 if (arg2
== 0 && arg3
== 0) {
11108 /* Special-case NULL buffer and zero length, which should succeed */
11111 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11113 return -TARGET_EFAULT
;
11116 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11117 unlock_user(p
, arg2
, ret
);
11119 case TARGET_NR_pwrite64
:
11120 if (regpairs_aligned(cpu_env
, num
)) {
11124 if (arg2
== 0 && arg3
== 0) {
11125 /* Special-case NULL buffer and zero length, which should succeed */
11128 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
11130 return -TARGET_EFAULT
;
11133 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11134 unlock_user(p
, arg2
, 0);
11137 case TARGET_NR_getcwd
:
11138 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11139 return -TARGET_EFAULT
;
11140 ret
= get_errno(sys_getcwd1(p
, arg2
));
11141 unlock_user(p
, arg1
, ret
);
11143 case TARGET_NR_capget
:
11144 case TARGET_NR_capset
:
11146 struct target_user_cap_header
*target_header
;
11147 struct target_user_cap_data
*target_data
= NULL
;
11148 struct __user_cap_header_struct header
;
11149 struct __user_cap_data_struct data
[2];
11150 struct __user_cap_data_struct
*dataptr
= NULL
;
11151 int i
, target_datalen
;
11152 int data_items
= 1;
11154 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11155 return -TARGET_EFAULT
;
11157 header
.version
= tswap32(target_header
->version
);
11158 header
.pid
= tswap32(target_header
->pid
);
11160 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11161 /* Version 2 and up takes pointer to two user_data structs */
11165 target_datalen
= sizeof(*target_data
) * data_items
;
11168 if (num
== TARGET_NR_capget
) {
11169 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11171 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11173 if (!target_data
) {
11174 unlock_user_struct(target_header
, arg1
, 0);
11175 return -TARGET_EFAULT
;
11178 if (num
== TARGET_NR_capset
) {
11179 for (i
= 0; i
< data_items
; i
++) {
11180 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11181 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11182 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11189 if (num
== TARGET_NR_capget
) {
11190 ret
= get_errno(capget(&header
, dataptr
));
11192 ret
= get_errno(capset(&header
, dataptr
));
11195 /* The kernel always updates version for both capget and capset */
11196 target_header
->version
= tswap32(header
.version
);
11197 unlock_user_struct(target_header
, arg1
, 1);
11200 if (num
== TARGET_NR_capget
) {
11201 for (i
= 0; i
< data_items
; i
++) {
11202 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11203 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11204 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11206 unlock_user(target_data
, arg2
, target_datalen
);
11208 unlock_user(target_data
, arg2
, 0);
11213 case TARGET_NR_sigaltstack
:
11214 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11216 #ifdef CONFIG_SENDFILE
11217 #ifdef TARGET_NR_sendfile
11218 case TARGET_NR_sendfile
:
11220 off_t
*offp
= NULL
;
11223 ret
= get_user_sal(off
, arg3
);
11224 if (is_error(ret
)) {
11229 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11230 if (!is_error(ret
) && arg3
) {
11231 abi_long ret2
= put_user_sal(off
, arg3
);
11232 if (is_error(ret2
)) {
11239 #ifdef TARGET_NR_sendfile64
11240 case TARGET_NR_sendfile64
:
11242 off_t
*offp
= NULL
;
11245 ret
= get_user_s64(off
, arg3
);
11246 if (is_error(ret
)) {
11251 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11252 if (!is_error(ret
) && arg3
) {
11253 abi_long ret2
= put_user_s64(off
, arg3
);
11254 if (is_error(ret2
)) {
11262 #ifdef TARGET_NR_vfork
11263 case TARGET_NR_vfork
:
11264 return get_errno(do_fork(cpu_env
,
11265 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11268 #ifdef TARGET_NR_ugetrlimit
11269 case TARGET_NR_ugetrlimit
:
11271 struct rlimit rlim
;
11272 int resource
= target_to_host_resource(arg1
);
11273 ret
= get_errno(getrlimit(resource
, &rlim
));
11274 if (!is_error(ret
)) {
11275 struct target_rlimit
*target_rlim
;
11276 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11277 return -TARGET_EFAULT
;
11278 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11279 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11280 unlock_user_struct(target_rlim
, arg2
, 1);
11285 #ifdef TARGET_NR_truncate64
11286 case TARGET_NR_truncate64
:
11287 if (!(p
= lock_user_string(arg1
)))
11288 return -TARGET_EFAULT
;
11289 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11290 unlock_user(p
, arg1
, 0);
11293 #ifdef TARGET_NR_ftruncate64
11294 case TARGET_NR_ftruncate64
:
11295 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11297 #ifdef TARGET_NR_stat64
11298 case TARGET_NR_stat64
:
11299 if (!(p
= lock_user_string(arg1
))) {
11300 return -TARGET_EFAULT
;
11302 ret
= get_errno(stat(path(p
), &st
));
11303 unlock_user(p
, arg1
, 0);
11304 if (!is_error(ret
))
11305 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11308 #ifdef TARGET_NR_lstat64
11309 case TARGET_NR_lstat64
:
11310 if (!(p
= lock_user_string(arg1
))) {
11311 return -TARGET_EFAULT
;
11313 ret
= get_errno(lstat(path(p
), &st
));
11314 unlock_user(p
, arg1
, 0);
11315 if (!is_error(ret
))
11316 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11319 #ifdef TARGET_NR_fstat64
11320 case TARGET_NR_fstat64
:
11321 ret
= get_errno(fstat(arg1
, &st
));
11322 if (!is_error(ret
))
11323 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11326 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11327 #ifdef TARGET_NR_fstatat64
11328 case TARGET_NR_fstatat64
:
11330 #ifdef TARGET_NR_newfstatat
11331 case TARGET_NR_newfstatat
:
11333 if (!(p
= lock_user_string(arg2
))) {
11334 return -TARGET_EFAULT
;
11336 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11337 unlock_user(p
, arg2
, 0);
11338 if (!is_error(ret
))
11339 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11342 #if defined(TARGET_NR_statx)
11343 case TARGET_NR_statx
:
11345 struct target_statx
*target_stx
;
11349 p
= lock_user_string(arg2
);
11351 return -TARGET_EFAULT
;
11353 #if defined(__NR_statx)
11356 * It is assumed that struct statx is architecture independent.
11358 struct target_statx host_stx
;
11361 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11362 if (!is_error(ret
)) {
11363 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11364 unlock_user(p
, arg2
, 0);
11365 return -TARGET_EFAULT
;
11369 if (ret
!= -TARGET_ENOSYS
) {
11370 unlock_user(p
, arg2
, 0);
11375 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11376 unlock_user(p
, arg2
, 0);
11378 if (!is_error(ret
)) {
11379 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11380 return -TARGET_EFAULT
;
11382 memset(target_stx
, 0, sizeof(*target_stx
));
11383 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11384 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11385 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11386 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11387 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11388 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11389 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11390 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11391 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11392 __put_user(st
.st_size
, &target_stx
->stx_size
);
11393 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11394 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11395 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11396 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11397 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11398 unlock_user_struct(target_stx
, arg5
, 1);
11403 #ifdef TARGET_NR_lchown
11404 case TARGET_NR_lchown
:
11405 if (!(p
= lock_user_string(arg1
)))
11406 return -TARGET_EFAULT
;
11407 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11408 unlock_user(p
, arg1
, 0);
11411 #ifdef TARGET_NR_getuid
11412 case TARGET_NR_getuid
:
11413 return get_errno(high2lowuid(getuid()));
11415 #ifdef TARGET_NR_getgid
11416 case TARGET_NR_getgid
:
11417 return get_errno(high2lowgid(getgid()));
11419 #ifdef TARGET_NR_geteuid
11420 case TARGET_NR_geteuid
:
11421 return get_errno(high2lowuid(geteuid()));
11423 #ifdef TARGET_NR_getegid
11424 case TARGET_NR_getegid
:
11425 return get_errno(high2lowgid(getegid()));
11427 case TARGET_NR_setreuid
:
11428 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11429 case TARGET_NR_setregid
:
11430 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11431 case TARGET_NR_getgroups
:
11433 int gidsetsize
= arg1
;
11434 target_id
*target_grouplist
;
11438 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11439 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11440 if (gidsetsize
== 0)
11442 if (!is_error(ret
)) {
11443 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11444 if (!target_grouplist
)
11445 return -TARGET_EFAULT
;
11446 for(i
= 0;i
< ret
; i
++)
11447 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11448 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11452 case TARGET_NR_setgroups
:
11454 int gidsetsize
= arg1
;
11455 target_id
*target_grouplist
;
11456 gid_t
*grouplist
= NULL
;
11459 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11460 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11461 if (!target_grouplist
) {
11462 return -TARGET_EFAULT
;
11464 for (i
= 0; i
< gidsetsize
; i
++) {
11465 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11467 unlock_user(target_grouplist
, arg2
, 0);
11469 return get_errno(setgroups(gidsetsize
, grouplist
));
11471 case TARGET_NR_fchown
:
11472 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11473 #if defined(TARGET_NR_fchownat)
11474 case TARGET_NR_fchownat
:
11475 if (!(p
= lock_user_string(arg2
)))
11476 return -TARGET_EFAULT
;
11477 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11478 low2highgid(arg4
), arg5
));
11479 unlock_user(p
, arg2
, 0);
11482 #ifdef TARGET_NR_setresuid
11483 case TARGET_NR_setresuid
:
11484 return get_errno(sys_setresuid(low2highuid(arg1
),
11486 low2highuid(arg3
)));
11488 #ifdef TARGET_NR_getresuid
11489 case TARGET_NR_getresuid
:
11491 uid_t ruid
, euid
, suid
;
11492 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11493 if (!is_error(ret
)) {
11494 if (put_user_id(high2lowuid(ruid
), arg1
)
11495 || put_user_id(high2lowuid(euid
), arg2
)
11496 || put_user_id(high2lowuid(suid
), arg3
))
11497 return -TARGET_EFAULT
;
11502 #ifdef TARGET_NR_getresgid
11503 case TARGET_NR_setresgid
:
11504 return get_errno(sys_setresgid(low2highgid(arg1
),
11506 low2highgid(arg3
)));
11508 #ifdef TARGET_NR_getresgid
11509 case TARGET_NR_getresgid
:
11511 gid_t rgid
, egid
, sgid
;
11512 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11513 if (!is_error(ret
)) {
11514 if (put_user_id(high2lowgid(rgid
), arg1
)
11515 || put_user_id(high2lowgid(egid
), arg2
)
11516 || put_user_id(high2lowgid(sgid
), arg3
))
11517 return -TARGET_EFAULT
;
11522 #ifdef TARGET_NR_chown
11523 case TARGET_NR_chown
:
11524 if (!(p
= lock_user_string(arg1
)))
11525 return -TARGET_EFAULT
;
11526 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11527 unlock_user(p
, arg1
, 0);
11530 case TARGET_NR_setuid
:
11531 return get_errno(sys_setuid(low2highuid(arg1
)));
11532 case TARGET_NR_setgid
:
11533 return get_errno(sys_setgid(low2highgid(arg1
)));
11534 case TARGET_NR_setfsuid
:
11535 return get_errno(setfsuid(arg1
));
11536 case TARGET_NR_setfsgid
:
11537 return get_errno(setfsgid(arg1
));
11539 #ifdef TARGET_NR_lchown32
11540 case TARGET_NR_lchown32
:
11541 if (!(p
= lock_user_string(arg1
)))
11542 return -TARGET_EFAULT
;
11543 ret
= get_errno(lchown(p
, arg2
, arg3
));
11544 unlock_user(p
, arg1
, 0);
11547 #ifdef TARGET_NR_getuid32
11548 case TARGET_NR_getuid32
:
11549 return get_errno(getuid());
11552 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11553 /* Alpha specific */
11554 case TARGET_NR_getxuid
:
11558 cpu_env
->ir
[IR_A4
]=euid
;
11560 return get_errno(getuid());
11562 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11563 /* Alpha specific */
11564 case TARGET_NR_getxgid
:
11568 cpu_env
->ir
[IR_A4
]=egid
;
11570 return get_errno(getgid());
11572 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11573 /* Alpha specific */
11574 case TARGET_NR_osf_getsysinfo
:
11575 ret
= -TARGET_EOPNOTSUPP
;
11577 case TARGET_GSI_IEEE_FP_CONTROL
:
11579 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11580 uint64_t swcr
= cpu_env
->swcr
;
11582 swcr
&= ~SWCR_STATUS_MASK
;
11583 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11585 if (put_user_u64 (swcr
, arg2
))
11586 return -TARGET_EFAULT
;
11591 /* case GSI_IEEE_STATE_AT_SIGNAL:
11592 -- Not implemented in linux kernel.
11594 -- Retrieves current unaligned access state; not much used.
11595 case GSI_PROC_TYPE:
11596 -- Retrieves implver information; surely not used.
11597 case GSI_GET_HWRPB:
11598 -- Grabs a copy of the HWRPB; surely not used.
11603 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11604 /* Alpha specific */
11605 case TARGET_NR_osf_setsysinfo
:
11606 ret
= -TARGET_EOPNOTSUPP
;
11608 case TARGET_SSI_IEEE_FP_CONTROL
:
11610 uint64_t swcr
, fpcr
;
11612 if (get_user_u64 (swcr
, arg2
)) {
11613 return -TARGET_EFAULT
;
11617 * The kernel calls swcr_update_status to update the
11618 * status bits from the fpcr at every point that it
11619 * could be queried. Therefore, we store the status
11620 * bits only in FPCR.
11622 cpu_env
->swcr
= swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11624 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11625 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11626 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11627 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11632 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11634 uint64_t exc
, fpcr
, fex
;
11636 if (get_user_u64(exc
, arg2
)) {
11637 return -TARGET_EFAULT
;
11639 exc
&= SWCR_STATUS_MASK
;
11640 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11642 /* Old exceptions are not signaled. */
11643 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11645 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11646 fex
&= (cpu_env
)->swcr
;
11648 /* Update the hardware fpcr. */
11649 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11650 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11653 int si_code
= TARGET_FPE_FLTUNK
;
11654 target_siginfo_t info
;
11656 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11657 si_code
= TARGET_FPE_FLTUND
;
11659 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11660 si_code
= TARGET_FPE_FLTRES
;
11662 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11663 si_code
= TARGET_FPE_FLTUND
;
11665 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11666 si_code
= TARGET_FPE_FLTOVF
;
11668 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11669 si_code
= TARGET_FPE_FLTDIV
;
11671 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11672 si_code
= TARGET_FPE_FLTINV
;
11675 info
.si_signo
= SIGFPE
;
11677 info
.si_code
= si_code
;
11678 info
._sifields
._sigfault
._addr
= (cpu_env
)->pc
;
11679 queue_signal(cpu_env
, info
.si_signo
,
11680 QEMU_SI_FAULT
, &info
);
11686 /* case SSI_NVPAIRS:
11687 -- Used with SSIN_UACPROC to enable unaligned accesses.
11688 case SSI_IEEE_STATE_AT_SIGNAL:
11689 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11690 -- Not implemented in linux kernel
11695 #ifdef TARGET_NR_osf_sigprocmask
11696 /* Alpha specific. */
11697 case TARGET_NR_osf_sigprocmask
:
11701 sigset_t set
, oldset
;
11704 case TARGET_SIG_BLOCK
:
11707 case TARGET_SIG_UNBLOCK
:
11710 case TARGET_SIG_SETMASK
:
11714 return -TARGET_EINVAL
;
11717 target_to_host_old_sigset(&set
, &mask
);
11718 ret
= do_sigprocmask(how
, &set
, &oldset
);
11720 host_to_target_old_sigset(&mask
, &oldset
);
11727 #ifdef TARGET_NR_getgid32
11728 case TARGET_NR_getgid32
:
11729 return get_errno(getgid());
11731 #ifdef TARGET_NR_geteuid32
11732 case TARGET_NR_geteuid32
:
11733 return get_errno(geteuid());
11735 #ifdef TARGET_NR_getegid32
11736 case TARGET_NR_getegid32
:
11737 return get_errno(getegid());
11739 #ifdef TARGET_NR_setreuid32
11740 case TARGET_NR_setreuid32
:
11741 return get_errno(setreuid(arg1
, arg2
));
11743 #ifdef TARGET_NR_setregid32
11744 case TARGET_NR_setregid32
:
11745 return get_errno(setregid(arg1
, arg2
));
11747 #ifdef TARGET_NR_getgroups32
11748 case TARGET_NR_getgroups32
:
11750 int gidsetsize
= arg1
;
11751 uint32_t *target_grouplist
;
11755 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11756 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11757 if (gidsetsize
== 0)
11759 if (!is_error(ret
)) {
11760 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11761 if (!target_grouplist
) {
11762 return -TARGET_EFAULT
;
11764 for(i
= 0;i
< ret
; i
++)
11765 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11766 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11771 #ifdef TARGET_NR_setgroups32
11772 case TARGET_NR_setgroups32
:
11774 int gidsetsize
= arg1
;
11775 uint32_t *target_grouplist
;
11779 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11780 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11781 if (!target_grouplist
) {
11782 return -TARGET_EFAULT
;
11784 for(i
= 0;i
< gidsetsize
; i
++)
11785 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11786 unlock_user(target_grouplist
, arg2
, 0);
11787 return get_errno(setgroups(gidsetsize
, grouplist
));
11790 #ifdef TARGET_NR_fchown32
11791 case TARGET_NR_fchown32
:
11792 return get_errno(fchown(arg1
, arg2
, arg3
));
11794 #ifdef TARGET_NR_setresuid32
11795 case TARGET_NR_setresuid32
:
11796 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11798 #ifdef TARGET_NR_getresuid32
11799 case TARGET_NR_getresuid32
:
11801 uid_t ruid
, euid
, suid
;
11802 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11803 if (!is_error(ret
)) {
11804 if (put_user_u32(ruid
, arg1
)
11805 || put_user_u32(euid
, arg2
)
11806 || put_user_u32(suid
, arg3
))
11807 return -TARGET_EFAULT
;
11812 #ifdef TARGET_NR_setresgid32
11813 case TARGET_NR_setresgid32
:
11814 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11816 #ifdef TARGET_NR_getresgid32
11817 case TARGET_NR_getresgid32
:
11819 gid_t rgid
, egid
, sgid
;
11820 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11821 if (!is_error(ret
)) {
11822 if (put_user_u32(rgid
, arg1
)
11823 || put_user_u32(egid
, arg2
)
11824 || put_user_u32(sgid
, arg3
))
11825 return -TARGET_EFAULT
;
11830 #ifdef TARGET_NR_chown32
11831 case TARGET_NR_chown32
:
11832 if (!(p
= lock_user_string(arg1
)))
11833 return -TARGET_EFAULT
;
11834 ret
= get_errno(chown(p
, arg2
, arg3
));
11835 unlock_user(p
, arg1
, 0);
11838 #ifdef TARGET_NR_setuid32
11839 case TARGET_NR_setuid32
:
11840 return get_errno(sys_setuid(arg1
));
11842 #ifdef TARGET_NR_setgid32
11843 case TARGET_NR_setgid32
:
11844 return get_errno(sys_setgid(arg1
));
11846 #ifdef TARGET_NR_setfsuid32
11847 case TARGET_NR_setfsuid32
:
11848 return get_errno(setfsuid(arg1
));
11850 #ifdef TARGET_NR_setfsgid32
11851 case TARGET_NR_setfsgid32
:
11852 return get_errno(setfsgid(arg1
));
11854 #ifdef TARGET_NR_mincore
11855 case TARGET_NR_mincore
:
11857 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11859 return -TARGET_ENOMEM
;
11861 p
= lock_user_string(arg3
);
11863 ret
= -TARGET_EFAULT
;
11865 ret
= get_errno(mincore(a
, arg2
, p
));
11866 unlock_user(p
, arg3
, ret
);
11868 unlock_user(a
, arg1
, 0);
11872 #ifdef TARGET_NR_arm_fadvise64_64
11873 case TARGET_NR_arm_fadvise64_64
:
11874 /* arm_fadvise64_64 looks like fadvise64_64 but
11875 * with different argument order: fd, advice, offset, len
11876 * rather than the usual fd, offset, len, advice.
11877 * Note that offset and len are both 64-bit so appear as
11878 * pairs of 32-bit registers.
11880 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11881 target_offset64(arg5
, arg6
), arg2
);
11882 return -host_to_target_errno(ret
);
11885 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11887 #ifdef TARGET_NR_fadvise64_64
11888 case TARGET_NR_fadvise64_64
:
11889 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11890 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11898 /* 6 args: fd, offset (high, low), len (high, low), advice */
11899 if (regpairs_aligned(cpu_env
, num
)) {
11900 /* offset is in (3,4), len in (5,6) and advice in 7 */
11908 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11909 target_offset64(arg4
, arg5
), arg6
);
11910 return -host_to_target_errno(ret
);
11913 #ifdef TARGET_NR_fadvise64
11914 case TARGET_NR_fadvise64
:
11915 /* 5 args: fd, offset (high, low), len, advice */
11916 if (regpairs_aligned(cpu_env
, num
)) {
11917 /* offset is in (3,4), len in 5 and advice in 6 */
11923 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11924 return -host_to_target_errno(ret
);
11927 #else /* not a 32-bit ABI */
11928 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11929 #ifdef TARGET_NR_fadvise64_64
11930 case TARGET_NR_fadvise64_64
:
11932 #ifdef TARGET_NR_fadvise64
11933 case TARGET_NR_fadvise64
:
11935 #ifdef TARGET_S390X
11937 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11938 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11939 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11940 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11944 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11946 #endif /* end of 64-bit ABI fadvise handling */
11948 #ifdef TARGET_NR_madvise
11949 case TARGET_NR_madvise
:
11950 return target_madvise(arg1
, arg2
, arg3
);
11952 #ifdef TARGET_NR_fcntl64
11953 case TARGET_NR_fcntl64
:
11957 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11958 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11961 if (!cpu_env
->eabi
) {
11962 copyfrom
= copy_from_user_oabi_flock64
;
11963 copyto
= copy_to_user_oabi_flock64
;
11967 cmd
= target_to_host_fcntl_cmd(arg2
);
11968 if (cmd
== -TARGET_EINVAL
) {
11973 case TARGET_F_GETLK64
:
11974 ret
= copyfrom(&fl
, arg3
);
11978 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11980 ret
= copyto(arg3
, &fl
);
11984 case TARGET_F_SETLK64
:
11985 case TARGET_F_SETLKW64
:
11986 ret
= copyfrom(&fl
, arg3
);
11990 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11993 ret
= do_fcntl(arg1
, arg2
, arg3
);
11999 #ifdef TARGET_NR_cacheflush
12000 case TARGET_NR_cacheflush
:
12001 /* self-modifying code is handled automatically, so nothing needed */
12004 #ifdef TARGET_NR_getpagesize
12005 case TARGET_NR_getpagesize
:
12006 return TARGET_PAGE_SIZE
;
12008 case TARGET_NR_gettid
:
12009 return get_errno(sys_gettid());
12010 #ifdef TARGET_NR_readahead
12011 case TARGET_NR_readahead
:
12012 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12013 if (regpairs_aligned(cpu_env
, num
)) {
12018 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
12020 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
12025 #ifdef TARGET_NR_setxattr
12026 case TARGET_NR_listxattr
:
12027 case TARGET_NR_llistxattr
:
12031 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
12033 return -TARGET_EFAULT
;
12036 p
= lock_user_string(arg1
);
12038 if (num
== TARGET_NR_listxattr
) {
12039 ret
= get_errno(listxattr(p
, b
, arg3
));
12041 ret
= get_errno(llistxattr(p
, b
, arg3
));
12044 ret
= -TARGET_EFAULT
;
12046 unlock_user(p
, arg1
, 0);
12047 unlock_user(b
, arg2
, arg3
);
12050 case TARGET_NR_flistxattr
:
12054 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
12056 return -TARGET_EFAULT
;
12059 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
12060 unlock_user(b
, arg2
, arg3
);
12063 case TARGET_NR_setxattr
:
12064 case TARGET_NR_lsetxattr
:
12066 void *p
, *n
, *v
= 0;
12068 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12070 return -TARGET_EFAULT
;
12073 p
= lock_user_string(arg1
);
12074 n
= lock_user_string(arg2
);
12076 if (num
== TARGET_NR_setxattr
) {
12077 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
12079 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
12082 ret
= -TARGET_EFAULT
;
12084 unlock_user(p
, arg1
, 0);
12085 unlock_user(n
, arg2
, 0);
12086 unlock_user(v
, arg3
, 0);
12089 case TARGET_NR_fsetxattr
:
12093 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12095 return -TARGET_EFAULT
;
12098 n
= lock_user_string(arg2
);
12100 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
12102 ret
= -TARGET_EFAULT
;
12104 unlock_user(n
, arg2
, 0);
12105 unlock_user(v
, arg3
, 0);
12108 case TARGET_NR_getxattr
:
12109 case TARGET_NR_lgetxattr
:
12111 void *p
, *n
, *v
= 0;
12113 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12115 return -TARGET_EFAULT
;
12118 p
= lock_user_string(arg1
);
12119 n
= lock_user_string(arg2
);
12121 if (num
== TARGET_NR_getxattr
) {
12122 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12124 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12127 ret
= -TARGET_EFAULT
;
12129 unlock_user(p
, arg1
, 0);
12130 unlock_user(n
, arg2
, 0);
12131 unlock_user(v
, arg3
, arg4
);
12134 case TARGET_NR_fgetxattr
:
12138 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12140 return -TARGET_EFAULT
;
12143 n
= lock_user_string(arg2
);
12145 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12147 ret
= -TARGET_EFAULT
;
12149 unlock_user(n
, arg2
, 0);
12150 unlock_user(v
, arg3
, arg4
);
12153 case TARGET_NR_removexattr
:
12154 case TARGET_NR_lremovexattr
:
12157 p
= lock_user_string(arg1
);
12158 n
= lock_user_string(arg2
);
12160 if (num
== TARGET_NR_removexattr
) {
12161 ret
= get_errno(removexattr(p
, n
));
12163 ret
= get_errno(lremovexattr(p
, n
));
12166 ret
= -TARGET_EFAULT
;
12168 unlock_user(p
, arg1
, 0);
12169 unlock_user(n
, arg2
, 0);
12172 case TARGET_NR_fremovexattr
:
12175 n
= lock_user_string(arg2
);
12177 ret
= get_errno(fremovexattr(arg1
, n
));
12179 ret
= -TARGET_EFAULT
;
12181 unlock_user(n
, arg2
, 0);
12185 #endif /* CONFIG_ATTR */
12186 #ifdef TARGET_NR_set_thread_area
12187 case TARGET_NR_set_thread_area
:
12188 #if defined(TARGET_MIPS)
12189 cpu_env
->active_tc
.CP0_UserLocal
= arg1
;
12191 #elif defined(TARGET_CRIS)
12193 ret
= -TARGET_EINVAL
;
12195 cpu_env
->pregs
[PR_PID
] = arg1
;
12199 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12200 return do_set_thread_area(cpu_env
, arg1
);
12201 #elif defined(TARGET_M68K)
12203 TaskState
*ts
= cpu
->opaque
;
12204 ts
->tp_value
= arg1
;
12208 return -TARGET_ENOSYS
;
12211 #ifdef TARGET_NR_get_thread_area
12212 case TARGET_NR_get_thread_area
:
12213 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12214 return do_get_thread_area(cpu_env
, arg1
);
12215 #elif defined(TARGET_M68K)
12217 TaskState
*ts
= cpu
->opaque
;
12218 return ts
->tp_value
;
12221 return -TARGET_ENOSYS
;
12224 #ifdef TARGET_NR_getdomainname
12225 case TARGET_NR_getdomainname
:
12226 return -TARGET_ENOSYS
;
12229 #ifdef TARGET_NR_clock_settime
12230 case TARGET_NR_clock_settime
:
12232 struct timespec ts
;
12234 ret
= target_to_host_timespec(&ts
, arg2
);
12235 if (!is_error(ret
)) {
12236 ret
= get_errno(clock_settime(arg1
, &ts
));
12241 #ifdef TARGET_NR_clock_settime64
12242 case TARGET_NR_clock_settime64
:
12244 struct timespec ts
;
12246 ret
= target_to_host_timespec64(&ts
, arg2
);
12247 if (!is_error(ret
)) {
12248 ret
= get_errno(clock_settime(arg1
, &ts
));
12253 #ifdef TARGET_NR_clock_gettime
12254 case TARGET_NR_clock_gettime
:
12256 struct timespec ts
;
12257 ret
= get_errno(clock_gettime(arg1
, &ts
));
12258 if (!is_error(ret
)) {
12259 ret
= host_to_target_timespec(arg2
, &ts
);
12264 #ifdef TARGET_NR_clock_gettime64
12265 case TARGET_NR_clock_gettime64
:
12267 struct timespec ts
;
12268 ret
= get_errno(clock_gettime(arg1
, &ts
));
12269 if (!is_error(ret
)) {
12270 ret
= host_to_target_timespec64(arg2
, &ts
);
12275 #ifdef TARGET_NR_clock_getres
12276 case TARGET_NR_clock_getres
:
12278 struct timespec ts
;
12279 ret
= get_errno(clock_getres(arg1
, &ts
));
12280 if (!is_error(ret
)) {
12281 host_to_target_timespec(arg2
, &ts
);
12286 #ifdef TARGET_NR_clock_getres_time64
12287 case TARGET_NR_clock_getres_time64
:
12289 struct timespec ts
;
12290 ret
= get_errno(clock_getres(arg1
, &ts
));
12291 if (!is_error(ret
)) {
12292 host_to_target_timespec64(arg2
, &ts
);
12297 #ifdef TARGET_NR_clock_nanosleep
12298 case TARGET_NR_clock_nanosleep
:
12300 struct timespec ts
;
12301 if (target_to_host_timespec(&ts
, arg3
)) {
12302 return -TARGET_EFAULT
;
12304 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12305 &ts
, arg4
? &ts
: NULL
));
12307 * if the call is interrupted by a signal handler, it fails
12308 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12309 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12311 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12312 host_to_target_timespec(arg4
, &ts
)) {
12313 return -TARGET_EFAULT
;
12319 #ifdef TARGET_NR_clock_nanosleep_time64
12320 case TARGET_NR_clock_nanosleep_time64
:
12322 struct timespec ts
;
12324 if (target_to_host_timespec64(&ts
, arg3
)) {
12325 return -TARGET_EFAULT
;
12328 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12329 &ts
, arg4
? &ts
: NULL
));
12331 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12332 host_to_target_timespec64(arg4
, &ts
)) {
12333 return -TARGET_EFAULT
;
12339 #if defined(TARGET_NR_set_tid_address)
12340 case TARGET_NR_set_tid_address
:
12342 TaskState
*ts
= cpu
->opaque
;
12343 ts
->child_tidptr
= arg1
;
12344 /* do not call host set_tid_address() syscall, instead return tid() */
12345 return get_errno(sys_gettid());
12349 case TARGET_NR_tkill
:
12350 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12352 case TARGET_NR_tgkill
:
12353 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12354 target_to_host_signal(arg3
)));
12356 #ifdef TARGET_NR_set_robust_list
12357 case TARGET_NR_set_robust_list
:
12358 case TARGET_NR_get_robust_list
:
12359 /* The ABI for supporting robust futexes has userspace pass
12360 * the kernel a pointer to a linked list which is updated by
12361 * userspace after the syscall; the list is walked by the kernel
12362 * when the thread exits. Since the linked list in QEMU guest
12363 * memory isn't a valid linked list for the host and we have
12364 * no way to reliably intercept the thread-death event, we can't
12365 * support these. Silently return ENOSYS so that guest userspace
12366 * falls back to a non-robust futex implementation (which should
12367 * be OK except in the corner case of the guest crashing while
12368 * holding a mutex that is shared with another process via
12371 return -TARGET_ENOSYS
;
12374 #if defined(TARGET_NR_utimensat)
12375 case TARGET_NR_utimensat
:
12377 struct timespec
*tsp
, ts
[2];
12381 if (target_to_host_timespec(ts
, arg3
)) {
12382 return -TARGET_EFAULT
;
12384 if (target_to_host_timespec(ts
+ 1, arg3
+
12385 sizeof(struct target_timespec
))) {
12386 return -TARGET_EFAULT
;
12391 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12393 if (!(p
= lock_user_string(arg2
))) {
12394 return -TARGET_EFAULT
;
12396 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12397 unlock_user(p
, arg2
, 0);
12402 #ifdef TARGET_NR_utimensat_time64
12403 case TARGET_NR_utimensat_time64
:
12405 struct timespec
*tsp
, ts
[2];
12409 if (target_to_host_timespec64(ts
, arg3
)) {
12410 return -TARGET_EFAULT
;
12412 if (target_to_host_timespec64(ts
+ 1, arg3
+
12413 sizeof(struct target__kernel_timespec
))) {
12414 return -TARGET_EFAULT
;
12419 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12421 p
= lock_user_string(arg2
);
12423 return -TARGET_EFAULT
;
12425 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12426 unlock_user(p
, arg2
, 0);
12431 #ifdef TARGET_NR_futex
12432 case TARGET_NR_futex
:
12433 return do_futex(cpu
, false, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12435 #ifdef TARGET_NR_futex_time64
12436 case TARGET_NR_futex_time64
:
12437 return do_futex(cpu
, true, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12439 #ifdef CONFIG_INOTIFY
12440 #if defined(TARGET_NR_inotify_init)
12441 case TARGET_NR_inotify_init
:
12442 ret
= get_errno(inotify_init());
12444 fd_trans_register(ret
, &target_inotify_trans
);
12448 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12449 case TARGET_NR_inotify_init1
:
12450 ret
= get_errno(inotify_init1(target_to_host_bitmask(arg1
,
12451 fcntl_flags_tbl
)));
12453 fd_trans_register(ret
, &target_inotify_trans
);
12457 #if defined(TARGET_NR_inotify_add_watch)
12458 case TARGET_NR_inotify_add_watch
:
12459 p
= lock_user_string(arg2
);
12460 ret
= get_errno(inotify_add_watch(arg1
, path(p
), arg3
));
12461 unlock_user(p
, arg2
, 0);
12464 #if defined(TARGET_NR_inotify_rm_watch)
12465 case TARGET_NR_inotify_rm_watch
:
12466 return get_errno(inotify_rm_watch(arg1
, arg2
));
12470 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12471 case TARGET_NR_mq_open
:
12473 struct mq_attr posix_mq_attr
;
12474 struct mq_attr
*pposix_mq_attr
;
12477 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12478 pposix_mq_attr
= NULL
;
12480 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12481 return -TARGET_EFAULT
;
12483 pposix_mq_attr
= &posix_mq_attr
;
12485 p
= lock_user_string(arg1
- 1);
12487 return -TARGET_EFAULT
;
12489 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12490 unlock_user (p
, arg1
, 0);
12494 case TARGET_NR_mq_unlink
:
12495 p
= lock_user_string(arg1
- 1);
12497 return -TARGET_EFAULT
;
12499 ret
= get_errno(mq_unlink(p
));
12500 unlock_user (p
, arg1
, 0);
12503 #ifdef TARGET_NR_mq_timedsend
12504 case TARGET_NR_mq_timedsend
:
12506 struct timespec ts
;
12508 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12510 if (target_to_host_timespec(&ts
, arg5
)) {
12511 return -TARGET_EFAULT
;
12513 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12514 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12515 return -TARGET_EFAULT
;
12518 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12520 unlock_user (p
, arg2
, arg3
);
12524 #ifdef TARGET_NR_mq_timedsend_time64
12525 case TARGET_NR_mq_timedsend_time64
:
12527 struct timespec ts
;
12529 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12531 if (target_to_host_timespec64(&ts
, arg5
)) {
12532 return -TARGET_EFAULT
;
12534 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12535 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12536 return -TARGET_EFAULT
;
12539 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12541 unlock_user(p
, arg2
, arg3
);
12546 #ifdef TARGET_NR_mq_timedreceive
12547 case TARGET_NR_mq_timedreceive
:
12549 struct timespec ts
;
12552 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12554 if (target_to_host_timespec(&ts
, arg5
)) {
12555 return -TARGET_EFAULT
;
12557 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12559 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12560 return -TARGET_EFAULT
;
12563 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12566 unlock_user (p
, arg2
, arg3
);
12568 put_user_u32(prio
, arg4
);
12572 #ifdef TARGET_NR_mq_timedreceive_time64
12573 case TARGET_NR_mq_timedreceive_time64
:
12575 struct timespec ts
;
12578 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12580 if (target_to_host_timespec64(&ts
, arg5
)) {
12581 return -TARGET_EFAULT
;
12583 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12585 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12586 return -TARGET_EFAULT
;
12589 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12592 unlock_user(p
, arg2
, arg3
);
12594 put_user_u32(prio
, arg4
);
12600 /* Not implemented for now... */
12601 /* case TARGET_NR_mq_notify: */
12604 case TARGET_NR_mq_getsetattr
:
12606 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12609 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12610 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12611 &posix_mq_attr_out
));
12612 } else if (arg3
!= 0) {
12613 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12615 if (ret
== 0 && arg3
!= 0) {
12616 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12622 #ifdef CONFIG_SPLICE
12623 #ifdef TARGET_NR_tee
12624 case TARGET_NR_tee
:
12626 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12630 #ifdef TARGET_NR_splice
12631 case TARGET_NR_splice
:
12633 loff_t loff_in
, loff_out
;
12634 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12636 if (get_user_u64(loff_in
, arg2
)) {
12637 return -TARGET_EFAULT
;
12639 ploff_in
= &loff_in
;
12642 if (get_user_u64(loff_out
, arg4
)) {
12643 return -TARGET_EFAULT
;
12645 ploff_out
= &loff_out
;
12647 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12649 if (put_user_u64(loff_in
, arg2
)) {
12650 return -TARGET_EFAULT
;
12654 if (put_user_u64(loff_out
, arg4
)) {
12655 return -TARGET_EFAULT
;
12661 #ifdef TARGET_NR_vmsplice
12662 case TARGET_NR_vmsplice
:
12664 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12666 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12667 unlock_iovec(vec
, arg2
, arg3
, 0);
12669 ret
= -host_to_target_errno(errno
);
12674 #endif /* CONFIG_SPLICE */
12675 #ifdef CONFIG_EVENTFD
12676 #if defined(TARGET_NR_eventfd)
12677 case TARGET_NR_eventfd
:
12678 ret
= get_errno(eventfd(arg1
, 0));
12680 fd_trans_register(ret
, &target_eventfd_trans
);
12684 #if defined(TARGET_NR_eventfd2)
12685 case TARGET_NR_eventfd2
:
12687 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12688 if (arg2
& TARGET_O_NONBLOCK
) {
12689 host_flags
|= O_NONBLOCK
;
12691 if (arg2
& TARGET_O_CLOEXEC
) {
12692 host_flags
|= O_CLOEXEC
;
12694 ret
= get_errno(eventfd(arg1
, host_flags
));
12696 fd_trans_register(ret
, &target_eventfd_trans
);
12701 #endif /* CONFIG_EVENTFD */
12702 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12703 case TARGET_NR_fallocate
:
12704 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12705 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12706 target_offset64(arg5
, arg6
)));
12708 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12712 #if defined(CONFIG_SYNC_FILE_RANGE)
12713 #if defined(TARGET_NR_sync_file_range)
12714 case TARGET_NR_sync_file_range
:
12715 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12716 #if defined(TARGET_MIPS)
12717 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12718 target_offset64(arg5
, arg6
), arg7
));
12720 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12721 target_offset64(arg4
, arg5
), arg6
));
12722 #endif /* !TARGET_MIPS */
12724 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12728 #if defined(TARGET_NR_sync_file_range2) || \
12729 defined(TARGET_NR_arm_sync_file_range)
12730 #if defined(TARGET_NR_sync_file_range2)
12731 case TARGET_NR_sync_file_range2
:
12733 #if defined(TARGET_NR_arm_sync_file_range)
12734 case TARGET_NR_arm_sync_file_range
:
12736 /* This is like sync_file_range but the arguments are reordered */
12737 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12738 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12739 target_offset64(arg5
, arg6
), arg2
));
12741 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12746 #if defined(TARGET_NR_signalfd4)
12747 case TARGET_NR_signalfd4
:
12748 return do_signalfd4(arg1
, arg2
, arg4
);
12750 #if defined(TARGET_NR_signalfd)
12751 case TARGET_NR_signalfd
:
12752 return do_signalfd4(arg1
, arg2
, 0);
12754 #if defined(CONFIG_EPOLL)
12755 #if defined(TARGET_NR_epoll_create)
12756 case TARGET_NR_epoll_create
:
12757 return get_errno(epoll_create(arg1
));
12759 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12760 case TARGET_NR_epoll_create1
:
12761 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12763 #if defined(TARGET_NR_epoll_ctl)
12764 case TARGET_NR_epoll_ctl
:
12766 struct epoll_event ep
;
12767 struct epoll_event
*epp
= 0;
12769 if (arg2
!= EPOLL_CTL_DEL
) {
12770 struct target_epoll_event
*target_ep
;
12771 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12772 return -TARGET_EFAULT
;
12774 ep
.events
= tswap32(target_ep
->events
);
12776 * The epoll_data_t union is just opaque data to the kernel,
12777 * so we transfer all 64 bits across and need not worry what
12778 * actual data type it is.
12780 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12781 unlock_user_struct(target_ep
, arg4
, 0);
12784 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12785 * non-null pointer, even though this argument is ignored.
12790 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12794 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12795 #if defined(TARGET_NR_epoll_wait)
12796 case TARGET_NR_epoll_wait
:
12798 #if defined(TARGET_NR_epoll_pwait)
12799 case TARGET_NR_epoll_pwait
:
12802 struct target_epoll_event
*target_ep
;
12803 struct epoll_event
*ep
;
12805 int maxevents
= arg3
;
12806 int timeout
= arg4
;
12808 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12809 return -TARGET_EINVAL
;
12812 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12813 maxevents
* sizeof(struct target_epoll_event
), 1);
12815 return -TARGET_EFAULT
;
12818 ep
= g_try_new(struct epoll_event
, maxevents
);
12820 unlock_user(target_ep
, arg2
, 0);
12821 return -TARGET_ENOMEM
;
12825 #if defined(TARGET_NR_epoll_pwait)
12826 case TARGET_NR_epoll_pwait
:
12828 sigset_t
*set
= NULL
;
12831 ret
= process_sigsuspend_mask(&set
, arg5
, arg6
);
12837 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12838 set
, SIGSET_T_SIZE
));
12841 finish_sigsuspend_mask(ret
);
12846 #if defined(TARGET_NR_epoll_wait)
12847 case TARGET_NR_epoll_wait
:
12848 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12853 ret
= -TARGET_ENOSYS
;
12855 if (!is_error(ret
)) {
12857 for (i
= 0; i
< ret
; i
++) {
12858 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12859 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12861 unlock_user(target_ep
, arg2
,
12862 ret
* sizeof(struct target_epoll_event
));
12864 unlock_user(target_ep
, arg2
, 0);
12871 #ifdef TARGET_NR_prlimit64
12872 case TARGET_NR_prlimit64
:
12874 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12875 struct target_rlimit64
*target_rnew
, *target_rold
;
12876 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12877 int resource
= target_to_host_resource(arg2
);
12879 if (arg3
&& (resource
!= RLIMIT_AS
&&
12880 resource
!= RLIMIT_DATA
&&
12881 resource
!= RLIMIT_STACK
)) {
12882 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12883 return -TARGET_EFAULT
;
12885 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12886 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12887 unlock_user_struct(target_rnew
, arg3
, 0);
12891 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12892 if (!is_error(ret
) && arg4
) {
12893 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12894 return -TARGET_EFAULT
;
12896 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12897 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12898 unlock_user_struct(target_rold
, arg4
, 1);
12903 #ifdef TARGET_NR_gethostname
12904 case TARGET_NR_gethostname
:
12906 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12908 ret
= get_errno(gethostname(name
, arg2
));
12909 unlock_user(name
, arg1
, arg2
);
12911 ret
= -TARGET_EFAULT
;
12916 #ifdef TARGET_NR_atomic_cmpxchg_32
12917 case TARGET_NR_atomic_cmpxchg_32
:
12919 /* should use start_exclusive from main.c */
12920 abi_ulong mem_value
;
12921 if (get_user_u32(mem_value
, arg6
)) {
12922 target_siginfo_t info
;
12923 info
.si_signo
= SIGSEGV
;
12925 info
.si_code
= TARGET_SEGV_MAPERR
;
12926 info
._sifields
._sigfault
._addr
= arg6
;
12927 queue_signal(cpu_env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
12931 if (mem_value
== arg2
)
12932 put_user_u32(arg1
, arg6
);
12936 #ifdef TARGET_NR_atomic_barrier
12937 case TARGET_NR_atomic_barrier
:
12938 /* Like the kernel implementation and the
12939 qemu arm barrier, no-op this? */
12943 #ifdef TARGET_NR_timer_create
12944 case TARGET_NR_timer_create
:
12946 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12948 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12951 int timer_index
= next_free_host_timer();
12953 if (timer_index
< 0) {
12954 ret
= -TARGET_EAGAIN
;
12956 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12959 phost_sevp
= &host_sevp
;
12960 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12962 free_host_timer_slot(timer_index
);
12967 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12969 free_host_timer_slot(timer_index
);
12971 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12972 timer_delete(*phtimer
);
12973 free_host_timer_slot(timer_index
);
12974 return -TARGET_EFAULT
;
12982 #ifdef TARGET_NR_timer_settime
12983 case TARGET_NR_timer_settime
:
12985 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12986 * struct itimerspec * old_value */
12987 target_timer_t timerid
= get_timer_id(arg1
);
12991 } else if (arg3
== 0) {
12992 ret
= -TARGET_EINVAL
;
12994 timer_t htimer
= g_posix_timers
[timerid
];
12995 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12997 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12998 return -TARGET_EFAULT
;
13001 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
13002 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
13003 return -TARGET_EFAULT
;
13010 #ifdef TARGET_NR_timer_settime64
13011 case TARGET_NR_timer_settime64
:
13013 target_timer_t timerid
= get_timer_id(arg1
);
13017 } else if (arg3
== 0) {
13018 ret
= -TARGET_EINVAL
;
13020 timer_t htimer
= g_posix_timers
[timerid
];
13021 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
13023 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
13024 return -TARGET_EFAULT
;
13027 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
13028 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
13029 return -TARGET_EFAULT
;
13036 #ifdef TARGET_NR_timer_gettime
13037 case TARGET_NR_timer_gettime
:
13039 /* args: timer_t timerid, struct itimerspec *curr_value */
13040 target_timer_t timerid
= get_timer_id(arg1
);
13044 } else if (!arg2
) {
13045 ret
= -TARGET_EFAULT
;
13047 timer_t htimer
= g_posix_timers
[timerid
];
13048 struct itimerspec hspec
;
13049 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13051 if (host_to_target_itimerspec(arg2
, &hspec
)) {
13052 ret
= -TARGET_EFAULT
;
13059 #ifdef TARGET_NR_timer_gettime64
13060 case TARGET_NR_timer_gettime64
:
13062 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13063 target_timer_t timerid
= get_timer_id(arg1
);
13067 } else if (!arg2
) {
13068 ret
= -TARGET_EFAULT
;
13070 timer_t htimer
= g_posix_timers
[timerid
];
13071 struct itimerspec hspec
;
13072 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13074 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
13075 ret
= -TARGET_EFAULT
;
13082 #ifdef TARGET_NR_timer_getoverrun
13083 case TARGET_NR_timer_getoverrun
:
13085 /* args: timer_t timerid */
13086 target_timer_t timerid
= get_timer_id(arg1
);
13091 timer_t htimer
= g_posix_timers
[timerid
];
13092 ret
= get_errno(timer_getoverrun(htimer
));
13098 #ifdef TARGET_NR_timer_delete
13099 case TARGET_NR_timer_delete
:
13101 /* args: timer_t timerid */
13102 target_timer_t timerid
= get_timer_id(arg1
);
13107 timer_t htimer
= g_posix_timers
[timerid
];
13108 ret
= get_errno(timer_delete(htimer
));
13109 free_host_timer_slot(timerid
);
13115 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13116 case TARGET_NR_timerfd_create
:
13117 return get_errno(timerfd_create(arg1
,
13118 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
13121 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13122 case TARGET_NR_timerfd_gettime
:
13124 struct itimerspec its_curr
;
13126 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13128 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13129 return -TARGET_EFAULT
;
13135 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13136 case TARGET_NR_timerfd_gettime64
:
13138 struct itimerspec its_curr
;
13140 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13142 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13143 return -TARGET_EFAULT
;
13149 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13150 case TARGET_NR_timerfd_settime
:
13152 struct itimerspec its_new
, its_old
, *p_new
;
13155 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13156 return -TARGET_EFAULT
;
13163 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13165 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13166 return -TARGET_EFAULT
;
13172 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13173 case TARGET_NR_timerfd_settime64
:
13175 struct itimerspec its_new
, its_old
, *p_new
;
13178 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13179 return -TARGET_EFAULT
;
13186 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13188 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13189 return -TARGET_EFAULT
;
13195 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13196 case TARGET_NR_ioprio_get
:
13197 return get_errno(ioprio_get(arg1
, arg2
));
13200 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13201 case TARGET_NR_ioprio_set
:
13202 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13205 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13206 case TARGET_NR_setns
:
13207 return get_errno(setns(arg1
, arg2
));
13209 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13210 case TARGET_NR_unshare
:
13211 return get_errno(unshare(arg1
));
13213 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13214 case TARGET_NR_kcmp
:
13215 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13217 #ifdef TARGET_NR_swapcontext
13218 case TARGET_NR_swapcontext
:
13219 /* PowerPC specific. */
13220 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13222 #ifdef TARGET_NR_memfd_create
13223 case TARGET_NR_memfd_create
:
13224 p
= lock_user_string(arg1
);
13226 return -TARGET_EFAULT
;
13228 ret
= get_errno(memfd_create(p
, arg2
));
13229 fd_trans_unregister(ret
);
13230 unlock_user(p
, arg1
, 0);
13233 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13234 case TARGET_NR_membarrier
:
13235 return get_errno(membarrier(arg1
, arg2
));
13238 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13239 case TARGET_NR_copy_file_range
:
13241 loff_t inoff
, outoff
;
13242 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13245 if (get_user_u64(inoff
, arg2
)) {
13246 return -TARGET_EFAULT
;
13251 if (get_user_u64(outoff
, arg4
)) {
13252 return -TARGET_EFAULT
;
13256 /* Do not sign-extend the count parameter. */
13257 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13258 (abi_ulong
)arg5
, arg6
));
13259 if (!is_error(ret
) && ret
> 0) {
13261 if (put_user_u64(inoff
, arg2
)) {
13262 return -TARGET_EFAULT
;
13266 if (put_user_u64(outoff
, arg4
)) {
13267 return -TARGET_EFAULT
;
13275 #if defined(TARGET_NR_pivot_root)
13276 case TARGET_NR_pivot_root
:
13279 p
= lock_user_string(arg1
); /* new_root */
13280 p2
= lock_user_string(arg2
); /* put_old */
13282 ret
= -TARGET_EFAULT
;
13284 ret
= get_errno(pivot_root(p
, p2
));
13286 unlock_user(p2
, arg2
, 0);
13287 unlock_user(p
, arg1
, 0);
13293 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13294 return -TARGET_ENOSYS
;
13299 abi_long
do_syscall(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
13300 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13301 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13304 CPUState
*cpu
= env_cpu(cpu_env
);
13307 #ifdef DEBUG_ERESTARTSYS
13308 /* Debug-only code for exercising the syscall-restart code paths
13309 * in the per-architecture cpu main loops: restart every syscall
13310 * the guest makes once before letting it through.
13316 return -QEMU_ERESTARTSYS
;
13321 record_syscall_start(cpu
, num
, arg1
,
13322 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13324 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13325 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13328 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13329 arg5
, arg6
, arg7
, arg8
);
13331 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13332 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13333 arg3
, arg4
, arg5
, arg6
);
13336 record_syscall_return(cpu
, num
, ret
);