/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>

#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
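/*
 * Illustrative example (not part of the original code): glibc's NPTL
 * pthread_create() issues clone() with
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS and otherwise only "optional"
 * flags, so it clears CLONE_INVALID_THREAD_FLAGS and is emulated as a
 * host thread rather than rejected.
 */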
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)        \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)                  \
{                                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                        \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
                  type5,arg5)                                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)       \
{                                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);                  \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
                  type5,arg5,type6,arg6)                                        \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,       \
                  type6 arg6)                                                   \
{                                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);            \
}
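/*
 * For illustration: _syscall0(int, sys_gettid) below expands to
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 * which is why each wrapper is paired with a "#define __NR_sys_xxx
 * __NR_xxx" alias mapping the wrapper name onto the host syscall number.
 */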
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg)
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
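/*
 * Illustrative example (not part of the original code): each entry is
 * { target_mask, target_bits, host_mask, host_bits }, and the table is
 * walked by the bitmask translation helpers (target_to_host_bitmask()
 * and host_to_target_bitmask(), defined elsewhere in this file). A guest
 * open() flag word of TARGET_O_WRONLY | TARGET_O_CREAT thus becomes the
 * host's O_WRONLY | O_CREAT even when the numeric values differ.
 */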
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
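/*
 * For illustration: safe_syscall2(int, kill, pid_t, pid, int, sig) below
 * expands to
 *     static int safe_kill(pid_t pid, int sig)
 *     {
 *         return safe_syscall(__NR_kill, pid, sig);
 *     }
 * safe_syscall() (implemented in the per-host safe-syscall assembly)
 * guarantees that a guest signal arriving before the host syscall has
 * actually started makes the wrapper fail with errno set to
 * TARGET_ERESTARTSYS, so the guest syscall can be restarted instead of
 * racing with the signal.
 */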
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC includes a sign extension of "first". */
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
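/*
 * Illustrative use (not part of the original code): even on a 32-bit
 * host, callers write
 *     struct flock64 fl;
 *     safe_fcntl(fd, F_GETLK64, &fl);
 * and always get 64-bit file offsets, because the macro resolves to
 * __NR_fcntl64 whenever the host provides it.
 */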
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
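/*
 * Example: host_to_target_sock_type(SOCK_DGRAM | SOCK_CLOEXEC) returns
 * TARGET_SOCK_DGRAM | TARGET_SOCK_CLOEXEC: the low nibble is translated
 * through the switch above, and the SOCK_CLOEXEC/SOCK_NONBLOCK modifier
 * bits are OR'd back in afterwards.
 */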
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
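/*
 * Worked example (assuming 4 KiB host pages): with target_brk ==
 * brk_page == 0x40001000, a guest brk(0x40003800) overshoots brk_page,
 * so new_alloc_size = HOST_PAGE_ALIGN(0x2800) = 0x3000 and three pages
 * are mapped at 0x40001000; on success target_brk becomes 0x40003800
 * and brk_page becomes HOST_PAGE_ALIGN(0x40003800) = 0x40004000.
 */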
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
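/*
 * Worked example of the packing above: for a 32-bit target ABI
 * (TARGET_ABI_BITS == 32), n == 70 descriptors needs
 * nw = DIV_ROUND_UP(70, 32) = 3 abi_ulongs, and descriptor 35 lives in
 * bit (35 % 32) == 3 of word 35 / 32 == 1, independent of the host's
 * own fd_set layout.
 */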
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
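/*
 * Example: on an Alpha host (HOST_HZ == 1024) emulating a target with
 * TARGET_HZ == 100, 2048 host ticks scale to (2048 * 100) / 1024 = 200
 * target clock ticks; the int64_t cast avoids overflow in the
 * intermediate product.
 */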
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else if (target_to_host_timespec(&ts, ts_addr)) {
            return -TARGET_EFAULT;
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);

            sig_ptr = &sig;
        } else {
            sig.set = NULL;
            sig_ptr = &sig;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
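/*
 * Example of the AF_UNIX fixup above: a guest that binds "/tmp/sock"
 * but passes addrlen == offsetof(struct sockaddr_un, sun_path) +
 * strlen("/tmp/sock") omits the terminating NUL; since cp[len-1] is the
 * last path byte (non-zero) and cp[len] is zero, len is bumped by one
 * so the host kernel sees a properly terminated sun_path.
 */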
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1826 struct msghdr
*msgh
)
1828 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1829 abi_long msg_controllen
;
1830 abi_ulong target_cmsg_addr
;
1831 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1832 socklen_t space
= 0;
1834 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1835 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1837 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1838 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1839 target_cmsg_start
= target_cmsg
;
1841 return -TARGET_EFAULT
;
1843 while (cmsg
&& target_cmsg
) {
1844 void *data
= CMSG_DATA(cmsg
);
1845 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1847 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1848 int tgt_len
, tgt_space
;
1850 /* We never copy a half-header but may copy half-data;
1851 * this is Linux's behaviour in put_cmsg(). Note that
1852 * truncation here is a guest problem (which we report
1853 * to the guest via the CTRUNC bit), unlike truncation
1854 * in target_to_host_cmsg, which is a QEMU bug.
1856 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1857 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1861 if (cmsg
->cmsg_level
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         tgt_space);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
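
#if 0
/*
 * Illustrative sketch (not part of the original file): the fallback for
 * unsupported ancillary data above boils down to the pattern below --
 * copy the part of the payload that fits, zero-fill any excess
 * destination space, and report truncation so the caller can set its
 * MSG_CTRUNC analogue. The names are made up for the example; only
 * standard string.h/stddef.h facilities are assumed.
 */
#include <string.h>
#include <stddef.h>

/* Returns nonzero if the source payload had to be truncated. */
static int copy_payload_truncating(void *dst, size_t dst_len,
                                   const void *src, size_t src_len)
{
    size_t n = src_len < dst_len ? src_len : dst_len;

    memcpy(dst, src, n);                          /* copy what fits */
    if (dst_len > n) {
        memset((char *)dst + n, 0, dst_len - n);  /* pad the remainder */
    }
    return src_len > dst_len;
}
#endif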
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
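
#if 0
/*
 * Illustrative sketch (not part of the original file): the
 * TARGET_SO_RCVTIMEO path above exists because a timeval laid out by
 * the guest ABI need not match the host's struct timeval, so the value
 * is rebuilt field by field before calling the host setsockopt(). A
 * native-only rendering of the final call, with made-up names:
 */
#include <sys/socket.h>
#include <sys/time.h>

static int set_recv_timeout(int fd, long sec, long usec)
{
    struct timeval tv = { .tv_sec = sec, .tv_usec = usec };

    /* host-side equivalent of the converted setsockopt() call */
    return setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}
#endif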
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
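
#if 0
/*
 * Illustrative sketch (not part of the original file): the double
 * half-width shift in target_to_host_low_high() sidesteps C's undefined
 * behaviour for shifting a value by its full width. When the target
 * long is 64 bits, "thigh << 64" would be UB, while two shifts by 32
 * are well defined and simply discard thigh (tlow already holds the
 * whole offset). Standalone rendering with made-up names:
 */
#include <stdint.h>
#include <assert.h>

static uint64_t join_low_high(uint64_t low, uint64_t high, int bits)
{
    /* two half-width shifts instead of one full-width shift */
    return low | ((high << bits / 2) << bits / 2);
}

static void demo(void)
{
    /* 32-bit pair: high contributes the upper half of the offset */
    assert(join_low_high(0x89abcdefu, 0x01234567u, 32) ==
           0x0123456789abcdefull);
    /* 64-bit "pair": the two 32-bit shifts zero out high entirely */
    assert(join_low_high(0x0123456789abcdefull, 0xffffffffu, 64) ==
           0x0123456789abcdefull);
}
#endif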
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
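
#if 0
/*
 * Illustrative sketch (not part of the original file): when the host
 * lacks SOCK_NONBLOCK, non-blocking sockets can still be emulated by
 * setting O_NONBLOCK with fcntl() right after socket() succeeds --
 * the same fallback sock_flags_fixup() applies. Names are made up:
 */
#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

static int socket_nonblock_fallback(int domain, int type, int protocol)
{
    int fd = socket(domain, type, protocol);
    if (fd >= 0) {
        int flags = fcntl(fd, F_GETFL);
        if (flags < 0 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
            close(fd);          /* don't leak the fd on failure */
            return -1;
        }
    }
    return fd;
}
#endif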
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
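
#if 0
/*
 * Illustrative sketch (not part of the original file): the
 * MSG_WAITFORONE handling above implements recvmmsg()'s documented
 * behaviour -- block for the first datagram only, then poll for the
 * rest. The same idea in a plain recvmsg() loop, with made-up names:
 */
#include <sys/socket.h>

static int recv_batch(int fd, struct msghdr *hdrs, int n, int flags)
{
    int i;

    for (i = 0; i < n; i++) {
        if (recvmsg(fd, &hdrs[i], flags) < 0) {
            break;              /* report what was already received */
        }
        flags |= MSG_DONTWAIT;  /* later datagrams must not block */
    }
    return i;
}
#endif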
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
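
#if 0
/*
 * Illustrative sketch (not part of the original file): do_socketcall()
 * is a classic table-driven demultiplexer -- an argument-count table
 * indexed by operation number, one bounds-checked copy-in loop, then a
 * dispatch switch. Reduced to its skeleton with made-up names:
 */
static long demux(int op, const long *args_in)
{
    static const unsigned nargs[] = { [1] = 3, [2] = 2 };
    long a[6];
    unsigned i;

    if (op < 1 || op >= (int)(sizeof(nargs) / sizeof(nargs[0]))) {
        return -1;              /* out-of-range operation */
    }
    for (i = 0; i < nargs[op]; i++) {
        a[i] = args_in[i];      /* the real code copies from guest memory */
    }
    switch (op) {
    case 1: return a[0] + a[1] + a[2];
    case 2: return a[0] - a[1];
    default: return -1;
    }
}
#endif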
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems * sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems * sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for (i = 0; i < nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
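
#if 0
/*
 * Illustrative sketch (not part of the original file): why the GETVAL/
 * SETVAL path above swaps the whole union and then the val field. When
 * a 32-bit "val" travels inside an 8-byte register value produced by a
 * big-endian 64-bit guest, it occupies the half that "val" does not
 * alias on the little-endian host; a full 8-byte swap moves it into
 * place and a second 4-byte swap fixes its byte order. Standalone
 * demonstration with made-up names (assumes a little-endian host):
 */
#include <stdint.h>
#include <assert.h>

static void demo(void)
{
    union { uint32_t val; uint64_t buf; } u;

    /* value as delivered from a big-endian 64-bit guest register,
     * with val = 0x11223344 in the upper half */
    u.buf = 0x1122334400000000ull;

    u.buf = __builtin_bswap64(u.buf);  /* move payload into val's half */
    u.val = __builtin_bswap32(u.val);  /* restore the field byte order */
    assert(u.val == 0x11223344u);
}
#endif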
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops * sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which pass the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#if defined(__sparc__)
/* For msgrcv, SPARC does not use the kludge on the final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr) {
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    } else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
                                   MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else {
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
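
/*
 * do_shmat() above records each successful attach in shm_regions[],
 * keyed by the guest start address, so that do_shmdt() below can find
 * the segment size again and clear the page flags for exactly the
 * range that was attached.
 */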
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096
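
/*
 * Illustration of the X-macro pattern above: a hypothetical line such as
 *     STRUCT(fiemap_extent, TYPE_ULONGLONG, ...)
 * in syscall_types.h expands first to an enum label
 * (STRUCT_fiemap_extent) and then, on the second inclusion, to its
 * argtype descriptor array (struct_fiemap_extent_def[]).  The argument
 * list shown here is only an example; see syscall_types.h for the real
 * definitions.
 */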
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the ifreq entries into the fixed size
             * buffer.  Allocate one that is large enough and use it instead.
             */
            host_ifconf = malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            return -TARGET_EFAULT;
        }
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}
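
/*
 * The rescaling above is needed because the target's ifreq layout
 * (described by ifreq_max_type) can differ in size from the host's
 * struct ifreq, so ifc_len must be converted from target units to host
 * units before the ioctl and back again afterwards.
 */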
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
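
/*
 * The reap above relies on the usercontext trick set up in
 * do_ioctl_usbdevfs_submiturb(): the kernel hands back the address of
 * the struct usbdevfs_urb we submitted, and subtracting
 * offsetof(struct live_urb, host_urb) recovers the enclosing live_urb
 * with its guest-side metadata (a hand-rolled container_of()).
 */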
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
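
/*
 * Overall shape of do_ioctl_dm() above: the variable-length payload
 * that follows struct dm_ioctl is converted into a host-sized bounce
 * buffer, the ioctl runs, and the result payload is converted back
 * entry by entry; when the guest buffer cannot hold the converted
 * results, DM_BUFFER_FULL_FLAG is set just as the kernel would do.
 */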
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif

#ifdef HAVE_DRM_H
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}

static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}

static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}

#endif /* HAVE_DRM_H */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
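
/*
 * Illustration: an entry in ioctls.h such as
 *     IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 * expands to
 *     { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONG) } },
 * pairing the target command number with the host one plus a thunk
 * type description that do_ioctl() below uses to convert the argument.
 */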
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
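
/*
 * Each bitmask_transtbl entry below is
 *     { target_mask, target_bits, host_mask, host_bits }:
 * target bits selected by target_mask are translated to host_bits
 * within host_mask.  For single-bit flags all four values coincide;
 * for multi-bit fields (NLDLY, CSIZE, CBAUD, ...) several entries
 * share the same mask, one per possible field value.
 */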
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
    { 0, 0, 0, 0 }
};
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
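
/*
 * termios cannot be converted by the generic thunk machinery because
 * the c_cc control-character indices (VINTR, VMIN, ...) differ between
 * target and host, so struct_termios_def registers the hand-written
 * converters above instead of a field-by-field description.
 */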
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0              &&
             read_exec_only == 1        &&
             seg_32bit == 0             &&
             limit_in_pages == 0        &&
             seg_not_present == 1       &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
          (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
          (ldt_info.limit & 0xf0000) |
          ((read_exec_only ^ 1) << 9) |
          (contents << 10) |
          ((seg_not_present ^ 1) << 15) |
          (seg_32bit << 22) |
          (limit_in_pages << 23) |
          (lm << 21) |
          0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
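
/*
 * entry_1/entry_2 above follow the standard x86 segment descriptor
 * layout: entry_1 packs the low 16 bits of the base with the low 16
 * bits of the limit, while entry_2 holds the remaining base and limit
 * bits together with the access flags (present, type, DB, G, AVL);
 * the constant 0x7000 sets the S bit and DPL=3 for a user segment.
 */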
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0              &&
             read_exec_only == 1        &&
             seg_32bit == 0             &&
             limit_in_pages == 0        &&
             seg_not_present == 1       &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
          (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
          (ldt_info.limit & 0xf0000) |
          ((read_exec_only ^ 1) << 9) |
          (contents << 10) |
          ((seg_not_present ^ 1) << 15) |
          (seg_32bit << 22) |
          (limit_in_pages << 23) |
          (useable << 20) |
          (lm << 21) |
          0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
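
/*
 * Thread start-up handshake: the parent holds clone_lock across all of
 * thread creation, the child signals readiness via info->cond and then
 * immediately blocks on clone_lock, so it cannot enter cpu_loop()
 * until the parent has finished initializing the TLS state and
 * released the lock in do_fork() below.
 */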
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers have the F_*LK* defined to 12, 13 and 14 and
     * they are not supported by the kernel. The glibc fcntl call actually
     * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
     * the syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
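/*
 * Illustrative note (not part of the original source): FLOCK_TRANSTBL is a
 * small X-macro.  Each function defines TRANSTBL_CONVERT to expand in the
 * direction it needs before instantiating the table; inside
 * target_to_host_flock the table expands to:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 *
 * so both conversion directions stay in sync from one list of lock types.
 */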
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch (cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
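/*
 * Illustrative flow (not part of the original source): a guest
 * fcntl(fd, F_GETLK, &lk) arrives here as do_fcntl(fd, TARGET_F_GETLK, arg),
 * and the lock description crosses the ABI boundary twice:
 *
 *     copy_from_user_flock(&fl64, arg);     // guest layout -> host layout
 *     safe_fcntl(fd, host_cmd, &fl64);      // real host syscall
 *     copy_to_user_flock(arg, &fl64);       // host layout -> guest layout
 *
 * Commands that only pass an integer (F_SETOWN, F_GETPIPE_SZ, ...) skip
 * the marshalling and forward arg unchanged.
 */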
static inline int high2lowuid(int uid)
{
    if (uid > 65535) {
        return 65534;
    } else {
        return uid;
    }
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535) {
        return 65534;
    } else {
        return gid;
    }
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1) {
        return -1;
    } else {
        return uid;
    }
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1) {
        return -1;
    } else {
        return gid;
    }
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
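/*
 * Illustrative note (not part of the original source): glibc's setuid()
 * broadcasts the credential change to every thread in the process to give
 * the POSIX "whole process" semantics.  The raw wrappers above instead
 * change only the calling thread, which is what a guest expecting Linux
 * kernel semantics observes:
 *
 *     sys_setuid(low2highuid(new_uid));   // this thread only
 *
 * versus the libc call that QEMU deliberately avoids here:
 *
 *     setuid(new_uid);                    // libc: all threads in process
 */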
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
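/*
 * Illustrative note (not part of the original source): an ioctl request
 * number packs direction, size, type and number into one word, roughly
 *
 *     cmd = (dir << DIR_SHIFT) | (size << SIZE_SHIFT)
 *         | (type << TYPE_SHIFT) | nr;     // shift names here are schematic
 *
 * A table entry whose size field is all ones (TARGET_IOC_SIZEMASK) acts as
 * a sentinel meaning "derive the size from the pointed-to thunk type": the
 * loop above rewrites only the size bits using thunk_type_size() and leaves
 * direction, type and number untouched.
 */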
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
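/*
 * Illustrative note (not part of the original source): on 32-bit guest
 * ABIs a 64-bit file offset is passed in two registers.  Some ABIs require
 * the pair to start on an even register, which inserts a padding argument;
 * when regpairs_aligned() reports that, the real halves live one slot
 * later, hence the arg2 = arg3, arg3 = arg4 shuffle before the halves are
 * recombined with target_offset64() (which half is high or low is
 * ABI/endianness dependent).
 */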
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
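/*
 * Illustrative note (not part of the original source): on a 32-bit host
 * built with a 64-bit time_t, struct timespec carries a 64-bit tv_sec, so
 * the sizeof() check above routes the call to __NR_futex_time64; a 32-bit
 * host with a 32-bit time_t falls through to the classic __NR_futex.
 * 64-bit hosts always have a 64-bit time_t and only define __NR_futex,
 * so they take the first branch unconditionally.
 */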
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  Then again, they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
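/*
 * Illustrative worked example (not part of the original source): suppose a
 * host child dies from a host signal numbered 10, and the guest numbering
 * maps that signal to 30.  wait4() returns status 0x000a; WIFSIGNALED() is
 * true, so the result is host_to_target_signal(10) | (0x000a & ~0x7f)
 * == 30 | 0 == 30.  A stopped child instead reports the signal in bits
 * 8..15, hence the << 8 in the WIFSTOPPED() branch.
 */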
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
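/*
 * Illustrative usage (not part of the original source):
 *
 *     is_proc_myself("/proc/self/maps", "maps")   -> 1
 *     is_proc_myself("/proc/1234/maps", "maps")   -> 1 iff getpid() == 1234
 *     is_proc_myself("/proc/999/maps",  "maps")   -> 0 for any other pid
 *     is_proc_myself("/tmp/maps",       "maps")   -> 0 (not under /proc)
 *
 * i.e. it recognises both the "self" spelling and the numeric pid of the
 * current process, so the emulated /proc entries stay consistent.
 */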
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
static int do_openat(void *cpu_env, int dirfd, const char *pathname,
                     int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir) {
            tmpdir = "/tmp";
        }
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
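/*
 * Illustrative note (not part of the original source): emulating another
 * /proc entry would take one fill callback plus one table row, e.g. a
 * hypothetical
 *
 *     static int open_self_status(void *cpu_env, int fd);
 *     ...
 *     { "status", open_self_status, is_proc_myself },
 *
 * The fill function writes the synthesized contents into a temporary fd,
 * which do_openat() then hands to the guest in place of the real file.
 */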
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
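/*
 * Illustrative worked example (not part of the original source): internal
 * timer index 3 is handed to the guest as 0x0caf0000 | 3 == 0x0caf0003.
 * On the way back, 0x0caf0003 & 0xffff0000 == TIMER_MAGIC passes the check
 * and 0x0caf0003 & 0xffff recovers index 3; any id the guest invents that
 * lacks the 0x0caf tag is rejected with -TARGET_EINVAL.
 */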
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
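/*
 * Illustrative note (not part of the original source): these helpers
 * re-chunk a CPU affinity bitmap between word sizes.  With a 32-bit guest
 * (abi_ulong is 4 bytes) on a 64-bit host, guest word 1 bit 5 denotes CPU
 * 1*32 + 5 == 37, which lands in host word 37 / 64 == 0 at bit position
 * 37 % 64 == 37.  Iterating bit by bit keeps the mapping correct for any
 * combination of guest and host word widths.
 */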
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch (num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = cpu->opaque;

            object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
            object_unref(OBJECT(cpu));
            /*
             * At this point the CPU should be unrealized and removed
             * from cpu lists. We can clean-up the rest of the thread
             * data without the lock held.
             */

            pthread_mutex_unlock(&clone_lock);

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(cpu, ts->child_tidptr),
                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
            }
            thread_cpu = NULL;
            g_free(ts);
            rcu_unregister_thread();
            pthread_exit(NULL);
        }

        pthread_mutex_unlock(&clone_lock);
        preexit_cleanup(cpu_env, arg1);
        _exit(arg1);
        return 0; /* avoid warning */
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        } else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(safe_read(arg1, p, arg3));
            if (ret >= 0 &&
                fd_trans_host_to_target_data(arg1)) {
                ret = fd_trans_host_to_target_data(arg1)(p, ret);
            }
            unlock_user(p, arg2, ret);
        }
        return ret;
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
            if (ret >= 0) {
                ret = get_errno(safe_write(arg1, copy, ret));
            }
            g_free(copy);
        } else {
            ret = get_errno(safe_write(arg1, p, arg3));
        }
        unlock_user(p, arg2, 0);
        return ret;
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
    case TARGET_NR_close:
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));
    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            info.si_pid = 0;
            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3,
                                    sizeof(target_siginfo_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
        {
            void *p2 = NULL;
            if (!arg2 || !arg4)
                return -TARGET_EFAULT;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;
            int total_size = 0;

            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                argc++;
            }
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                envc++;
            }

            argp = g_new0(char *, argc + 1);
            envp = g_new0(char *, envc + 1);

            for (gp = guest_argp, q = argp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            /* Although execve() is not an interruptible syscall it is
             * a special case where we must use the safe_syscall wrapper:
             * if we allow a signal to happen before we make the host
             * syscall then we will 'lose' it, because at the point of
             * execve the process leaves QEMU's control. So we use the
             * safe syscall wrapper to ensure that we either take the
             * signal as a guest signal, or else it does not happen
             * before the execve completes and makes it the other
             * program's problem.
             */
            ret = get_errno(safe_execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            for (gp = guest_argp, q = argp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }

            g_free(argp);
            g_free(envp);
        }
        return ret;
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
8664 #ifdef TARGET_NR_access
8665 case TARGET_NR_access
:
8666 if (!(p
= lock_user_string(arg1
))) {
8667 return -TARGET_EFAULT
;
8669 ret
= get_errno(access(path(p
), arg2
));
8670 unlock_user(p
, arg1
, 0);
8673 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8674 case TARGET_NR_faccessat
:
8675 if (!(p
= lock_user_string(arg2
))) {
8676 return -TARGET_EFAULT
;
8678 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8679 unlock_user(p
, arg2
, 0);
8682 #ifdef TARGET_NR_nice /* not on alpha */
8683 case TARGET_NR_nice
:
8684 return get_errno(nice(arg1
));
8686 case TARGET_NR_sync
:
8689 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8690 case TARGET_NR_syncfs
:
8691 return get_errno(syncfs(arg1
));
8693 case TARGET_NR_kill
:
8694 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8695 #ifdef TARGET_NR_rename
8696 case TARGET_NR_rename
:
8699 p
= lock_user_string(arg1
);
8700 p2
= lock_user_string(arg2
);
8702 ret
= -TARGET_EFAULT
;
8704 ret
= get_errno(rename(p
, p2
));
8705 unlock_user(p2
, arg2
, 0);
8706 unlock_user(p
, arg1
, 0);
8710 #if defined(TARGET_NR_renameat)
8711 case TARGET_NR_renameat
:
8714 p
= lock_user_string(arg2
);
8715 p2
= lock_user_string(arg4
);
8717 ret
= -TARGET_EFAULT
;
8719 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8720 unlock_user(p2
, arg4
, 0);
8721 unlock_user(p
, arg2
, 0);
8725 #if defined(TARGET_NR_renameat2)
8726 case TARGET_NR_renameat2
:
8729 p
= lock_user_string(arg2
);
8730 p2
= lock_user_string(arg4
);
8732 ret
= -TARGET_EFAULT
;
8734 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8736 unlock_user(p2
, arg4
, 0);
8737 unlock_user(p
, arg2
, 0);
8741 #ifdef TARGET_NR_mkdir
8742 case TARGET_NR_mkdir
:
8743 if (!(p
= lock_user_string(arg1
)))
8744 return -TARGET_EFAULT
;
8745 ret
= get_errno(mkdir(p
, arg2
));
8746 unlock_user(p
, arg1
, 0);
8749 #if defined(TARGET_NR_mkdirat)
8750 case TARGET_NR_mkdirat
:
8751 if (!(p
= lock_user_string(arg2
)))
8752 return -TARGET_EFAULT
;
8753 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8754 unlock_user(p
, arg2
, 0);
8757 #ifdef TARGET_NR_rmdir
8758 case TARGET_NR_rmdir
:
8759 if (!(p
= lock_user_string(arg1
)))
8760 return -TARGET_EFAULT
;
8761 ret
= get_errno(rmdir(p
));
8762 unlock_user(p
, arg1
, 0);
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
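    /*
     * times() reports CPU time in clock ticks both through the struct and
     * through its return value, so the tms fields and the returned tick
     * count each need host_to_target_clock_t() conversion plus the usual
     * byte swapping before being handed back to the guest.
     */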
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
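    /*
     * sigaction and rt_sigaction never install the guest's handler on the
     * host directly: do_sigaction() records the target's disposition while
     * QEMU's own host signal handler stays in place, which is why the
     * sigaction structs are converted field by field between the target
     * and QEMU-internal layouts here.
     */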
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
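    /*
     * The rt_ variants take an explicit sigsetsize argument which must
     * match the target's sigset_t exactly (checked against arg4 below);
     * this mirrors the kernel's own validation of the rt signal calls.
     */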
    case TARGET_NR_rt_sigprocmask:
        {
            int how;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
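    /*
     * Calls taking two guest path strings lock both before checking either:
     * lock_user_string() returns NULL for a bad guest address, and the
     * unconditional unlock_user() calls on the cleanup path below rely on
     * it accepting that NULL, so one exit path covers both the success and
     * the -TARGET_EFAULT cases.
     */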
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
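    /*
     * readlink on /proc/self/exe (and the /proc/<own-pid>/exe spelling) is
     * intercepted: is_proc_myself() spots the guest asking about its own
     * executable, and QEMU answers with exec_path -- the emulated binary --
     * rather than letting the host report the QEMU interpreter itself.
     */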
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real);
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
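    /*
     * mmap2 passes its file offset in units of the page size (historically
     * 4096 bytes) instead of bytes; the arg6 << MMAP_SHIFT below undoes
     * that scaling before the offset reaches target_mmap().
     */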
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                    return ret;
                }
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
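    /*
     * Three getdents strategies follow: when the host still provides the
     * legacy getdents syscall, records are converted either through a
     * bounce buffer (when the target dirent layout is narrower than the
     * host's) or byte-swapped in place; otherwise getdents is emulated on
     * top of getdents64 and the dirent64 records are repacked in place.
     */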
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
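    /*
     * lock_iovec() validates and maps a whole guest iovec array in one go;
     * on failure it returns NULL with errno already set, which is why the
     * error paths below report -host_to_target_errno(errno) rather than a
     * fixed -TARGET_EFAULT.
     */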
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
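    /*
     * prctl options are handled selectively: options whose arguments QEMU
     * can translate (or must emulate per target, such as the MIPS FP mode
     * and AArch64 SVE/PAC/MTE controls) get explicit cases, seccomp is
     * refused so the guest cannot filter the syscalls QEMU itself needs,
     * and the remaining value-only options fall through to the host
     * prctl().
     */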
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_s32(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#endif
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
        case TARGET_PR_SET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                            TARGET_PR_FP_MODE_FRE;

            /* If nothing to change, return right away, successfully.  */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
            /* Check the value is valid */
            if (arg2 & ~known_bits) {
                return -TARGET_EOPNOTSUPP;
            }
            /* Setting FRE without FR is not supported.  */
            if (new_fre && !new_fr) {
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                /* FR1 is not supported */
                return -TARGET_EOPNOTSUPP;
            }
            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                /* cannot set FR=0 */
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                /* Cannot set FRE=1 */
                return -TARGET_EOPNOTSUPP;
            }

            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32 ; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto.  The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
        case TARGET_PR_SET_TAGGED_ADDR_CTRL:
            {
                abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    valid_mask |= TARGET_PR_MTE_TCF_MASK;
                    valid_mask |= TARGET_PR_MTE_TAG_MASK;
                }

                if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
                    case TARGET_PR_MTE_TCF_NONE:
                    case TARGET_PR_MTE_TCF_SYNC:
                    case TARGET_PR_MTE_TCF_ASYNC:
                        break;
                    default:
                        return -EINVAL;
                    }

                    /*
                     * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
                     * Note that the syscall values are consistent with hw.
                     */
                    env->cp15.sctlr_el[1] =
                        deposit64(env->cp15.sctlr_el[1], 38, 2,
                                  arg2 >> TARGET_PR_MTE_TCF_SHIFT);

                    /*
                     * Write PR_MTE_TAG to GCR_EL1[Exclude].
                     * Note that the syscall uses an include mask,
                     * and hardware uses an exclude mask -- invert.
                     */
                    env->cp15.gcr_el1 =
                        deposit64(env->cp15.gcr_el1, 0, 16,
                                  ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
                    arm_rebuild_hflags(env);
                }
                return 0;
            }
        case TARGET_PR_GET_TAGGED_ADDR_CTRL:
            {
                abi_long ret = 0;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg2 || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (env->tagged_addr_enable) {
                    ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
                }
                if (cpu_isar_feature(aa64_mte, cpu)) {
                    /* See the SET_TAGGED_ADDR_CTRL case above.  */
                    ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
                            << TARGET_PR_MTE_TCF_SHIFT);
                    ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
                                    ~env->cp15.gcr_el1);
                }
                return ret;
            }
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
10912 #ifdef TARGET_NR_arch_prctl
10913 case TARGET_NR_arch_prctl
:
10914 return do_arch_prctl(cpu_env
, arg1
, arg2
);
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
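    /*
     * Background note, summarized from the capget(2)/capset(2) ABI: the
     * header's version field selects how many __user_cap_data_struct
     * elements follow it -- one for the original _LINUX_CAPABILITY_VERSION,
     * two for v2/v3 -- which is what the data_items logic below implements.
     * A v3 caller, for example, passes a 2-element array with the 64
     * capability bits split across the two 32-bit words of each field.
     */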
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);
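    /*
     * For both sendfile variants below: when the guest passes a non-NULL
     * offset pointer, the offset is copied in before the host call and the
     * updated value copied back afterwards, matching sendfile(2)'s contract
     * that *offset is advanced by the number of bytes written.  The _sal
     * helpers handle an abi_long-sized offset, the _s64 helpers the
     * explicit 64-bit one.
     */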
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
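    /*
     * The statx emulation below tries two strategies in order: if the host
     * kernel provides statx(2), forward the call and convert the returned
     * struct wholesale (it is assumed to be layout-identical across
     * architectures); otherwise fall back to fstatat(2) and synthesize a
     * target statx from struct stat, leaving fields that stat cannot
     * supply zeroed by the memset.
     */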
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
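    /*
     * A note on the *32 syscall family handled here and below: legacy
     * targets expose 16-bit uid_t/gid_t, so the plain syscalls squeeze
     * host ids through the high2low/low2high helpers (clamping values
     * that do not fit into the 16-bit overflow id), while the *32
     * variants pass 32-bit ids straight through.
     */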
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
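    /*
     * Background for the two osf_*sysinfo cases below: Linux/Alpha keeps
     * the IEEE software-completion control bits in a per-thread "swcr"
     * while the live status bits sit in the FPCR, starting at bit 35
     * (hence the ">> 35" when folding status into swcr).  QEMU mirrors
     * the kernel: trap-enable and mapping bits live in env->swcr, status
     * bits only in the emulated FPCR.
     */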
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
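    /*
     * All the fadvise cases below share one quirk worth noting: unlike
     * most syscalls wrapped here, posix_fadvise() returns the error number
     * directly instead of setting errno, so its result is mapped with
     * -host_to_target_errno(ret) rather than get_errno().
     */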
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
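    /*
     * Why fcntl64 needs the function-pointer indirection below: the ARM
     * OABI lays out struct flock64 with different alignment/padding than
     * EABI, so the copy-in/copy-out helpers are selected at runtime from
     * the eabi flag rather than chosen at compile time.
     */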
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
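    /*
     * Common shape of the xattr cases below: the value buffer is optional
     * (a NULL buffer with size 0 is how callers probe the required size),
     * so each case locks it only when the guest pointer is non-zero, locks
     * the path/name strings, and unlocks everything on every exit path.
     */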
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff) {
            ret = -TARGET_EINVAL;
        } else {
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
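    /*
     * The clock_* cases below come in pairs: the plain variant converts
     * via the target's legacy struct timespec (32-bit time_t on 32-bit
     * ABIs), while the *64/_time64 variant uses the kernel's 64-bit
     * timespec layout so 32-bit guests survive 2038.  The host call in
     * the middle is the same either way.
     */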
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
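    /*
     * utimensat semantics preserved below: a NULL times pointer means
     * "set both timestamps to now", so tsp stays NULL in that case rather
     * than pointing at a zeroed array; special tv_nsec values such as
     * UTIME_NOW/UTIME_OMIT pass through the timespec conversion unchanged.
     */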
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out,
                                   arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1,
                                                              fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
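    /*
     * For the wait variants below, events are staged in a host-side array
     * and then byte-swapped into guest memory one entry at a time;
     * maxevents is bounded by TARGET_EP_MAX_EVENTS up front so the
     * g_try_new() allocation cannot be driven arbitrarily large by the
     * guest.
     */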
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32: {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
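    /*
     * The POSIX timer cases below hand the guest a small integer index
     * into the g_posix_timers[] table, tagged with TIMER_MAGIC in the
     * high bits; get_timer_id() checks the magic and strips it again, so
     * a forged or stale guest timer id fails cleanly instead of indexing
     * the table out of range.
     */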
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
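/*
 * do_syscall() is the thin tracing wrapper around do_syscall1(): it emits
 * the tracing record and the -strace log line on entry, dispatches, then
 * logs the result and records the return value.  A minimal sketch of what
 * a guest syscall goes through:
 *
 *     guest trap -> cpu main loop -> do_syscall(env, num, args...)
 *                     -> do_syscall1() big switch (above)
 *                     -> get_errno(host syscall) -> target errno/result
 */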
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);