4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
120 #include <linux/btrfs.h>
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
126 #include "linux_loop.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
138 #define CLONE_IO 0x80000000 /* Clone io context */
141 /* We can't directly call the host clone syscall, because this will
142 * badly confuse libc (breaking mutexes, for example). So we must
143 * divide clone flags into:
144 * * flag combinations that look like pthread_create()
145 * * flag combinations that look like fork()
146 * * flags we can implement within QEMU itself
147 * * flags we can't support and will return an error for
149 /* For thread creation, all these flags must be present; for
150 * fork, none must be present.
152 #define CLONE_THREAD_FLAGS \
153 (CLONE_VM | CLONE_FS | CLONE_FILES | \
154 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
156 /* These flags are ignored:
157 * CLONE_DETACHED is now ignored by the kernel;
158 * CLONE_IO is just an optimisation hint to the I/O scheduler
160 #define CLONE_IGNORED_FLAGS \
161 (CLONE_DETACHED | CLONE_IO)
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS \
165 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
166 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS \
170 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
171 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
173 #define CLONE_INVALID_FORK_FLAGS \
174 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
176 #define CLONE_INVALID_THREAD_FLAGS \
177 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
178 CLONE_IGNORED_FLAGS))
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181 * have almost all been allocated. We cannot support any of
182 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184 * The checks against the invalid thread masks above will catch these.
185 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189 * once. This exercises the codepaths for restart.
191 //#define DEBUG_ERESTARTSYS
193 //#include <linux/msdos_fs.h>
194 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
195 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
205 #define _syscall0(type,name) \
206 static type name (void) \
208 return syscall(__NR_##name); \
211 #define _syscall1(type,name,type1,arg1) \
212 static type name (type1 arg1) \
214 return syscall(__NR_##name, arg1); \
217 #define _syscall2(type,name,type1,arg1,type2,arg2) \
218 static type name (type1 arg1,type2 arg2) \
220 return syscall(__NR_##name, arg1, arg2); \
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
224 static type name (type1 arg1,type2 arg2,type3 arg3) \
226 return syscall(__NR_##name, arg1, arg2, arg3); \
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
239 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
244 type5,arg5,type6,arg6) \
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
248 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid
)
288 /* For the 64-bit guest on 32-bit host case we must emulate
289 * getdents using getdents64, because otherwise the host
290 * might hand us back more dirent records than we can fit
291 * into the guest buffer after structure format conversion.
292 * Otherwise we emulate getdents with getdents if the host has it.
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
301 #if (defined(TARGET_NR_getdents) && \
302 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
308 loff_t
*, res
, uint
, wh
);
310 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
311 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
313 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group
,int,error_code
)
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address
,int *,tidptr
)
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
322 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
326 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
330 unsigned long *, user_mask_ptr
);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
333 unsigned long *, user_mask_ptr
);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
336 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
338 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
339 struct __user_cap_data_struct
*, data
);
340 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
341 struct __user_cap_data_struct
*, data
);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get
, int, which
, int, who
)
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
354 unsigned long, idx1
, unsigned long, idx2
)
358 * It is assumed that struct statx is architecture independent.
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
362 unsigned int, mask
, struct target_statx
*, statxbuf
)
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier
, int, cmd
, int, flags
)
/*
 * Bidirectional translation table for open(2)/fcntl(2) flag bits
 * between the guest (TARGET_O_*) and host (O_*) encodings.  Each row
 * is { target_mask, target_bits, host_mask, host_bits }.  Entries for
 * flags that may be absent on some hosts are guarded by #if.
 * NOTE(review): terminator rows reconstructed — original structural
 * lines were elided from this view.
 */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  /* O_SYNC is a two-bit field on Linux: O_DSYNC is one of the bits,
   * so it must be matched under the full TARGET_O_SYNC mask. */
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
404 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
410 const struct timespec
*,tsp
,int,flags
)
412 static int sys_utimensat(int dirfd
, const char *pathname
,
413 const struct timespec times
[2], int flags
)
419 #endif /* TARGET_NR_utimensat */
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
425 const char *, new, unsigned int, flags
)
427 static int sys_renameat2(int oldfd
, const char *old
,
428 int newfd
, const char *new, int flags
)
431 return renameat(oldfd
, old
, newfd
, new);
437 #endif /* TARGET_NR_renameat2 */
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin host wrapper: create a new inotify instance and return its fd
 * (or -1 with errno set, as per inotify_init(2)). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin host wrapper: attach a watch for 'mask' events on 'pathname'
 * to the inotify instance 'fd'; returns the watch descriptor or -1. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin host wrapper: remove watch descriptor 'wd' from the inotify
 * instance 'fd'; returns 0 on success, -1 with errno on failure. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin host wrapper: like sys_inotify_init() but forwards 'flags'
 * (IN_CLOEXEC / IN_NONBLOCK) to inotify_init1(2). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
469 /* Userspace can usually survive runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY */
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be that used by the underlying syscall */
482 struct host_rlimit64
{
486 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
487 const struct host_rlimit64
*, new_limit
,
488 struct host_rlimit64
*, old_limit
)
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers
[32] = { 0, } ;
496 static inline int next_free_host_timer(void)
499 /* FIXME: Does finding the next free slot require a lock? */
500 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
501 if (g_posix_timers
[k
] == 0) {
502 g_posix_timers
[k
] = (timer_t
) 1;
510 #define ERRNO_TABLE_SIZE 1200
512 /* target_to_host_errno_table[] is initialized from
513 * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
518 * This list is the union of errno values overridden in asm-<arch>/errno.h
519 * minus the errnos that are not actually generic to all archs.
521 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
522 [EAGAIN
] = TARGET_EAGAIN
,
523 [EIDRM
] = TARGET_EIDRM
,
524 [ECHRNG
] = TARGET_ECHRNG
,
525 [EL2NSYNC
] = TARGET_EL2NSYNC
,
526 [EL3HLT
] = TARGET_EL3HLT
,
527 [EL3RST
] = TARGET_EL3RST
,
528 [ELNRNG
] = TARGET_ELNRNG
,
529 [EUNATCH
] = TARGET_EUNATCH
,
530 [ENOCSI
] = TARGET_ENOCSI
,
531 [EL2HLT
] = TARGET_EL2HLT
,
532 [EDEADLK
] = TARGET_EDEADLK
,
533 [ENOLCK
] = TARGET_ENOLCK
,
534 [EBADE
] = TARGET_EBADE
,
535 [EBADR
] = TARGET_EBADR
,
536 [EXFULL
] = TARGET_EXFULL
,
537 [ENOANO
] = TARGET_ENOANO
,
538 [EBADRQC
] = TARGET_EBADRQC
,
539 [EBADSLT
] = TARGET_EBADSLT
,
540 [EBFONT
] = TARGET_EBFONT
,
541 [ENOSTR
] = TARGET_ENOSTR
,
542 [ENODATA
] = TARGET_ENODATA
,
543 [ETIME
] = TARGET_ETIME
,
544 [ENOSR
] = TARGET_ENOSR
,
545 [ENONET
] = TARGET_ENONET
,
546 [ENOPKG
] = TARGET_ENOPKG
,
547 [EREMOTE
] = TARGET_EREMOTE
,
548 [ENOLINK
] = TARGET_ENOLINK
,
549 [EADV
] = TARGET_EADV
,
550 [ESRMNT
] = TARGET_ESRMNT
,
551 [ECOMM
] = TARGET_ECOMM
,
552 [EPROTO
] = TARGET_EPROTO
,
553 [EDOTDOT
] = TARGET_EDOTDOT
,
554 [EMULTIHOP
] = TARGET_EMULTIHOP
,
555 [EBADMSG
] = TARGET_EBADMSG
,
556 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
557 [EOVERFLOW
] = TARGET_EOVERFLOW
,
558 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
559 [EBADFD
] = TARGET_EBADFD
,
560 [EREMCHG
] = TARGET_EREMCHG
,
561 [ELIBACC
] = TARGET_ELIBACC
,
562 [ELIBBAD
] = TARGET_ELIBBAD
,
563 [ELIBSCN
] = TARGET_ELIBSCN
,
564 [ELIBMAX
] = TARGET_ELIBMAX
,
565 [ELIBEXEC
] = TARGET_ELIBEXEC
,
566 [EILSEQ
] = TARGET_EILSEQ
,
567 [ENOSYS
] = TARGET_ENOSYS
,
568 [ELOOP
] = TARGET_ELOOP
,
569 [ERESTART
] = TARGET_ERESTART
,
570 [ESTRPIPE
] = TARGET_ESTRPIPE
,
571 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
572 [EUSERS
] = TARGET_EUSERS
,
573 [ENOTSOCK
] = TARGET_ENOTSOCK
,
574 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
575 [EMSGSIZE
] = TARGET_EMSGSIZE
,
576 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
577 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
578 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
579 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
580 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
581 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
582 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
583 [EADDRINUSE
] = TARGET_EADDRINUSE
,
584 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
585 [ENETDOWN
] = TARGET_ENETDOWN
,
586 [ENETUNREACH
] = TARGET_ENETUNREACH
,
587 [ENETRESET
] = TARGET_ENETRESET
,
588 [ECONNABORTED
] = TARGET_ECONNABORTED
,
589 [ECONNRESET
] = TARGET_ECONNRESET
,
590 [ENOBUFS
] = TARGET_ENOBUFS
,
591 [EISCONN
] = TARGET_EISCONN
,
592 [ENOTCONN
] = TARGET_ENOTCONN
,
593 [EUCLEAN
] = TARGET_EUCLEAN
,
594 [ENOTNAM
] = TARGET_ENOTNAM
,
595 [ENAVAIL
] = TARGET_ENAVAIL
,
596 [EISNAM
] = TARGET_EISNAM
,
597 [EREMOTEIO
] = TARGET_EREMOTEIO
,
598 [EDQUOT
] = TARGET_EDQUOT
,
599 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
600 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
601 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
602 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
603 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
604 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
605 [EALREADY
] = TARGET_EALREADY
,
606 [EINPROGRESS
] = TARGET_EINPROGRESS
,
607 [ESTALE
] = TARGET_ESTALE
,
608 [ECANCELED
] = TARGET_ECANCELED
,
609 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
610 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
612 [ENOKEY
] = TARGET_ENOKEY
,
615 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
618 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
621 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
624 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
626 #ifdef ENOTRECOVERABLE
627 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
630 [ENOMSG
] = TARGET_ENOMSG
,
633 [ERFKILL
] = TARGET_ERFKILL
,
636 [EHWPOISON
] = TARGET_EHWPOISON
,
/*
 * Translate a host errno value into the guest's errno numbering using
 * host_to_target_errno_table[].  Values outside the table's range, or
 * with no explicit mapping (table entry 0), pass through unchanged —
 * most errno values are identical on host and guest.
 * NOTE(review): trailing fall-through return reconstructed; the line
 * was elided from this view.
 */
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}
/*
 * Inverse of host_to_target_errno(): map a guest errno value back to
 * the host's numbering via target_to_host_errno_table[] (which is
 * filled in from the forward table at syscall_init() time, per the
 * comment at its definition).  Unmapped values pass through unchanged.
 * NOTE(review): trailing fall-through return reconstructed; the line
 * was elided from this view.
 */
static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}
/*
 * Convert a host syscall result into the guest convention: on failure
 * (-1) return the negated, guest-numbered errno; otherwise return the
 * result unchanged.
 * NOTE(review): the success branch was elided from this view and has
 * been reconstructed — confirm against the upstream source.
 */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}
/*
 * Return a human-readable string for a guest errno value.
 * Handles QEMU's two internal pseudo-errnos specially, returns NULL
 * for values outside the translation table's range, and otherwise
 * defers to the host strerror() on the back-translated value.
 */
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    /* Out-of-range values have no translation. */
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
684 return safe_syscall(__NR_##name); \
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
690 return safe_syscall(__NR_##name, arg1); \
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
696 return safe_syscall(__NR_##name, arg1, arg2); \
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
702 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
709 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713 type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
717 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721 type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723 type5 arg5, type6 arg6) \
725 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
728 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
729 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
730 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
731 int, flags
, mode_t
, mode
)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
734 struct rusage
*, rusage
)
736 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
737 int, options
, struct rusage
*, rusage
)
738 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
742 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
746 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
749 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
750 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
754 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
758 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
760 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
761 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
762 safe_syscall2(int, tkill
, int, tid
, int, sig
)
763 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
764 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
765 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
766 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
767 unsigned long, pos_l
, unsigned long, pos_h
)
768 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
769 unsigned long, pos_l
, unsigned long, pos_h
)
770 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
772 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
773 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
774 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
775 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
776 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
777 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
778 safe_syscall2(int, flock
, int, fd
, int, operation
)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
781 const struct timespec
*, uts
, size_t, sigsetsize
)
783 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
787 struct timespec
*, rem
)
789 #if defined(TARGET_NR_clock_nanosleep) || \
790 defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
792 const struct timespec
*, req
, struct timespec
*, rem
)
796 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
799 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
800 void *, ptr
, long, fifth
)
804 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
808 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
809 long, msgtype
, int, flags
)
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
813 unsigned, nsops
, const struct timespec
*, timeout
)
815 #if defined(TARGET_NR_mq_timedsend) || \
816 defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
818 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
820 #if defined(TARGET_NR_mq_timedreceive) || \
821 defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
823 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
827 int, outfd
, loff_t
*, poutoff
, size_t, length
,
831 /* We do ioctl like this rather than via safe_syscall3 to preserve the
832 * "third argument might be integer or pointer or not present" behaviour of
835 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836 /* Similarly for fcntl. Note that callers must always:
837 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838 * use the flock64 struct rather than unsuffixed flock
839 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
842 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
844 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
/*
 * Translate a host socket type (the low SOCK_TYPE_MASK nibble plus the
 * optional SOCK_CLOEXEC / SOCK_NONBLOCK modifier bits) into the guest
 * encoding.  Only DGRAM and STREAM are remapped explicitly; other base
 * types pass through — assumed to share numbering on host and guest
 * (TODO confirm for exotic types).
 * NOTE(review): case labels reconstructed; those lines were elided
 * from this view.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
878 static abi_ulong target_brk
;
879 static abi_ulong target_original_brk
;
880 static abi_ulong brk_page
;
/*
 * Record the guest's initial program break.  Both the original break
 * (the floor do_brk() will never shrink below) and the current break
 * are set to the host-page-aligned value; brk_page tracks the top of
 * the last host page reserved for the guest heap.
 */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
888 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889 #define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the conventional "query the current break" call. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Never shrink below the initial break set by target_set_brk(). */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
969 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
970 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Unpack a guest fd_set — an array of abi_ulong bitmask words at
 * target_fds_addr covering fds [0, n) — into the host fd_set 'fds'.
 * Returns 0 on success or -TARGET_EFAULT if the guest buffer cannot
 * be locked.
 * NOTE(review): loop interior (FD_SET on each set bit) reconstructed;
 * those lines were elided from this view — confirm against upstream.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of guest bitmask words needed for n fds. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
/*
 * Wrapper around copy_from_user_fdset() that models select(2)'s
 * "NULL fd_set" convention: if the guest passed a NULL address,
 * *fds_ptr is set to NULL; otherwise 'fds' is filled in and *fds_ptr
 * points at it.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
/*
 * Inverse of copy_from_user_fdset(): pack the host fd_set 'fds' for
 * fds [0, n) into guest bitmask words at target_fds_addr.  Returns 0
 * on success or -TARGET_EFAULT if the guest buffer cannot be locked.
 * NOTE(review): local declarations and parameter list partially
 * reconstructed; those lines were elided from this view.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* Accumulate one guest word, one fd bit at a time. */
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
1048 #if defined(__alpha__)
1049 #define HOST_HZ 1024
1054 static inline abi_long
host_to_target_clock_t(long ticks
)
1056 #if HOST_HZ == TARGET_HZ
1059 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1063 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1064 const struct rusage
*rusage
)
1066 struct target_rusage
*target_rusage
;
1068 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1069 return -TARGET_EFAULT
;
1070 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1071 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1072 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1073 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1074 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1075 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1076 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1077 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1078 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1079 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1080 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1081 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1082 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1083 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1084 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1085 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1086 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1087 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1088 unlock_user_struct(target_rusage
, target_addr
, 1);
#ifdef TARGET_NR_setrlimit
/*
 * Convert a guest rlimit value into a host rlim_t.
 * The target's "infinity" sentinel maps to the host RLIM_INFINITY,
 * as does any value that would not survive the round-trip through
 * rlim_t (i.e. does not fit).
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped;
    rlim_t host_val;

    swapped = tswapal(target_rlim);
    if (swapped == TARGET_RLIM_INFINITY) {
        return RLIM_INFINITY;
    }

    host_val = swapped;
    /* If the value does not fit in rlim_t, saturate to infinity. */
    if (swapped != (rlim_t)host_val) {
        return RLIM_INFINITY;
    }

    return host_val;
}
#endif
1111 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1112 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1114 abi_ulong target_rlim_swap
;
1117 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1118 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1120 target_rlim_swap
= rlim
;
1121 result
= tswapal(target_rlim_swap
);
1127 static inline int target_to_host_resource(int code
)
1130 case TARGET_RLIMIT_AS
:
1132 case TARGET_RLIMIT_CORE
:
1134 case TARGET_RLIMIT_CPU
:
1136 case TARGET_RLIMIT_DATA
:
1138 case TARGET_RLIMIT_FSIZE
:
1139 return RLIMIT_FSIZE
;
1140 case TARGET_RLIMIT_LOCKS
:
1141 return RLIMIT_LOCKS
;
1142 case TARGET_RLIMIT_MEMLOCK
:
1143 return RLIMIT_MEMLOCK
;
1144 case TARGET_RLIMIT_MSGQUEUE
:
1145 return RLIMIT_MSGQUEUE
;
1146 case TARGET_RLIMIT_NICE
:
1148 case TARGET_RLIMIT_NOFILE
:
1149 return RLIMIT_NOFILE
;
1150 case TARGET_RLIMIT_NPROC
:
1151 return RLIMIT_NPROC
;
1152 case TARGET_RLIMIT_RSS
:
1154 case TARGET_RLIMIT_RTPRIO
:
1155 return RLIMIT_RTPRIO
;
1156 case TARGET_RLIMIT_SIGPENDING
:
1157 return RLIMIT_SIGPENDING
;
1158 case TARGET_RLIMIT_STACK
:
1159 return RLIMIT_STACK
;
1165 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1166 abi_ulong target_tv_addr
)
1168 struct target_timeval
*target_tv
;
1170 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1171 return -TARGET_EFAULT
;
1174 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1175 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1177 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1182 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1183 const struct timeval
*tv
)
1185 struct target_timeval
*target_tv
;
1187 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1188 return -TARGET_EFAULT
;
1191 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1192 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1194 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1199 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1200 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1201 abi_ulong target_tv_addr
)
1203 struct target__kernel_sock_timeval
*target_tv
;
1205 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1206 return -TARGET_EFAULT
;
1209 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1210 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1212 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1218 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1219 const struct timeval
*tv
)
1221 struct target__kernel_sock_timeval
*target_tv
;
1223 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1224 return -TARGET_EFAULT
;
1227 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1228 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1230 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1235 #if defined(TARGET_NR_futex) || \
1236 defined(TARGET_NR_rt_sigtimedwait) || \
1237 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1238 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1239 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1240 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1241 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1242 defined(TARGET_NR_timer_settime) || \
1243 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1244 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1245 abi_ulong target_addr
)
1247 struct target_timespec
*target_ts
;
1249 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1250 return -TARGET_EFAULT
;
1252 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1253 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1254 unlock_user_struct(target_ts
, target_addr
, 0);
1259 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1260 defined(TARGET_NR_timer_settime64) || \
1261 defined(TARGET_NR_mq_timedsend_time64) || \
1262 defined(TARGET_NR_mq_timedreceive_time64) || \
1263 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1264 defined(TARGET_NR_clock_nanosleep_time64) || \
1265 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1266 defined(TARGET_NR_utimensat) || \
1267 defined(TARGET_NR_utimensat_time64) || \
1268 defined(TARGET_NR_semtimedop_time64) || \
1269 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1270 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1271 abi_ulong target_addr
)
1273 struct target__kernel_timespec
*target_ts
;
1275 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1276 return -TARGET_EFAULT
;
1278 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1279 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1280 /* in 32bit mode, this drops the padding */
1281 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1282 unlock_user_struct(target_ts
, target_addr
, 0);
1287 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1288 struct timespec
*host_ts
)
1290 struct target_timespec
*target_ts
;
1292 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1293 return -TARGET_EFAULT
;
1295 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1296 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1297 unlock_user_struct(target_ts
, target_addr
, 1);
1301 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1302 struct timespec
*host_ts
)
1304 struct target__kernel_timespec
*target_ts
;
1306 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1307 return -TARGET_EFAULT
;
1309 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1310 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1311 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_gettimeofday)
/*
 * Write the host struct timezone *tz out to the guest struct at
 * target_tz_addr.  Returns 0 on success, -TARGET_EFAULT otherwise.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *ttz;

    if (!lock_user_struct(VERIFY_WRITE, ttz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &ttz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &ttz->tz_dsttime);

    unlock_user_struct(ttz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
/*
 * Read the guest struct timezone at target_tz_addr into the host *tz.
 * Returns 0 on success, -TARGET_EFAULT on an unreadable address.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *ttz;

    if (!lock_user_struct(VERIFY_READ, ttz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &ttz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &ttz->tz_dsttime);

    unlock_user_struct(ttz, target_tz_addr, 0);

    return 0;
}
#endif
1353 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1356 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1357 abi_ulong target_mq_attr_addr
)
1359 struct target_mq_attr
*target_mq_attr
;
1361 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1362 target_mq_attr_addr
, 1))
1363 return -TARGET_EFAULT
;
1365 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1366 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1367 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1368 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1370 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1375 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1376 const struct mq_attr
*attr
)
1378 struct target_mq_attr
*target_mq_attr
;
1380 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1381 target_mq_attr_addr
, 0))
1382 return -TARGET_EFAULT
;
1384 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1385 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1386 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1387 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1389 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1395 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1396 /* do_select() must return target values and target errnos. */
1397 static abi_long
do_select(int n
,
1398 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1399 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1401 fd_set rfds
, wfds
, efds
;
1402 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1404 struct timespec ts
, *ts_ptr
;
1407 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1411 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1415 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1420 if (target_tv_addr
) {
1421 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1422 return -TARGET_EFAULT
;
1423 ts
.tv_sec
= tv
.tv_sec
;
1424 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1430 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1433 if (!is_error(ret
)) {
1434 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1435 return -TARGET_EFAULT
;
1436 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1437 return -TARGET_EFAULT
;
1438 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1439 return -TARGET_EFAULT
;
1441 if (target_tv_addr
) {
1442 tv
.tv_sec
= ts
.tv_sec
;
1443 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1444 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1445 return -TARGET_EFAULT
;
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * The "old" select ABI passes a single pointer to a struct holding all
 * five arguments; unpack it and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6 / pselect6_time64.  arg1..arg5 are n and the
 * three fd-set addresses plus the timespec address; arg6 points at a
 * two-word block packing the sigset pointer and its size.  time64
 * selects the 64-bit timespec layout.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else if (target_to_host_timespec(&ts, ts_addr)) {
            return -TARGET_EFAULT;
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);

            sig_ptr = &sig;
        } else {
            sig.set = NULL;
            sig_ptr = &sig;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/*
 * Implement poll/ppoll/ppoll_time64.  arg1 is the guest pollfd array,
 * arg2 the fd count; for ppoll, arg3 is a timespec address (layout
 * chosen by time64) and arg4/arg5 are the sigset address and size.
 * For plain poll, arg3 is a millisecond timeout (negative = infinite).
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll,
                         bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* Reject counts whose byte size would overflow an int. */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Like the kernel, we enforce correct size sigsets */
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        /* On success ppoll updates the remaining timeout. */
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
1708 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1711 return pipe2(host_pipe
, flags
);
1717 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1718 int flags
, int is_pipe2
)
1722 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1725 return get_errno(ret
);
1727 /* Several targets have special calling conventions for the original
1728 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1730 #if defined(TARGET_ALPHA)
1731 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1732 return host_pipe
[0];
1733 #elif defined(TARGET_MIPS)
1734 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1735 return host_pipe
[0];
1736 #elif defined(TARGET_SH4)
1737 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1738 return host_pipe
[0];
1739 #elif defined(TARGET_SPARC)
1740 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1741 return host_pipe
[0];
1745 if (put_user_s32(host_pipe
[0], pipedes
)
1746 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1747 return -TARGET_EFAULT
;
1748 return get_errno(ret
);
1751 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1752 abi_ulong target_addr
,
1755 struct target_ip_mreqn
*target_smreqn
;
1757 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1759 return -TARGET_EFAULT
;
1760 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1761 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1762 if (len
== sizeof(struct target_ip_mreqn
))
1763 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1764 unlock_user(target_smreqn
, target_addr
, 0);
1769 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1770 abi_ulong target_addr
,
1773 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1774 sa_family_t sa_family
;
1775 struct target_sockaddr
*target_saddr
;
1777 if (fd_trans_target_to_host_addr(fd
)) {
1778 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1781 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1783 return -TARGET_EFAULT
;
1785 sa_family
= tswap16(target_saddr
->sa_family
);
1787 /* Oops. The caller might send a incomplete sun_path; sun_path
1788 * must be terminated by \0 (see the manual page), but
1789 * unfortunately it is quite common to specify sockaddr_un
1790 * length as "strlen(x->sun_path)" while it should be
1791 * "strlen(...) + 1". We'll fix that here if needed.
1792 * Linux kernel has a similar feature.
1795 if (sa_family
== AF_UNIX
) {
1796 if (len
< unix_maxlen
&& len
> 0) {
1797 char *cp
= (char*)target_saddr
;
1799 if ( cp
[len
-1] && !cp
[len
] )
1802 if (len
> unix_maxlen
)
1806 memcpy(addr
, target_saddr
, len
);
1807 addr
->sa_family
= sa_family
;
1808 if (sa_family
== AF_NETLINK
) {
1809 struct sockaddr_nl
*nladdr
;
1811 nladdr
= (struct sockaddr_nl
*)addr
;
1812 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1813 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1814 } else if (sa_family
== AF_PACKET
) {
1815 struct target_sockaddr_ll
*lladdr
;
1817 lladdr
= (struct target_sockaddr_ll
*)addr
;
1818 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1819 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1821 unlock_user(target_saddr
, target_addr
, 0);
1826 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1827 struct sockaddr
*addr
,
1830 struct target_sockaddr
*target_saddr
;
1837 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1839 return -TARGET_EFAULT
;
1840 memcpy(target_saddr
, addr
, len
);
1841 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1842 sizeof(target_saddr
->sa_family
)) {
1843 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1845 if (addr
->sa_family
== AF_NETLINK
&&
1846 len
>= sizeof(struct target_sockaddr_nl
)) {
1847 struct target_sockaddr_nl
*target_nl
=
1848 (struct target_sockaddr_nl
*)target_saddr
;
1849 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1850 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1851 } else if (addr
->sa_family
== AF_PACKET
) {
1852 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1853 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1854 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1855 } else if (addr
->sa_family
== AF_INET6
&&
1856 len
>= sizeof(struct target_sockaddr_in6
)) {
1857 struct target_sockaddr_in6
*target_in6
=
1858 (struct target_sockaddr_in6
*)target_saddr
;
1859 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1861 unlock_user(target_saddr
, target_addr
, len
);
1866 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1867 struct target_msghdr
*target_msgh
)
1869 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1870 abi_long msg_controllen
;
1871 abi_ulong target_cmsg_addr
;
1872 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1873 socklen_t space
= 0;
1875 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1876 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1878 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1879 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1880 target_cmsg_start
= target_cmsg
;
1882 return -TARGET_EFAULT
;
1884 while (cmsg
&& target_cmsg
) {
1885 void *data
= CMSG_DATA(cmsg
);
1886 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1888 int len
= tswapal(target_cmsg
->cmsg_len
)
1889 - sizeof(struct target_cmsghdr
);
1891 space
+= CMSG_SPACE(len
);
1892 if (space
> msgh
->msg_controllen
) {
1893 space
-= CMSG_SPACE(len
);
1894 /* This is a QEMU bug, since we allocated the payload
1895 * area ourselves (unlike overflow in host-to-target
1896 * conversion, which is just the guest giving us a buffer
1897 * that's too small). It can't happen for the payload types
1898 * we currently support; if it becomes an issue in future
1899 * we would need to improve our allocation strategy to
1900 * something more intelligent than "twice the size of the
1901 * target buffer we're reading from".
1903 qemu_log_mask(LOG_UNIMP
,
1904 ("Unsupported ancillary data %d/%d: "
1905 "unhandled msg size\n"),
1906 tswap32(target_cmsg
->cmsg_level
),
1907 tswap32(target_cmsg
->cmsg_type
));
1911 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1912 cmsg
->cmsg_level
= SOL_SOCKET
;
1914 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1916 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1917 cmsg
->cmsg_len
= CMSG_LEN(len
);
1919 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1920 int *fd
= (int *)data
;
1921 int *target_fd
= (int *)target_data
;
1922 int i
, numfds
= len
/ sizeof(int);
1924 for (i
= 0; i
< numfds
; i
++) {
1925 __get_user(fd
[i
], target_fd
+ i
);
1927 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1928 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1929 struct ucred
*cred
= (struct ucred
*)data
;
1930 struct target_ucred
*target_cred
=
1931 (struct target_ucred
*)target_data
;
1933 __get_user(cred
->pid
, &target_cred
->pid
);
1934 __get_user(cred
->uid
, &target_cred
->uid
);
1935 __get_user(cred
->gid
, &target_cred
->gid
);
1937 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1938 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1939 memcpy(data
, target_data
, len
);
1942 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1943 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1946 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1948 msgh
->msg_controllen
= space
;
1952 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1953 struct msghdr
*msgh
)
1955 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1956 abi_long msg_controllen
;
1957 abi_ulong target_cmsg_addr
;
1958 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1959 socklen_t space
= 0;
1961 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1962 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1964 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1965 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1966 target_cmsg_start
= target_cmsg
;
1968 return -TARGET_EFAULT
;
1970 while (cmsg
&& target_cmsg
) {
1971 void *data
= CMSG_DATA(cmsg
);
1972 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1974 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1975 int tgt_len
, tgt_space
;
1977 /* We never copy a half-header but may copy half-data;
1978 * this is Linux's behaviour in put_cmsg(). Note that
1979 * truncation here is a guest problem (which we report
1980 * to the guest via the CTRUNC bit), unlike truncation
1981 * in target_to_host_cmsg, which is a QEMU bug.
1983 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1984 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1988 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1989 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1991 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1993 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1995 /* Payload types which need a different size of payload on
1996 * the target must adjust tgt_len here.
1999 switch (cmsg
->cmsg_level
) {
2001 switch (cmsg
->cmsg_type
) {
2003 tgt_len
= sizeof(struct target_timeval
);
2013 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
2014 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
2015 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
2018 /* We must now copy-and-convert len bytes of payload
2019 * into tgt_len bytes of destination space. Bear in mind
2020 * that in both source and destination we may be dealing
2021 * with a truncated value!
2023 switch (cmsg
->cmsg_level
) {
2025 switch (cmsg
->cmsg_type
) {
2028 int *fd
= (int *)data
;
2029 int *target_fd
= (int *)target_data
;
2030 int i
, numfds
= tgt_len
/ sizeof(int);
2032 for (i
= 0; i
< numfds
; i
++) {
2033 __put_user(fd
[i
], target_fd
+ i
);
2039 struct timeval
*tv
= (struct timeval
*)data
;
2040 struct target_timeval
*target_tv
=
2041 (struct target_timeval
*)target_data
;
2043 if (len
!= sizeof(struct timeval
) ||
2044 tgt_len
!= sizeof(struct target_timeval
)) {
2048 /* copy struct timeval to target */
2049 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
2050 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
2053 case SCM_CREDENTIALS
:
2055 struct ucred
*cred
= (struct ucred
*)data
;
2056 struct target_ucred
*target_cred
=
2057 (struct target_ucred
*)target_data
;
2059 __put_user(cred
->pid
, &target_cred
->pid
);
2060 __put_user(cred
->uid
, &target_cred
->uid
);
2061 __put_user(cred
->gid
, &target_cred
->gid
);
2070 switch (cmsg
->cmsg_type
) {
2073 uint32_t *v
= (uint32_t *)data
;
2074 uint32_t *t_int
= (uint32_t *)target_data
;
2076 if (len
!= sizeof(uint32_t) ||
2077 tgt_len
!= sizeof(uint32_t)) {
2080 __put_user(*v
, t_int
);
2086 struct sock_extended_err ee
;
2087 struct sockaddr_in offender
;
2089 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
2090 struct errhdr_t
*target_errh
=
2091 (struct errhdr_t
*)target_data
;
2093 if (len
!= sizeof(struct errhdr_t
) ||
2094 tgt_len
!= sizeof(struct errhdr_t
)) {
2097 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2098 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2099 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2100 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2101 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2102 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2103 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2104 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2105 (void *) &errh
->offender
, sizeof(errh
->offender
));
2114 switch (cmsg
->cmsg_type
) {
2117 uint32_t *v
= (uint32_t *)data
;
2118 uint32_t *t_int
= (uint32_t *)target_data
;
2120 if (len
!= sizeof(uint32_t) ||
2121 tgt_len
!= sizeof(uint32_t)) {
2124 __put_user(*v
, t_int
);
2130 struct sock_extended_err ee
;
2131 struct sockaddr_in6 offender
;
2133 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2134 struct errhdr6_t
*target_errh
=
2135 (struct errhdr6_t
*)target_data
;
2137 if (len
!= sizeof(struct errhdr6_t
) ||
2138 tgt_len
!= sizeof(struct errhdr6_t
)) {
2141 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2142 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2143 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2144 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2145 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2146 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2147 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2148 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2149 (void *) &errh
->offender
, sizeof(errh
->offender
));
2159 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2160 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2161 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2162 if (tgt_len
> len
) {
2163 memset(target_data
+ len
, 0, tgt_len
- len
);
2167 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2168 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2169 if (msg_controllen
< tgt_space
) {
2170 tgt_space
= msg_controllen
;
2172 msg_controllen
-= tgt_space
;
2174 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2175 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2178 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2180 target_msgh
->msg_controllen
= tswapal(space
);
2184 /* do_setsockopt() Must return target values and target errnos. */
2185 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2186 abi_ulong optval_addr
, socklen_t optlen
)
2190 struct ip_mreqn
*ip_mreq
;
2191 struct ip_mreq_source
*ip_mreq_source
;
2196 /* TCP and UDP options all take an 'int' value. */
2197 if (optlen
< sizeof(uint32_t))
2198 return -TARGET_EINVAL
;
2200 if (get_user_u32(val
, optval_addr
))
2201 return -TARGET_EFAULT
;
2202 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2209 case IP_ROUTER_ALERT
:
2213 case IP_MTU_DISCOVER
:
2220 case IP_MULTICAST_TTL
:
2221 case IP_MULTICAST_LOOP
:
2223 if (optlen
>= sizeof(uint32_t)) {
2224 if (get_user_u32(val
, optval_addr
))
2225 return -TARGET_EFAULT
;
2226 } else if (optlen
>= 1) {
2227 if (get_user_u8(val
, optval_addr
))
2228 return -TARGET_EFAULT
;
2230 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2232 case IP_ADD_MEMBERSHIP
:
2233 case IP_DROP_MEMBERSHIP
:
2234 if (optlen
< sizeof (struct target_ip_mreq
) ||
2235 optlen
> sizeof (struct target_ip_mreqn
))
2236 return -TARGET_EINVAL
;
2238 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2239 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2240 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2243 case IP_BLOCK_SOURCE
:
2244 case IP_UNBLOCK_SOURCE
:
2245 case IP_ADD_SOURCE_MEMBERSHIP
:
2246 case IP_DROP_SOURCE_MEMBERSHIP
:
2247 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2248 return -TARGET_EINVAL
;
2250 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2251 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2252 unlock_user (ip_mreq_source
, optval_addr
, 0);
2261 case IPV6_MTU_DISCOVER
:
2264 case IPV6_RECVPKTINFO
:
2265 case IPV6_UNICAST_HOPS
:
2266 case IPV6_MULTICAST_HOPS
:
2267 case IPV6_MULTICAST_LOOP
:
2269 case IPV6_RECVHOPLIMIT
:
2270 case IPV6_2292HOPLIMIT
:
2273 case IPV6_2292PKTINFO
:
2274 case IPV6_RECVTCLASS
:
2275 case IPV6_RECVRTHDR
:
2276 case IPV6_2292RTHDR
:
2277 case IPV6_RECVHOPOPTS
:
2278 case IPV6_2292HOPOPTS
:
2279 case IPV6_RECVDSTOPTS
:
2280 case IPV6_2292DSTOPTS
:
2282 case IPV6_ADDR_PREFERENCES
:
2283 #ifdef IPV6_RECVPATHMTU
2284 case IPV6_RECVPATHMTU
:
2286 #ifdef IPV6_TRANSPARENT
2287 case IPV6_TRANSPARENT
:
2289 #ifdef IPV6_FREEBIND
2292 #ifdef IPV6_RECVORIGDSTADDR
2293 case IPV6_RECVORIGDSTADDR
:
2296 if (optlen
< sizeof(uint32_t)) {
2297 return -TARGET_EINVAL
;
2299 if (get_user_u32(val
, optval_addr
)) {
2300 return -TARGET_EFAULT
;
2302 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2303 &val
, sizeof(val
)));
2307 struct in6_pktinfo pki
;
2309 if (optlen
< sizeof(pki
)) {
2310 return -TARGET_EINVAL
;
2313 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2314 return -TARGET_EFAULT
;
2317 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2319 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2320 &pki
, sizeof(pki
)));
2323 case IPV6_ADD_MEMBERSHIP
:
2324 case IPV6_DROP_MEMBERSHIP
:
2326 struct ipv6_mreq ipv6mreq
;
2328 if (optlen
< sizeof(ipv6mreq
)) {
2329 return -TARGET_EINVAL
;
2332 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2333 return -TARGET_EFAULT
;
2336 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2338 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2339 &ipv6mreq
, sizeof(ipv6mreq
)));
2350 struct icmp6_filter icmp6f
;
2352 if (optlen
> sizeof(icmp6f
)) {
2353 optlen
= sizeof(icmp6f
);
2356 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2357 return -TARGET_EFAULT
;
2360 for (val
= 0; val
< 8; val
++) {
2361 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2364 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2376 /* those take an u32 value */
2377 if (optlen
< sizeof(uint32_t)) {
2378 return -TARGET_EINVAL
;
2381 if (get_user_u32(val
, optval_addr
)) {
2382 return -TARGET_EFAULT
;
2384 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2385 &val
, sizeof(val
)));
2392 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2397 char *alg_key
= g_malloc(optlen
);
2400 return -TARGET_ENOMEM
;
2402 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2404 return -TARGET_EFAULT
;
2406 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2411 case ALG_SET_AEAD_AUTHSIZE
:
2413 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2422 case TARGET_SOL_SOCKET
:
2424 case TARGET_SO_RCVTIMEO
:
2428 optname
= SO_RCVTIMEO
;
2431 if (optlen
!= sizeof(struct target_timeval
)) {
2432 return -TARGET_EINVAL
;
2435 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2436 return -TARGET_EFAULT
;
2439 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2443 case TARGET_SO_SNDTIMEO
:
2444 optname
= SO_SNDTIMEO
;
2446 case TARGET_SO_ATTACH_FILTER
:
2448 struct target_sock_fprog
*tfprog
;
2449 struct target_sock_filter
*tfilter
;
2450 struct sock_fprog fprog
;
2451 struct sock_filter
*filter
;
2454 if (optlen
!= sizeof(*tfprog
)) {
2455 return -TARGET_EINVAL
;
2457 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2458 return -TARGET_EFAULT
;
2460 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2461 tswapal(tfprog
->filter
), 0)) {
2462 unlock_user_struct(tfprog
, optval_addr
, 1);
2463 return -TARGET_EFAULT
;
2466 fprog
.len
= tswap16(tfprog
->len
);
2467 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2468 if (filter
== NULL
) {
2469 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2470 unlock_user_struct(tfprog
, optval_addr
, 1);
2471 return -TARGET_ENOMEM
;
2473 for (i
= 0; i
< fprog
.len
; i
++) {
2474 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2475 filter
[i
].jt
= tfilter
[i
].jt
;
2476 filter
[i
].jf
= tfilter
[i
].jf
;
2477 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2479 fprog
.filter
= filter
;
2481 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2482 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2485 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2486 unlock_user_struct(tfprog
, optval_addr
, 1);
2489 case TARGET_SO_BINDTODEVICE
:
2491 char *dev_ifname
, *addr_ifname
;
2493 if (optlen
> IFNAMSIZ
- 1) {
2494 optlen
= IFNAMSIZ
- 1;
2496 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2498 return -TARGET_EFAULT
;
2500 optname
= SO_BINDTODEVICE
;
2501 addr_ifname
= alloca(IFNAMSIZ
);
2502 memcpy(addr_ifname
, dev_ifname
, optlen
);
2503 addr_ifname
[optlen
] = 0;
2504 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2505 addr_ifname
, optlen
));
2506 unlock_user (dev_ifname
, optval_addr
, 0);
2509 case TARGET_SO_LINGER
:
2512 struct target_linger
*tlg
;
2514 if (optlen
!= sizeof(struct target_linger
)) {
2515 return -TARGET_EINVAL
;
2517 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2518 return -TARGET_EFAULT
;
2520 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2521 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2522 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2524 unlock_user_struct(tlg
, optval_addr
, 0);
2527 /* Options with 'int' argument. */
2528 case TARGET_SO_DEBUG
:
2531 case TARGET_SO_REUSEADDR
:
2532 optname
= SO_REUSEADDR
;
2535 case TARGET_SO_REUSEPORT
:
2536 optname
= SO_REUSEPORT
;
2539 case TARGET_SO_TYPE
:
2542 case TARGET_SO_ERROR
:
2545 case TARGET_SO_DONTROUTE
:
2546 optname
= SO_DONTROUTE
;
2548 case TARGET_SO_BROADCAST
:
2549 optname
= SO_BROADCAST
;
2551 case TARGET_SO_SNDBUF
:
2552 optname
= SO_SNDBUF
;
2554 case TARGET_SO_SNDBUFFORCE
:
2555 optname
= SO_SNDBUFFORCE
;
2557 case TARGET_SO_RCVBUF
:
2558 optname
= SO_RCVBUF
;
2560 case TARGET_SO_RCVBUFFORCE
:
2561 optname
= SO_RCVBUFFORCE
;
2563 case TARGET_SO_KEEPALIVE
:
2564 optname
= SO_KEEPALIVE
;
2566 case TARGET_SO_OOBINLINE
:
2567 optname
= SO_OOBINLINE
;
2569 case TARGET_SO_NO_CHECK
:
2570 optname
= SO_NO_CHECK
;
2572 case TARGET_SO_PRIORITY
:
2573 optname
= SO_PRIORITY
;
2576 case TARGET_SO_BSDCOMPAT
:
2577 optname
= SO_BSDCOMPAT
;
2580 case TARGET_SO_PASSCRED
:
2581 optname
= SO_PASSCRED
;
2583 case TARGET_SO_PASSSEC
:
2584 optname
= SO_PASSSEC
;
2586 case TARGET_SO_TIMESTAMP
:
2587 optname
= SO_TIMESTAMP
;
2589 case TARGET_SO_RCVLOWAT
:
2590 optname
= SO_RCVLOWAT
;
2595 if (optlen
< sizeof(uint32_t))
2596 return -TARGET_EINVAL
;
2598 if (get_user_u32(val
, optval_addr
))
2599 return -TARGET_EFAULT
;
2600 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2605 case NETLINK_PKTINFO
:
2606 case NETLINK_ADD_MEMBERSHIP
:
2607 case NETLINK_DROP_MEMBERSHIP
:
2608 case NETLINK_BROADCAST_ERROR
:
2609 case NETLINK_NO_ENOBUFS
:
2610 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2611 case NETLINK_LISTEN_ALL_NSID
:
2612 case NETLINK_CAP_ACK
:
2613 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2614 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2615 case NETLINK_EXT_ACK
:
2616 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2617 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2618 case NETLINK_GET_STRICT_CHK
:
2619 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2625 if (optlen
< sizeof(uint32_t)) {
2626 return -TARGET_EINVAL
;
2628 if (get_user_u32(val
, optval_addr
)) {
2629 return -TARGET_EFAULT
;
2631 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2634 #endif /* SOL_NETLINK */
2637 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2639 ret
= -TARGET_ENOPROTOOPT
;
2644 /* do_getsockopt() Must return target values and target errnos. */
2645 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2646 abi_ulong optval_addr
, abi_ulong optlen
)
2653 case TARGET_SOL_SOCKET
:
2656 /* These don't just return a single integer */
2657 case TARGET_SO_PEERNAME
:
2659 case TARGET_SO_RCVTIMEO
: {
2663 optname
= SO_RCVTIMEO
;
2666 if (get_user_u32(len
, optlen
)) {
2667 return -TARGET_EFAULT
;
2670 return -TARGET_EINVAL
;
2674 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2679 if (len
> sizeof(struct target_timeval
)) {
2680 len
= sizeof(struct target_timeval
);
2682 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2683 return -TARGET_EFAULT
;
2685 if (put_user_u32(len
, optlen
)) {
2686 return -TARGET_EFAULT
;
2690 case TARGET_SO_SNDTIMEO
:
2691 optname
= SO_SNDTIMEO
;
2693 case TARGET_SO_PEERCRED
: {
2696 struct target_ucred
*tcr
;
2698 if (get_user_u32(len
, optlen
)) {
2699 return -TARGET_EFAULT
;
2702 return -TARGET_EINVAL
;
2706 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2714 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2715 return -TARGET_EFAULT
;
2717 __put_user(cr
.pid
, &tcr
->pid
);
2718 __put_user(cr
.uid
, &tcr
->uid
);
2719 __put_user(cr
.gid
, &tcr
->gid
);
2720 unlock_user_struct(tcr
, optval_addr
, 1);
2721 if (put_user_u32(len
, optlen
)) {
2722 return -TARGET_EFAULT
;
2726 case TARGET_SO_PEERSEC
: {
2729 if (get_user_u32(len
, optlen
)) {
2730 return -TARGET_EFAULT
;
2733 return -TARGET_EINVAL
;
2735 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2737 return -TARGET_EFAULT
;
2740 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2742 if (put_user_u32(lv
, optlen
)) {
2743 ret
= -TARGET_EFAULT
;
2745 unlock_user(name
, optval_addr
, lv
);
2748 case TARGET_SO_LINGER
:
2752 struct target_linger
*tlg
;
2754 if (get_user_u32(len
, optlen
)) {
2755 return -TARGET_EFAULT
;
2758 return -TARGET_EINVAL
;
2762 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2770 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2771 return -TARGET_EFAULT
;
2773 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2774 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2775 unlock_user_struct(tlg
, optval_addr
, 1);
2776 if (put_user_u32(len
, optlen
)) {
2777 return -TARGET_EFAULT
;
2781 /* Options with 'int' argument. */
2782 case TARGET_SO_DEBUG
:
2785 case TARGET_SO_REUSEADDR
:
2786 optname
= SO_REUSEADDR
;
2789 case TARGET_SO_REUSEPORT
:
2790 optname
= SO_REUSEPORT
;
2793 case TARGET_SO_TYPE
:
2796 case TARGET_SO_ERROR
:
2799 case TARGET_SO_DONTROUTE
:
2800 optname
= SO_DONTROUTE
;
2802 case TARGET_SO_BROADCAST
:
2803 optname
= SO_BROADCAST
;
2805 case TARGET_SO_SNDBUF
:
2806 optname
= SO_SNDBUF
;
2808 case TARGET_SO_RCVBUF
:
2809 optname
= SO_RCVBUF
;
2811 case TARGET_SO_KEEPALIVE
:
2812 optname
= SO_KEEPALIVE
;
2814 case TARGET_SO_OOBINLINE
:
2815 optname
= SO_OOBINLINE
;
2817 case TARGET_SO_NO_CHECK
:
2818 optname
= SO_NO_CHECK
;
2820 case TARGET_SO_PRIORITY
:
2821 optname
= SO_PRIORITY
;
2824 case TARGET_SO_BSDCOMPAT
:
2825 optname
= SO_BSDCOMPAT
;
2828 case TARGET_SO_PASSCRED
:
2829 optname
= SO_PASSCRED
;
2831 case TARGET_SO_TIMESTAMP
:
2832 optname
= SO_TIMESTAMP
;
2834 case TARGET_SO_RCVLOWAT
:
2835 optname
= SO_RCVLOWAT
;
2837 case TARGET_SO_ACCEPTCONN
:
2838 optname
= SO_ACCEPTCONN
;
2840 case TARGET_SO_PROTOCOL
:
2841 optname
= SO_PROTOCOL
;
2843 case TARGET_SO_DOMAIN
:
2844 optname
= SO_DOMAIN
;
2852 /* TCP and UDP options all take an 'int' value. */
2854 if (get_user_u32(len
, optlen
))
2855 return -TARGET_EFAULT
;
2857 return -TARGET_EINVAL
;
2859 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2862 if (optname
== SO_TYPE
) {
2863 val
= host_to_target_sock_type(val
);
2868 if (put_user_u32(val
, optval_addr
))
2869 return -TARGET_EFAULT
;
2871 if (put_user_u8(val
, optval_addr
))
2872 return -TARGET_EFAULT
;
2874 if (put_user_u32(len
, optlen
))
2875 return -TARGET_EFAULT
;
2882 case IP_ROUTER_ALERT
:
2886 case IP_MTU_DISCOVER
:
2892 case IP_MULTICAST_TTL
:
2893 case IP_MULTICAST_LOOP
:
2894 if (get_user_u32(len
, optlen
))
2895 return -TARGET_EFAULT
;
2897 return -TARGET_EINVAL
;
2899 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2902 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2904 if (put_user_u32(len
, optlen
)
2905 || put_user_u8(val
, optval_addr
))
2906 return -TARGET_EFAULT
;
2908 if (len
> sizeof(int))
2910 if (put_user_u32(len
, optlen
)
2911 || put_user_u32(val
, optval_addr
))
2912 return -TARGET_EFAULT
;
2916 ret
= -TARGET_ENOPROTOOPT
;
2922 case IPV6_MTU_DISCOVER
:
2925 case IPV6_RECVPKTINFO
:
2926 case IPV6_UNICAST_HOPS
:
2927 case IPV6_MULTICAST_HOPS
:
2928 case IPV6_MULTICAST_LOOP
:
2930 case IPV6_RECVHOPLIMIT
:
2931 case IPV6_2292HOPLIMIT
:
2934 case IPV6_2292PKTINFO
:
2935 case IPV6_RECVTCLASS
:
2936 case IPV6_RECVRTHDR
:
2937 case IPV6_2292RTHDR
:
2938 case IPV6_RECVHOPOPTS
:
2939 case IPV6_2292HOPOPTS
:
2940 case IPV6_RECVDSTOPTS
:
2941 case IPV6_2292DSTOPTS
:
2943 case IPV6_ADDR_PREFERENCES
:
2944 #ifdef IPV6_RECVPATHMTU
2945 case IPV6_RECVPATHMTU
:
2947 #ifdef IPV6_TRANSPARENT
2948 case IPV6_TRANSPARENT
:
2950 #ifdef IPV6_FREEBIND
2953 #ifdef IPV6_RECVORIGDSTADDR
2954 case IPV6_RECVORIGDSTADDR
:
2956 if (get_user_u32(len
, optlen
))
2957 return -TARGET_EFAULT
;
2959 return -TARGET_EINVAL
;
2961 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2964 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2966 if (put_user_u32(len
, optlen
)
2967 || put_user_u8(val
, optval_addr
))
2968 return -TARGET_EFAULT
;
2970 if (len
> sizeof(int))
2972 if (put_user_u32(len
, optlen
)
2973 || put_user_u32(val
, optval_addr
))
2974 return -TARGET_EFAULT
;
2978 ret
= -TARGET_ENOPROTOOPT
;
2985 case NETLINK_PKTINFO
:
2986 case NETLINK_BROADCAST_ERROR
:
2987 case NETLINK_NO_ENOBUFS
:
2988 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2989 case NETLINK_LISTEN_ALL_NSID
:
2990 case NETLINK_CAP_ACK
:
2991 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2992 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2993 case NETLINK_EXT_ACK
:
2994 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2995 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2996 case NETLINK_GET_STRICT_CHK
:
2997 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2998 if (get_user_u32(len
, optlen
)) {
2999 return -TARGET_EFAULT
;
3001 if (len
!= sizeof(val
)) {
3002 return -TARGET_EINVAL
;
3005 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3009 if (put_user_u32(lv
, optlen
)
3010 || put_user_u32(val
, optval_addr
)) {
3011 return -TARGET_EFAULT
;
3014 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3015 case NETLINK_LIST_MEMBERSHIPS
:
3019 if (get_user_u32(len
, optlen
)) {
3020 return -TARGET_EFAULT
;
3023 return -TARGET_EINVAL
;
3025 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
3027 return -TARGET_EFAULT
;
3030 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
3032 unlock_user(results
, optval_addr
, 0);
3035 /* swap host endianness to target endianness. */
3036 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
3037 results
[i
] = tswap32(results
[i
]);
3039 if (put_user_u32(lv
, optlen
)) {
3040 return -TARGET_EFAULT
;
3042 unlock_user(results
, optval_addr
, 0);
3045 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3050 #endif /* SOL_NETLINK */
3053 qemu_log_mask(LOG_UNIMP
,
3054 "getsockopt level=%d optname=%d not yet supported\n",
3056 ret
= -TARGET_EOPNOTSUPP
;
3062 /* Convert target low/high pair representing file offset into the host
3063 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3064 * as the kernel doesn't handle them either.
3066 static void target_to_host_low_high(abi_ulong tlow
,
3068 unsigned long *hlow
,
3069 unsigned long *hhigh
)
3071 uint64_t off
= tlow
|
3072 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3073 TARGET_LONG_BITS
/ 2;
3076 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3079 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3080 abi_ulong count
, int copy
)
3082 struct target_iovec
*target_vec
;
3084 abi_ulong total_len
, max_len
;
3087 bool bad_address
= false;
3093 if (count
> IOV_MAX
) {
3098 vec
= g_try_new0(struct iovec
, count
);
3104 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3105 count
* sizeof(struct target_iovec
), 1);
3106 if (target_vec
== NULL
) {
3111 /* ??? If host page size > target page size, this will result in a
3112 value larger than what we can actually support. */
3113 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3116 for (i
= 0; i
< count
; i
++) {
3117 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3118 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3123 } else if (len
== 0) {
3124 /* Zero length pointer is ignored. */
3125 vec
[i
].iov_base
= 0;
3127 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3128 /* If the first buffer pointer is bad, this is a fault. But
3129 * subsequent bad buffers will result in a partial write; this
3130 * is realized by filling the vector with null pointers and
3132 if (!vec
[i
].iov_base
) {
3143 if (len
> max_len
- total_len
) {
3144 len
= max_len
- total_len
;
3147 vec
[i
].iov_len
= len
;
3151 unlock_user(target_vec
, target_addr
, 0);
3156 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3157 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3160 unlock_user(target_vec
, target_addr
, 0);
3167 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3168 abi_ulong count
, int copy
)
3170 struct target_iovec
*target_vec
;
3173 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3174 count
* sizeof(struct target_iovec
), 1);
3176 for (i
= 0; i
< count
; i
++) {
3177 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3178 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3182 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3184 unlock_user(target_vec
, target_addr
, 0);
3190 static inline int target_to_host_sock_type(int *type
)
3193 int target_type
= *type
;
3195 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3196 case TARGET_SOCK_DGRAM
:
3197 host_type
= SOCK_DGRAM
;
3199 case TARGET_SOCK_STREAM
:
3200 host_type
= SOCK_STREAM
;
3203 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3206 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3207 #if defined(SOCK_CLOEXEC)
3208 host_type
|= SOCK_CLOEXEC
;
3210 return -TARGET_EINVAL
;
3213 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3214 #if defined(SOCK_NONBLOCK)
3215 host_type
|= SOCK_NONBLOCK
;
3216 #elif !defined(O_NONBLOCK)
3217 return -TARGET_EINVAL
;
3224 /* Try to emulate socket type flags after socket creation. */
3225 static int sock_flags_fixup(int fd
, int target_type
)
3227 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3228 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3229 int flags
= fcntl(fd
, F_GETFL
);
3230 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3232 return -TARGET_EINVAL
;
3239 /* do_socket() Must return target values and target errnos. */
3240 static abi_long
do_socket(int domain
, int type
, int protocol
)
3242 int target_type
= type
;
3245 ret
= target_to_host_sock_type(&type
);
3250 if (domain
== PF_NETLINK
&& !(
3251 #ifdef CONFIG_RTNETLINK
3252 protocol
== NETLINK_ROUTE
||
3254 protocol
== NETLINK_KOBJECT_UEVENT
||
3255 protocol
== NETLINK_AUDIT
)) {
3256 return -TARGET_EPROTONOSUPPORT
;
3259 if (domain
== AF_PACKET
||
3260 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3261 protocol
= tswap16(protocol
);
3264 ret
= get_errno(socket(domain
, type
, protocol
));
3266 ret
= sock_flags_fixup(ret
, target_type
);
3267 if (type
== SOCK_PACKET
) {
3268 /* Manage an obsolete case :
3269 * if socket type is SOCK_PACKET, bind by name
3271 fd_trans_register(ret
, &target_packet_trans
);
3272 } else if (domain
== PF_NETLINK
) {
3274 #ifdef CONFIG_RTNETLINK
3276 fd_trans_register(ret
, &target_netlink_route_trans
);
3279 case NETLINK_KOBJECT_UEVENT
:
3280 /* nothing to do: messages are strings */
3283 fd_trans_register(ret
, &target_netlink_audit_trans
);
3286 g_assert_not_reached();
3293 /* do_bind() Must return target values and target errnos. */
3294 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3300 if ((int)addrlen
< 0) {
3301 return -TARGET_EINVAL
;
3304 addr
= alloca(addrlen
+1);
3306 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3310 return get_errno(bind(sockfd
, addr
, addrlen
));
3313 /* do_connect() Must return target values and target errnos. */
3314 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3320 if ((int)addrlen
< 0) {
3321 return -TARGET_EINVAL
;
3324 addr
= alloca(addrlen
+1);
3326 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3330 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3333 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3334 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3335 int flags
, int send
)
3341 abi_ulong target_vec
;
3343 if (msgp
->msg_name
) {
3344 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3345 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3346 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3347 tswapal(msgp
->msg_name
),
3349 if (ret
== -TARGET_EFAULT
) {
3350 /* For connected sockets msg_name and msg_namelen must
3351 * be ignored, so returning EFAULT immediately is wrong.
3352 * Instead, pass a bad msg_name to the host kernel, and
3353 * let it decide whether to return EFAULT or not.
3355 msg
.msg_name
= (void *)-1;
3360 msg
.msg_name
= NULL
;
3361 msg
.msg_namelen
= 0;
3363 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3364 msg
.msg_control
= alloca(msg
.msg_controllen
);
3365 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3367 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3369 count
= tswapal(msgp
->msg_iovlen
);
3370 target_vec
= tswapal(msgp
->msg_iov
);
3372 if (count
> IOV_MAX
) {
3373 /* sendrcvmsg returns a different errno for this condition than
3374 * readv/writev, so we must catch it here before lock_iovec() does.
3376 ret
= -TARGET_EMSGSIZE
;
3380 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3381 target_vec
, count
, send
);
3383 ret
= -host_to_target_errno(errno
);
3386 msg
.msg_iovlen
= count
;
3390 if (fd_trans_target_to_host_data(fd
)) {
3393 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3394 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3395 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3396 msg
.msg_iov
->iov_len
);
3398 msg
.msg_iov
->iov_base
= host_msg
;
3399 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3403 ret
= target_to_host_cmsg(&msg
, msgp
);
3405 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3409 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3410 if (!is_error(ret
)) {
3412 if (fd_trans_host_to_target_data(fd
)) {
3413 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3414 MIN(msg
.msg_iov
->iov_len
, len
));
3416 ret
= host_to_target_cmsg(msgp
, &msg
);
3418 if (!is_error(ret
)) {
3419 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3420 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3421 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3422 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3423 msg
.msg_name
, msg
.msg_namelen
);
3435 unlock_iovec(vec
, target_vec
, count
, !send
);
3440 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3441 int flags
, int send
)
3444 struct target_msghdr
*msgp
;
3446 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3450 return -TARGET_EFAULT
;
3452 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3453 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3457 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3458 * so it might not have this *mmsg-specific flag either.
3460 #ifndef MSG_WAITFORONE
3461 #define MSG_WAITFORONE 0x10000
3464 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3465 unsigned int vlen
, unsigned int flags
,
3468 struct target_mmsghdr
*mmsgp
;
3472 if (vlen
> UIO_MAXIOV
) {
3476 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3478 return -TARGET_EFAULT
;
3481 for (i
= 0; i
< vlen
; i
++) {
3482 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3483 if (is_error(ret
)) {
3486 mmsgp
[i
].msg_len
= tswap32(ret
);
3487 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3488 if (flags
& MSG_WAITFORONE
) {
3489 flags
|= MSG_DONTWAIT
;
3493 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3495 /* Return number of datagrams sent if we sent any at all;
3496 * otherwise return the error.
3504 /* do_accept4() Must return target values and target errnos. */
3505 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3506 abi_ulong target_addrlen_addr
, int flags
)
3508 socklen_t addrlen
, ret_addrlen
;
3513 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3515 if (target_addr
== 0) {
3516 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3519 /* linux returns EFAULT if addrlen pointer is invalid */
3520 if (get_user_u32(addrlen
, target_addrlen_addr
))
3521 return -TARGET_EFAULT
;
3523 if ((int)addrlen
< 0) {
3524 return -TARGET_EINVAL
;
3527 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3528 return -TARGET_EFAULT
;
3530 addr
= alloca(addrlen
);
3532 ret_addrlen
= addrlen
;
3533 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3534 if (!is_error(ret
)) {
3535 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3536 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3537 ret
= -TARGET_EFAULT
;
3543 /* do_getpeername() Must return target values and target errnos. */
3544 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3545 abi_ulong target_addrlen_addr
)
3547 socklen_t addrlen
, ret_addrlen
;
3551 if (get_user_u32(addrlen
, target_addrlen_addr
))
3552 return -TARGET_EFAULT
;
3554 if ((int)addrlen
< 0) {
3555 return -TARGET_EINVAL
;
3558 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3559 return -TARGET_EFAULT
;
3561 addr
= alloca(addrlen
);
3563 ret_addrlen
= addrlen
;
3564 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3565 if (!is_error(ret
)) {
3566 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3567 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3568 ret
= -TARGET_EFAULT
;
3574 /* do_getsockname() Must return target values and target errnos. */
3575 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3576 abi_ulong target_addrlen_addr
)
3578 socklen_t addrlen
, ret_addrlen
;
3582 if (get_user_u32(addrlen
, target_addrlen_addr
))
3583 return -TARGET_EFAULT
;
3585 if ((int)addrlen
< 0) {
3586 return -TARGET_EINVAL
;
3589 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3590 return -TARGET_EFAULT
;
3592 addr
= alloca(addrlen
);
3594 ret_addrlen
= addrlen
;
3595 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3596 if (!is_error(ret
)) {
3597 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3598 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3599 ret
= -TARGET_EFAULT
;
3605 /* do_socketpair() Must return target values and target errnos. */
3606 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3607 abi_ulong target_tab_addr
)
3612 target_to_host_sock_type(&type
);
3614 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3615 if (!is_error(ret
)) {
3616 if (put_user_s32(tab
[0], target_tab_addr
)
3617 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3618 ret
= -TARGET_EFAULT
;
3623 /* do_sendto() Must return target values and target errnos. */
3624 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3625 abi_ulong target_addr
, socklen_t addrlen
)
3629 void *copy_msg
= NULL
;
3632 if ((int)addrlen
< 0) {
3633 return -TARGET_EINVAL
;
3636 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3638 return -TARGET_EFAULT
;
3639 if (fd_trans_target_to_host_data(fd
)) {
3640 copy_msg
= host_msg
;
3641 host_msg
= g_malloc(len
);
3642 memcpy(host_msg
, copy_msg
, len
);
3643 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3649 addr
= alloca(addrlen
+1);
3650 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3654 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3656 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3661 host_msg
= copy_msg
;
3663 unlock_user(host_msg
, msg
, 0);
3667 /* do_recvfrom() Must return target values and target errnos. */
3668 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3669 abi_ulong target_addr
,
3670 abi_ulong target_addrlen
)
3672 socklen_t addrlen
, ret_addrlen
;
3677 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3679 return -TARGET_EFAULT
;
3681 if (get_user_u32(addrlen
, target_addrlen
)) {
3682 ret
= -TARGET_EFAULT
;
3685 if ((int)addrlen
< 0) {
3686 ret
= -TARGET_EINVAL
;
3689 addr
= alloca(addrlen
);
3690 ret_addrlen
= addrlen
;
3691 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3692 addr
, &ret_addrlen
));
3694 addr
= NULL
; /* To keep compiler quiet. */
3695 addrlen
= 0; /* To keep compiler quiet. */
3696 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3698 if (!is_error(ret
)) {
3699 if (fd_trans_host_to_target_data(fd
)) {
3701 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3702 if (is_error(trans
)) {
3708 host_to_target_sockaddr(target_addr
, addr
,
3709 MIN(addrlen
, ret_addrlen
));
3710 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3711 ret
= -TARGET_EFAULT
;
3715 unlock_user(host_msg
, msg
, len
);
3718 unlock_user(host_msg
, msg
, 0);
3723 #ifdef TARGET_NR_socketcall
3724 /* do_socketcall() must return target values and target errnos. */
3725 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3727 static const unsigned nargs
[] = { /* number of arguments per operation */
3728 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3729 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3730 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3731 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3732 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3733 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3734 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3735 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3736 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3737 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3738 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3739 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3740 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3741 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3742 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3743 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3744 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3745 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3746 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3747 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3749 abi_long a
[6]; /* max 6 args */
3752 /* check the range of the first argument num */
3753 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3754 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3755 return -TARGET_EINVAL
;
3757 /* ensure we have space for args */
3758 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3759 return -TARGET_EINVAL
;
3761 /* collect the arguments in a[] according to nargs[] */
3762 for (i
= 0; i
< nargs
[num
]; ++i
) {
3763 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3764 return -TARGET_EFAULT
;
3767 /* now when we have the args, invoke the appropriate underlying function */
3769 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3770 return do_socket(a
[0], a
[1], a
[2]);
3771 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3772 return do_bind(a
[0], a
[1], a
[2]);
3773 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3774 return do_connect(a
[0], a
[1], a
[2]);
3775 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3776 return get_errno(listen(a
[0], a
[1]));
3777 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3778 return do_accept4(a
[0], a
[1], a
[2], 0);
3779 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3780 return do_getsockname(a
[0], a
[1], a
[2]);
3781 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3782 return do_getpeername(a
[0], a
[1], a
[2]);
3783 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3784 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3785 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3786 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3787 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3788 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3789 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3790 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3791 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3792 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3793 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3794 return get_errno(shutdown(a
[0], a
[1]));
3795 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3796 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3797 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3798 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3799 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3800 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3801 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3802 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3803 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3804 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3805 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3806 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3807 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3808 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3810 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3811 return -TARGET_EINVAL
;
3816 #define N_SHM_REGIONS 32
3818 static struct shm_region
{
3822 } shm_regions
[N_SHM_REGIONS
];
3824 #ifndef TARGET_SEMID64_DS
3825 /* asm-generic version of this struct */
3826 struct target_semid64_ds
3828 struct target_ipc_perm sem_perm
;
3829 abi_ulong sem_otime
;
3830 #if TARGET_ABI_BITS == 32
3831 abi_ulong __unused1
;
3833 abi_ulong sem_ctime
;
3834 #if TARGET_ABI_BITS == 32
3835 abi_ulong __unused2
;
3837 abi_ulong sem_nsems
;
3838 abi_ulong __unused3
;
3839 abi_ulong __unused4
;
3843 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3844 abi_ulong target_addr
)
3846 struct target_ipc_perm
*target_ip
;
3847 struct target_semid64_ds
*target_sd
;
3849 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3850 return -TARGET_EFAULT
;
3851 target_ip
= &(target_sd
->sem_perm
);
3852 host_ip
->__key
= tswap32(target_ip
->__key
);
3853 host_ip
->uid
= tswap32(target_ip
->uid
);
3854 host_ip
->gid
= tswap32(target_ip
->gid
);
3855 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3856 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3857 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3858 host_ip
->mode
= tswap32(target_ip
->mode
);
3860 host_ip
->mode
= tswap16(target_ip
->mode
);
3862 #if defined(TARGET_PPC)
3863 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3865 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3867 unlock_user_struct(target_sd
, target_addr
, 0);
3871 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3872 struct ipc_perm
*host_ip
)
3874 struct target_ipc_perm
*target_ip
;
3875 struct target_semid64_ds
*target_sd
;
3877 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3878 return -TARGET_EFAULT
;
3879 target_ip
= &(target_sd
->sem_perm
);
3880 target_ip
->__key
= tswap32(host_ip
->__key
);
3881 target_ip
->uid
= tswap32(host_ip
->uid
);
3882 target_ip
->gid
= tswap32(host_ip
->gid
);
3883 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3884 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3885 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3886 target_ip
->mode
= tswap32(host_ip
->mode
);
3888 target_ip
->mode
= tswap16(host_ip
->mode
);
3890 #if defined(TARGET_PPC)
3891 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3893 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3895 unlock_user_struct(target_sd
, target_addr
, 1);
3899 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3900 abi_ulong target_addr
)
3902 struct target_semid64_ds
*target_sd
;
3904 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3905 return -TARGET_EFAULT
;
3906 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3907 return -TARGET_EFAULT
;
3908 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3909 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3910 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3911 unlock_user_struct(target_sd
, target_addr
, 0);
3915 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3916 struct semid_ds
*host_sd
)
3918 struct target_semid64_ds
*target_sd
;
3920 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3921 return -TARGET_EFAULT
;
3922 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3923 return -TARGET_EFAULT
;
3924 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3925 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3926 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3927 unlock_user_struct(target_sd
, target_addr
, 1);
3931 struct target_seminfo
{
3944 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3945 struct seminfo
*host_seminfo
)
3947 struct target_seminfo
*target_seminfo
;
3948 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3949 return -TARGET_EFAULT
;
3950 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3951 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3952 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3953 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3954 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3955 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3956 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3957 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3958 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3959 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3960 unlock_user_struct(target_seminfo
, target_addr
, 1);
3966 struct semid_ds
*buf
;
3967 unsigned short *array
;
3968 struct seminfo
*__buf
;
3971 union target_semun
{
3978 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3979 abi_ulong target_addr
)
3982 unsigned short *array
;
3984 struct semid_ds semid_ds
;
3987 semun
.buf
= &semid_ds
;
3989 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3991 return get_errno(ret
);
3993 nsems
= semid_ds
.sem_nsems
;
3995 *host_array
= g_try_new(unsigned short, nsems
);
3997 return -TARGET_ENOMEM
;
3999 array
= lock_user(VERIFY_READ
, target_addr
,
4000 nsems
*sizeof(unsigned short), 1);
4002 g_free(*host_array
);
4003 return -TARGET_EFAULT
;
4006 for(i
=0; i
<nsems
; i
++) {
4007 __get_user((*host_array
)[i
], &array
[i
]);
4009 unlock_user(array
, target_addr
, 0);
4014 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4015 unsigned short **host_array
)
4018 unsigned short *array
;
4020 struct semid_ds semid_ds
;
4023 semun
.buf
= &semid_ds
;
4025 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4027 return get_errno(ret
);
4029 nsems
= semid_ds
.sem_nsems
;
4031 array
= lock_user(VERIFY_WRITE
, target_addr
,
4032 nsems
*sizeof(unsigned short), 0);
4034 return -TARGET_EFAULT
;
4036 for(i
=0; i
<nsems
; i
++) {
4037 __put_user((*host_array
)[i
], &array
[i
]);
4039 g_free(*host_array
);
4040 unlock_user(array
, target_addr
, 1);
4045 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4046 abi_ulong target_arg
)
4048 union target_semun target_su
= { .buf
= target_arg
};
4050 struct semid_ds dsarg
;
4051 unsigned short *array
= NULL
;
4052 struct seminfo seminfo
;
4053 abi_long ret
= -TARGET_EINVAL
;
4060 /* In 64 bit cross-endian situations, we will erroneously pick up
4061 * the wrong half of the union for the "val" element. To rectify
4062 * this, the entire 8-byte structure is byteswapped, followed by
4063 * a swap of the 4 byte val field. In other cases, the data is
4064 * already in proper host byte order. */
4065 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4066 target_su
.buf
= tswapal(target_su
.buf
);
4067 arg
.val
= tswap32(target_su
.val
);
4069 arg
.val
= target_su
.val
;
4071 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4075 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4079 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4080 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4087 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4091 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4092 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4098 arg
.__buf
= &seminfo
;
4099 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4100 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4108 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4115 struct target_sembuf
{
4116 unsigned short sem_num
;
4121 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4122 abi_ulong target_addr
,
4125 struct target_sembuf
*target_sembuf
;
4128 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4129 nsops
*sizeof(struct target_sembuf
), 1);
4131 return -TARGET_EFAULT
;
4133 for(i
=0; i
<nsops
; i
++) {
4134 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4135 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4136 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4139 unlock_user(target_sembuf
, target_addr
, 0);
4144 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4145 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4148 * This macro is required to handle the s390 variants, which passes the
4149 * arguments in a different order than default.
4152 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4153 (__nsops), (__timeout), (__sops)
4155 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4156 (__nsops), 0, (__sops), (__timeout)
4159 static inline abi_long
do_semtimedop(int semid
,
4162 abi_long timeout
, bool time64
)
4164 struct sembuf
*sops
;
4165 struct timespec ts
, *pts
= NULL
;
4171 if (target_to_host_timespec64(pts
, timeout
)) {
4172 return -TARGET_EFAULT
;
4175 if (target_to_host_timespec(pts
, timeout
)) {
4176 return -TARGET_EFAULT
;
4181 if (nsops
> TARGET_SEMOPM
) {
4182 return -TARGET_E2BIG
;
4185 sops
= g_new(struct sembuf
, nsops
);
4187 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4189 return -TARGET_EFAULT
;
4192 ret
= -TARGET_ENOSYS
;
4193 #ifdef __NR_semtimedop
4194 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4197 if (ret
== -TARGET_ENOSYS
) {
4198 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4199 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4207 struct target_msqid_ds
4209 struct target_ipc_perm msg_perm
;
4210 abi_ulong msg_stime
;
4211 #if TARGET_ABI_BITS == 32
4212 abi_ulong __unused1
;
4214 abi_ulong msg_rtime
;
4215 #if TARGET_ABI_BITS == 32
4216 abi_ulong __unused2
;
4218 abi_ulong msg_ctime
;
4219 #if TARGET_ABI_BITS == 32
4220 abi_ulong __unused3
;
4222 abi_ulong __msg_cbytes
;
4224 abi_ulong msg_qbytes
;
4225 abi_ulong msg_lspid
;
4226 abi_ulong msg_lrpid
;
4227 abi_ulong __unused4
;
4228 abi_ulong __unused5
;
4231 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4232 abi_ulong target_addr
)
4234 struct target_msqid_ds
*target_md
;
4236 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4237 return -TARGET_EFAULT
;
4238 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4239 return -TARGET_EFAULT
;
4240 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4241 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4242 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4243 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4244 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4245 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4246 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4247 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4248 unlock_user_struct(target_md
, target_addr
, 0);
4252 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4253 struct msqid_ds
*host_md
)
4255 struct target_msqid_ds
*target_md
;
4257 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4258 return -TARGET_EFAULT
;
4259 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4260 return -TARGET_EFAULT
;
4261 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4262 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4263 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4264 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4265 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4266 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4267 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4268 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4269 unlock_user_struct(target_md
, target_addr
, 1);
4273 struct target_msginfo
{
4281 unsigned short int msgseg
;
4284 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4285 struct msginfo
*host_msginfo
)
4287 struct target_msginfo
*target_msginfo
;
4288 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4289 return -TARGET_EFAULT
;
4290 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4291 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4292 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4293 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4294 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4295 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4296 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4297 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4298 unlock_user_struct(target_msginfo
, target_addr
, 1);
4302 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4304 struct msqid_ds dsarg
;
4305 struct msginfo msginfo
;
4306 abi_long ret
= -TARGET_EINVAL
;
4314 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4315 return -TARGET_EFAULT
;
4316 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4317 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4318 return -TARGET_EFAULT
;
4321 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4325 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4326 if (host_to_target_msginfo(ptr
, &msginfo
))
4327 return -TARGET_EFAULT
;
4334 struct target_msgbuf
{
4339 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4340 ssize_t msgsz
, int msgflg
)
4342 struct target_msgbuf
*target_mb
;
4343 struct msgbuf
*host_mb
;
4347 return -TARGET_EINVAL
;
4350 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4351 return -TARGET_EFAULT
;
4352 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4354 unlock_user_struct(target_mb
, msgp
, 0);
4355 return -TARGET_ENOMEM
;
4357 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4358 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4359 ret
= -TARGET_ENOSYS
;
4361 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4364 if (ret
== -TARGET_ENOSYS
) {
4366 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4369 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4375 unlock_user_struct(target_mb
, msgp
, 0);
4381 #if defined(__sparc__)
4382 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4383 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4384 #elif defined(__s390x__)
4385 /* The s390 sys_ipc variant has only five parameters. */
4386 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4387 ((long int[]){(long int)__msgp, __msgtyp})
4389 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4390 ((long int[]){(long int)__msgp, __msgtyp}), 0
4394 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4395 ssize_t msgsz
, abi_long msgtyp
,
4398 struct target_msgbuf
*target_mb
;
4400 struct msgbuf
*host_mb
;
4404 return -TARGET_EINVAL
;
4407 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4408 return -TARGET_EFAULT
;
4410 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4412 ret
= -TARGET_ENOMEM
;
4415 ret
= -TARGET_ENOSYS
;
4417 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4420 if (ret
== -TARGET_ENOSYS
) {
4421 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4422 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4427 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4428 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4429 if (!target_mtext
) {
4430 ret
= -TARGET_EFAULT
;
4433 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4434 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4437 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4441 unlock_user_struct(target_mb
, msgp
, 1);
4446 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4447 abi_ulong target_addr
)
4449 struct target_shmid_ds
*target_sd
;
4451 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4452 return -TARGET_EFAULT
;
4453 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4454 return -TARGET_EFAULT
;
4455 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4456 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4457 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4458 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4459 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4460 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4461 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4462 unlock_user_struct(target_sd
, target_addr
, 0);
4466 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4467 struct shmid_ds
*host_sd
)
4469 struct target_shmid_ds
*target_sd
;
4471 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4472 return -TARGET_EFAULT
;
4473 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4474 return -TARGET_EFAULT
;
4475 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4476 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4477 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4478 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4479 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4480 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4481 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4482 unlock_user_struct(target_sd
, target_addr
, 1);
4486 struct target_shminfo
{
4494 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4495 struct shminfo
*host_shminfo
)
4497 struct target_shminfo
*target_shminfo
;
4498 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4499 return -TARGET_EFAULT
;
4500 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4501 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4502 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4503 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4504 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4505 unlock_user_struct(target_shminfo
, target_addr
, 1);
4509 struct target_shm_info
{
4514 abi_ulong swap_attempts
;
4515 abi_ulong swap_successes
;
4518 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4519 struct shm_info
*host_shm_info
)
4521 struct target_shm_info
*target_shm_info
;
4522 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4523 return -TARGET_EFAULT
;
4524 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4525 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4526 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4527 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4528 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4529 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4530 unlock_user_struct(target_shm_info
, target_addr
, 1);
4534 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4536 struct shmid_ds dsarg
;
4537 struct shminfo shminfo
;
4538 struct shm_info shm_info
;
4539 abi_long ret
= -TARGET_EINVAL
;
4547 if (target_to_host_shmid_ds(&dsarg
, buf
))
4548 return -TARGET_EFAULT
;
4549 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4550 if (host_to_target_shmid_ds(buf
, &dsarg
))
4551 return -TARGET_EFAULT
;
4554 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4555 if (host_to_target_shminfo(buf
, &shminfo
))
4556 return -TARGET_EFAULT
;
4559 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4560 if (host_to_target_shm_info(buf
, &shm_info
))
4561 return -TARGET_EFAULT
;
4566 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4573 #ifndef TARGET_FORCE_SHMLBA
4574 /* For most architectures, SHMLBA is the same as the page size;
4575 * some architectures have larger values, in which case they should
4576 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4577 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4578 * and defining its own value for SHMLBA.
4580 * The kernel also permits SHMLBA to be set by the architecture to a
4581 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4582 * this means that addresses are rounded to the large size if
4583 * SHM_RND is set but addresses not aligned to that size are not rejected
4584 * as long as they are at least page-aligned. Since the only architecture
4585 * which uses this is ia64 this code doesn't provide for that oddity.
/* Default SHMLBA for targets without TARGET_FORCE_SHMLBA: one target page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
4593 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4594 int shmid
, abi_ulong shmaddr
, int shmflg
)
4598 struct shmid_ds shm_info
;
4602 /* find out the length of the shared memory segment */
4603 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4604 if (is_error(ret
)) {
4605 /* can't get length, bail out */
4609 shmlba
= target_shmlba(cpu_env
);
4611 if (shmaddr
& (shmlba
- 1)) {
4612 if (shmflg
& SHM_RND
) {
4613 shmaddr
&= ~(shmlba
- 1);
4615 return -TARGET_EINVAL
;
4618 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4619 return -TARGET_EINVAL
;
4625 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4627 abi_ulong mmap_start
;
4629 /* In order to use the host shmat, we need to honor host SHMLBA. */
4630 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4632 if (mmap_start
== -1) {
4634 host_raddr
= (void *)-1;
4636 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4637 shmflg
| SHM_REMAP
);
4640 if (host_raddr
== (void *)-1) {
4642 return get_errno((long)host_raddr
);
4644 raddr
=h2g((unsigned long)host_raddr
);
4646 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4647 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4648 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4650 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4651 if (!shm_regions
[i
].in_use
) {
4652 shm_regions
[i
].in_use
= true;
4653 shm_regions
[i
].start
= raddr
;
4654 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4664 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4671 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4672 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4673 shm_regions
[i
].in_use
= false;
4674 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4678 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
4685 #ifdef TARGET_NR_ipc
4686 /* ??? This only works with linear mappings. */
4687 /* do_ipc() must return target values and target errnos. */
4688 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4689 unsigned int call
, abi_long first
,
4690 abi_long second
, abi_long third
,
4691 abi_long ptr
, abi_long fifth
)
4696 version
= call
>> 16;
4701 ret
= do_semtimedop(first
, ptr
, second
, 0, false);
4703 case IPCOP_semtimedop
:
4705 * The s390 sys_ipc variant has only five parameters instead of six
4706 * (as for default variant) and the only difference is the handling of
4707 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4708 * to a struct timespec where the generic variant uses fifth parameter.
4710 #if defined(TARGET_S390X)
4711 ret
= do_semtimedop(first
, ptr
, second
, third
, TARGET_ABI_BITS
== 64);
4713 ret
= do_semtimedop(first
, ptr
, second
, fifth
, TARGET_ABI_BITS
== 64);
4718 ret
= get_errno(semget(first
, second
, third
));
4721 case IPCOP_semctl
: {
4722 /* The semun argument to semctl is passed by value, so dereference the
4725 get_user_ual(atptr
, ptr
);
4726 ret
= do_semctl(first
, second
, third
, atptr
);
4731 ret
= get_errno(msgget(first
, second
));
4735 ret
= do_msgsnd(first
, ptr
, second
, third
);
4739 ret
= do_msgctl(first
, second
, ptr
);
4746 struct target_ipc_kludge
{
4751 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4752 ret
= -TARGET_EFAULT
;
4756 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4758 unlock_user_struct(tmp
, ptr
, 0);
4762 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4771 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4772 if (is_error(raddr
))
4773 return get_errno(raddr
);
4774 if (put_user_ual(raddr
, third
))
4775 return -TARGET_EFAULT
;
4779 ret
= -TARGET_EINVAL
;
4784 ret
= do_shmdt(ptr
);
4788 /* IPC_* flag values are the same on all linux platforms */
4789 ret
= get_errno(shmget(first
, second
, third
));
4792 /* IPC_* and SHM_* command values are the same on all linux platforms */
4794 ret
= do_shmctl(first
, second
, ptr
);
4797 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4799 ret
= -TARGET_ENOSYS
;
4806 /* kernel structure types definitions */
4808 #define STRUCT(name, ...) STRUCT_ ## name,
4809 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4811 #include "syscall_types.h"
4815 #undef STRUCT_SPECIAL
4817 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4818 #define STRUCT_SPECIAL(name)
4819 #include "syscall_types.h"
4821 #undef STRUCT_SPECIAL
4823 #define MAX_STRUCT_SIZE 4096
4825 #ifdef CONFIG_FIEMAP
4826 /* So fiemap access checks don't overflow on 32 bit systems.
4827 * This is very slightly smaller than the limit imposed by
4828 * the underlying kernel.
4830 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4831 / sizeof(struct fiemap_extent))
4833 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4834 int fd
, int cmd
, abi_long arg
)
4836 /* The parameter for this ioctl is a struct fiemap followed
4837 * by an array of struct fiemap_extent whose size is set
4838 * in fiemap->fm_extent_count. The array is filled in by the
4841 int target_size_in
, target_size_out
;
4843 const argtype
*arg_type
= ie
->arg_type
;
4844 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4847 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4851 assert(arg_type
[0] == TYPE_PTR
);
4852 assert(ie
->access
== IOC_RW
);
4854 target_size_in
= thunk_type_size(arg_type
, 0);
4855 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4857 return -TARGET_EFAULT
;
4859 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4860 unlock_user(argptr
, arg
, 0);
4861 fm
= (struct fiemap
*)buf_temp
;
4862 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4863 return -TARGET_EINVAL
;
4866 outbufsz
= sizeof (*fm
) +
4867 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4869 if (outbufsz
> MAX_STRUCT_SIZE
) {
4870 /* We can't fit all the extents into the fixed size buffer.
4871 * Allocate one that is large enough and use it instead.
4873 fm
= g_try_malloc(outbufsz
);
4875 return -TARGET_ENOMEM
;
4877 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4880 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4881 if (!is_error(ret
)) {
4882 target_size_out
= target_size_in
;
4883 /* An extent_count of 0 means we were only counting the extents
4884 * so there are no structs to copy
4886 if (fm
->fm_extent_count
!= 0) {
4887 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4889 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4891 ret
= -TARGET_EFAULT
;
4893 /* Convert the struct fiemap */
4894 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4895 if (fm
->fm_extent_count
!= 0) {
4896 p
= argptr
+ target_size_in
;
4897 /* ...and then all the struct fiemap_extents */
4898 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4899 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4904 unlock_user(argptr
, arg
, target_size_out
);
4914 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4915 int fd
, int cmd
, abi_long arg
)
4917 const argtype
*arg_type
= ie
->arg_type
;
4921 struct ifconf
*host_ifconf
;
4923 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4924 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4925 int target_ifreq_size
;
4930 abi_long target_ifc_buf
;
4934 assert(arg_type
[0] == TYPE_PTR
);
4935 assert(ie
->access
== IOC_RW
);
4938 target_size
= thunk_type_size(arg_type
, 0);
4940 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4942 return -TARGET_EFAULT
;
4943 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4944 unlock_user(argptr
, arg
, 0);
4946 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4947 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4948 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4950 if (target_ifc_buf
!= 0) {
4951 target_ifc_len
= host_ifconf
->ifc_len
;
4952 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4953 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4955 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4956 if (outbufsz
> MAX_STRUCT_SIZE
) {
4958 * We can't fit all the extents into the fixed size buffer.
4959 * Allocate one that is large enough and use it instead.
4961 host_ifconf
= malloc(outbufsz
);
4963 return -TARGET_ENOMEM
;
4965 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4968 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4970 host_ifconf
->ifc_len
= host_ifc_len
;
4972 host_ifc_buf
= NULL
;
4974 host_ifconf
->ifc_buf
= host_ifc_buf
;
4976 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4977 if (!is_error(ret
)) {
4978 /* convert host ifc_len to target ifc_len */
4980 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4981 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4982 host_ifconf
->ifc_len
= target_ifc_len
;
4984 /* restore target ifc_buf */
4986 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4988 /* copy struct ifconf to target user */
4990 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4992 return -TARGET_EFAULT
;
4993 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4994 unlock_user(argptr
, arg
, target_size
);
4996 if (target_ifc_buf
!= 0) {
4997 /* copy ifreq[] to target user */
4998 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4999 for (i
= 0; i
< nb_ifreq
; i
++) {
5000 thunk_convert(argptr
+ i
* target_ifreq_size
,
5001 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5002 ifreq_arg_type
, THUNK_TARGET
);
5004 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5015 #if defined(CONFIG_USBFS)
5016 #if HOST_LONG_BITS > 64
5017 #error USBDEVFS thunks do not support >64 bit hosts yet.
5020 uint64_t target_urb_adr
;
5021 uint64_t target_buf_adr
;
5022 char *target_buf_ptr
;
5023 struct usbdevfs_urb host_urb
;
/*
 * Return the process-wide hash table that maps guest URB addresses
 * (64-bit keys, hashed with g_int64_hash) to their struct live_urb
 * wrappers, creating it lazily on first use.
 */
5026 static GHashTable
*usbdevfs_urb_hashtable(void)
5028 static GHashTable
*urb_hashtable
;
5030 if (!urb_hashtable
) {
5031 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
5033 return urb_hashtable
;
/*
 * Track a submitted URB.  Key and value are both the live_urb itself;
 * g_int64_hash reads the first 8 bytes of the key, presumably the
 * guest URB address at the start of struct live_urb -- see the matching
 * urb_hashtable_lookup(), which passes &target_urb_adr as the key.
 */
5036 static void urb_hashtable_insert(struct live_urb
*urb
)
5038 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5039 g_hash_table_insert(urb_hashtable
, urb
, urb
);
/*
 * Find the live_urb previously inserted for a guest URB address.
 * Returns NULL when that address was never submitted (or was already
 * removed).
 */
5042 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
5044 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5045 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
/* Forget a URB once it has been reaped or discarded. */
5048 static void urb_hashtable_remove(struct live_urb
*urb
)
5050 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5051 g_hash_table_remove(urb_hashtable
, urb
);
5055 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5056 int fd
, int cmd
, abi_long arg
)
5058 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
5059 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
5060 struct live_urb
*lurb
;
5064 uintptr_t target_urb_adr
;
5067 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
5069 memset(buf_temp
, 0, sizeof(uint64_t));
5070 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5071 if (is_error(ret
)) {
5075 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
5076 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
5077 if (!lurb
->target_urb_adr
) {
5078 return -TARGET_EFAULT
;
5080 urb_hashtable_remove(lurb
);
5081 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5082 lurb
->host_urb
.buffer_length
);
5083 lurb
->target_buf_ptr
= NULL
;
5085 /* restore the guest buffer pointer */
5086 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5088 /* update the guest urb struct */
5089 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5092 return -TARGET_EFAULT
;
5094 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5095 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5097 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5098 /* write back the urb handle */
5099 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5102 return -TARGET_EFAULT
;
5105 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5106 target_urb_adr
= lurb
->target_urb_adr
;
5107 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5108 unlock_user(argptr
, arg
, target_size
);
/*
 * USBDEVFS_DISCARDURB: the guest identifies the URB by its own guest
 * address, so translate that back to the host-side usbdevfs_urb we
 * submitted on its behalf.  buf_temp is unused: no payload conversion
 * is required for this ioctl.
 */
5115 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5116 uint8_t *buf_temp
__attribute__((unused
)),
5117 int fd
, int cmd
, abi_long arg
)
5119 struct live_urb
*lurb
;
5121 /* map target address back to host URB with metadata. */
5122 lurb
= urb_hashtable_lookup(arg
);
/* No match: the guest passed a URB address we never submitted. */
5124 return -TARGET_EFAULT
;
5126 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5130 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5131 int fd
, int cmd
, abi_long arg
)
5133 const argtype
*arg_type
= ie
->arg_type
;
5138 struct live_urb
*lurb
;
5141 * each submitted URB needs to map to a unique ID for the
5142 * kernel, and that unique ID needs to be a pointer to
5143 * host memory. hence, we need to malloc for each URB.
5144 * isochronous transfers have a variable length struct.
5147 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5149 /* construct host copy of urb and metadata */
5150 lurb
= g_try_malloc0(sizeof(struct live_urb
));
5152 return -TARGET_ENOMEM
;
5155 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5158 return -TARGET_EFAULT
;
5160 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5161 unlock_user(argptr
, arg
, 0);
5163 lurb
->target_urb_adr
= arg
;
5164 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5166 /* buffer space used depends on endpoint type so lock the entire buffer */
5167 /* control type urbs should check the buffer contents for true direction */
5168 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5169 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5170 lurb
->host_urb
.buffer_length
, 1);
5171 if (lurb
->target_buf_ptr
== NULL
) {
5173 return -TARGET_EFAULT
;
5176 /* update buffer pointer in host copy */
5177 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5179 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5180 if (is_error(ret
)) {
5181 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5184 urb_hashtable_insert(lurb
);
5189 #endif /* CONFIG_USBFS */
5191 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5192 int cmd
, abi_long arg
)
5195 struct dm_ioctl
*host_dm
;
5196 abi_long guest_data
;
5197 uint32_t guest_data_size
;
5199 const argtype
*arg_type
= ie
->arg_type
;
5201 void *big_buf
= NULL
;
5205 target_size
= thunk_type_size(arg_type
, 0);
5206 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5208 ret
= -TARGET_EFAULT
;
5211 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5212 unlock_user(argptr
, arg
, 0);
5214 /* buf_temp is too small, so fetch things into a bigger buffer */
5215 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5216 memcpy(big_buf
, buf_temp
, target_size
);
5220 guest_data
= arg
+ host_dm
->data_start
;
5221 if ((guest_data
- arg
) < 0) {
5222 ret
= -TARGET_EINVAL
;
5225 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5226 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5228 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5230 ret
= -TARGET_EFAULT
;
5234 switch (ie
->host_cmd
) {
5236 case DM_LIST_DEVICES
:
5239 case DM_DEV_SUSPEND
:
5242 case DM_TABLE_STATUS
:
5243 case DM_TABLE_CLEAR
:
5245 case DM_LIST_VERSIONS
:
5249 case DM_DEV_SET_GEOMETRY
:
5250 /* data contains only strings */
5251 memcpy(host_data
, argptr
, guest_data_size
);
5254 memcpy(host_data
, argptr
, guest_data_size
);
5255 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5259 void *gspec
= argptr
;
5260 void *cur_data
= host_data
;
5261 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5262 int spec_size
= thunk_type_size(arg_type
, 0);
5265 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5266 struct dm_target_spec
*spec
= cur_data
;
5270 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5271 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5273 spec
->next
= sizeof(*spec
) + slen
;
5274 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5276 cur_data
+= spec
->next
;
5281 ret
= -TARGET_EINVAL
;
5282 unlock_user(argptr
, guest_data
, 0);
5285 unlock_user(argptr
, guest_data
, 0);
5287 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5288 if (!is_error(ret
)) {
5289 guest_data
= arg
+ host_dm
->data_start
;
5290 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5291 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5292 switch (ie
->host_cmd
) {
5297 case DM_DEV_SUSPEND
:
5300 case DM_TABLE_CLEAR
:
5302 case DM_DEV_SET_GEOMETRY
:
5303 /* no return data */
5305 case DM_LIST_DEVICES
:
5307 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5308 uint32_t remaining_data
= guest_data_size
;
5309 void *cur_data
= argptr
;
5310 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5311 int nl_size
= 12; /* can't use thunk_size due to alignment */
5314 uint32_t next
= nl
->next
;
5316 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5318 if (remaining_data
< nl
->next
) {
5319 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5322 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5323 strcpy(cur_data
+ nl_size
, nl
->name
);
5324 cur_data
+= nl
->next
;
5325 remaining_data
-= nl
->next
;
5329 nl
= (void*)nl
+ next
;
5334 case DM_TABLE_STATUS
:
5336 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5337 void *cur_data
= argptr
;
5338 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5339 int spec_size
= thunk_type_size(arg_type
, 0);
5342 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5343 uint32_t next
= spec
->next
;
5344 int slen
= strlen((char*)&spec
[1]) + 1;
5345 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5346 if (guest_data_size
< spec
->next
) {
5347 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5350 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5351 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5352 cur_data
= argptr
+ spec
->next
;
5353 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5359 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5360 int count
= *(uint32_t*)hdata
;
5361 uint64_t *hdev
= hdata
+ 8;
5362 uint64_t *gdev
= argptr
+ 8;
5365 *(uint32_t*)argptr
= tswap32(count
);
5366 for (i
= 0; i
< count
; i
++) {
5367 *gdev
= tswap64(*hdev
);
5373 case DM_LIST_VERSIONS
:
5375 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5376 uint32_t remaining_data
= guest_data_size
;
5377 void *cur_data
= argptr
;
5378 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5379 int vers_size
= thunk_type_size(arg_type
, 0);
5382 uint32_t next
= vers
->next
;
5384 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5386 if (remaining_data
< vers
->next
) {
5387 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5390 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5391 strcpy(cur_data
+ vers_size
, vers
->name
);
5392 cur_data
+= vers
->next
;
5393 remaining_data
-= vers
->next
;
5397 vers
= (void*)vers
+ next
;
5402 unlock_user(argptr
, guest_data
, 0);
5403 ret
= -TARGET_EINVAL
;
5406 unlock_user(argptr
, guest_data
, guest_data_size
);
5408 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5410 ret
= -TARGET_EFAULT
;
5413 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5414 unlock_user(argptr
, arg
, target_size
);
5421 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5422 int cmd
, abi_long arg
)
5426 const argtype
*arg_type
= ie
->arg_type
;
5427 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5430 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5431 struct blkpg_partition host_part
;
5433 /* Read and convert blkpg */
5435 target_size
= thunk_type_size(arg_type
, 0);
5436 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5438 ret
= -TARGET_EFAULT
;
5441 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5442 unlock_user(argptr
, arg
, 0);
5444 switch (host_blkpg
->op
) {
5445 case BLKPG_ADD_PARTITION
:
5446 case BLKPG_DEL_PARTITION
:
5447 /* payload is struct blkpg_partition */
5450 /* Unknown opcode */
5451 ret
= -TARGET_EINVAL
;
5455 /* Read and convert blkpg->data */
5456 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5457 target_size
= thunk_type_size(part_arg_type
, 0);
5458 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5460 ret
= -TARGET_EFAULT
;
5463 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5464 unlock_user(argptr
, arg
, 0);
5466 /* Swizzle the data pointer to our local copy and call! */
5467 host_blkpg
->data
= &host_part
;
5468 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5474 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5475 int fd
, int cmd
, abi_long arg
)
5477 const argtype
*arg_type
= ie
->arg_type
;
5478 const StructEntry
*se
;
5479 const argtype
*field_types
;
5480 const int *dst_offsets
, *src_offsets
;
5483 abi_ulong
*target_rt_dev_ptr
= NULL
;
5484 unsigned long *host_rt_dev_ptr
= NULL
;
5488 assert(ie
->access
== IOC_W
);
5489 assert(*arg_type
== TYPE_PTR
);
5491 assert(*arg_type
== TYPE_STRUCT
);
5492 target_size
= thunk_type_size(arg_type
, 0);
5493 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5495 return -TARGET_EFAULT
;
5498 assert(*arg_type
== (int)STRUCT_rtentry
);
5499 se
= struct_entries
+ *arg_type
++;
5500 assert(se
->convert
[0] == NULL
);
5501 /* convert struct here to be able to catch rt_dev string */
5502 field_types
= se
->field_types
;
5503 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5504 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5505 for (i
= 0; i
< se
->nb_fields
; i
++) {
5506 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5507 assert(*field_types
== TYPE_PTRVOID
);
5508 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5509 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5510 if (*target_rt_dev_ptr
!= 0) {
5511 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5512 tswapal(*target_rt_dev_ptr
));
5513 if (!*host_rt_dev_ptr
) {
5514 unlock_user(argptr
, arg
, 0);
5515 return -TARGET_EFAULT
;
5518 *host_rt_dev_ptr
= 0;
5523 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5524 argptr
+ src_offsets
[i
],
5525 field_types
, THUNK_HOST
);
5527 unlock_user(argptr
, arg
, 0);
5529 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5531 assert(host_rt_dev_ptr
!= NULL
);
5532 assert(target_rt_dev_ptr
!= NULL
);
5533 if (*host_rt_dev_ptr
!= 0) {
5534 unlock_user((void *)*host_rt_dev_ptr
,
5535 *target_rt_dev_ptr
, 0);
/*
 * KDSIGACCEPT takes a signal number as its argument, so convert the
 * guest signal number to the host's before forwarding the ioctl.
 */
5540 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5541 int fd
, int cmd
, abi_long arg
)
5543 int sig
= target_to_host_signal(arg
);
5544 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
/*
 * SIOCGSTAMP: read the last-packet timestamp from the host socket,
 * then copy it out in whichever layout the guest requested -- the old
 * 32-bit struct timeval for TARGET_SIOCGSTAMP_OLD, otherwise the
 * 64-bit variant.
 */
5547 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5548 int fd
, int cmd
, abi_long arg
)
5553 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
))
;
5554 if (is_error(ret
)) {
5558 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5559 if (copy_to_user_timeval(arg
, &tv
)) {
5560 return -TARGET_EFAULT
;
5563 if (copy_to_user_timeval64(arg
, &tv
)) {
5564 return -TARGET_EFAULT
;
/*
 * SIOCGSTAMPNS: nanosecond-resolution sibling of SIOCGSTAMP.  Read the
 * timestamp as a host struct timespec, then write it back to the guest
 * in the old 32-bit timespec layout for TARGET_SIOCGSTAMPNS_OLD,
 * otherwise the 64-bit layout.
 */
5571 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5572 int fd
, int cmd
, abi_long arg
)
5577 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5578 if (is_error(ret
)) {
5582 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5583 if (host_to_target_timespec(arg
, &ts
)) {
5584 return -TARGET_EFAULT
;
5587 if (host_to_target_timespec64(arg
, &ts
)) {
5588 return -TARGET_EFAULT
;
/*
 * TIOCGPTPEER: the argument is a set of open(2)-style flags, so remap
 * the guest's flag bits to the host's via fcntl_flags_tbl before
 * forwarding.
 */
5596 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5597 int fd
, int cmd
, abi_long arg
)
5599 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5600 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
/*
 * Release the three guest string buffers (name/date/desc) locked by
 * target_to_host_drmversion().  When 'copy' is true the bytes the host
 * ioctl wrote are copied back to the guest (length 0 means "unlock
 * without writeback").
 */
5606 static void unlock_drm_version(struct drm_version
*host_ver
,
5607 struct target_drm_version
*target_ver
,
5610 unlock_user(host_ver
->name
, target_ver
->name
,
5611 copy
? host_ver
->name_len
: 0);
5612 unlock_user(host_ver
->date
, target_ver
->date
,
5613 copy
? host_ver
->date_len
: 0);
5614 unlock_user(host_ver
->desc
, target_ver
->desc
,
5615 copy
? host_ver
->desc_len
: 0);
5618 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5619 struct target_drm_version
*target_ver
)
5621 memset(host_ver
, 0, sizeof(*host_ver
));
5623 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5624 if (host_ver
->name_len
) {
5625 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5626 target_ver
->name_len
, 0);
5627 if (!host_ver
->name
) {
5632 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5633 if (host_ver
->date_len
) {
5634 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5635 target_ver
->date_len
, 0);
5636 if (!host_ver
->date
) {
5641 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5642 if (host_ver
->desc_len
) {
5643 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5644 target_ver
->desc_len
, 0);
5645 if (!host_ver
->desc
) {
5652 unlock_drm_version(host_ver
, target_ver
, false);
/*
 * Copy the host drm_version results back into the guest structure
 * (__put_user handles any byte swapping), then copy back and unlock
 * the three string buffers via unlock_drm_version(..., true).
 */
5656 static inline void host_to_target_drmversion(
5657 struct target_drm_version
*target_ver
,
5658 struct drm_version
*host_ver
)
5660 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5661 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5662 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5663 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5664 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5665 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5666 unlock_drm_version(host_ver
, target_ver
, true);
5669 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5670 int fd
, int cmd
, abi_long arg
)
5672 struct drm_version
*ver
;
5673 struct target_drm_version
*target_ver
;
5676 switch (ie
->host_cmd
) {
5677 case DRM_IOCTL_VERSION
:
5678 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5679 return -TARGET_EFAULT
;
5681 ver
= (struct drm_version
*)buf_temp
;
5682 ret
= target_to_host_drmversion(ver
, target_ver
);
5683 if (!is_error(ret
)) {
5684 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5685 if (is_error(ret
)) {
5686 unlock_drm_version(ver
, target_ver
, false);
5688 host_to_target_drmversion(target_ver
, ver
);
5691 unlock_user_struct(target_ver
, arg
, 0);
5694 return -TARGET_ENOSYS
;
/*
 * DRM_IOCTL_I915_GETPARAM: the kernel writes the result through
 * gparam->value, so point that at a local int for the host call and
 * store the result back to the guest's value pointer with put_user_s32
 * afterwards.
 */
5697 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5698 struct drm_i915_getparam
*gparam
,
5699 int fd
, abi_long arg
)
5703 struct target_drm_i915_getparam
*target_gparam
;
5705 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5706 return -TARGET_EFAULT
;
5709 __get_user(gparam
->param
, &target_gparam
->param
);
5710 gparam
->value
= &value
;
5711 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
/* Write the host-produced value out to the guest's value pointer. */
5712 put_user_s32(value
, target_gparam
->value
);
5714 unlock_user_struct(target_gparam
, arg
, 0);
/*
 * Dispatcher for i915 DRM ioctls: only DRM_IOCTL_I915_GETPARAM is
 * implemented; everything else reports -TARGET_ENOSYS.
 */
5718 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5719 int fd
, int cmd
, abi_long arg
)
5721 switch (ie
->host_cmd
) {
5722 case DRM_IOCTL_I915_GETPARAM
:
5723 return do_ioctl_drm_i915_getparam(ie
,
5724 (struct drm_i915_getparam
*)buf_temp
,
5727 return -TARGET_ENOSYS
;
5733 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5734 int fd
, int cmd
, abi_long arg
)
5736 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5737 struct tun_filter
*target_filter
;
5740 assert(ie
->access
== IOC_W
);
5742 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5743 if (!target_filter
) {
5744 return -TARGET_EFAULT
;
5746 filter
->flags
= tswap16(target_filter
->flags
);
5747 filter
->count
= tswap16(target_filter
->count
);
5748 unlock_user(target_filter
, arg
, 0);
5750 if (filter
->count
) {
5751 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5753 return -TARGET_EFAULT
;
5756 target_addr
= lock_user(VERIFY_READ
,
5757 arg
+ offsetof(struct tun_filter
, addr
),
5758 filter
->count
* ETH_ALEN
, 1);
5760 return -TARGET_EFAULT
;
5762 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5763 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5766 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5769 IOCTLEntry ioctl_entries
[] = {
5770 #define IOCTL(cmd, access, ...) \
5771 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5772 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5773 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5774 #define IOCTL_IGNORE(cmd) \
5775 { TARGET_ ## cmd, 0, #cmd },
5780 /* ??? Implement proper locking for ioctls. */
5781 /* do_ioctl() Must return target values and target errnos. */
5782 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5784 const IOCTLEntry
*ie
;
5785 const argtype
*arg_type
;
5787 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5793 if (ie
->target_cmd
== 0) {
5795 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5796 return -TARGET_ENOSYS
;
5798 if (ie
->target_cmd
== cmd
)
5802 arg_type
= ie
->arg_type
;
5804 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5805 } else if (!ie
->host_cmd
) {
5806 /* Some architectures define BSD ioctls in their headers
5807 that are not implemented in Linux. */
5808 return -TARGET_ENOSYS
;
5811 switch(arg_type
[0]) {
5814 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5820 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5824 target_size
= thunk_type_size(arg_type
, 0);
5825 switch(ie
->access
) {
5827 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5828 if (!is_error(ret
)) {
5829 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5831 return -TARGET_EFAULT
;
5832 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5833 unlock_user(argptr
, arg
, target_size
);
5837 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5839 return -TARGET_EFAULT
;
5840 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5841 unlock_user(argptr
, arg
, 0);
5842 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5846 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5848 return -TARGET_EFAULT
;
5849 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5850 unlock_user(argptr
, arg
, 0);
5851 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5852 if (!is_error(ret
)) {
5853 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5855 return -TARGET_EFAULT
;
5856 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5857 unlock_user(argptr
, arg
, target_size
);
5863 qemu_log_mask(LOG_UNIMP
,
5864 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5865 (long)cmd
, arg_type
[0]);
5866 ret
= -TARGET_ENOSYS
;
5872 static const bitmask_transtbl iflag_tbl
[] = {
5873 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5874 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5875 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5876 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5877 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5878 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5879 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5880 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5881 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5882 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5883 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5884 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5885 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5886 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5887 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5891 static const bitmask_transtbl oflag_tbl
[] = {
5892 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5893 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5894 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5895 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5896 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5897 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5898 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5899 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5900 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5901 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5902 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5903 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5904 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5905 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5906 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5907 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5908 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5909 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5910 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5911 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5912 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5913 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5914 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5915 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5919 static const bitmask_transtbl cflag_tbl
[] = {
5920 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5921 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5922 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5923 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5924 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5925 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5926 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5927 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5928 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5929 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5930 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5931 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5932 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5933 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5934 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5935 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5936 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5937 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5938 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5939 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5940 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5941 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5942 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5943 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5944 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5945 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5946 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5947 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5948 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5949 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5950 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5954 static const bitmask_transtbl lflag_tbl
[] = {
5955 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5956 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5957 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5958 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5959 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5960 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5961 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5962 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5963 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5964 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5965 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5966 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5967 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5968 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5969 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5970 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5974 static void target_to_host_termios (void *dst
, const void *src
)
5976 struct host_termios
*host
= dst
;
5977 const struct target_termios
*target
= src
;
5980 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5982 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5984 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5986 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5987 host
->c_line
= target
->c_line
;
5989 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5990 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5991 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5992 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5993 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5994 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5995 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5996 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5997 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5998 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5999 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
6000 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
6001 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
6002 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
6003 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
6004 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
6005 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
6006 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
6009 static void host_to_target_termios (void *dst
, const void *src
)
6011 struct target_termios
*target
= dst
;
6012 const struct host_termios
*host
= src
;
6015 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
6017 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
6019 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
6021 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
6022 target
->c_line
= host
->c_line
;
6024 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
6025 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
6026 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
6027 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
6028 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
6029 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
6030 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
6031 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
6032 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
6033 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
6034 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6035 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6036 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6037 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6038 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6039 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6040 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6041 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6044 static const StructEntry struct_termios_def
= {
6045 .convert
= { host_to_target_termios
, target_to_host_termios
},
6046 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6047 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6048 .print
= print_termios
,
6051 static bitmask_transtbl mmap_flags_tbl
[] = {
6052 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6053 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6054 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6055 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6056 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6057 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6058 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6059 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6060 MAP_DENYWRITE
, MAP_DENYWRITE
},
6061 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6062 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6063 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6064 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6065 MAP_NORESERVE
, MAP_NORESERVE
},
6066 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6067 /* MAP_STACK had been ignored by the kernel for quite some time.
6068 Recognize it for the target insofar as we do not want to pass
6069 it through to the host. */
6070 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6075 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6076 * TARGET_I386 is defined if TARGET_X86_64 is defined
6078 #if defined(TARGET_I386)
6080 /* NOTE: there is really one LDT for all the threads */
6081 static uint8_t *ldt_table
;
/*
 * modify_ldt(2) "read" path: copy the emulated LDT contents out to the
 * guest buffer at 'ptr'.
 * NOTE(review): assumes 'size' is clamped when it exceeds 'bytecount'
 * -- confirm against the full source.
 */
6083 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6090 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6091 if (size
> bytecount
)
6093 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6095 return -TARGET_EFAULT
;
6096 /* ??? Should this be byteswapped? */
6097 memcpy(p
, ldt_table
, size
);
6098 unlock_user(p
, ptr
, size
);
6102 /* XXX: add locking support */
6103 static abi_long
write_ldt(CPUX86State
*env
,
6104 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6106 struct target_modify_ldt_ldt_s ldt_info
;
6107 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6108 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6109 int seg_not_present
, useable
, lm
;
6110 uint32_t *lp
, entry_1
, entry_2
;
6112 if (bytecount
!= sizeof(ldt_info
))
6113 return -TARGET_EINVAL
;
6114 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6115 return -TARGET_EFAULT
;
6116 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6117 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6118 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6119 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6120 unlock_user_struct(target_ldt_info
, ptr
, 0);
6122 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6123 return -TARGET_EINVAL
;
6124 seg_32bit
= ldt_info
.flags
& 1;
6125 contents
= (ldt_info
.flags
>> 1) & 3;
6126 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6127 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6128 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6129 useable
= (ldt_info
.flags
>> 6) & 1;
6133 lm
= (ldt_info
.flags
>> 7) & 1;
6135 if (contents
== 3) {
6137 return -TARGET_EINVAL
;
6138 if (seg_not_present
== 0)
6139 return -TARGET_EINVAL
;
6141 /* allocate the LDT */
6143 env
->ldt
.base
= target_mmap(0,
6144 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6145 PROT_READ
|PROT_WRITE
,
6146 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6147 if (env
->ldt
.base
== -1)
6148 return -TARGET_ENOMEM
;
6149 memset(g2h_untagged(env
->ldt
.base
), 0,
6150 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6151 env
->ldt
.limit
= 0xffff;
6152 ldt_table
= g2h_untagged(env
->ldt
.base
);
6155 /* NOTE: same code as Linux kernel */
6156 /* Allow LDTs to be cleared by the user. */
6157 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6160 read_exec_only
== 1 &&
6162 limit_in_pages
== 0 &&
6163 seg_not_present
== 1 &&
6171 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6172 (ldt_info
.limit
& 0x0ffff);
6173 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6174 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6175 (ldt_info
.limit
& 0xf0000) |
6176 ((read_exec_only
^ 1) << 9) |
6178 ((seg_not_present
^ 1) << 15) |
6180 (limit_in_pages
<< 23) |
6184 entry_2
|= (useable
<< 20);
6186 /* Install the new entry ... */
6188 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6189 lp
[0] = tswap32(entry_1
);
6190 lp
[1] = tswap32(entry_2
);
6194 /* specific and weird i386 syscalls */
6195 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6196 unsigned long bytecount
)
6202 ret
= read_ldt(ptr
, bytecount
);
6205 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6208 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6211 ret
= -TARGET_ENOSYS
;
6217 #if defined(TARGET_ABI32)
6218 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6220 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6221 struct target_modify_ldt_ldt_s ldt_info
;
6222 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6223 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6224 int seg_not_present
, useable
, lm
;
6225 uint32_t *lp
, entry_1
, entry_2
;
6228 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6229 if (!target_ldt_info
)
6230 return -TARGET_EFAULT
;
6231 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6232 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6233 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6234 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6235 if (ldt_info
.entry_number
== -1) {
6236 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6237 if (gdt_table
[i
] == 0) {
6238 ldt_info
.entry_number
= i
;
6239 target_ldt_info
->entry_number
= tswap32(i
);
6244 unlock_user_struct(target_ldt_info
, ptr
, 1);
6246 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6247 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6248 return -TARGET_EINVAL
;
6249 seg_32bit
= ldt_info
.flags
& 1;
6250 contents
= (ldt_info
.flags
>> 1) & 3;
6251 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6252 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6253 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6254 useable
= (ldt_info
.flags
>> 6) & 1;
6258 lm
= (ldt_info
.flags
>> 7) & 1;
6261 if (contents
== 3) {
6262 if (seg_not_present
== 0)
6263 return -TARGET_EINVAL
;
6266 /* NOTE: same code as Linux kernel */
6267 /* Allow LDTs to be cleared by the user. */
6268 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6269 if ((contents
== 0 &&
6270 read_exec_only
== 1 &&
6272 limit_in_pages
== 0 &&
6273 seg_not_present
== 1 &&
6281 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6282 (ldt_info
.limit
& 0x0ffff);
6283 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6284 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6285 (ldt_info
.limit
& 0xf0000) |
6286 ((read_exec_only
^ 1) << 9) |
6288 ((seg_not_present
^ 1) << 15) |
6290 (limit_in_pages
<< 23) |
6295 /* Install the new entry ... */
6297 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6298 lp
[0] = tswap32(entry_1
);
6299 lp
[1] = tswap32(entry_2
);
6303 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6305 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6306 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6307 uint32_t base_addr
, limit
, flags
;
6308 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6309 int seg_not_present
, useable
, lm
;
6310 uint32_t *lp
, entry_1
, entry_2
;
6312 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6313 if (!target_ldt_info
)
6314 return -TARGET_EFAULT
;
6315 idx
= tswap32(target_ldt_info
->entry_number
);
6316 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6317 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6318 unlock_user_struct(target_ldt_info
, ptr
, 1);
6319 return -TARGET_EINVAL
;
6321 lp
= (uint32_t *)(gdt_table
+ idx
);
6322 entry_1
= tswap32(lp
[0]);
6323 entry_2
= tswap32(lp
[1]);
6325 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6326 contents
= (entry_2
>> 10) & 3;
6327 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6328 seg_32bit
= (entry_2
>> 22) & 1;
6329 limit_in_pages
= (entry_2
>> 23) & 1;
6330 useable
= (entry_2
>> 20) & 1;
6334 lm
= (entry_2
>> 21) & 1;
6336 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6337 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6338 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6339 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6340 base_addr
= (entry_1
>> 16) |
6341 (entry_2
& 0xff000000) |
6342 ((entry_2
& 0xff) << 16);
6343 target_ldt_info
->base_addr
= tswapal(base_addr
);
6344 target_ldt_info
->limit
= tswap32(limit
);
6345 target_ldt_info
->flags
= tswap32(flags
);
6346 unlock_user_struct(target_ldt_info
, ptr
, 1);
6350 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6352 return -TARGET_ENOSYS
;
6355 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6362 case TARGET_ARCH_SET_GS
:
6363 case TARGET_ARCH_SET_FS
:
6364 if (code
== TARGET_ARCH_SET_GS
)
6368 cpu_x86_load_seg(env
, idx
, 0);
6369 env
->segs
[idx
].base
= addr
;
6371 case TARGET_ARCH_GET_GS
:
6372 case TARGET_ARCH_GET_FS
:
6373 if (code
== TARGET_ARCH_GET_GS
)
6377 val
= env
->segs
[idx
].base
;
6378 if (put_user(val
, addr
, abi_ulong
))
6379 ret
= -TARGET_EFAULT
;
6382 ret
= -TARGET_EINVAL
;
6387 #endif /* defined(TARGET_ABI32 */
6389 #endif /* defined(TARGET_I386) */
6391 #define NEW_STACK_SIZE 0x40000
6394 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6397 pthread_mutex_t mutex
;
6398 pthread_cond_t cond
;
6401 abi_ulong child_tidptr
;
6402 abi_ulong parent_tidptr
;
6406 static void *clone_func(void *arg
)
6408 new_thread_info
*info
= arg
;
6413 rcu_register_thread();
6414 tcg_register_thread();
6418 ts
= (TaskState
*)cpu
->opaque
;
6419 info
->tid
= sys_gettid();
6421 if (info
->child_tidptr
)
6422 put_user_u32(info
->tid
, info
->child_tidptr
);
6423 if (info
->parent_tidptr
)
6424 put_user_u32(info
->tid
, info
->parent_tidptr
);
6425 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6426 /* Enable signals. */
6427 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6428 /* Signal to the parent that we're ready. */
6429 pthread_mutex_lock(&info
->mutex
);
6430 pthread_cond_broadcast(&info
->cond
);
6431 pthread_mutex_unlock(&info
->mutex
);
6432 /* Wait until the parent has finished initializing the tls state. */
6433 pthread_mutex_lock(&clone_lock
);
6434 pthread_mutex_unlock(&clone_lock
);
6440 /* do_fork() Must return host values and target errnos (unlike most
6441 do_*() functions). */
6442 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6443 abi_ulong parent_tidptr
, target_ulong newtls
,
6444 abi_ulong child_tidptr
)
6446 CPUState
*cpu
= env_cpu(env
);
6450 CPUArchState
*new_env
;
6453 flags
&= ~CLONE_IGNORED_FLAGS
;
6455 /* Emulate vfork() with fork() */
6456 if (flags
& CLONE_VFORK
)
6457 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6459 if (flags
& CLONE_VM
) {
6460 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6461 new_thread_info info
;
6462 pthread_attr_t attr
;
6464 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6465 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6466 return -TARGET_EINVAL
;
6469 ts
= g_new0(TaskState
, 1);
6470 init_task_state(ts
);
6472 /* Grab a mutex so that thread setup appears atomic. */
6473 pthread_mutex_lock(&clone_lock
);
6475 /* we create a new CPU instance. */
6476 new_env
= cpu_copy(env
);
6477 /* Init regs that differ from the parent. */
6478 cpu_clone_regs_child(new_env
, newsp
, flags
);
6479 cpu_clone_regs_parent(env
, flags
);
6480 new_cpu
= env_cpu(new_env
);
6481 new_cpu
->opaque
= ts
;
6482 ts
->bprm
= parent_ts
->bprm
;
6483 ts
->info
= parent_ts
->info
;
6484 ts
->signal_mask
= parent_ts
->signal_mask
;
6486 if (flags
& CLONE_CHILD_CLEARTID
) {
6487 ts
->child_tidptr
= child_tidptr
;
6490 if (flags
& CLONE_SETTLS
) {
6491 cpu_set_tls (new_env
, newtls
);
6494 memset(&info
, 0, sizeof(info
));
6495 pthread_mutex_init(&info
.mutex
, NULL
);
6496 pthread_mutex_lock(&info
.mutex
);
6497 pthread_cond_init(&info
.cond
, NULL
);
6499 if (flags
& CLONE_CHILD_SETTID
) {
6500 info
.child_tidptr
= child_tidptr
;
6502 if (flags
& CLONE_PARENT_SETTID
) {
6503 info
.parent_tidptr
= parent_tidptr
;
6506 ret
= pthread_attr_init(&attr
);
6507 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6508 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6509 /* It is not safe to deliver signals until the child has finished
6510 initializing, so temporarily block all signals. */
6511 sigfillset(&sigmask
);
6512 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6513 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6515 /* If this is our first additional thread, we need to ensure we
6516 * generate code for parallel execution and flush old translations.
6518 if (!parallel_cpus
) {
6519 parallel_cpus
= true;
6523 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6524 /* TODO: Free new CPU state if thread creation failed. */
6526 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6527 pthread_attr_destroy(&attr
);
6529 /* Wait for the child to initialize. */
6530 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6535 pthread_mutex_unlock(&info
.mutex
);
6536 pthread_cond_destroy(&info
.cond
);
6537 pthread_mutex_destroy(&info
.mutex
);
6538 pthread_mutex_unlock(&clone_lock
);
6540 /* if no CLONE_VM, we consider it is a fork */
6541 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6542 return -TARGET_EINVAL
;
6545 /* We can't support custom termination signals */
6546 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6547 return -TARGET_EINVAL
;
6550 if (block_signals()) {
6551 return -TARGET_ERESTARTSYS
;
6557 /* Child Process. */
6558 cpu_clone_regs_child(env
, newsp
, flags
);
6560 /* There is a race condition here. The parent process could
6561 theoretically read the TID in the child process before the child
6562 tid is set. This would require using either ptrace
6563 (not implemented) or having *_tidptr to point at a shared memory
6564 mapping. We can't repeat the spinlock hack used above because
6565 the child process gets its own copy of the lock. */
6566 if (flags
& CLONE_CHILD_SETTID
)
6567 put_user_u32(sys_gettid(), child_tidptr
);
6568 if (flags
& CLONE_PARENT_SETTID
)
6569 put_user_u32(sys_gettid(), parent_tidptr
);
6570 ts
= (TaskState
*)cpu
->opaque
;
6571 if (flags
& CLONE_SETTLS
)
6572 cpu_set_tls (env
, newtls
);
6573 if (flags
& CLONE_CHILD_CLEARTID
)
6574 ts
->child_tidptr
= child_tidptr
;
6576 cpu_clone_regs_parent(env
, flags
);
6583 /* warning : doesn't handle linux specific flags... */
6584 static int target_to_host_fcntl_cmd(int cmd
)
6589 case TARGET_F_DUPFD
:
6590 case TARGET_F_GETFD
:
6591 case TARGET_F_SETFD
:
6592 case TARGET_F_GETFL
:
6593 case TARGET_F_SETFL
:
6594 case TARGET_F_OFD_GETLK
:
6595 case TARGET_F_OFD_SETLK
:
6596 case TARGET_F_OFD_SETLKW
:
6599 case TARGET_F_GETLK
:
6602 case TARGET_F_SETLK
:
6605 case TARGET_F_SETLKW
:
6608 case TARGET_F_GETOWN
:
6611 case TARGET_F_SETOWN
:
6614 case TARGET_F_GETSIG
:
6617 case TARGET_F_SETSIG
:
6620 #if TARGET_ABI_BITS == 32
6621 case TARGET_F_GETLK64
:
6624 case TARGET_F_SETLK64
:
6627 case TARGET_F_SETLKW64
:
6631 case TARGET_F_SETLEASE
:
6634 case TARGET_F_GETLEASE
:
6637 #ifdef F_DUPFD_CLOEXEC
6638 case TARGET_F_DUPFD_CLOEXEC
:
6639 ret
= F_DUPFD_CLOEXEC
;
6642 case TARGET_F_NOTIFY
:
6646 case TARGET_F_GETOWN_EX
:
6651 case TARGET_F_SETOWN_EX
:
6656 case TARGET_F_SETPIPE_SZ
:
6659 case TARGET_F_GETPIPE_SZ
:
6664 case TARGET_F_ADD_SEALS
:
6667 case TARGET_F_GET_SEALS
:
6672 ret
= -TARGET_EINVAL
;
6676 #if defined(__powerpc64__)
6677 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6678 * is not supported by kernel. The glibc fcntl call actually adjusts
6679 * them to 5, 6 and 7 before making the syscall(). Since we make the
6680 * syscall directly, adjust to what is supported by the kernel.
6682 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6683 ret
-= F_GETLK64
- 5;
/* Shared case list for translating flock lock types in both directions;
 * TRANSTBL_CONVERT is defined by each user before expanding this. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6697 static int target_to_host_flock(int type
)
6699 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6701 #undef TRANSTBL_CONVERT
6702 return -TARGET_EINVAL
;
6705 static int host_to_target_flock(int type
)
6707 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6709 #undef TRANSTBL_CONVERT
6710 /* if we don't know how to convert the value coming
6711 * from the host we copy to the target field as-is
6716 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6717 abi_ulong target_flock_addr
)
6719 struct target_flock
*target_fl
;
6722 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6723 return -TARGET_EFAULT
;
6726 __get_user(l_type
, &target_fl
->l_type
);
6727 l_type
= target_to_host_flock(l_type
);
6731 fl
->l_type
= l_type
;
6732 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6733 __get_user(fl
->l_start
, &target_fl
->l_start
);
6734 __get_user(fl
->l_len
, &target_fl
->l_len
);
6735 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6736 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6740 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6741 const struct flock64
*fl
)
6743 struct target_flock
*target_fl
;
6746 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6747 return -TARGET_EFAULT
;
6750 l_type
= host_to_target_flock(fl
->l_type
);
6751 __put_user(l_type
, &target_fl
->l_type
);
6752 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6753 __put_user(fl
->l_start
, &target_fl
->l_start
);
6754 __put_user(fl
->l_len
, &target_fl
->l_len
);
6755 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6756 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6760 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6761 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6763 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6764 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6765 abi_ulong target_flock_addr
)
6767 struct target_oabi_flock64
*target_fl
;
6770 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6771 return -TARGET_EFAULT
;
6774 __get_user(l_type
, &target_fl
->l_type
);
6775 l_type
= target_to_host_flock(l_type
);
6779 fl
->l_type
= l_type
;
6780 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6781 __get_user(fl
->l_start
, &target_fl
->l_start
);
6782 __get_user(fl
->l_len
, &target_fl
->l_len
);
6783 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6784 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6788 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6789 const struct flock64
*fl
)
6791 struct target_oabi_flock64
*target_fl
;
6794 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6795 return -TARGET_EFAULT
;
6798 l_type
= host_to_target_flock(fl
->l_type
);
6799 __put_user(l_type
, &target_fl
->l_type
);
6800 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6801 __put_user(fl
->l_start
, &target_fl
->l_start
);
6802 __put_user(fl
->l_len
, &target_fl
->l_len
);
6803 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6804 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6809 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6810 abi_ulong target_flock_addr
)
6812 struct target_flock64
*target_fl
;
6815 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6816 return -TARGET_EFAULT
;
6819 __get_user(l_type
, &target_fl
->l_type
);
6820 l_type
= target_to_host_flock(l_type
);
6824 fl
->l_type
= l_type
;
6825 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6826 __get_user(fl
->l_start
, &target_fl
->l_start
);
6827 __get_user(fl
->l_len
, &target_fl
->l_len
);
6828 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6829 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6833 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6834 const struct flock64
*fl
)
6836 struct target_flock64
*target_fl
;
6839 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6840 return -TARGET_EFAULT
;
6843 l_type
= host_to_target_flock(fl
->l_type
);
6844 __put_user(l_type
, &target_fl
->l_type
);
6845 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6846 __put_user(fl
->l_start
, &target_fl
->l_start
);
6847 __put_user(fl
->l_len
, &target_fl
->l_len
);
6848 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6849 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6853 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6855 struct flock64 fl64
;
6857 struct f_owner_ex fox
;
6858 struct target_f_owner_ex
*target_fox
;
6861 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6863 if (host_cmd
== -TARGET_EINVAL
)
6867 case TARGET_F_GETLK
:
6868 ret
= copy_from_user_flock(&fl64
, arg
);
6872 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6874 ret
= copy_to_user_flock(arg
, &fl64
);
6878 case TARGET_F_SETLK
:
6879 case TARGET_F_SETLKW
:
6880 ret
= copy_from_user_flock(&fl64
, arg
);
6884 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6887 case TARGET_F_GETLK64
:
6888 case TARGET_F_OFD_GETLK
:
6889 ret
= copy_from_user_flock64(&fl64
, arg
);
6893 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6895 ret
= copy_to_user_flock64(arg
, &fl64
);
6898 case TARGET_F_SETLK64
:
6899 case TARGET_F_SETLKW64
:
6900 case TARGET_F_OFD_SETLK
:
6901 case TARGET_F_OFD_SETLKW
:
6902 ret
= copy_from_user_flock64(&fl64
, arg
);
6906 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6909 case TARGET_F_GETFL
:
6910 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6912 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6916 case TARGET_F_SETFL
:
6917 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6918 target_to_host_bitmask(arg
,
6923 case TARGET_F_GETOWN_EX
:
6924 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6926 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6927 return -TARGET_EFAULT
;
6928 target_fox
->type
= tswap32(fox
.type
);
6929 target_fox
->pid
= tswap32(fox
.pid
);
6930 unlock_user_struct(target_fox
, arg
, 1);
6936 case TARGET_F_SETOWN_EX
:
6937 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6938 return -TARGET_EFAULT
;
6939 fox
.type
= tswap32(target_fox
->type
);
6940 fox
.pid
= tswap32(target_fox
->pid
);
6941 unlock_user_struct(target_fox
, arg
, 0);
6942 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6946 case TARGET_F_SETSIG
:
6947 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
6950 case TARGET_F_GETSIG
:
6951 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
6954 case TARGET_F_SETOWN
:
6955 case TARGET_F_GETOWN
:
6956 case TARGET_F_SETLEASE
:
6957 case TARGET_F_GETLEASE
:
6958 case TARGET_F_SETPIPE_SZ
:
6959 case TARGET_F_GETPIPE_SZ
:
6960 case TARGET_F_ADD_SEALS
:
6961 case TARGET_F_GET_SEALS
:
6962 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6966 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
/* 16-bit UID targets: clamp/widen between 16-bit and host uid_t ranges. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    /* -1 (the "no change" sentinel) must stay -1 after widening. */
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7012 #else /* !USE_UID16 */
7013 static inline int high2lowuid(int uid
)
7017 static inline int high2lowgid(int gid
)
7021 static inline int low2highuid(int uid
)
7025 static inline int low2highgid(int gid
)
7029 static inline int tswapid(int id
)
7034 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7036 #endif /* USE_UID16 */
7038 /* We must do direct syscalls for setting UID/GID, because we want to
7039 * implement the Linux system call semantics of "change only for this thread",
7040 * not the libc/POSIX semantics of "change for all threads in process".
7041 * (See http://ewontfix.com/17/ for more details.)
7042 * We use the 32-bit version of the syscalls if present; if it is not
7043 * then either the host architecture supports 32-bit UIDs natively with
7044 * the standard syscall, or the 16-bit UID is the best we can do.
7046 #ifdef __NR_setuid32
7047 #define __NR_sys_setuid __NR_setuid32
7049 #define __NR_sys_setuid __NR_setuid
7051 #ifdef __NR_setgid32
7052 #define __NR_sys_setgid __NR_setgid32
7054 #define __NR_sys_setgid __NR_setgid
7056 #ifdef __NR_setresuid32
7057 #define __NR_sys_setresuid __NR_setresuid32
7059 #define __NR_sys_setresuid __NR_setresuid
7061 #ifdef __NR_setresgid32
7062 #define __NR_sys_setresgid __NR_setresgid32
7064 #define __NR_sys_setresgid __NR_setresgid
7067 _syscall1(int, sys_setuid
, uid_t
, uid
)
7068 _syscall1(int, sys_setgid
, gid_t
, gid
)
7069 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7070 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7072 void syscall_init(void)
7075 const argtype
*arg_type
;
7079 thunk_init(STRUCT_MAX
);
7081 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7082 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7083 #include "syscall_types.h"
7085 #undef STRUCT_SPECIAL
7087 /* Build target_to_host_errno_table[] table from
7088 * host_to_target_errno_table[]. */
7089 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
7090 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
7093 /* we patch the ioctl size if necessary. We rely on the fact that
7094 no ioctl has all the bits at '1' in the size field */
7096 while (ie
->target_cmd
!= 0) {
7097 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7098 TARGET_IOC_SIZEMASK
) {
7099 arg_type
= ie
->arg_type
;
7100 if (arg_type
[0] != TYPE_PTR
) {
7101 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7106 size
= thunk_type_size(arg_type
, 0);
7107 ie
->target_cmd
= (ie
->target_cmd
&
7108 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7109 (size
<< TARGET_IOC_SIZESHIFT
);
7112 /* automatic consistency check if same arch */
7113 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7114 (defined(__x86_64__) && defined(TARGET_X86_64))
7115 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7116 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7117 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#ifdef TARGET_NR_truncate64
/*
 * Emulate truncate64: the 64-bit length is split across two registers
 * (arg2/arg3), shifted up one slot on ABIs that align register pairs.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * Emulate ftruncate64: same register-pair handling as
 * target_truncate64() but operating on a file descriptor.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Convert a target itimerspec (32/64-bit timespec pair) at target_addr
 * into a host struct itimerspec.  -TARGET_EFAULT on a bad address.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/*
 * 64-bit time_t variant of target_to_host_itimerspec(), reading a
 * target __kernel_itimerspec.
 */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * Convert a host struct itimerspec back to a target itimerspec at
 * target_addr.  -TARGET_EFAULT on a bad address.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/*
 * 64-bit time_t variant of host_to_target_itimerspec(), writing a
 * target __kernel_itimerspec.
 */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
7226 #if defined(TARGET_NR_adjtimex) || \
7227 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7228 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7229 abi_long target_addr
)
7231 struct target_timex
*target_tx
;
7233 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7234 return -TARGET_EFAULT
;
7237 __get_user(host_tx
->modes
, &target_tx
->modes
);
7238 __get_user(host_tx
->offset
, &target_tx
->offset
);
7239 __get_user(host_tx
->freq
, &target_tx
->freq
);
7240 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7241 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7242 __get_user(host_tx
->status
, &target_tx
->status
);
7243 __get_user(host_tx
->constant
, &target_tx
->constant
);
7244 __get_user(host_tx
->precision
, &target_tx
->precision
);
7245 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7246 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7247 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7248 __get_user(host_tx
->tick
, &target_tx
->tick
);
7249 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7250 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7251 __get_user(host_tx
->shift
, &target_tx
->shift
);
7252 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7253 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7254 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7255 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7256 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7257 __get_user(host_tx
->tai
, &target_tx
->tai
);
7259 unlock_user_struct(target_tx
, target_addr
, 0);
7263 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7264 struct timex
*host_tx
)
7266 struct target_timex
*target_tx
;
7268 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7269 return -TARGET_EFAULT
;
7272 __put_user(host_tx
->modes
, &target_tx
->modes
);
7273 __put_user(host_tx
->offset
, &target_tx
->offset
);
7274 __put_user(host_tx
->freq
, &target_tx
->freq
);
7275 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7276 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7277 __put_user(host_tx
->status
, &target_tx
->status
);
7278 __put_user(host_tx
->constant
, &target_tx
->constant
);
7279 __put_user(host_tx
->precision
, &target_tx
->precision
);
7280 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7281 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7282 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7283 __put_user(host_tx
->tick
, &target_tx
->tick
);
7284 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7285 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7286 __put_user(host_tx
->shift
, &target_tx
->shift
);
7287 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7288 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7289 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7290 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7291 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7292 __put_user(host_tx
->tai
, &target_tx
->tai
);
7294 unlock_user_struct(target_tx
, target_addr
, 1);
7300 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7301 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7302 abi_long target_addr
)
7304 struct target__kernel_timex
*target_tx
;
7306 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7307 offsetof(struct target__kernel_timex
,
7309 return -TARGET_EFAULT
;
7312 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7313 return -TARGET_EFAULT
;
7316 __get_user(host_tx
->modes
, &target_tx
->modes
);
7317 __get_user(host_tx
->offset
, &target_tx
->offset
);
7318 __get_user(host_tx
->freq
, &target_tx
->freq
);
7319 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7320 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7321 __get_user(host_tx
->status
, &target_tx
->status
);
7322 __get_user(host_tx
->constant
, &target_tx
->constant
);
7323 __get_user(host_tx
->precision
, &target_tx
->precision
);
7324 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7325 __get_user(host_tx
->tick
, &target_tx
->tick
);
7326 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7327 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7328 __get_user(host_tx
->shift
, &target_tx
->shift
);
7329 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7330 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7331 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7332 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7333 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7334 __get_user(host_tx
->tai
, &target_tx
->tai
);
7336 unlock_user_struct(target_tx
, target_addr
, 0);
7340 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7341 struct timex
*host_tx
)
7343 struct target__kernel_timex
*target_tx
;
7345 if (copy_to_user_timeval64(target_addr
+
7346 offsetof(struct target__kernel_timex
, time
),
7348 return -TARGET_EFAULT
;
7351 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7352 return -TARGET_EFAULT
;
7355 __put_user(host_tx
->modes
, &target_tx
->modes
);
7356 __put_user(host_tx
->offset
, &target_tx
->offset
);
7357 __put_user(host_tx
->freq
, &target_tx
->freq
);
7358 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7359 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7360 __put_user(host_tx
->status
, &target_tx
->status
);
7361 __put_user(host_tx
->constant
, &target_tx
->constant
);
7362 __put_user(host_tx
->precision
, &target_tx
->precision
);
7363 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7364 __put_user(host_tx
->tick
, &target_tx
->tick
);
7365 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7366 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7367 __put_user(host_tx
->shift
, &target_tx
->shift
);
7368 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7369 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7370 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7371 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7372 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7373 __put_user(host_tx
->tai
, &target_tx
->tai
);
7375 unlock_user_struct(target_tx
, target_addr
, 1);
7380 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7381 abi_ulong target_addr
)
7383 struct target_sigevent
*target_sevp
;
7385 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7386 return -TARGET_EFAULT
;
7389 /* This union is awkward on 64 bit systems because it has a 32 bit
7390 * integer and a pointer in it; we follow the conversion approach
7391 * used for handling sigval types in signal.c so the guest should get
7392 * the correct value back even if we did a 64 bit byteswap and it's
7393 * using the 32 bit integer.
7395 host_sevp
->sigev_value
.sival_ptr
=
7396 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7397 host_sevp
->sigev_signo
=
7398 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7399 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7400 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7402 unlock_user_struct(target_sevp
, target_addr
, 1);
7406 #if defined(TARGET_NR_mlockall)
7407 static inline int target_to_host_mlockall_arg(int arg
)
7411 if (arg
& TARGET_MCL_CURRENT
) {
7412 result
|= MCL_CURRENT
;
7414 if (arg
& TARGET_MCL_FUTURE
) {
7415 result
|= MCL_FUTURE
;
7418 if (arg
& TARGET_MCL_ONFAULT
) {
7419 result
|= MCL_ONFAULT
;
7427 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7428 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7429 defined(TARGET_NR_newfstatat))
7430 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7431 abi_ulong target_addr
,
7432 struct stat
*host_st
)
7434 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7435 if (((CPUARMState
*)cpu_env
)->eabi
) {
7436 struct target_eabi_stat64
*target_st
;
7438 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7439 return -TARGET_EFAULT
;
7440 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7441 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7442 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7443 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7444 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7446 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7447 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7448 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7449 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7450 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7451 __put_user(host_st
->st_size
, &target_st
->st_size
);
7452 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7453 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7454 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7455 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7456 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7457 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7458 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7459 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7460 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7462 unlock_user_struct(target_st
, target_addr
, 1);
7466 #if defined(TARGET_HAS_STRUCT_STAT64)
7467 struct target_stat64
*target_st
;
7469 struct target_stat
*target_st
;
7472 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7473 return -TARGET_EFAULT
;
7474 memset(target_st
, 0, sizeof(*target_st
));
7475 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7476 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7477 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7478 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7480 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7481 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7482 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7483 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7484 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7485 /* XXX: better use of kernel struct */
7486 __put_user(host_st
->st_size
, &target_st
->st_size
);
7487 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7488 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7489 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7490 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7491 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7492 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7493 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7494 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7495 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7497 unlock_user_struct(target_st
, target_addr
, 1);
7504 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7505 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7506 abi_ulong target_addr
)
7508 struct target_statx
*target_stx
;
7510 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7511 return -TARGET_EFAULT
;
7513 memset(target_stx
, 0, sizeof(*target_stx
));
7515 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7516 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7517 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7518 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7519 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7520 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7521 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7522 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7523 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7524 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7525 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7526 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7527 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7528 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7529 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7530 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7531 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7532 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7533 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7534 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7535 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7536 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7537 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7539 unlock_user_struct(target_stx
, target_addr
, 1);
/*
 * Dispatch to whichever raw futex syscall the host provides.  On a
 * 64-bit host __NR_futex always takes a 64-bit time_t.  On a 32-bit
 * host we prefer __NR_futex_time64 when timespec's tv_sec is 64-bit,
 * falling back to the legacy __NR_futex otherwise.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7570 static int do_safe_futex(int *uaddr
, int op
, int val
,
7571 const struct timespec
*timeout
, int *uaddr2
,
7574 #if HOST_LONG_BITS == 64
7575 #if defined(__NR_futex)
7576 /* always a 64-bit time_t, it doesn't define _time64 version */
7577 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7579 #else /* HOST_LONG_BITS == 64 */
7580 #if defined(__NR_futex_time64)
7581 if (sizeof(timeout
->tv_sec
) == 8) {
7582 /* _time64 function on 32bit arch */
7583 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7587 #if defined(__NR_futex)
7588 /* old function on 32bit arch */
7589 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7591 #endif /* HOST_LONG_BITS == 64 */
7592 return -TARGET_ENOSYS
;
7595 /* ??? Using host futex calls even when target atomic operations
7596 are not really atomic probably breaks things. However implementing
7597 futexes locally would make futexes shared between multiple processes
7598 tricky. However they're probably useless because guest atomic
7599 operations won't work either. */
7600 #if defined(TARGET_NR_futex)
7601 static int do_futex(CPUState
*cpu
, target_ulong uaddr
, int op
, int val
,
7602 target_ulong timeout
, target_ulong uaddr2
, int val3
)
7604 struct timespec ts
, *pts
;
7607 /* ??? We assume FUTEX_* constants are the same on both host
7609 #ifdef FUTEX_CMD_MASK
7610 base_op
= op
& FUTEX_CMD_MASK
;
7616 case FUTEX_WAIT_BITSET
:
7619 target_to_host_timespec(pts
, timeout
);
7623 return do_safe_futex(g2h(cpu
, uaddr
),
7624 op
, tswap32(val
), pts
, NULL
, val3
);
7626 return do_safe_futex(g2h(cpu
, uaddr
),
7627 op
, val
, NULL
, NULL
, 0);
7629 return do_safe_futex(g2h(cpu
, uaddr
),
7630 op
, val
, NULL
, NULL
, 0);
7632 case FUTEX_CMP_REQUEUE
:
7634 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7635 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7636 But the prototype takes a `struct timespec *'; insert casts
7637 to satisfy the compiler. We do not need to tswap TIMEOUT
7638 since it's not compared to guest memory. */
7639 pts
= (struct timespec
*)(uintptr_t) timeout
;
7640 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7641 (base_op
== FUTEX_CMP_REQUEUE
7642 ? tswap32(val3
) : val3
));
7644 return -TARGET_ENOSYS
;
7649 #if defined(TARGET_NR_futex_time64)
7650 static int do_futex_time64(CPUState
*cpu
, target_ulong uaddr
, int op
,
7651 int val
, target_ulong timeout
,
7652 target_ulong uaddr2
, int val3
)
7654 struct timespec ts
, *pts
;
7657 /* ??? We assume FUTEX_* constants are the same on both host
7659 #ifdef FUTEX_CMD_MASK
7660 base_op
= op
& FUTEX_CMD_MASK
;
7666 case FUTEX_WAIT_BITSET
:
7669 if (target_to_host_timespec64(pts
, timeout
)) {
7670 return -TARGET_EFAULT
;
7675 return do_safe_futex(g2h(cpu
, uaddr
), op
,
7676 tswap32(val
), pts
, NULL
, val3
);
7678 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7680 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7682 case FUTEX_CMP_REQUEUE
:
7684 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7685 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7686 But the prototype takes a `struct timespec *'; insert casts
7687 to satisfy the compiler. We do not need to tswap TIMEOUT
7688 since it's not compared to guest memory. */
7689 pts
= (struct timespec
*)(uintptr_t) timeout
;
7690 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7691 (base_op
== FUTEX_CMP_REQUEUE
7692 ? tswap32(val3
) : val3
));
7694 return -TARGET_ENOSYS
;
7699 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7700 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7701 abi_long handle
, abi_long mount_id
,
7704 struct file_handle
*target_fh
;
7705 struct file_handle
*fh
;
7709 unsigned int size
, total_size
;
7711 if (get_user_s32(size
, handle
)) {
7712 return -TARGET_EFAULT
;
7715 name
= lock_user_string(pathname
);
7717 return -TARGET_EFAULT
;
7720 total_size
= sizeof(struct file_handle
) + size
;
7721 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7723 unlock_user(name
, pathname
, 0);
7724 return -TARGET_EFAULT
;
7727 fh
= g_malloc0(total_size
);
7728 fh
->handle_bytes
= size
;
7730 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7731 unlock_user(name
, pathname
, 0);
7733 /* man name_to_handle_at(2):
7734 * Other than the use of the handle_bytes field, the caller should treat
7735 * the file_handle structure as an opaque data type
7738 memcpy(target_fh
, fh
, total_size
);
7739 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7740 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7742 unlock_user(target_fh
, handle
, total_size
);
7744 if (put_user_s32(mid
, mount_id
)) {
7745 return -TARGET_EFAULT
;
7753 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7754 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7757 struct file_handle
*target_fh
;
7758 struct file_handle
*fh
;
7759 unsigned int size
, total_size
;
7762 if (get_user_s32(size
, handle
)) {
7763 return -TARGET_EFAULT
;
7766 total_size
= sizeof(struct file_handle
) + size
;
7767 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7769 return -TARGET_EFAULT
;
7772 fh
= g_memdup(target_fh
, total_size
);
7773 fh
->handle_bytes
= size
;
7774 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7776 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7777 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7781 unlock_user(target_fh
, handle
, total_size
);
7787 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7789 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7792 target_sigset_t
*target_mask
;
7796 if (flags
& ~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
)) {
7797 return -TARGET_EINVAL
;
7799 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7800 return -TARGET_EFAULT
;
7803 target_to_host_sigset(&host_mask
, target_mask
);
7805 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7807 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7809 fd_trans_register(ret
, &target_signalfd_trans
);
7812 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Termination signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7832 static int open_self_cmdline(void *cpu_env
, int fd
)
7834 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7835 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7838 for (i
= 0; i
< bprm
->argc
; i
++) {
7839 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7841 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7849 static int open_self_maps(void *cpu_env
, int fd
)
7851 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7852 TaskState
*ts
= cpu
->opaque
;
7853 GSList
*map_info
= read_self_maps();
7857 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7858 MapInfo
*e
= (MapInfo
*) s
->data
;
7860 if (h2g_valid(e
->start
)) {
7861 unsigned long min
= e
->start
;
7862 unsigned long max
= e
->end
;
7863 int flags
= page_get_flags(h2g(min
));
7866 max
= h2g_valid(max
- 1) ?
7867 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
7869 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7873 if (h2g(min
) == ts
->info
->stack_limit
) {
7879 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7880 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7881 h2g(min
), h2g(max
- 1) + 1,
7882 e
->is_read
? 'r' : '-',
7883 e
->is_write
? 'w' : '-',
7884 e
->is_exec
? 'x' : '-',
7885 e
->is_priv
? 'p' : '-',
7886 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7888 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7895 free_self_maps(map_info
);
7897 #ifdef TARGET_VSYSCALL_PAGE
7899 * We only support execution from the vsyscall page.
7900 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7902 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7903 " --xp 00000000 00:00 0",
7904 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7905 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7911 static int open_self_stat(void *cpu_env
, int fd
)
7913 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7914 TaskState
*ts
= cpu
->opaque
;
7915 g_autoptr(GString
) buf
= g_string_new(NULL
);
7918 for (i
= 0; i
< 44; i
++) {
7921 g_string_printf(buf
, FMT_pid
" ", getpid());
7922 } else if (i
== 1) {
7924 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7925 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7926 g_string_printf(buf
, "(%.15s) ", bin
);
7927 } else if (i
== 27) {
7929 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7931 /* for the rest, there is MasterCard */
7932 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7935 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7943 static int open_self_auxv(void *cpu_env
, int fd
)
7945 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7946 TaskState
*ts
= cpu
->opaque
;
7947 abi_ulong auxv
= ts
->info
->saved_auxv
;
7948 abi_ulong len
= ts
->info
->auxv_len
;
7952 * Auxiliary vector is stored in target process stack.
7953 * read in whole auxv vector and copy it to file
7955 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7959 r
= write(fd
, ptr
, len
);
7966 lseek(fd
, 0, SEEK_SET
);
7967 unlock_user(ptr
, auxv
, len
);
/*
 * Return non-zero when filename refers to the calling process's own
 * /proc entry named `entry`: either "/proc/self/<entry>" or
 * "/proc/<our-pid>/<entry>".
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7997 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7998 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7999 static int is_proc(const char *filename
, const char *entry
)
8001 return strcmp(filename
, entry
) == 0;
8005 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8006 static int open_net_route(void *cpu_env
, int fd
)
8013 fp
= fopen("/proc/net/route", "r");
8020 read
= getline(&line
, &len
, fp
);
8021 dprintf(fd
, "%s", line
);
8025 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8027 uint32_t dest
, gw
, mask
;
8028 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8031 fields
= sscanf(line
,
8032 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8033 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8034 &mask
, &mtu
, &window
, &irtt
);
8038 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8039 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8040 metric
, tswap32(mask
), mtu
, window
, irtt
);
8050 #if defined(TARGET_SPARC)
8051 static int open_cpuinfo(void *cpu_env
, int fd
)
8053 dprintf(fd
, "type\t\t: sun4u\n");
8058 #if defined(TARGET_HPPA)
8059 static int open_cpuinfo(void *cpu_env
, int fd
)
8061 dprintf(fd
, "cpu family\t: PA-RISC 1.1e\n");
8062 dprintf(fd
, "cpu\t\t: PA7300LC (PCX-L2)\n");
8063 dprintf(fd
, "capabilities\t: os32\n");
8064 dprintf(fd
, "model\t\t: 9000/778/B160L\n");
8065 dprintf(fd
, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8070 #if defined(TARGET_M68K)
8071 static int open_hardware(void *cpu_env
, int fd
)
8073 dprintf(fd
, "Model:\t\tqemu-m68k\n");
8078 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8081 const char *filename
;
8082 int (*fill
)(void *cpu_env
, int fd
);
8083 int (*cmp
)(const char *s1
, const char *s2
);
8085 const struct fake_open
*fake_open
;
8086 static const struct fake_open fakes
[] = {
8087 { "maps", open_self_maps
, is_proc_myself
},
8088 { "stat", open_self_stat
, is_proc_myself
},
8089 { "auxv", open_self_auxv
, is_proc_myself
},
8090 { "cmdline", open_self_cmdline
, is_proc_myself
},
8091 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8092 { "/proc/net/route", open_net_route
, is_proc
},
8094 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8095 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8097 #if defined(TARGET_M68K)
8098 { "/proc/hardware", open_hardware
, is_proc
},
8100 { NULL
, NULL
, NULL
}
8103 if (is_proc_myself(pathname
, "exe")) {
8104 int execfd
= qemu_getauxval(AT_EXECFD
);
8105 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
8108 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8109 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8114 if (fake_open
->filename
) {
8116 char filename
[PATH_MAX
];
8119 /* create temporary file to map stat to */
8120 tmpdir
= getenv("TMPDIR");
8123 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8124 fd
= mkstemp(filename
);
8130 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8136 lseek(fd
, 0, SEEK_SET
);
8141 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8144 #define TIMER_MAGIC 0x0caf0000
8145 #define TIMER_MAGIC_MASK 0xffff0000
8147 /* Convert QEMU provided timer ID back to internal 16bit index format */
8148 static target_timer_t
get_timer_id(abi_long arg
)
8150 target_timer_t timerid
= arg
;
8152 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8153 return -TARGET_EINVAL
;
8158 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8159 return -TARGET_EINVAL
;
8165 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8167 abi_ulong target_addr
,
8170 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8171 unsigned host_bits
= sizeof(*host_mask
) * 8;
8172 abi_ulong
*target_mask
;
8175 assert(host_size
>= target_size
);
8177 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8179 return -TARGET_EFAULT
;
8181 memset(host_mask
, 0, host_size
);
8183 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8184 unsigned bit
= i
* target_bits
;
8187 __get_user(val
, &target_mask
[i
]);
8188 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8189 if (val
& (1UL << j
)) {
8190 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8195 unlock_user(target_mask
, target_addr
, 0);
8199 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8201 abi_ulong target_addr
,
8204 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8205 unsigned host_bits
= sizeof(*host_mask
) * 8;
8206 abi_ulong
*target_mask
;
8209 assert(host_size
>= target_size
);
8211 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8213 return -TARGET_EFAULT
;
8216 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8217 unsigned bit
= i
* target_bits
;
8220 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8221 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8225 __put_user(val
, &target_mask
[i
]);
8228 unlock_user(target_mask
, target_addr
, target_size
);
8232 /* This is an internal helper for do_syscall so that it is easier
8233 * to have a single return point, so that actions, such as logging
8234 * of syscall results, can be performed.
8235 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8237 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8238 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8239 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8242 CPUState
*cpu
= env_cpu(cpu_env
);
8244 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8245 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8246 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8247 || defined(TARGET_NR_statx)
8250 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8251 || defined(TARGET_NR_fstatfs)
8257 case TARGET_NR_exit
:
8258 /* In old applications this may be used to implement _exit(2).
8259 However in threaded applications it is used for thread termination,
8260 and _exit_group is used for application termination.
8261 Do thread termination if we have more then one thread. */
8263 if (block_signals()) {
8264 return -TARGET_ERESTARTSYS
;
8267 pthread_mutex_lock(&clone_lock
);
8269 if (CPU_NEXT(first_cpu
)) {
8270 TaskState
*ts
= cpu
->opaque
;
8272 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8273 object_unref(OBJECT(cpu
));
8275 * At this point the CPU should be unrealized and removed
8276 * from cpu lists. We can clean-up the rest of the thread
8277 * data without the lock held.
8280 pthread_mutex_unlock(&clone_lock
);
8282 if (ts
->child_tidptr
) {
8283 put_user_u32(0, ts
->child_tidptr
);
8284 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8285 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8289 rcu_unregister_thread();
8293 pthread_mutex_unlock(&clone_lock
);
8294 preexit_cleanup(cpu_env
, arg1
);
8296 return 0; /* avoid warning */
8297 case TARGET_NR_read
:
8298 if (arg2
== 0 && arg3
== 0) {
8299 return get_errno(safe_read(arg1
, 0, 0));
8301 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8302 return -TARGET_EFAULT
;
8303 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8305 fd_trans_host_to_target_data(arg1
)) {
8306 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8308 unlock_user(p
, arg2
, ret
);
8311 case TARGET_NR_write
:
8312 if (arg2
== 0 && arg3
== 0) {
8313 return get_errno(safe_write(arg1
, 0, 0));
8315 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8316 return -TARGET_EFAULT
;
8317 if (fd_trans_target_to_host_data(arg1
)) {
8318 void *copy
= g_malloc(arg3
);
8319 memcpy(copy
, p
, arg3
);
8320 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8322 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8326 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8328 unlock_user(p
, arg2
, 0);
8331 #ifdef TARGET_NR_open
8332 case TARGET_NR_open
:
8333 if (!(p
= lock_user_string(arg1
)))
8334 return -TARGET_EFAULT
;
8335 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8336 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8338 fd_trans_unregister(ret
);
8339 unlock_user(p
, arg1
, 0);
8342 case TARGET_NR_openat
:
8343 if (!(p
= lock_user_string(arg2
)))
8344 return -TARGET_EFAULT
;
8345 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8346 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8348 fd_trans_unregister(ret
);
8349 unlock_user(p
, arg2
, 0);
8351 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8352 case TARGET_NR_name_to_handle_at
:
8353 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8356 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8357 case TARGET_NR_open_by_handle_at
:
8358 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8359 fd_trans_unregister(ret
);
8362 case TARGET_NR_close
:
8363 fd_trans_unregister(arg1
);
8364 return get_errno(close(arg1
));
8367 return do_brk(arg1
);
8368 #ifdef TARGET_NR_fork
8369 case TARGET_NR_fork
:
8370 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8372 #ifdef TARGET_NR_waitpid
8373 case TARGET_NR_waitpid
:
8376 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8377 if (!is_error(ret
) && arg2
&& ret
8378 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8379 return -TARGET_EFAULT
;
8383 #ifdef TARGET_NR_waitid
8384 case TARGET_NR_waitid
:
8388 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8389 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8390 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8391 return -TARGET_EFAULT
;
8392 host_to_target_siginfo(p
, &info
);
8393 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8398 #ifdef TARGET_NR_creat /* not on alpha */
8399 case TARGET_NR_creat
:
8400 if (!(p
= lock_user_string(arg1
)))
8401 return -TARGET_EFAULT
;
8402 ret
= get_errno(creat(p
, arg2
));
8403 fd_trans_unregister(ret
);
8404 unlock_user(p
, arg1
, 0);
8407 #ifdef TARGET_NR_link
8408 case TARGET_NR_link
:
8411 p
= lock_user_string(arg1
);
8412 p2
= lock_user_string(arg2
);
8414 ret
= -TARGET_EFAULT
;
8416 ret
= get_errno(link(p
, p2
));
8417 unlock_user(p2
, arg2
, 0);
8418 unlock_user(p
, arg1
, 0);
8422 #if defined(TARGET_NR_linkat)
8423 case TARGET_NR_linkat
:
8427 return -TARGET_EFAULT
;
8428 p
= lock_user_string(arg2
);
8429 p2
= lock_user_string(arg4
);
8431 ret
= -TARGET_EFAULT
;
8433 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8434 unlock_user(p
, arg2
, 0);
8435 unlock_user(p2
, arg4
, 0);
8439 #ifdef TARGET_NR_unlink
8440 case TARGET_NR_unlink
:
8441 if (!(p
= lock_user_string(arg1
)))
8442 return -TARGET_EFAULT
;
8443 ret
= get_errno(unlink(p
));
8444 unlock_user(p
, arg1
, 0);
8447 #if defined(TARGET_NR_unlinkat)
8448 case TARGET_NR_unlinkat
:
8449 if (!(p
= lock_user_string(arg2
)))
8450 return -TARGET_EFAULT
;
8451 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8452 unlock_user(p
, arg2
, 0);
8455 case TARGET_NR_execve
:
8457 char **argp
, **envp
;
8460 abi_ulong guest_argp
;
8461 abi_ulong guest_envp
;
8468 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8469 if (get_user_ual(addr
, gp
))
8470 return -TARGET_EFAULT
;
8477 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8478 if (get_user_ual(addr
, gp
))
8479 return -TARGET_EFAULT
;
8485 argp
= g_new0(char *, argc
+ 1);
8486 envp
= g_new0(char *, envc
+ 1);
8488 for (gp
= guest_argp
, q
= argp
; gp
;
8489 gp
+= sizeof(abi_ulong
), q
++) {
8490 if (get_user_ual(addr
, gp
))
8494 if (!(*q
= lock_user_string(addr
)))
8496 total_size
+= strlen(*q
) + 1;
8500 for (gp
= guest_envp
, q
= envp
; gp
;
8501 gp
+= sizeof(abi_ulong
), q
++) {
8502 if (get_user_ual(addr
, gp
))
8506 if (!(*q
= lock_user_string(addr
)))
8508 total_size
+= strlen(*q
) + 1;
8512 if (!(p
= lock_user_string(arg1
)))
8514 /* Although execve() is not an interruptible syscall it is
8515 * a special case where we must use the safe_syscall wrapper:
8516 * if we allow a signal to happen before we make the host
8517 * syscall then we will 'lose' it, because at the point of
8518 * execve the process leaves QEMU's control. So we use the
8519 * safe syscall wrapper to ensure that we either take the
8520 * signal as a guest signal, or else it does not happen
8521 * before the execve completes and makes it the other
8522 * program's problem.
8524 ret
= get_errno(safe_execve(p
, argp
, envp
));
8525 unlock_user(p
, arg1
, 0);
8530 ret
= -TARGET_EFAULT
;
8533 for (gp
= guest_argp
, q
= argp
; *q
;
8534 gp
+= sizeof(abi_ulong
), q
++) {
8535 if (get_user_ual(addr
, gp
)
8538 unlock_user(*q
, addr
, 0);
8540 for (gp
= guest_envp
, q
= envp
; *q
;
8541 gp
+= sizeof(abi_ulong
), q
++) {
8542 if (get_user_ual(addr
, gp
)
8545 unlock_user(*q
, addr
, 0);
8552 case TARGET_NR_chdir
:
8553 if (!(p
= lock_user_string(arg1
)))
8554 return -TARGET_EFAULT
;
8555 ret
= get_errno(chdir(p
));
8556 unlock_user(p
, arg1
, 0);
8558 #ifdef TARGET_NR_time
8559 case TARGET_NR_time
:
8562 ret
= get_errno(time(&host_time
));
8565 && put_user_sal(host_time
, arg1
))
8566 return -TARGET_EFAULT
;
8570 #ifdef TARGET_NR_mknod
8571 case TARGET_NR_mknod
:
8572 if (!(p
= lock_user_string(arg1
)))
8573 return -TARGET_EFAULT
;
8574 ret
= get_errno(mknod(p
, arg2
, arg3
));
8575 unlock_user(p
, arg1
, 0);
8578 #if defined(TARGET_NR_mknodat)
8579 case TARGET_NR_mknodat
:
8580 if (!(p
= lock_user_string(arg2
)))
8581 return -TARGET_EFAULT
;
8582 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8583 unlock_user(p
, arg2
, 0);
8586 #ifdef TARGET_NR_chmod
8587 case TARGET_NR_chmod
:
8588 if (!(p
= lock_user_string(arg1
)))
8589 return -TARGET_EFAULT
;
8590 ret
= get_errno(chmod(p
, arg2
));
8591 unlock_user(p
, arg1
, 0);
8594 #ifdef TARGET_NR_lseek
8595 case TARGET_NR_lseek
:
8596 return get_errno(lseek(arg1
, arg2
, arg3
));
8598 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8599 /* Alpha specific */
8600 case TARGET_NR_getxpid
:
8601 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8602 return get_errno(getpid());
8604 #ifdef TARGET_NR_getpid
8605 case TARGET_NR_getpid
:
8606 return get_errno(getpid());
8608 case TARGET_NR_mount
:
8610 /* need to look at the data field */
8614 p
= lock_user_string(arg1
);
8616 return -TARGET_EFAULT
;
8622 p2
= lock_user_string(arg2
);
8625 unlock_user(p
, arg1
, 0);
8627 return -TARGET_EFAULT
;
8631 p3
= lock_user_string(arg3
);
8634 unlock_user(p
, arg1
, 0);
8636 unlock_user(p2
, arg2
, 0);
8637 return -TARGET_EFAULT
;
8643 /* FIXME - arg5 should be locked, but it isn't clear how to
8644 * do that since it's not guaranteed to be a NULL-terminated
8648 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8650 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8652 ret
= get_errno(ret
);
8655 unlock_user(p
, arg1
, 0);
8657 unlock_user(p2
, arg2
, 0);
8659 unlock_user(p3
, arg3
, 0);
8663 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8664 #if defined(TARGET_NR_umount)
8665 case TARGET_NR_umount
:
8667 #if defined(TARGET_NR_oldumount)
8668 case TARGET_NR_oldumount
:
8670 if (!(p
= lock_user_string(arg1
)))
8671 return -TARGET_EFAULT
;
8672 ret
= get_errno(umount(p
));
8673 unlock_user(p
, arg1
, 0);
8676 #ifdef TARGET_NR_stime /* not on alpha */
8677 case TARGET_NR_stime
:
8681 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8682 return -TARGET_EFAULT
;
8684 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8687 #ifdef TARGET_NR_alarm /* not on alpha */
8688 case TARGET_NR_alarm
:
8691 #ifdef TARGET_NR_pause /* not on alpha */
8692 case TARGET_NR_pause
:
8693 if (!block_signals()) {
8694 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8696 return -TARGET_EINTR
;
8698 #ifdef TARGET_NR_utime
8699 case TARGET_NR_utime
:
8701 struct utimbuf tbuf
, *host_tbuf
;
8702 struct target_utimbuf
*target_tbuf
;
8704 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8705 return -TARGET_EFAULT
;
8706 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8707 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8708 unlock_user_struct(target_tbuf
, arg2
, 0);
8713 if (!(p
= lock_user_string(arg1
)))
8714 return -TARGET_EFAULT
;
8715 ret
= get_errno(utime(p
, host_tbuf
));
8716 unlock_user(p
, arg1
, 0);
8720 #ifdef TARGET_NR_utimes
8721 case TARGET_NR_utimes
:
8723 struct timeval
*tvp
, tv
[2];
8725 if (copy_from_user_timeval(&tv
[0], arg2
)
8726 || copy_from_user_timeval(&tv
[1],
8727 arg2
+ sizeof(struct target_timeval
)))
8728 return -TARGET_EFAULT
;
8733 if (!(p
= lock_user_string(arg1
)))
8734 return -TARGET_EFAULT
;
8735 ret
= get_errno(utimes(p
, tvp
));
8736 unlock_user(p
, arg1
, 0);
8740 #if defined(TARGET_NR_futimesat)
8741 case TARGET_NR_futimesat
:
8743 struct timeval
*tvp
, tv
[2];
8745 if (copy_from_user_timeval(&tv
[0], arg3
)
8746 || copy_from_user_timeval(&tv
[1],
8747 arg3
+ sizeof(struct target_timeval
)))
8748 return -TARGET_EFAULT
;
8753 if (!(p
= lock_user_string(arg2
))) {
8754 return -TARGET_EFAULT
;
8756 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8757 unlock_user(p
, arg2
, 0);
8761 #ifdef TARGET_NR_access
8762 case TARGET_NR_access
:
8763 if (!(p
= lock_user_string(arg1
))) {
8764 return -TARGET_EFAULT
;
8766 ret
= get_errno(access(path(p
), arg2
));
8767 unlock_user(p
, arg1
, 0);
8770 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8771 case TARGET_NR_faccessat
:
8772 if (!(p
= lock_user_string(arg2
))) {
8773 return -TARGET_EFAULT
;
8775 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8776 unlock_user(p
, arg2
, 0);
8779 #ifdef TARGET_NR_nice /* not on alpha */
8780 case TARGET_NR_nice
:
8781 return get_errno(nice(arg1
));
8783 case TARGET_NR_sync
:
8786 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8787 case TARGET_NR_syncfs
:
8788 return get_errno(syncfs(arg1
));
8790 case TARGET_NR_kill
:
8791 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8792 #ifdef TARGET_NR_rename
8793 case TARGET_NR_rename
:
8796 p
= lock_user_string(arg1
);
8797 p2
= lock_user_string(arg2
);
8799 ret
= -TARGET_EFAULT
;
8801 ret
= get_errno(rename(p
, p2
));
8802 unlock_user(p2
, arg2
, 0);
8803 unlock_user(p
, arg1
, 0);
8807 #if defined(TARGET_NR_renameat)
8808 case TARGET_NR_renameat
:
8811 p
= lock_user_string(arg2
);
8812 p2
= lock_user_string(arg4
);
8814 ret
= -TARGET_EFAULT
;
8816 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8817 unlock_user(p2
, arg4
, 0);
8818 unlock_user(p
, arg2
, 0);
8822 #if defined(TARGET_NR_renameat2)
8823 case TARGET_NR_renameat2
:
8826 p
= lock_user_string(arg2
);
8827 p2
= lock_user_string(arg4
);
8829 ret
= -TARGET_EFAULT
;
8831 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8833 unlock_user(p2
, arg4
, 0);
8834 unlock_user(p
, arg2
, 0);
8838 #ifdef TARGET_NR_mkdir
8839 case TARGET_NR_mkdir
:
8840 if (!(p
= lock_user_string(arg1
)))
8841 return -TARGET_EFAULT
;
8842 ret
= get_errno(mkdir(p
, arg2
));
8843 unlock_user(p
, arg1
, 0);
8846 #if defined(TARGET_NR_mkdirat)
8847 case TARGET_NR_mkdirat
:
8848 if (!(p
= lock_user_string(arg2
)))
8849 return -TARGET_EFAULT
;
8850 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8851 unlock_user(p
, arg2
, 0);
8854 #ifdef TARGET_NR_rmdir
8855 case TARGET_NR_rmdir
:
8856 if (!(p
= lock_user_string(arg1
)))
8857 return -TARGET_EFAULT
;
8858 ret
= get_errno(rmdir(p
));
8859 unlock_user(p
, arg1
, 0);
8863 ret
= get_errno(dup(arg1
));
8865 fd_trans_dup(arg1
, ret
);
8868 #ifdef TARGET_NR_pipe
8869 case TARGET_NR_pipe
:
8870 return do_pipe(cpu_env
, arg1
, 0, 0);
8872 #ifdef TARGET_NR_pipe2
8873 case TARGET_NR_pipe2
:
8874 return do_pipe(cpu_env
, arg1
,
8875 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8877 case TARGET_NR_times
:
8879 struct target_tms
*tmsp
;
8881 ret
= get_errno(times(&tms
));
8883 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8885 return -TARGET_EFAULT
;
8886 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8887 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8888 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8889 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8892 ret
= host_to_target_clock_t(ret
);
8895 case TARGET_NR_acct
:
8897 ret
= get_errno(acct(NULL
));
8899 if (!(p
= lock_user_string(arg1
))) {
8900 return -TARGET_EFAULT
;
8902 ret
= get_errno(acct(path(p
)));
8903 unlock_user(p
, arg1
, 0);
8906 #ifdef TARGET_NR_umount2
8907 case TARGET_NR_umount2
:
8908 if (!(p
= lock_user_string(arg1
)))
8909 return -TARGET_EFAULT
;
8910 ret
= get_errno(umount2(p
, arg2
));
8911 unlock_user(p
, arg1
, 0);
8914 case TARGET_NR_ioctl
:
8915 return do_ioctl(arg1
, arg2
, arg3
);
8916 #ifdef TARGET_NR_fcntl
8917 case TARGET_NR_fcntl
:
8918 return do_fcntl(arg1
, arg2
, arg3
);
8920 case TARGET_NR_setpgid
:
8921 return get_errno(setpgid(arg1
, arg2
));
8922 case TARGET_NR_umask
:
8923 return get_errno(umask(arg1
));
8924 case TARGET_NR_chroot
:
8925 if (!(p
= lock_user_string(arg1
)))
8926 return -TARGET_EFAULT
;
8927 ret
= get_errno(chroot(p
));
8928 unlock_user(p
, arg1
, 0);
8930 #ifdef TARGET_NR_dup2
8931 case TARGET_NR_dup2
:
8932 ret
= get_errno(dup2(arg1
, arg2
));
8934 fd_trans_dup(arg1
, arg2
);
8938 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8939 case TARGET_NR_dup3
:
8943 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8946 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8947 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8949 fd_trans_dup(arg1
, arg2
);
8954 #ifdef TARGET_NR_getppid /* not on alpha */
8955 case TARGET_NR_getppid
:
8956 return get_errno(getppid());
8958 #ifdef TARGET_NR_getpgrp
8959 case TARGET_NR_getpgrp
:
8960 return get_errno(getpgrp());
8962 case TARGET_NR_setsid
:
8963 return get_errno(setsid());
8964 #ifdef TARGET_NR_sigaction
8965 case TARGET_NR_sigaction
:
8967 #if defined(TARGET_ALPHA)
8968 struct target_sigaction act
, oact
, *pact
= 0;
8969 struct target_old_sigaction
*old_act
;
8971 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8972 return -TARGET_EFAULT
;
8973 act
._sa_handler
= old_act
->_sa_handler
;
8974 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8975 act
.sa_flags
= old_act
->sa_flags
;
8976 act
.sa_restorer
= 0;
8977 unlock_user_struct(old_act
, arg2
, 0);
8980 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8981 if (!is_error(ret
) && arg3
) {
8982 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8983 return -TARGET_EFAULT
;
8984 old_act
->_sa_handler
= oact
._sa_handler
;
8985 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8986 old_act
->sa_flags
= oact
.sa_flags
;
8987 unlock_user_struct(old_act
, arg3
, 1);
8989 #elif defined(TARGET_MIPS)
8990 struct target_sigaction act
, oact
, *pact
, *old_act
;
8993 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8994 return -TARGET_EFAULT
;
8995 act
._sa_handler
= old_act
->_sa_handler
;
8996 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8997 act
.sa_flags
= old_act
->sa_flags
;
8998 unlock_user_struct(old_act
, arg2
, 0);
9004 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
9006 if (!is_error(ret
) && arg3
) {
9007 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9008 return -TARGET_EFAULT
;
9009 old_act
->_sa_handler
= oact
._sa_handler
;
9010 old_act
->sa_flags
= oact
.sa_flags
;
9011 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9012 old_act
->sa_mask
.sig
[1] = 0;
9013 old_act
->sa_mask
.sig
[2] = 0;
9014 old_act
->sa_mask
.sig
[3] = 0;
9015 unlock_user_struct(old_act
, arg3
, 1);
9018 struct target_old_sigaction
*old_act
;
9019 struct target_sigaction act
, oact
, *pact
;
9021 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9022 return -TARGET_EFAULT
;
9023 act
._sa_handler
= old_act
->_sa_handler
;
9024 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9025 act
.sa_flags
= old_act
->sa_flags
;
9026 act
.sa_restorer
= old_act
->sa_restorer
;
9027 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9028 act
.ka_restorer
= 0;
9030 unlock_user_struct(old_act
, arg2
, 0);
9035 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
9036 if (!is_error(ret
) && arg3
) {
9037 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9038 return -TARGET_EFAULT
;
9039 old_act
->_sa_handler
= oact
._sa_handler
;
9040 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9041 old_act
->sa_flags
= oact
.sa_flags
;
9042 old_act
->sa_restorer
= oact
.sa_restorer
;
9043 unlock_user_struct(old_act
, arg3
, 1);
9049 case TARGET_NR_rt_sigaction
:
9051 #if defined(TARGET_ALPHA)
9052 /* For Alpha and SPARC this is a 5 argument syscall, with
9053 * a 'restorer' parameter which must be copied into the
9054 * sa_restorer field of the sigaction struct.
9055 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9056 * and arg5 is the sigsetsize.
9057 * Alpha also has a separate rt_sigaction struct that it uses
9058 * here; SPARC uses the usual sigaction struct.
9060 struct target_rt_sigaction
*rt_act
;
9061 struct target_sigaction act
, oact
, *pact
= 0;
9063 if (arg4
!= sizeof(target_sigset_t
)) {
9064 return -TARGET_EINVAL
;
9067 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
9068 return -TARGET_EFAULT
;
9069 act
._sa_handler
= rt_act
->_sa_handler
;
9070 act
.sa_mask
= rt_act
->sa_mask
;
9071 act
.sa_flags
= rt_act
->sa_flags
;
9072 act
.sa_restorer
= arg5
;
9073 unlock_user_struct(rt_act
, arg2
, 0);
9076 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
9077 if (!is_error(ret
) && arg3
) {
9078 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
9079 return -TARGET_EFAULT
;
9080 rt_act
->_sa_handler
= oact
._sa_handler
;
9081 rt_act
->sa_mask
= oact
.sa_mask
;
9082 rt_act
->sa_flags
= oact
.sa_flags
;
9083 unlock_user_struct(rt_act
, arg3
, 1);
9087 target_ulong restorer
= arg4
;
9088 target_ulong sigsetsize
= arg5
;
9090 target_ulong sigsetsize
= arg4
;
9092 struct target_sigaction
*act
;
9093 struct target_sigaction
*oact
;
9095 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9096 return -TARGET_EINVAL
;
9099 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9100 return -TARGET_EFAULT
;
9102 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9103 act
->ka_restorer
= restorer
;
9109 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9110 ret
= -TARGET_EFAULT
;
9111 goto rt_sigaction_fail
;
9115 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
9118 unlock_user_struct(act
, arg2
, 0);
9120 unlock_user_struct(oact
, arg3
, 1);
9124 #ifdef TARGET_NR_sgetmask /* not on alpha */
9125 case TARGET_NR_sgetmask
:
9128 abi_ulong target_set
;
9129 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9131 host_to_target_old_sigset(&target_set
, &cur_set
);
9137 #ifdef TARGET_NR_ssetmask /* not on alpha */
9138 case TARGET_NR_ssetmask
:
9141 abi_ulong target_set
= arg1
;
9142 target_to_host_old_sigset(&set
, &target_set
);
9143 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9145 host_to_target_old_sigset(&target_set
, &oset
);
9151 #ifdef TARGET_NR_sigprocmask
9152 case TARGET_NR_sigprocmask
:
9154 #if defined(TARGET_ALPHA)
9155 sigset_t set
, oldset
;
9160 case TARGET_SIG_BLOCK
:
9163 case TARGET_SIG_UNBLOCK
:
9166 case TARGET_SIG_SETMASK
:
9170 return -TARGET_EINVAL
;
9173 target_to_host_old_sigset(&set
, &mask
);
9175 ret
= do_sigprocmask(how
, &set
, &oldset
);
9176 if (!is_error(ret
)) {
9177 host_to_target_old_sigset(&mask
, &oldset
);
9179 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9182 sigset_t set
, oldset
, *set_ptr
;
9187 case TARGET_SIG_BLOCK
:
9190 case TARGET_SIG_UNBLOCK
:
9193 case TARGET_SIG_SETMASK
:
9197 return -TARGET_EINVAL
;
9199 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9200 return -TARGET_EFAULT
;
9201 target_to_host_old_sigset(&set
, p
);
9202 unlock_user(p
, arg2
, 0);
9208 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9209 if (!is_error(ret
) && arg3
) {
9210 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9211 return -TARGET_EFAULT
;
9212 host_to_target_old_sigset(p
, &oldset
);
9213 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9219 case TARGET_NR_rt_sigprocmask
:
9222 sigset_t set
, oldset
, *set_ptr
;
9224 if (arg4
!= sizeof(target_sigset_t
)) {
9225 return -TARGET_EINVAL
;
9230 case TARGET_SIG_BLOCK
:
9233 case TARGET_SIG_UNBLOCK
:
9236 case TARGET_SIG_SETMASK
:
9240 return -TARGET_EINVAL
;
9242 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9243 return -TARGET_EFAULT
;
9244 target_to_host_sigset(&set
, p
);
9245 unlock_user(p
, arg2
, 0);
9251 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9252 if (!is_error(ret
) && arg3
) {
9253 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9254 return -TARGET_EFAULT
;
9255 host_to_target_sigset(p
, &oldset
);
9256 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9260 #ifdef TARGET_NR_sigpending
9261 case TARGET_NR_sigpending
:
9264 ret
= get_errno(sigpending(&set
));
9265 if (!is_error(ret
)) {
9266 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9267 return -TARGET_EFAULT
;
9268 host_to_target_old_sigset(p
, &set
);
9269 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9274 case TARGET_NR_rt_sigpending
:
9278 /* Yes, this check is >, not != like most. We follow the kernel's
9279 * logic and it does it like this because it implements
9280 * NR_sigpending through the same code path, and in that case
9281 * the old_sigset_t is smaller in size.
9283 if (arg2
> sizeof(target_sigset_t
)) {
9284 return -TARGET_EINVAL
;
9287 ret
= get_errno(sigpending(&set
));
9288 if (!is_error(ret
)) {
9289 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9290 return -TARGET_EFAULT
;
9291 host_to_target_sigset(p
, &set
);
9292 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9296 #ifdef TARGET_NR_sigsuspend
9297 case TARGET_NR_sigsuspend
:
9299 TaskState
*ts
= cpu
->opaque
;
9300 #if defined(TARGET_ALPHA)
9301 abi_ulong mask
= arg1
;
9302 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9304 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9305 return -TARGET_EFAULT
;
9306 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9307 unlock_user(p
, arg1
, 0);
9309 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9311 if (ret
!= -TARGET_ERESTARTSYS
) {
9312 ts
->in_sigsuspend
= 1;
9317 case TARGET_NR_rt_sigsuspend
:
9319 TaskState
*ts
= cpu
->opaque
;
9321 if (arg2
!= sizeof(target_sigset_t
)) {
9322 return -TARGET_EINVAL
;
9324 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9325 return -TARGET_EFAULT
;
9326 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9327 unlock_user(p
, arg1
, 0);
9328 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9330 if (ret
!= -TARGET_ERESTARTSYS
) {
9331 ts
->in_sigsuspend
= 1;
9335 #ifdef TARGET_NR_rt_sigtimedwait
9336 case TARGET_NR_rt_sigtimedwait
:
9339 struct timespec uts
, *puts
;
9342 if (arg4
!= sizeof(target_sigset_t
)) {
9343 return -TARGET_EINVAL
;
9346 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9347 return -TARGET_EFAULT
;
9348 target_to_host_sigset(&set
, p
);
9349 unlock_user(p
, arg1
, 0);
9352 if (target_to_host_timespec(puts
, arg3
)) {
9353 return -TARGET_EFAULT
;
9358 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9360 if (!is_error(ret
)) {
9362 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9365 return -TARGET_EFAULT
;
9367 host_to_target_siginfo(p
, &uinfo
);
9368 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9370 ret
= host_to_target_signal(ret
);
9375 #ifdef TARGET_NR_rt_sigtimedwait_time64
9376 case TARGET_NR_rt_sigtimedwait_time64
:
9379 struct timespec uts
, *puts
;
9382 if (arg4
!= sizeof(target_sigset_t
)) {
9383 return -TARGET_EINVAL
;
9386 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9388 return -TARGET_EFAULT
;
9390 target_to_host_sigset(&set
, p
);
9391 unlock_user(p
, arg1
, 0);
9394 if (target_to_host_timespec64(puts
, arg3
)) {
9395 return -TARGET_EFAULT
;
9400 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9402 if (!is_error(ret
)) {
9404 p
= lock_user(VERIFY_WRITE
, arg2
,
9405 sizeof(target_siginfo_t
), 0);
9407 return -TARGET_EFAULT
;
9409 host_to_target_siginfo(p
, &uinfo
);
9410 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9412 ret
= host_to_target_signal(ret
);
9417 case TARGET_NR_rt_sigqueueinfo
:
9421 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9423 return -TARGET_EFAULT
;
9425 target_to_host_siginfo(&uinfo
, p
);
9426 unlock_user(p
, arg3
, 0);
9427 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9430 case TARGET_NR_rt_tgsigqueueinfo
:
9434 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9436 return -TARGET_EFAULT
;
9438 target_to_host_siginfo(&uinfo
, p
);
9439 unlock_user(p
, arg4
, 0);
9440 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9443 #ifdef TARGET_NR_sigreturn
9444 case TARGET_NR_sigreturn
:
9445 if (block_signals()) {
9446 return -TARGET_ERESTARTSYS
;
9448 return do_sigreturn(cpu_env
);
9450 case TARGET_NR_rt_sigreturn
:
9451 if (block_signals()) {
9452 return -TARGET_ERESTARTSYS
;
9454 return do_rt_sigreturn(cpu_env
);
9455 case TARGET_NR_sethostname
:
9456 if (!(p
= lock_user_string(arg1
)))
9457 return -TARGET_EFAULT
;
9458 ret
= get_errno(sethostname(p
, arg2
));
9459 unlock_user(p
, arg1
, 0);
9461 #ifdef TARGET_NR_setrlimit
9462 case TARGET_NR_setrlimit
:
9464 int resource
= target_to_host_resource(arg1
);
9465 struct target_rlimit
*target_rlim
;
9467 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9468 return -TARGET_EFAULT
;
9469 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9470 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9471 unlock_user_struct(target_rlim
, arg2
, 0);
9473 * If we just passed through resource limit settings for memory then
9474 * they would also apply to QEMU's own allocations, and QEMU will
9475 * crash or hang or die if its allocations fail. Ideally we would
9476 * track the guest allocations in QEMU and apply the limits ourselves.
9477 * For now, just tell the guest the call succeeded but don't actually
9480 if (resource
!= RLIMIT_AS
&&
9481 resource
!= RLIMIT_DATA
&&
9482 resource
!= RLIMIT_STACK
) {
9483 return get_errno(setrlimit(resource
, &rlim
));
9489 #ifdef TARGET_NR_getrlimit
9490 case TARGET_NR_getrlimit
:
9492 int resource
= target_to_host_resource(arg1
);
9493 struct target_rlimit
*target_rlim
;
9496 ret
= get_errno(getrlimit(resource
, &rlim
));
9497 if (!is_error(ret
)) {
9498 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9499 return -TARGET_EFAULT
;
9500 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9501 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9502 unlock_user_struct(target_rlim
, arg2
, 1);
9507 case TARGET_NR_getrusage
:
9509 struct rusage rusage
;
9510 ret
= get_errno(getrusage(arg1
, &rusage
));
9511 if (!is_error(ret
)) {
9512 ret
= host_to_target_rusage(arg2
, &rusage
);
9516 #if defined(TARGET_NR_gettimeofday)
9517 case TARGET_NR_gettimeofday
:
9522 ret
= get_errno(gettimeofday(&tv
, &tz
));
9523 if (!is_error(ret
)) {
9524 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9525 return -TARGET_EFAULT
;
9527 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9528 return -TARGET_EFAULT
;
9534 #if defined(TARGET_NR_settimeofday)
9535 case TARGET_NR_settimeofday
:
9537 struct timeval tv
, *ptv
= NULL
;
9538 struct timezone tz
, *ptz
= NULL
;
9541 if (copy_from_user_timeval(&tv
, arg1
)) {
9542 return -TARGET_EFAULT
;
9548 if (copy_from_user_timezone(&tz
, arg2
)) {
9549 return -TARGET_EFAULT
;
9554 return get_errno(settimeofday(ptv
, ptz
));
9557 #if defined(TARGET_NR_select)
9558 case TARGET_NR_select
:
9559 #if defined(TARGET_WANT_NI_OLD_SELECT)
9560 /* some architectures used to have old_select here
9561 * but now ENOSYS it.
9563 ret
= -TARGET_ENOSYS
;
9564 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9565 ret
= do_old_select(arg1
);
9567 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9571 #ifdef TARGET_NR_pselect6
9572 case TARGET_NR_pselect6
:
9573 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9575 #ifdef TARGET_NR_pselect6_time64
9576 case TARGET_NR_pselect6_time64
:
9577 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9579 #ifdef TARGET_NR_symlink
9580 case TARGET_NR_symlink
:
9583 p
= lock_user_string(arg1
);
9584 p2
= lock_user_string(arg2
);
9586 ret
= -TARGET_EFAULT
;
9588 ret
= get_errno(symlink(p
, p2
));
9589 unlock_user(p2
, arg2
, 0);
9590 unlock_user(p
, arg1
, 0);
9594 #if defined(TARGET_NR_symlinkat)
9595 case TARGET_NR_symlinkat
:
9598 p
= lock_user_string(arg1
);
9599 p2
= lock_user_string(arg3
);
9601 ret
= -TARGET_EFAULT
;
9603 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9604 unlock_user(p2
, arg3
, 0);
9605 unlock_user(p
, arg1
, 0);
9609 #ifdef TARGET_NR_readlink
9610 case TARGET_NR_readlink
:
9613 p
= lock_user_string(arg1
);
9614 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9616 ret
= -TARGET_EFAULT
;
9618 /* Short circuit this for the magic exe check. */
9619 ret
= -TARGET_EINVAL
;
9620 } else if (is_proc_myself((const char *)p
, "exe")) {
9621 char real
[PATH_MAX
], *temp
;
9622 temp
= realpath(exec_path
, real
);
9623 /* Return value is # of bytes that we wrote to the buffer. */
9625 ret
= get_errno(-1);
9627 /* Don't worry about sign mismatch as earlier mapping
9628 * logic would have thrown a bad address error. */
9629 ret
= MIN(strlen(real
), arg3
);
9630 /* We cannot NUL terminate the string. */
9631 memcpy(p2
, real
, ret
);
9634 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9636 unlock_user(p2
, arg2
, ret
);
9637 unlock_user(p
, arg1
, 0);
9641 #if defined(TARGET_NR_readlinkat)
9642 case TARGET_NR_readlinkat
:
9645 p
= lock_user_string(arg2
);
9646 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9648 ret
= -TARGET_EFAULT
;
9649 } else if (is_proc_myself((const char *)p
, "exe")) {
9650 char real
[PATH_MAX
], *temp
;
9651 temp
= realpath(exec_path
, real
);
9652 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9653 snprintf((char *)p2
, arg4
, "%s", real
);
9655 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9657 unlock_user(p2
, arg3
, ret
);
9658 unlock_user(p
, arg2
, 0);
9662 #ifdef TARGET_NR_swapon
9663 case TARGET_NR_swapon
:
9664 if (!(p
= lock_user_string(arg1
)))
9665 return -TARGET_EFAULT
;
9666 ret
= get_errno(swapon(p
, arg2
));
9667 unlock_user(p
, arg1
, 0);
9670 case TARGET_NR_reboot
:
9671 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9672 /* arg4 must be ignored in all other cases */
9673 p
= lock_user_string(arg4
);
9675 return -TARGET_EFAULT
;
9677 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9678 unlock_user(p
, arg4
, 0);
9680 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9683 #ifdef TARGET_NR_mmap
9684 case TARGET_NR_mmap
:
9685 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9686 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9687 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9688 || defined(TARGET_S390X)
9691 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9692 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9693 return -TARGET_EFAULT
;
9700 unlock_user(v
, arg1
, 0);
9701 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9702 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9706 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9707 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9713 #ifdef TARGET_NR_mmap2
9714 case TARGET_NR_mmap2
:
9716 #define MMAP_SHIFT 12
9718 ret
= target_mmap(arg1
, arg2
, arg3
,
9719 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9720 arg5
, arg6
<< MMAP_SHIFT
);
9721 return get_errno(ret
);
9723 case TARGET_NR_munmap
:
9724 return get_errno(target_munmap(arg1
, arg2
));
9725 case TARGET_NR_mprotect
:
9727 TaskState
*ts
= cpu
->opaque
;
9728 /* Special hack to detect libc making the stack executable. */
9729 if ((arg3
& PROT_GROWSDOWN
)
9730 && arg1
>= ts
->info
->stack_limit
9731 && arg1
<= ts
->info
->start_stack
) {
9732 arg3
&= ~PROT_GROWSDOWN
;
9733 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9734 arg1
= ts
->info
->stack_limit
;
9737 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9738 #ifdef TARGET_NR_mremap
9739 case TARGET_NR_mremap
:
9740 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9742 /* ??? msync/mlock/munlock are broken for softmmu. */
9743 #ifdef TARGET_NR_msync
9744 case TARGET_NR_msync
:
9745 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
9747 #ifdef TARGET_NR_mlock
9748 case TARGET_NR_mlock
:
9749 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
9751 #ifdef TARGET_NR_munlock
9752 case TARGET_NR_munlock
:
9753 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
9755 #ifdef TARGET_NR_mlockall
9756 case TARGET_NR_mlockall
:
9757 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9759 #ifdef TARGET_NR_munlockall
9760 case TARGET_NR_munlockall
:
9761 return get_errno(munlockall());
9763 #ifdef TARGET_NR_truncate
9764 case TARGET_NR_truncate
:
9765 if (!(p
= lock_user_string(arg1
)))
9766 return -TARGET_EFAULT
;
9767 ret
= get_errno(truncate(p
, arg2
));
9768 unlock_user(p
, arg1
, 0);
9771 #ifdef TARGET_NR_ftruncate
9772 case TARGET_NR_ftruncate
:
9773 return get_errno(ftruncate(arg1
, arg2
));
9775 case TARGET_NR_fchmod
:
9776 return get_errno(fchmod(arg1
, arg2
));
9777 #if defined(TARGET_NR_fchmodat)
9778 case TARGET_NR_fchmodat
:
9779 if (!(p
= lock_user_string(arg2
)))
9780 return -TARGET_EFAULT
;
9781 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9782 unlock_user(p
, arg2
, 0);
9785 case TARGET_NR_getpriority
:
9786 /* Note that negative values are valid for getpriority, so we must
9787 differentiate based on errno settings. */
9789 ret
= getpriority(arg1
, arg2
);
9790 if (ret
== -1 && errno
!= 0) {
9791 return -host_to_target_errno(errno
);
9794 /* Return value is the unbiased priority. Signal no error. */
9795 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9797 /* Return value is a biased priority to avoid negative numbers. */
9801 case TARGET_NR_setpriority
:
9802 return get_errno(setpriority(arg1
, arg2
, arg3
));
9803 #ifdef TARGET_NR_statfs
9804 case TARGET_NR_statfs
:
9805 if (!(p
= lock_user_string(arg1
))) {
9806 return -TARGET_EFAULT
;
9808 ret
= get_errno(statfs(path(p
), &stfs
));
9809 unlock_user(p
, arg1
, 0);
9811 if (!is_error(ret
)) {
9812 struct target_statfs
*target_stfs
;
9814 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9815 return -TARGET_EFAULT
;
9816 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9817 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9818 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9819 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9820 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9821 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9822 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9823 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9824 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9825 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9826 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9827 #ifdef _STATFS_F_FLAGS
9828 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9830 __put_user(0, &target_stfs
->f_flags
);
9832 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9833 unlock_user_struct(target_stfs
, arg2
, 1);
9837 #ifdef TARGET_NR_fstatfs
9838 case TARGET_NR_fstatfs
:
9839 ret
= get_errno(fstatfs(arg1
, &stfs
));
9840 goto convert_statfs
;
9842 #ifdef TARGET_NR_statfs64
9843 case TARGET_NR_statfs64
:
9844 if (!(p
= lock_user_string(arg1
))) {
9845 return -TARGET_EFAULT
;
9847 ret
= get_errno(statfs(path(p
), &stfs
));
9848 unlock_user(p
, arg1
, 0);
9850 if (!is_error(ret
)) {
9851 struct target_statfs64
*target_stfs
;
9853 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9854 return -TARGET_EFAULT
;
9855 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9856 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9857 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9858 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9859 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9860 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9861 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9862 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9863 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9864 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9865 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9866 #ifdef _STATFS_F_FLAGS
9867 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9869 __put_user(0, &target_stfs
->f_flags
);
9871 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9872 unlock_user_struct(target_stfs
, arg3
, 1);
9875 case TARGET_NR_fstatfs64
:
9876 ret
= get_errno(fstatfs(arg1
, &stfs
));
9877 goto convert_statfs64
;
9879 #ifdef TARGET_NR_socketcall
9880 case TARGET_NR_socketcall
:
9881 return do_socketcall(arg1
, arg2
);
9883 #ifdef TARGET_NR_accept
9884 case TARGET_NR_accept
:
9885 return do_accept4(arg1
, arg2
, arg3
, 0);
9887 #ifdef TARGET_NR_accept4
9888 case TARGET_NR_accept4
:
9889 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9891 #ifdef TARGET_NR_bind
9892 case TARGET_NR_bind
:
9893 return do_bind(arg1
, arg2
, arg3
);
9895 #ifdef TARGET_NR_connect
9896 case TARGET_NR_connect
:
9897 return do_connect(arg1
, arg2
, arg3
);
9899 #ifdef TARGET_NR_getpeername
9900 case TARGET_NR_getpeername
:
9901 return do_getpeername(arg1
, arg2
, arg3
);
9903 #ifdef TARGET_NR_getsockname
9904 case TARGET_NR_getsockname
:
9905 return do_getsockname(arg1
, arg2
, arg3
);
9907 #ifdef TARGET_NR_getsockopt
9908 case TARGET_NR_getsockopt
:
9909 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9911 #ifdef TARGET_NR_listen
9912 case TARGET_NR_listen
:
9913 return get_errno(listen(arg1
, arg2
));
9915 #ifdef TARGET_NR_recv
9916 case TARGET_NR_recv
:
9917 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9919 #ifdef TARGET_NR_recvfrom
9920 case TARGET_NR_recvfrom
:
9921 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9923 #ifdef TARGET_NR_recvmsg
9924 case TARGET_NR_recvmsg
:
9925 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9927 #ifdef TARGET_NR_send
9928 case TARGET_NR_send
:
9929 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9931 #ifdef TARGET_NR_sendmsg
9932 case TARGET_NR_sendmsg
:
9933 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9935 #ifdef TARGET_NR_sendmmsg
9936 case TARGET_NR_sendmmsg
:
9937 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9939 #ifdef TARGET_NR_recvmmsg
9940 case TARGET_NR_recvmmsg
:
9941 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9943 #ifdef TARGET_NR_sendto
9944 case TARGET_NR_sendto
:
9945 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9947 #ifdef TARGET_NR_shutdown
9948 case TARGET_NR_shutdown
:
9949 return get_errno(shutdown(arg1
, arg2
));
9951 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9952 case TARGET_NR_getrandom
:
9953 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9955 return -TARGET_EFAULT
;
9957 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9958 unlock_user(p
, arg1
, ret
);
9961 #ifdef TARGET_NR_socket
9962 case TARGET_NR_socket
:
9963 return do_socket(arg1
, arg2
, arg3
);
9965 #ifdef TARGET_NR_socketpair
9966 case TARGET_NR_socketpair
:
9967 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9969 #ifdef TARGET_NR_setsockopt
9970 case TARGET_NR_setsockopt
:
9971 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9973 #if defined(TARGET_NR_syslog)
9974 case TARGET_NR_syslog
:
9979 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9980 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9981 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9982 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9983 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9984 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9985 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9986 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9987 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9988 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9989 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9990 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9993 return -TARGET_EINVAL
;
9998 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10000 return -TARGET_EFAULT
;
10002 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10003 unlock_user(p
, arg2
, arg3
);
10007 return -TARGET_EINVAL
;
10012 case TARGET_NR_setitimer
:
10014 struct itimerval value
, ovalue
, *pvalue
;
10018 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10019 || copy_from_user_timeval(&pvalue
->it_value
,
10020 arg2
+ sizeof(struct target_timeval
)))
10021 return -TARGET_EFAULT
;
10025 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10026 if (!is_error(ret
) && arg3
) {
10027 if (copy_to_user_timeval(arg3
,
10028 &ovalue
.it_interval
)
10029 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10031 return -TARGET_EFAULT
;
10035 case TARGET_NR_getitimer
:
10037 struct itimerval value
;
10039 ret
= get_errno(getitimer(arg1
, &value
));
10040 if (!is_error(ret
) && arg2
) {
10041 if (copy_to_user_timeval(arg2
,
10042 &value
.it_interval
)
10043 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10045 return -TARGET_EFAULT
;
10049 #ifdef TARGET_NR_stat
10050 case TARGET_NR_stat
:
10051 if (!(p
= lock_user_string(arg1
))) {
10052 return -TARGET_EFAULT
;
10054 ret
= get_errno(stat(path(p
), &st
));
10055 unlock_user(p
, arg1
, 0);
10058 #ifdef TARGET_NR_lstat
10059 case TARGET_NR_lstat
:
10060 if (!(p
= lock_user_string(arg1
))) {
10061 return -TARGET_EFAULT
;
10063 ret
= get_errno(lstat(path(p
), &st
));
10064 unlock_user(p
, arg1
, 0);
10067 #ifdef TARGET_NR_fstat
10068 case TARGET_NR_fstat
:
10070 ret
= get_errno(fstat(arg1
, &st
));
10071 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10074 if (!is_error(ret
)) {
10075 struct target_stat
*target_st
;
10077 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10078 return -TARGET_EFAULT
;
10079 memset(target_st
, 0, sizeof(*target_st
));
10080 __put_user(st
.st_dev
, &target_st
->st_dev
);
10081 __put_user(st
.st_ino
, &target_st
->st_ino
);
10082 __put_user(st
.st_mode
, &target_st
->st_mode
);
10083 __put_user(st
.st_uid
, &target_st
->st_uid
);
10084 __put_user(st
.st_gid
, &target_st
->st_gid
);
10085 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10086 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10087 __put_user(st
.st_size
, &target_st
->st_size
);
10088 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10089 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10090 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10091 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10092 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10093 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10094 defined(TARGET_STAT_HAVE_NSEC)
10095 __put_user(st
.st_atim
.tv_nsec
,
10096 &target_st
->target_st_atime_nsec
);
10097 __put_user(st
.st_mtim
.tv_nsec
,
10098 &target_st
->target_st_mtime_nsec
);
10099 __put_user(st
.st_ctim
.tv_nsec
,
10100 &target_st
->target_st_ctime_nsec
);
10102 unlock_user_struct(target_st
, arg2
, 1);
10107 case TARGET_NR_vhangup
:
10108 return get_errno(vhangup());
10109 #ifdef TARGET_NR_syscall
10110 case TARGET_NR_syscall
:
10111 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10112 arg6
, arg7
, arg8
, 0);
10114 #if defined(TARGET_NR_wait4)
10115 case TARGET_NR_wait4
:
10118 abi_long status_ptr
= arg2
;
10119 struct rusage rusage
, *rusage_ptr
;
10120 abi_ulong target_rusage
= arg4
;
10121 abi_long rusage_err
;
10123 rusage_ptr
= &rusage
;
10126 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10127 if (!is_error(ret
)) {
10128 if (status_ptr
&& ret
) {
10129 status
= host_to_target_waitstatus(status
);
10130 if (put_user_s32(status
, status_ptr
))
10131 return -TARGET_EFAULT
;
10133 if (target_rusage
) {
10134 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10143 #ifdef TARGET_NR_swapoff
10144 case TARGET_NR_swapoff
:
10145 if (!(p
= lock_user_string(arg1
)))
10146 return -TARGET_EFAULT
;
10147 ret
= get_errno(swapoff(p
));
10148 unlock_user(p
, arg1
, 0);
10151 case TARGET_NR_sysinfo
:
10153 struct target_sysinfo
*target_value
;
10154 struct sysinfo value
;
10155 ret
= get_errno(sysinfo(&value
));
10156 if (!is_error(ret
) && arg1
)
10158 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10159 return -TARGET_EFAULT
;
10160 __put_user(value
.uptime
, &target_value
->uptime
);
10161 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10162 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10163 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10164 __put_user(value
.totalram
, &target_value
->totalram
);
10165 __put_user(value
.freeram
, &target_value
->freeram
);
10166 __put_user(value
.sharedram
, &target_value
->sharedram
);
10167 __put_user(value
.bufferram
, &target_value
->bufferram
);
10168 __put_user(value
.totalswap
, &target_value
->totalswap
);
10169 __put_user(value
.freeswap
, &target_value
->freeswap
);
10170 __put_user(value
.procs
, &target_value
->procs
);
10171 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10172 __put_user(value
.freehigh
, &target_value
->freehigh
);
10173 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10174 unlock_user_struct(target_value
, arg1
, 1);
10178 #ifdef TARGET_NR_ipc
10179 case TARGET_NR_ipc
:
10180 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10182 #ifdef TARGET_NR_semget
10183 case TARGET_NR_semget
:
10184 return get_errno(semget(arg1
, arg2
, arg3
));
10186 #ifdef TARGET_NR_semop
10187 case TARGET_NR_semop
:
10188 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10190 #ifdef TARGET_NR_semtimedop
10191 case TARGET_NR_semtimedop
:
10192 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10194 #ifdef TARGET_NR_semtimedop_time64
10195 case TARGET_NR_semtimedop_time64
:
10196 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10198 #ifdef TARGET_NR_semctl
10199 case TARGET_NR_semctl
:
10200 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10202 #ifdef TARGET_NR_msgctl
10203 case TARGET_NR_msgctl
:
10204 return do_msgctl(arg1
, arg2
, arg3
);
10206 #ifdef TARGET_NR_msgget
10207 case TARGET_NR_msgget
:
10208 return get_errno(msgget(arg1
, arg2
));
10210 #ifdef TARGET_NR_msgrcv
10211 case TARGET_NR_msgrcv
:
10212 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10214 #ifdef TARGET_NR_msgsnd
10215 case TARGET_NR_msgsnd
:
10216 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10218 #ifdef TARGET_NR_shmget
10219 case TARGET_NR_shmget
:
10220 return get_errno(shmget(arg1
, arg2
, arg3
));
10222 #ifdef TARGET_NR_shmctl
10223 case TARGET_NR_shmctl
:
10224 return do_shmctl(arg1
, arg2
, arg3
);
10226 #ifdef TARGET_NR_shmat
10227 case TARGET_NR_shmat
:
10228 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10230 #ifdef TARGET_NR_shmdt
10231 case TARGET_NR_shmdt
:
10232 return do_shmdt(arg1
);
10234 case TARGET_NR_fsync
:
10235 return get_errno(fsync(arg1
));
10236 case TARGET_NR_clone
:
10237 /* Linux manages to have three different orderings for its
10238 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10239 * match the kernel's CONFIG_CLONE_* settings.
10240 * Microblaze is further special in that it uses a sixth
10241 * implicit argument to clone for the TLS pointer.
10243 #if defined(TARGET_MICROBLAZE)
10244 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10245 #elif defined(TARGET_CLONE_BACKWARDS)
10246 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10247 #elif defined(TARGET_CLONE_BACKWARDS2)
10248 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10250 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10253 #ifdef __NR_exit_group
10254 /* new thread calls */
10255 case TARGET_NR_exit_group
:
10256 preexit_cleanup(cpu_env
, arg1
);
10257 return get_errno(exit_group(arg1
));
10259 case TARGET_NR_setdomainname
:
10260 if (!(p
= lock_user_string(arg1
)))
10261 return -TARGET_EFAULT
;
10262 ret
= get_errno(setdomainname(p
, arg2
));
10263 unlock_user(p
, arg1
, 0);
10265 case TARGET_NR_uname
:
10266 /* no need to transcode because we use the linux syscall */
10268 struct new_utsname
* buf
;
10270 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10271 return -TARGET_EFAULT
;
10272 ret
= get_errno(sys_uname(buf
));
10273 if (!is_error(ret
)) {
10274 /* Overwrite the native machine name with whatever is being
10276 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10277 sizeof(buf
->machine
));
10278 /* Allow the user to override the reported release. */
10279 if (qemu_uname_release
&& *qemu_uname_release
) {
10280 g_strlcpy(buf
->release
, qemu_uname_release
,
10281 sizeof(buf
->release
));
10284 unlock_user_struct(buf
, arg1
, 1);
10288 case TARGET_NR_modify_ldt
:
10289 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10290 #if !defined(TARGET_X86_64)
10291 case TARGET_NR_vm86
:
10292 return do_vm86(cpu_env
, arg1
, arg2
);
10295 #if defined(TARGET_NR_adjtimex)
10296 case TARGET_NR_adjtimex
:
10298 struct timex host_buf
;
10300 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10301 return -TARGET_EFAULT
;
10303 ret
= get_errno(adjtimex(&host_buf
));
10304 if (!is_error(ret
)) {
10305 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10306 return -TARGET_EFAULT
;
10312 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10313 case TARGET_NR_clock_adjtime
:
10315 struct timex htx
, *phtx
= &htx
;
10317 if (target_to_host_timex(phtx
, arg2
) != 0) {
10318 return -TARGET_EFAULT
;
10320 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10321 if (!is_error(ret
) && phtx
) {
10322 if (host_to_target_timex(arg2
, phtx
) != 0) {
10323 return -TARGET_EFAULT
;
10329 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10330 case TARGET_NR_clock_adjtime64
:
10334 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10335 return -TARGET_EFAULT
;
10337 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10338 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10339 return -TARGET_EFAULT
;
10344 case TARGET_NR_getpgid
:
10345 return get_errno(getpgid(arg1
));
10346 case TARGET_NR_fchdir
:
10347 return get_errno(fchdir(arg1
));
10348 case TARGET_NR_personality
:
10349 return get_errno(personality(arg1
));
10350 #ifdef TARGET_NR__llseek /* Not on alpha */
10351 case TARGET_NR__llseek
:
10354 #if !defined(__NR_llseek)
10355 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10357 ret
= get_errno(res
);
10362 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10364 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10365 return -TARGET_EFAULT
;
10370 #ifdef TARGET_NR_getdents
10371 case TARGET_NR_getdents
:
10372 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10373 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10375 struct target_dirent
*target_dirp
;
10376 struct linux_dirent
*dirp
;
10377 abi_long count
= arg3
;
10379 dirp
= g_try_malloc(count
);
10381 return -TARGET_ENOMEM
;
10384 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10385 if (!is_error(ret
)) {
10386 struct linux_dirent
*de
;
10387 struct target_dirent
*tde
;
10389 int reclen
, treclen
;
10390 int count1
, tnamelen
;
10394 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10395 return -TARGET_EFAULT
;
10398 reclen
= de
->d_reclen
;
10399 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10400 assert(tnamelen
>= 0);
10401 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10402 assert(count1
+ treclen
<= count
);
10403 tde
->d_reclen
= tswap16(treclen
);
10404 tde
->d_ino
= tswapal(de
->d_ino
);
10405 tde
->d_off
= tswapal(de
->d_off
);
10406 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10407 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10409 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10413 unlock_user(target_dirp
, arg2
, ret
);
10419 struct linux_dirent
*dirp
;
10420 abi_long count
= arg3
;
10422 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10423 return -TARGET_EFAULT
;
10424 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10425 if (!is_error(ret
)) {
10426 struct linux_dirent
*de
;
10431 reclen
= de
->d_reclen
;
10434 de
->d_reclen
= tswap16(reclen
);
10435 tswapls(&de
->d_ino
);
10436 tswapls(&de
->d_off
);
10437 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10441 unlock_user(dirp
, arg2
, ret
);
10445 /* Implement getdents in terms of getdents64 */
10447 struct linux_dirent64
*dirp
;
10448 abi_long count
= arg3
;
10450 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10452 return -TARGET_EFAULT
;
10454 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10455 if (!is_error(ret
)) {
10456 /* Convert the dirent64 structs to target dirent. We do this
10457 * in-place, since we can guarantee that a target_dirent is no
10458 * larger than a dirent64; however this means we have to be
10459 * careful to read everything before writing in the new format.
10461 struct linux_dirent64
*de
;
10462 struct target_dirent
*tde
;
10467 tde
= (struct target_dirent
*)dirp
;
10469 int namelen
, treclen
;
10470 int reclen
= de
->d_reclen
;
10471 uint64_t ino
= de
->d_ino
;
10472 int64_t off
= de
->d_off
;
10473 uint8_t type
= de
->d_type
;
10475 namelen
= strlen(de
->d_name
);
10476 treclen
= offsetof(struct target_dirent
, d_name
)
10478 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10480 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10481 tde
->d_ino
= tswapal(ino
);
10482 tde
->d_off
= tswapal(off
);
10483 tde
->d_reclen
= tswap16(treclen
);
10484 /* The target_dirent type is in what was formerly a padding
10485 * byte at the end of the structure:
10487 *(((char *)tde
) + treclen
- 1) = type
;
10489 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10490 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10496 unlock_user(dirp
, arg2
, ret
);
10500 #endif /* TARGET_NR_getdents */
10501 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10502 case TARGET_NR_getdents64
:
10504 struct linux_dirent64
*dirp
;
10505 abi_long count
= arg3
;
10506 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10507 return -TARGET_EFAULT
;
10508 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10509 if (!is_error(ret
)) {
10510 struct linux_dirent64
*de
;
10515 reclen
= de
->d_reclen
;
10518 de
->d_reclen
= tswap16(reclen
);
10519 tswap64s((uint64_t *)&de
->d_ino
);
10520 tswap64s((uint64_t *)&de
->d_off
);
10521 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10525 unlock_user(dirp
, arg2
, ret
);
10528 #endif /* TARGET_NR_getdents64 */
10529 #if defined(TARGET_NR__newselect)
10530 case TARGET_NR__newselect
:
10531 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10533 #ifdef TARGET_NR_poll
10534 case TARGET_NR_poll
:
10535 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10537 #ifdef TARGET_NR_ppoll
10538 case TARGET_NR_ppoll
:
10539 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10541 #ifdef TARGET_NR_ppoll_time64
10542 case TARGET_NR_ppoll_time64
:
10543 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10545 case TARGET_NR_flock
:
10546 /* NOTE: the flock constant seems to be the same for every
10548 return get_errno(safe_flock(arg1
, arg2
));
10549 case TARGET_NR_readv
:
10551 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10553 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10554 unlock_iovec(vec
, arg2
, arg3
, 1);
10556 ret
= -host_to_target_errno(errno
);
10560 case TARGET_NR_writev
:
10562 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10564 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10565 unlock_iovec(vec
, arg2
, arg3
, 0);
10567 ret
= -host_to_target_errno(errno
);
10571 #if defined(TARGET_NR_preadv)
10572 case TARGET_NR_preadv
:
10574 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10576 unsigned long low
, high
;
10578 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10579 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10580 unlock_iovec(vec
, arg2
, arg3
, 1);
10582 ret
= -host_to_target_errno(errno
);
10587 #if defined(TARGET_NR_pwritev)
10588 case TARGET_NR_pwritev
:
10590 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10592 unsigned long low
, high
;
10594 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10595 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10596 unlock_iovec(vec
, arg2
, arg3
, 0);
10598 ret
= -host_to_target_errno(errno
);
10603 case TARGET_NR_getsid
:
10604 return get_errno(getsid(arg1
));
10605 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10606 case TARGET_NR_fdatasync
:
10607 return get_errno(fdatasync(arg1
));
10609 case TARGET_NR_sched_getaffinity
:
10611 unsigned int mask_size
;
10612 unsigned long *mask
;
10615 * sched_getaffinity needs multiples of ulong, so need to take
10616 * care of mismatches between target ulong and host ulong sizes.
10618 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10619 return -TARGET_EINVAL
;
10621 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10623 mask
= alloca(mask_size
);
10624 memset(mask
, 0, mask_size
);
10625 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10627 if (!is_error(ret
)) {
10629 /* More data returned than the caller's buffer will fit.
10630 * This only happens if sizeof(abi_long) < sizeof(long)
10631 * and the caller passed us a buffer holding an odd number
10632 * of abi_longs. If the host kernel is actually using the
10633 * extra 4 bytes then fail EINVAL; otherwise we can just
10634 * ignore them and only copy the interesting part.
10636 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10637 if (numcpus
> arg2
* 8) {
10638 return -TARGET_EINVAL
;
10643 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10644 return -TARGET_EFAULT
;
10649 case TARGET_NR_sched_setaffinity
:
10651 unsigned int mask_size
;
10652 unsigned long *mask
;
10655 * sched_setaffinity needs multiples of ulong, so need to take
10656 * care of mismatches between target ulong and host ulong sizes.
10658 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10659 return -TARGET_EINVAL
;
10661 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10662 mask
= alloca(mask_size
);
10664 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10669 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10671 case TARGET_NR_getcpu
:
10673 unsigned cpu
, node
;
10674 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10675 arg2
? &node
: NULL
,
10677 if (is_error(ret
)) {
10680 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10681 return -TARGET_EFAULT
;
10683 if (arg2
&& put_user_u32(node
, arg2
)) {
10684 return -TARGET_EFAULT
;
10688 case TARGET_NR_sched_setparam
:
10690 struct sched_param
*target_schp
;
10691 struct sched_param schp
;
10694 return -TARGET_EINVAL
;
10696 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10697 return -TARGET_EFAULT
;
10698 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10699 unlock_user_struct(target_schp
, arg2
, 0);
10700 return get_errno(sched_setparam(arg1
, &schp
));
10702 case TARGET_NR_sched_getparam
:
10704 struct sched_param
*target_schp
;
10705 struct sched_param schp
;
10708 return -TARGET_EINVAL
;
10710 ret
= get_errno(sched_getparam(arg1
, &schp
));
10711 if (!is_error(ret
)) {
10712 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10713 return -TARGET_EFAULT
;
10714 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10715 unlock_user_struct(target_schp
, arg2
, 1);
10719 case TARGET_NR_sched_setscheduler
:
10721 struct sched_param
*target_schp
;
10722 struct sched_param schp
;
10724 return -TARGET_EINVAL
;
10726 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10727 return -TARGET_EFAULT
;
10728 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10729 unlock_user_struct(target_schp
, arg3
, 0);
10730 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10732 case TARGET_NR_sched_getscheduler
:
10733 return get_errno(sched_getscheduler(arg1
));
10734 case TARGET_NR_sched_yield
:
10735 return get_errno(sched_yield());
10736 case TARGET_NR_sched_get_priority_max
:
10737 return get_errno(sched_get_priority_max(arg1
));
10738 case TARGET_NR_sched_get_priority_min
:
10739 return get_errno(sched_get_priority_min(arg1
));
10740 #ifdef TARGET_NR_sched_rr_get_interval
10741 case TARGET_NR_sched_rr_get_interval
:
10743 struct timespec ts
;
10744 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10745 if (!is_error(ret
)) {
10746 ret
= host_to_target_timespec(arg2
, &ts
);
10751 #ifdef TARGET_NR_sched_rr_get_interval_time64
10752 case TARGET_NR_sched_rr_get_interval_time64
:
10754 struct timespec ts
;
10755 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10756 if (!is_error(ret
)) {
10757 ret
= host_to_target_timespec64(arg2
, &ts
);
10762 #if defined(TARGET_NR_nanosleep)
10763 case TARGET_NR_nanosleep
:
10765 struct timespec req
, rem
;
10766 target_to_host_timespec(&req
, arg1
);
10767 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10768 if (is_error(ret
) && arg2
) {
10769 host_to_target_timespec(arg2
, &rem
);
10774 case TARGET_NR_prctl
:
10776 case PR_GET_PDEATHSIG
:
10779 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10780 if (!is_error(ret
) && arg2
10781 && put_user_s32(deathsig
, arg2
)) {
10782 return -TARGET_EFAULT
;
10789 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10791 return -TARGET_EFAULT
;
10793 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10794 arg3
, arg4
, arg5
));
10795 unlock_user(name
, arg2
, 16);
10800 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10802 return -TARGET_EFAULT
;
10804 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10805 arg3
, arg4
, arg5
));
10806 unlock_user(name
, arg2
, 0);
10811 case TARGET_PR_GET_FP_MODE
:
10813 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10815 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10816 ret
|= TARGET_PR_FP_MODE_FR
;
10818 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10819 ret
|= TARGET_PR_FP_MODE_FRE
;
10823 case TARGET_PR_SET_FP_MODE
:
10825 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10826 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10827 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10828 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10829 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10831 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10832 TARGET_PR_FP_MODE_FRE
;
10834 /* If nothing to change, return right away, successfully. */
10835 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10838 /* Check the value is valid */
10839 if (arg2
& ~known_bits
) {
10840 return -TARGET_EOPNOTSUPP
;
10842 /* Setting FRE without FR is not supported. */
10843 if (new_fre
&& !new_fr
) {
10844 return -TARGET_EOPNOTSUPP
;
10846 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10847 /* FR1 is not supported */
10848 return -TARGET_EOPNOTSUPP
;
10850 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10851 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10852 /* cannot set FR=0 */
10853 return -TARGET_EOPNOTSUPP
;
10855 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10856 /* Cannot set FRE=1 */
10857 return -TARGET_EOPNOTSUPP
;
10861 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10862 for (i
= 0; i
< 32 ; i
+= 2) {
10863 if (!old_fr
&& new_fr
) {
10864 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10865 } else if (old_fr
&& !new_fr
) {
10866 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10871 env
->CP0_Status
|= (1 << CP0St_FR
);
10872 env
->hflags
|= MIPS_HFLAG_F64
;
10874 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10875 env
->hflags
&= ~MIPS_HFLAG_F64
;
10878 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10879 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10880 env
->hflags
|= MIPS_HFLAG_FRE
;
10883 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10884 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10890 #ifdef TARGET_AARCH64
10891 case TARGET_PR_SVE_SET_VL
:
10893 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10894 * PR_SVE_VL_INHERIT. Note the kernel definition
10895 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10896 * even though the current architectural maximum is VQ=16.
10898 ret
= -TARGET_EINVAL
;
10899 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10900 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10901 CPUARMState
*env
= cpu_env
;
10902 ARMCPU
*cpu
= env_archcpu(env
);
10903 uint32_t vq
, old_vq
;
10905 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10906 vq
= MAX(arg2
/ 16, 1);
10907 vq
= MIN(vq
, cpu
->sve_max_vq
);
10910 aarch64_sve_narrow_vq(env
, vq
);
10912 env
->vfp
.zcr_el
[1] = vq
- 1;
10913 arm_rebuild_hflags(env
);
10917 case TARGET_PR_SVE_GET_VL
:
10918 ret
= -TARGET_EINVAL
;
10920 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10921 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10922 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10926 case TARGET_PR_PAC_RESET_KEYS
:
10928 CPUARMState
*env
= cpu_env
;
10929 ARMCPU
*cpu
= env_archcpu(env
);
10931 if (arg3
|| arg4
|| arg5
) {
10932 return -TARGET_EINVAL
;
10934 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10935 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10936 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10937 TARGET_PR_PAC_APGAKEY
);
10943 } else if (arg2
& ~all
) {
10944 return -TARGET_EINVAL
;
10946 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10947 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10948 sizeof(ARMPACKey
), &err
);
10950 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10951 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10952 sizeof(ARMPACKey
), &err
);
10954 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10955 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10956 sizeof(ARMPACKey
), &err
);
10958 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10959 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10960 sizeof(ARMPACKey
), &err
);
10962 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10963 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10964 sizeof(ARMPACKey
), &err
);
10968 * Some unknown failure in the crypto. The best
10969 * we can do is log it and fail the syscall.
10970 * The real syscall cannot fail this way.
10972 qemu_log_mask(LOG_UNIMP
,
10973 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10974 error_get_pretty(err
));
10976 return -TARGET_EIO
;
10981 return -TARGET_EINVAL
;
10982 #endif /* AARCH64 */
10983 case PR_GET_SECCOMP
:
10984 case PR_SET_SECCOMP
:
10985 /* Disable seccomp to prevent the target disabling syscalls we
10987 return -TARGET_EINVAL
;
10989 /* Most prctl options have no pointer arguments */
10990 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10993 #ifdef TARGET_NR_arch_prctl
10994 case TARGET_NR_arch_prctl
:
10995 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10997 #ifdef TARGET_NR_pread64
10998 case TARGET_NR_pread64
:
10999 if (regpairs_aligned(cpu_env
, num
)) {
11003 if (arg2
== 0 && arg3
== 0) {
11004 /* Special-case NULL buffer and zero length, which should succeed */
11007 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11009 return -TARGET_EFAULT
;
11012 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11013 unlock_user(p
, arg2
, ret
);
11015 case TARGET_NR_pwrite64
:
11016 if (regpairs_aligned(cpu_env
, num
)) {
11020 if (arg2
== 0 && arg3
== 0) {
11021 /* Special-case NULL buffer and zero length, which should succeed */
11024 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
11026 return -TARGET_EFAULT
;
11029 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11030 unlock_user(p
, arg2
, 0);
11033 case TARGET_NR_getcwd
:
11034 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11035 return -TARGET_EFAULT
;
11036 ret
= get_errno(sys_getcwd1(p
, arg2
));
11037 unlock_user(p
, arg1
, ret
);
11039 case TARGET_NR_capget
:
11040 case TARGET_NR_capset
:
11042 struct target_user_cap_header
*target_header
;
11043 struct target_user_cap_data
*target_data
= NULL
;
11044 struct __user_cap_header_struct header
;
11045 struct __user_cap_data_struct data
[2];
11046 struct __user_cap_data_struct
*dataptr
= NULL
;
11047 int i
, target_datalen
;
11048 int data_items
= 1;
11050 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11051 return -TARGET_EFAULT
;
11053 header
.version
= tswap32(target_header
->version
);
11054 header
.pid
= tswap32(target_header
->pid
);
11056 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11057 /* Version 2 and up takes pointer to two user_data structs */
11061 target_datalen
= sizeof(*target_data
) * data_items
;
11064 if (num
== TARGET_NR_capget
) {
11065 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11067 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11069 if (!target_data
) {
11070 unlock_user_struct(target_header
, arg1
, 0);
11071 return -TARGET_EFAULT
;
11074 if (num
== TARGET_NR_capset
) {
11075 for (i
= 0; i
< data_items
; i
++) {
11076 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11077 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11078 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11085 if (num
== TARGET_NR_capget
) {
11086 ret
= get_errno(capget(&header
, dataptr
));
11088 ret
= get_errno(capset(&header
, dataptr
));
11091 /* The kernel always updates version for both capget and capset */
11092 target_header
->version
= tswap32(header
.version
);
11093 unlock_user_struct(target_header
, arg1
, 1);
11096 if (num
== TARGET_NR_capget
) {
11097 for (i
= 0; i
< data_items
; i
++) {
11098 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11099 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11100 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11102 unlock_user(target_data
, arg2
, target_datalen
);
11104 unlock_user(target_data
, arg2
, 0);
11109 case TARGET_NR_sigaltstack
:
11110 return do_sigaltstack(arg1
, arg2
,
11111 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
11113 #ifdef CONFIG_SENDFILE
11114 #ifdef TARGET_NR_sendfile
11115 case TARGET_NR_sendfile
:
11117 off_t
*offp
= NULL
;
11120 ret
= get_user_sal(off
, arg3
);
11121 if (is_error(ret
)) {
11126 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11127 if (!is_error(ret
) && arg3
) {
11128 abi_long ret2
= put_user_sal(off
, arg3
);
11129 if (is_error(ret2
)) {
11136 #ifdef TARGET_NR_sendfile64
11137 case TARGET_NR_sendfile64
:
11139 off_t
*offp
= NULL
;
11142 ret
= get_user_s64(off
, arg3
);
11143 if (is_error(ret
)) {
11148 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11149 if (!is_error(ret
) && arg3
) {
11150 abi_long ret2
= put_user_s64(off
, arg3
);
11151 if (is_error(ret2
)) {
11159 #ifdef TARGET_NR_vfork
11160 case TARGET_NR_vfork
:
11161 return get_errno(do_fork(cpu_env
,
11162 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11165 #ifdef TARGET_NR_ugetrlimit
11166 case TARGET_NR_ugetrlimit
:
11168 struct rlimit rlim
;
11169 int resource
= target_to_host_resource(arg1
);
11170 ret
= get_errno(getrlimit(resource
, &rlim
));
11171 if (!is_error(ret
)) {
11172 struct target_rlimit
*target_rlim
;
11173 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11174 return -TARGET_EFAULT
;
11175 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11176 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11177 unlock_user_struct(target_rlim
, arg2
, 1);
11182 #ifdef TARGET_NR_truncate64
11183 case TARGET_NR_truncate64
:
11184 if (!(p
= lock_user_string(arg1
)))
11185 return -TARGET_EFAULT
;
11186 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11187 unlock_user(p
, arg1
, 0);
11190 #ifdef TARGET_NR_ftruncate64
11191 case TARGET_NR_ftruncate64
:
11192 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11194 #ifdef TARGET_NR_stat64
11195 case TARGET_NR_stat64
:
11196 if (!(p
= lock_user_string(arg1
))) {
11197 return -TARGET_EFAULT
;
11199 ret
= get_errno(stat(path(p
), &st
));
11200 unlock_user(p
, arg1
, 0);
11201 if (!is_error(ret
))
11202 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11205 #ifdef TARGET_NR_lstat64
11206 case TARGET_NR_lstat64
:
11207 if (!(p
= lock_user_string(arg1
))) {
11208 return -TARGET_EFAULT
;
11210 ret
= get_errno(lstat(path(p
), &st
));
11211 unlock_user(p
, arg1
, 0);
11212 if (!is_error(ret
))
11213 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11216 #ifdef TARGET_NR_fstat64
11217 case TARGET_NR_fstat64
:
11218 ret
= get_errno(fstat(arg1
, &st
));
11219 if (!is_error(ret
))
11220 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11223 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11224 #ifdef TARGET_NR_fstatat64
11225 case TARGET_NR_fstatat64
:
11227 #ifdef TARGET_NR_newfstatat
11228 case TARGET_NR_newfstatat
:
11230 if (!(p
= lock_user_string(arg2
))) {
11231 return -TARGET_EFAULT
;
11233 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11234 unlock_user(p
, arg2
, 0);
11235 if (!is_error(ret
))
11236 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11239 #if defined(TARGET_NR_statx)
11240 case TARGET_NR_statx
:
11242 struct target_statx
*target_stx
;
11246 p
= lock_user_string(arg2
);
11248 return -TARGET_EFAULT
;
11250 #if defined(__NR_statx)
11253 * It is assumed that struct statx is architecture independent.
11255 struct target_statx host_stx
;
11258 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11259 if (!is_error(ret
)) {
11260 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11261 unlock_user(p
, arg2
, 0);
11262 return -TARGET_EFAULT
;
11266 if (ret
!= -TARGET_ENOSYS
) {
11267 unlock_user(p
, arg2
, 0);
11272 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11273 unlock_user(p
, arg2
, 0);
11275 if (!is_error(ret
)) {
11276 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11277 return -TARGET_EFAULT
;
11279 memset(target_stx
, 0, sizeof(*target_stx
));
11280 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11281 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11282 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11283 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11284 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11285 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11286 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11287 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11288 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11289 __put_user(st
.st_size
, &target_stx
->stx_size
);
11290 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11291 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11292 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11293 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11294 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11295 unlock_user_struct(target_stx
, arg5
, 1);
11300 #ifdef TARGET_NR_lchown
11301 case TARGET_NR_lchown
:
11302 if (!(p
= lock_user_string(arg1
)))
11303 return -TARGET_EFAULT
;
11304 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11305 unlock_user(p
, arg1
, 0);
11308 #ifdef TARGET_NR_getuid
11309 case TARGET_NR_getuid
:
11310 return get_errno(high2lowuid(getuid()));
11312 #ifdef TARGET_NR_getgid
11313 case TARGET_NR_getgid
:
11314 return get_errno(high2lowgid(getgid()));
11316 #ifdef TARGET_NR_geteuid
11317 case TARGET_NR_geteuid
:
11318 return get_errno(high2lowuid(geteuid()));
11320 #ifdef TARGET_NR_getegid
11321 case TARGET_NR_getegid
:
11322 return get_errno(high2lowgid(getegid()));
11324 case TARGET_NR_setreuid
:
11325 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11326 case TARGET_NR_setregid
:
11327 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11328 case TARGET_NR_getgroups
:
11330 int gidsetsize
= arg1
;
11331 target_id
*target_grouplist
;
11335 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11336 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11337 if (gidsetsize
== 0)
11339 if (!is_error(ret
)) {
11340 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11341 if (!target_grouplist
)
11342 return -TARGET_EFAULT
;
11343 for(i
= 0;i
< ret
; i
++)
11344 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11345 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11349 case TARGET_NR_setgroups
:
11351 int gidsetsize
= arg1
;
11352 target_id
*target_grouplist
;
11353 gid_t
*grouplist
= NULL
;
11356 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11357 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11358 if (!target_grouplist
) {
11359 return -TARGET_EFAULT
;
11361 for (i
= 0; i
< gidsetsize
; i
++) {
11362 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11364 unlock_user(target_grouplist
, arg2
, 0);
11366 return get_errno(setgroups(gidsetsize
, grouplist
));
11368 case TARGET_NR_fchown
:
11369 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11370 #if defined(TARGET_NR_fchownat)
11371 case TARGET_NR_fchownat
:
11372 if (!(p
= lock_user_string(arg2
)))
11373 return -TARGET_EFAULT
;
11374 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11375 low2highgid(arg4
), arg5
));
11376 unlock_user(p
, arg2
, 0);
11379 #ifdef TARGET_NR_setresuid
11380 case TARGET_NR_setresuid
:
11381 return get_errno(sys_setresuid(low2highuid(arg1
),
11383 low2highuid(arg3
)));
11385 #ifdef TARGET_NR_getresuid
11386 case TARGET_NR_getresuid
:
11388 uid_t ruid
, euid
, suid
;
11389 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11390 if (!is_error(ret
)) {
11391 if (put_user_id(high2lowuid(ruid
), arg1
)
11392 || put_user_id(high2lowuid(euid
), arg2
)
11393 || put_user_id(high2lowuid(suid
), arg3
))
11394 return -TARGET_EFAULT
;
11399 #ifdef TARGET_NR_getresgid
11400 case TARGET_NR_setresgid
:
11401 return get_errno(sys_setresgid(low2highgid(arg1
),
11403 low2highgid(arg3
)));
11405 #ifdef TARGET_NR_getresgid
11406 case TARGET_NR_getresgid
:
11408 gid_t rgid
, egid
, sgid
;
11409 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11410 if (!is_error(ret
)) {
11411 if (put_user_id(high2lowgid(rgid
), arg1
)
11412 || put_user_id(high2lowgid(egid
), arg2
)
11413 || put_user_id(high2lowgid(sgid
), arg3
))
11414 return -TARGET_EFAULT
;
11419 #ifdef TARGET_NR_chown
11420 case TARGET_NR_chown
:
11421 if (!(p
= lock_user_string(arg1
)))
11422 return -TARGET_EFAULT
;
11423 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11424 unlock_user(p
, arg1
, 0);
11427 case TARGET_NR_setuid
:
11428 return get_errno(sys_setuid(low2highuid(arg1
)));
11429 case TARGET_NR_setgid
:
11430 return get_errno(sys_setgid(low2highgid(arg1
)));
11431 case TARGET_NR_setfsuid
:
11432 return get_errno(setfsuid(arg1
));
11433 case TARGET_NR_setfsgid
:
11434 return get_errno(setfsgid(arg1
));
11436 #ifdef TARGET_NR_lchown32
11437 case TARGET_NR_lchown32
:
11438 if (!(p
= lock_user_string(arg1
)))
11439 return -TARGET_EFAULT
;
11440 ret
= get_errno(lchown(p
, arg2
, arg3
));
11441 unlock_user(p
, arg1
, 0);
11444 #ifdef TARGET_NR_getuid32
11445 case TARGET_NR_getuid32
:
11446 return get_errno(getuid());
11449 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11450 /* Alpha specific */
11451 case TARGET_NR_getxuid
:
11455 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11457 return get_errno(getuid());
11459 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11460 /* Alpha specific */
11461 case TARGET_NR_getxgid
:
11465 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11467 return get_errno(getgid());
11469 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11470 /* Alpha specific */
11471 case TARGET_NR_osf_getsysinfo
:
11472 ret
= -TARGET_EOPNOTSUPP
;
11474 case TARGET_GSI_IEEE_FP_CONTROL
:
11476 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11477 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11479 swcr
&= ~SWCR_STATUS_MASK
;
11480 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11482 if (put_user_u64 (swcr
, arg2
))
11483 return -TARGET_EFAULT
;
11488 /* case GSI_IEEE_STATE_AT_SIGNAL:
11489 -- Not implemented in linux kernel.
11491 -- Retrieves current unaligned access state; not much used.
11492 case GSI_PROC_TYPE:
11493 -- Retrieves implver information; surely not used.
11494 case GSI_GET_HWRPB:
11495 -- Grabs a copy of the HWRPB; surely not used.
11500 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11501 /* Alpha specific */
11502 case TARGET_NR_osf_setsysinfo
:
11503 ret
= -TARGET_EOPNOTSUPP
;
11505 case TARGET_SSI_IEEE_FP_CONTROL
:
11507 uint64_t swcr
, fpcr
;
11509 if (get_user_u64 (swcr
, arg2
)) {
11510 return -TARGET_EFAULT
;
11514 * The kernel calls swcr_update_status to update the
11515 * status bits from the fpcr at every point that it
11516 * could be queried. Therefore, we store the status
11517 * bits only in FPCR.
11519 ((CPUAlphaState
*)cpu_env
)->swcr
11520 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11522 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11523 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11524 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11525 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11530 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11532 uint64_t exc
, fpcr
, fex
;
11534 if (get_user_u64(exc
, arg2
)) {
11535 return -TARGET_EFAULT
;
11537 exc
&= SWCR_STATUS_MASK
;
11538 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11540 /* Old exceptions are not signaled. */
11541 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11543 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11544 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11546 /* Update the hardware fpcr. */
11547 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11548 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11551 int si_code
= TARGET_FPE_FLTUNK
;
11552 target_siginfo_t info
;
11554 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11555 si_code
= TARGET_FPE_FLTUND
;
11557 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11558 si_code
= TARGET_FPE_FLTRES
;
11560 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11561 si_code
= TARGET_FPE_FLTUND
;
11563 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11564 si_code
= TARGET_FPE_FLTOVF
;
11566 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11567 si_code
= TARGET_FPE_FLTDIV
;
11569 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11570 si_code
= TARGET_FPE_FLTINV
;
11573 info
.si_signo
= SIGFPE
;
11575 info
.si_code
= si_code
;
11576 info
._sifields
._sigfault
._addr
11577 = ((CPUArchState
*)cpu_env
)->pc
;
11578 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11579 QEMU_SI_FAULT
, &info
);
11585 /* case SSI_NVPAIRS:
11586 -- Used with SSIN_UACPROC to enable unaligned accesses.
11587 case SSI_IEEE_STATE_AT_SIGNAL:
11588 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11589 -- Not implemented in linux kernel
11594 #ifdef TARGET_NR_osf_sigprocmask
11595 /* Alpha specific. */
11596 case TARGET_NR_osf_sigprocmask
:
11600 sigset_t set
, oldset
;
11603 case TARGET_SIG_BLOCK
:
11606 case TARGET_SIG_UNBLOCK
:
11609 case TARGET_SIG_SETMASK
:
11613 return -TARGET_EINVAL
;
11616 target_to_host_old_sigset(&set
, &mask
);
11617 ret
= do_sigprocmask(how
, &set
, &oldset
);
11619 host_to_target_old_sigset(&mask
, &oldset
);
11626 #ifdef TARGET_NR_getgid32
11627 case TARGET_NR_getgid32
:
11628 return get_errno(getgid());
11630 #ifdef TARGET_NR_geteuid32
11631 case TARGET_NR_geteuid32
:
11632 return get_errno(geteuid());
11634 #ifdef TARGET_NR_getegid32
11635 case TARGET_NR_getegid32
:
11636 return get_errno(getegid());
11638 #ifdef TARGET_NR_setreuid32
11639 case TARGET_NR_setreuid32
:
11640 return get_errno(setreuid(arg1
, arg2
));
11642 #ifdef TARGET_NR_setregid32
11643 case TARGET_NR_setregid32
:
11644 return get_errno(setregid(arg1
, arg2
));
11646 #ifdef TARGET_NR_getgroups32
11647 case TARGET_NR_getgroups32
:
11649 int gidsetsize
= arg1
;
11650 uint32_t *target_grouplist
;
11654 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11655 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11656 if (gidsetsize
== 0)
11658 if (!is_error(ret
)) {
11659 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11660 if (!target_grouplist
) {
11661 return -TARGET_EFAULT
;
11663 for(i
= 0;i
< ret
; i
++)
11664 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11665 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11670 #ifdef TARGET_NR_setgroups32
11671 case TARGET_NR_setgroups32
:
11673 int gidsetsize
= arg1
;
11674 uint32_t *target_grouplist
;
11678 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11679 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11680 if (!target_grouplist
) {
11681 return -TARGET_EFAULT
;
11683 for(i
= 0;i
< gidsetsize
; i
++)
11684 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11685 unlock_user(target_grouplist
, arg2
, 0);
11686 return get_errno(setgroups(gidsetsize
, grouplist
));
11689 #ifdef TARGET_NR_fchown32
11690 case TARGET_NR_fchown32
:
11691 return get_errno(fchown(arg1
, arg2
, arg3
));
11693 #ifdef TARGET_NR_setresuid32
11694 case TARGET_NR_setresuid32
:
11695 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11697 #ifdef TARGET_NR_getresuid32
11698 case TARGET_NR_getresuid32
:
11700 uid_t ruid
, euid
, suid
;
11701 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11702 if (!is_error(ret
)) {
11703 if (put_user_u32(ruid
, arg1
)
11704 || put_user_u32(euid
, arg2
)
11705 || put_user_u32(suid
, arg3
))
11706 return -TARGET_EFAULT
;
11711 #ifdef TARGET_NR_setresgid32
11712 case TARGET_NR_setresgid32
:
11713 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11715 #ifdef TARGET_NR_getresgid32
11716 case TARGET_NR_getresgid32
:
11718 gid_t rgid
, egid
, sgid
;
11719 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11720 if (!is_error(ret
)) {
11721 if (put_user_u32(rgid
, arg1
)
11722 || put_user_u32(egid
, arg2
)
11723 || put_user_u32(sgid
, arg3
))
11724 return -TARGET_EFAULT
;
11729 #ifdef TARGET_NR_chown32
11730 case TARGET_NR_chown32
:
11731 if (!(p
= lock_user_string(arg1
)))
11732 return -TARGET_EFAULT
;
11733 ret
= get_errno(chown(p
, arg2
, arg3
));
11734 unlock_user(p
, arg1
, 0);
11737 #ifdef TARGET_NR_setuid32
11738 case TARGET_NR_setuid32
:
11739 return get_errno(sys_setuid(arg1
));
11741 #ifdef TARGET_NR_setgid32
11742 case TARGET_NR_setgid32
:
11743 return get_errno(sys_setgid(arg1
));
11745 #ifdef TARGET_NR_setfsuid32
11746 case TARGET_NR_setfsuid32
:
11747 return get_errno(setfsuid(arg1
));
11749 #ifdef TARGET_NR_setfsgid32
11750 case TARGET_NR_setfsgid32
:
11751 return get_errno(setfsgid(arg1
));
11753 #ifdef TARGET_NR_mincore
11754 case TARGET_NR_mincore
:
11756 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11758 return -TARGET_ENOMEM
;
11760 p
= lock_user_string(arg3
);
11762 ret
= -TARGET_EFAULT
;
11764 ret
= get_errno(mincore(a
, arg2
, p
));
11765 unlock_user(p
, arg3
, ret
);
11767 unlock_user(a
, arg1
, 0);
11771 #ifdef TARGET_NR_arm_fadvise64_64
11772 case TARGET_NR_arm_fadvise64_64
:
11773 /* arm_fadvise64_64 looks like fadvise64_64 but
11774 * with different argument order: fd, advice, offset, len
11775 * rather than the usual fd, offset, len, advice.
11776 * Note that offset and len are both 64-bit so appear as
11777 * pairs of 32-bit registers.
11779 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11780 target_offset64(arg5
, arg6
), arg2
);
11781 return -host_to_target_errno(ret
);
11784 #if TARGET_ABI_BITS == 32
11786 #ifdef TARGET_NR_fadvise64_64
11787 case TARGET_NR_fadvise64_64
:
11788 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11789 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11797 /* 6 args: fd, offset (high, low), len (high, low), advice */
11798 if (regpairs_aligned(cpu_env
, num
)) {
11799 /* offset is in (3,4), len in (5,6) and advice in 7 */
11807 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11808 target_offset64(arg4
, arg5
), arg6
);
11809 return -host_to_target_errno(ret
);
11812 #ifdef TARGET_NR_fadvise64
11813 case TARGET_NR_fadvise64
:
11814 /* 5 args: fd, offset (high, low), len, advice */
11815 if (regpairs_aligned(cpu_env
, num
)) {
11816 /* offset is in (3,4), len in 5 and advice in 6 */
11822 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11823 return -host_to_target_errno(ret
);
11826 #else /* not a 32-bit ABI */
11827 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11828 #ifdef TARGET_NR_fadvise64_64
11829 case TARGET_NR_fadvise64_64
:
11831 #ifdef TARGET_NR_fadvise64
11832 case TARGET_NR_fadvise64
:
11834 #ifdef TARGET_S390X
11836 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11837 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11838 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11839 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11843 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11845 #endif /* end of 64-bit ABI fadvise handling */
11847 #ifdef TARGET_NR_madvise
11848 case TARGET_NR_madvise
:
11849 /* A straight passthrough may not be safe because qemu sometimes
11850 turns private file-backed mappings into anonymous mappings.
11851 This will break MADV_DONTNEED.
11852 This is a hint, so ignoring and returning success is ok. */
11855 #ifdef TARGET_NR_fcntl64
11856 case TARGET_NR_fcntl64
:
11860 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11861 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11864 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11865 copyfrom
= copy_from_user_oabi_flock64
;
11866 copyto
= copy_to_user_oabi_flock64
;
11870 cmd
= target_to_host_fcntl_cmd(arg2
);
11871 if (cmd
== -TARGET_EINVAL
) {
11876 case TARGET_F_GETLK64
:
11877 ret
= copyfrom(&fl
, arg3
);
11881 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11883 ret
= copyto(arg3
, &fl
);
11887 case TARGET_F_SETLK64
:
11888 case TARGET_F_SETLKW64
:
11889 ret
= copyfrom(&fl
, arg3
);
11893 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11896 ret
= do_fcntl(arg1
, arg2
, arg3
);
11902 #ifdef TARGET_NR_cacheflush
11903 case TARGET_NR_cacheflush
:
11904 /* self-modifying code is handled automatically, so nothing needed */
11907 #ifdef TARGET_NR_getpagesize
11908 case TARGET_NR_getpagesize
:
11909 return TARGET_PAGE_SIZE
;
11911 case TARGET_NR_gettid
:
11912 return get_errno(sys_gettid());
11913 #ifdef TARGET_NR_readahead
11914 case TARGET_NR_readahead
:
11915 #if TARGET_ABI_BITS == 32
11916 if (regpairs_aligned(cpu_env
, num
)) {
11921 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11923 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11928 #ifdef TARGET_NR_setxattr
11929 case TARGET_NR_listxattr
:
11930 case TARGET_NR_llistxattr
:
11934 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11936 return -TARGET_EFAULT
;
11939 p
= lock_user_string(arg1
);
11941 if (num
== TARGET_NR_listxattr
) {
11942 ret
= get_errno(listxattr(p
, b
, arg3
));
11944 ret
= get_errno(llistxattr(p
, b
, arg3
));
11947 ret
= -TARGET_EFAULT
;
11949 unlock_user(p
, arg1
, 0);
11950 unlock_user(b
, arg2
, arg3
);
11953 case TARGET_NR_flistxattr
:
11957 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11959 return -TARGET_EFAULT
;
11962 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11963 unlock_user(b
, arg2
, arg3
);
11966 case TARGET_NR_setxattr
:
11967 case TARGET_NR_lsetxattr
:
11969 void *p
, *n
, *v
= 0;
11971 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11973 return -TARGET_EFAULT
;
11976 p
= lock_user_string(arg1
);
11977 n
= lock_user_string(arg2
);
11979 if (num
== TARGET_NR_setxattr
) {
11980 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11982 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11985 ret
= -TARGET_EFAULT
;
11987 unlock_user(p
, arg1
, 0);
11988 unlock_user(n
, arg2
, 0);
11989 unlock_user(v
, arg3
, 0);
11992 case TARGET_NR_fsetxattr
:
11996 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11998 return -TARGET_EFAULT
;
12001 n
= lock_user_string(arg2
);
12003 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
12005 ret
= -TARGET_EFAULT
;
12007 unlock_user(n
, arg2
, 0);
12008 unlock_user(v
, arg3
, 0);
12011 case TARGET_NR_getxattr
:
12012 case TARGET_NR_lgetxattr
:
12014 void *p
, *n
, *v
= 0;
12016 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12018 return -TARGET_EFAULT
;
12021 p
= lock_user_string(arg1
);
12022 n
= lock_user_string(arg2
);
12024 if (num
== TARGET_NR_getxattr
) {
12025 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12027 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12030 ret
= -TARGET_EFAULT
;
12032 unlock_user(p
, arg1
, 0);
12033 unlock_user(n
, arg2
, 0);
12034 unlock_user(v
, arg3
, arg4
);
12037 case TARGET_NR_fgetxattr
:
12041 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12043 return -TARGET_EFAULT
;
12046 n
= lock_user_string(arg2
);
12048 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12050 ret
= -TARGET_EFAULT
;
12052 unlock_user(n
, arg2
, 0);
12053 unlock_user(v
, arg3
, arg4
);
12056 case TARGET_NR_removexattr
:
12057 case TARGET_NR_lremovexattr
:
12060 p
= lock_user_string(arg1
);
12061 n
= lock_user_string(arg2
);
12063 if (num
== TARGET_NR_removexattr
) {
12064 ret
= get_errno(removexattr(p
, n
));
12066 ret
= get_errno(lremovexattr(p
, n
));
12069 ret
= -TARGET_EFAULT
;
12071 unlock_user(p
, arg1
, 0);
12072 unlock_user(n
, arg2
, 0);
12075 case TARGET_NR_fremovexattr
:
12078 n
= lock_user_string(arg2
);
12080 ret
= get_errno(fremovexattr(arg1
, n
));
12082 ret
= -TARGET_EFAULT
;
12084 unlock_user(n
, arg2
, 0);
12088 #endif /* CONFIG_ATTR */
12089 #ifdef TARGET_NR_set_thread_area
12090 case TARGET_NR_set_thread_area
:
12091 #if defined(TARGET_MIPS)
12092 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12094 #elif defined(TARGET_CRIS)
12096 ret
= -TARGET_EINVAL
;
12098 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12102 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12103 return do_set_thread_area(cpu_env
, arg1
);
12104 #elif defined(TARGET_M68K)
12106 TaskState
*ts
= cpu
->opaque
;
12107 ts
->tp_value
= arg1
;
12111 return -TARGET_ENOSYS
;
12114 #ifdef TARGET_NR_get_thread_area
12115 case TARGET_NR_get_thread_area
:
12116 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12117 return do_get_thread_area(cpu_env
, arg1
);
12118 #elif defined(TARGET_M68K)
12120 TaskState
*ts
= cpu
->opaque
;
12121 return ts
->tp_value
;
12124 return -TARGET_ENOSYS
;
12127 #ifdef TARGET_NR_getdomainname
12128 case TARGET_NR_getdomainname
:
12129 return -TARGET_ENOSYS
;
12132 #ifdef TARGET_NR_clock_settime
12133 case TARGET_NR_clock_settime
:
12135 struct timespec ts
;
12137 ret
= target_to_host_timespec(&ts
, arg2
);
12138 if (!is_error(ret
)) {
12139 ret
= get_errno(clock_settime(arg1
, &ts
));
12144 #ifdef TARGET_NR_clock_settime64
12145 case TARGET_NR_clock_settime64
:
12147 struct timespec ts
;
12149 ret
= target_to_host_timespec64(&ts
, arg2
);
12150 if (!is_error(ret
)) {
12151 ret
= get_errno(clock_settime(arg1
, &ts
));
12156 #ifdef TARGET_NR_clock_gettime
12157 case TARGET_NR_clock_gettime
:
12159 struct timespec ts
;
12160 ret
= get_errno(clock_gettime(arg1
, &ts
));
12161 if (!is_error(ret
)) {
12162 ret
= host_to_target_timespec(arg2
, &ts
);
12167 #ifdef TARGET_NR_clock_gettime64
12168 case TARGET_NR_clock_gettime64
:
12170 struct timespec ts
;
12171 ret
= get_errno(clock_gettime(arg1
, &ts
));
12172 if (!is_error(ret
)) {
12173 ret
= host_to_target_timespec64(arg2
, &ts
);
12178 #ifdef TARGET_NR_clock_getres
12179 case TARGET_NR_clock_getres
:
12181 struct timespec ts
;
12182 ret
= get_errno(clock_getres(arg1
, &ts
));
12183 if (!is_error(ret
)) {
12184 host_to_target_timespec(arg2
, &ts
);
12189 #ifdef TARGET_NR_clock_getres_time64
12190 case TARGET_NR_clock_getres_time64
:
12192 struct timespec ts
;
12193 ret
= get_errno(clock_getres(arg1
, &ts
));
12194 if (!is_error(ret
)) {
12195 host_to_target_timespec64(arg2
, &ts
);
12200 #ifdef TARGET_NR_clock_nanosleep
12201 case TARGET_NR_clock_nanosleep
:
12203 struct timespec ts
;
12204 if (target_to_host_timespec(&ts
, arg3
)) {
12205 return -TARGET_EFAULT
;
12207 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12208 &ts
, arg4
? &ts
: NULL
));
12210 * if the call is interrupted by a signal handler, it fails
12211 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12212 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12214 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12215 host_to_target_timespec(arg4
, &ts
)) {
12216 return -TARGET_EFAULT
;
12222 #ifdef TARGET_NR_clock_nanosleep_time64
12223 case TARGET_NR_clock_nanosleep_time64
:
12225 struct timespec ts
;
12227 if (target_to_host_timespec64(&ts
, arg3
)) {
12228 return -TARGET_EFAULT
;
12231 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12232 &ts
, arg4
? &ts
: NULL
));
12234 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12235 host_to_target_timespec64(arg4
, &ts
)) {
12236 return -TARGET_EFAULT
;
12242 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12243 case TARGET_NR_set_tid_address
:
12244 return get_errno(set_tid_address((int *)g2h(cpu
, arg1
)));
12247 case TARGET_NR_tkill
:
12248 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12250 case TARGET_NR_tgkill
:
12251 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12252 target_to_host_signal(arg3
)));
12254 #ifdef TARGET_NR_set_robust_list
12255 case TARGET_NR_set_robust_list
:
12256 case TARGET_NR_get_robust_list
:
12257 /* The ABI for supporting robust futexes has userspace pass
12258 * the kernel a pointer to a linked list which is updated by
12259 * userspace after the syscall; the list is walked by the kernel
12260 * when the thread exits. Since the linked list in QEMU guest
12261 * memory isn't a valid linked list for the host and we have
12262 * no way to reliably intercept the thread-death event, we can't
12263 * support these. Silently return ENOSYS so that guest userspace
12264 * falls back to a non-robust futex implementation (which should
12265 * be OK except in the corner case of the guest crashing while
12266 * holding a mutex that is shared with another process via
12269 return -TARGET_ENOSYS
;
12272 #if defined(TARGET_NR_utimensat)
12273 case TARGET_NR_utimensat
:
12275 struct timespec
*tsp
, ts
[2];
12279 if (target_to_host_timespec(ts
, arg3
)) {
12280 return -TARGET_EFAULT
;
12282 if (target_to_host_timespec(ts
+ 1, arg3
+
12283 sizeof(struct target_timespec
))) {
12284 return -TARGET_EFAULT
;
12289 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12291 if (!(p
= lock_user_string(arg2
))) {
12292 return -TARGET_EFAULT
;
12294 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12295 unlock_user(p
, arg2
, 0);
12300 #ifdef TARGET_NR_utimensat_time64
12301 case TARGET_NR_utimensat_time64
:
12303 struct timespec
*tsp
, ts
[2];
12307 if (target_to_host_timespec64(ts
, arg3
)) {
12308 return -TARGET_EFAULT
;
12310 if (target_to_host_timespec64(ts
+ 1, arg3
+
12311 sizeof(struct target__kernel_timespec
))) {
12312 return -TARGET_EFAULT
;
12317 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12319 p
= lock_user_string(arg2
);
12321 return -TARGET_EFAULT
;
12323 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12324 unlock_user(p
, arg2
, 0);
12329 #ifdef TARGET_NR_futex
12330 case TARGET_NR_futex
:
12331 return do_futex(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12333 #ifdef TARGET_NR_futex_time64
12334 case TARGET_NR_futex_time64
:
12335 return do_futex_time64(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12337 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12338 case TARGET_NR_inotify_init
:
12339 ret
= get_errno(sys_inotify_init());
12341 fd_trans_register(ret
, &target_inotify_trans
);
12345 #ifdef CONFIG_INOTIFY1
12346 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12347 case TARGET_NR_inotify_init1
:
12348 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12349 fcntl_flags_tbl
)));
12351 fd_trans_register(ret
, &target_inotify_trans
);
12356 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12357 case TARGET_NR_inotify_add_watch
:
12358 p
= lock_user_string(arg2
);
12359 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12360 unlock_user(p
, arg2
, 0);
12363 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12364 case TARGET_NR_inotify_rm_watch
:
12365 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12368 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12369 case TARGET_NR_mq_open
:
12371 struct mq_attr posix_mq_attr
;
12372 struct mq_attr
*pposix_mq_attr
;
12375 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12376 pposix_mq_attr
= NULL
;
12378 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12379 return -TARGET_EFAULT
;
12381 pposix_mq_attr
= &posix_mq_attr
;
12383 p
= lock_user_string(arg1
- 1);
12385 return -TARGET_EFAULT
;
12387 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12388 unlock_user (p
, arg1
, 0);
12392 case TARGET_NR_mq_unlink
:
12393 p
= lock_user_string(arg1
- 1);
12395 return -TARGET_EFAULT
;
12397 ret
= get_errno(mq_unlink(p
));
12398 unlock_user (p
, arg1
, 0);
12401 #ifdef TARGET_NR_mq_timedsend
12402 case TARGET_NR_mq_timedsend
:
12404 struct timespec ts
;
12406 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12408 if (target_to_host_timespec(&ts
, arg5
)) {
12409 return -TARGET_EFAULT
;
12411 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12412 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12413 return -TARGET_EFAULT
;
12416 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12418 unlock_user (p
, arg2
, arg3
);
12422 #ifdef TARGET_NR_mq_timedsend_time64
12423 case TARGET_NR_mq_timedsend_time64
:
12425 struct timespec ts
;
12427 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12429 if (target_to_host_timespec64(&ts
, arg5
)) {
12430 return -TARGET_EFAULT
;
12432 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12433 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12434 return -TARGET_EFAULT
;
12437 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12439 unlock_user(p
, arg2
, arg3
);
12444 #ifdef TARGET_NR_mq_timedreceive
12445 case TARGET_NR_mq_timedreceive
:
12447 struct timespec ts
;
12450 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12452 if (target_to_host_timespec(&ts
, arg5
)) {
12453 return -TARGET_EFAULT
;
12455 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12457 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12458 return -TARGET_EFAULT
;
12461 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12464 unlock_user (p
, arg2
, arg3
);
12466 put_user_u32(prio
, arg4
);
12470 #ifdef TARGET_NR_mq_timedreceive_time64
12471 case TARGET_NR_mq_timedreceive_time64
:
12473 struct timespec ts
;
12476 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12478 if (target_to_host_timespec64(&ts
, arg5
)) {
12479 return -TARGET_EFAULT
;
12481 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12483 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12484 return -TARGET_EFAULT
;
12487 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12490 unlock_user(p
, arg2
, arg3
);
12492 put_user_u32(prio
, arg4
);
12498 /* Not implemented for now... */
12499 /* case TARGET_NR_mq_notify: */
12502 case TARGET_NR_mq_getsetattr
:
12504 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12507 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12508 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12509 &posix_mq_attr_out
));
12510 } else if (arg3
!= 0) {
12511 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12513 if (ret
== 0 && arg3
!= 0) {
12514 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12520 #ifdef CONFIG_SPLICE
12521 #ifdef TARGET_NR_tee
12522 case TARGET_NR_tee
:
12524 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12528 #ifdef TARGET_NR_splice
12529 case TARGET_NR_splice
:
12531 loff_t loff_in
, loff_out
;
12532 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12534 if (get_user_u64(loff_in
, arg2
)) {
12535 return -TARGET_EFAULT
;
12537 ploff_in
= &loff_in
;
12540 if (get_user_u64(loff_out
, arg4
)) {
12541 return -TARGET_EFAULT
;
12543 ploff_out
= &loff_out
;
12545 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12547 if (put_user_u64(loff_in
, arg2
)) {
12548 return -TARGET_EFAULT
;
12552 if (put_user_u64(loff_out
, arg4
)) {
12553 return -TARGET_EFAULT
;
12559 #ifdef TARGET_NR_vmsplice
12560 case TARGET_NR_vmsplice
:
12562 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12564 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12565 unlock_iovec(vec
, arg2
, arg3
, 0);
12567 ret
= -host_to_target_errno(errno
);
12572 #endif /* CONFIG_SPLICE */
12573 #ifdef CONFIG_EVENTFD
12574 #if defined(TARGET_NR_eventfd)
12575 case TARGET_NR_eventfd
:
12576 ret
= get_errno(eventfd(arg1
, 0));
12578 fd_trans_register(ret
, &target_eventfd_trans
);
12582 #if defined(TARGET_NR_eventfd2)
12583 case TARGET_NR_eventfd2
:
12585 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12586 if (arg2
& TARGET_O_NONBLOCK
) {
12587 host_flags
|= O_NONBLOCK
;
12589 if (arg2
& TARGET_O_CLOEXEC
) {
12590 host_flags
|= O_CLOEXEC
;
12592 ret
= get_errno(eventfd(arg1
, host_flags
));
12594 fd_trans_register(ret
, &target_eventfd_trans
);
12599 #endif /* CONFIG_EVENTFD */
12600 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12601 case TARGET_NR_fallocate
:
12602 #if TARGET_ABI_BITS == 32
12603 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12604 target_offset64(arg5
, arg6
)));
12606 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12610 #if defined(CONFIG_SYNC_FILE_RANGE)
12611 #if defined(TARGET_NR_sync_file_range)
12612 case TARGET_NR_sync_file_range
:
12613 #if TARGET_ABI_BITS == 32
12614 #if defined(TARGET_MIPS)
12615 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12616 target_offset64(arg5
, arg6
), arg7
));
12618 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12619 target_offset64(arg4
, arg5
), arg6
));
12620 #endif /* !TARGET_MIPS */
12622 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12626 #if defined(TARGET_NR_sync_file_range2) || \
12627 defined(TARGET_NR_arm_sync_file_range)
12628 #if defined(TARGET_NR_sync_file_range2)
12629 case TARGET_NR_sync_file_range2
:
12631 #if defined(TARGET_NR_arm_sync_file_range)
12632 case TARGET_NR_arm_sync_file_range
:
12634 /* This is like sync_file_range but the arguments are reordered */
12635 #if TARGET_ABI_BITS == 32
12636 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12637 target_offset64(arg5
, arg6
), arg2
));
12639 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12644 #if defined(TARGET_NR_signalfd4)
12645 case TARGET_NR_signalfd4
:
12646 return do_signalfd4(arg1
, arg2
, arg4
);
12648 #if defined(TARGET_NR_signalfd)
12649 case TARGET_NR_signalfd
:
12650 return do_signalfd4(arg1
, arg2
, 0);
12652 #if defined(CONFIG_EPOLL)
12653 #if defined(TARGET_NR_epoll_create)
12654 case TARGET_NR_epoll_create
:
12655 return get_errno(epoll_create(arg1
));
12657 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12658 case TARGET_NR_epoll_create1
:
12659 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12661 #if defined(TARGET_NR_epoll_ctl)
12662 case TARGET_NR_epoll_ctl
:
12664 struct epoll_event ep
;
12665 struct epoll_event
*epp
= 0;
12667 if (arg2
!= EPOLL_CTL_DEL
) {
12668 struct target_epoll_event
*target_ep
;
12669 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12670 return -TARGET_EFAULT
;
12672 ep
.events
= tswap32(target_ep
->events
);
12674 * The epoll_data_t union is just opaque data to the kernel,
12675 * so we transfer all 64 bits across and need not worry what
12676 * actual data type it is.
12678 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12679 unlock_user_struct(target_ep
, arg4
, 0);
12682 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12683 * non-null pointer, even though this argument is ignored.
12688 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12692 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12693 #if defined(TARGET_NR_epoll_wait)
12694 case TARGET_NR_epoll_wait
:
12696 #if defined(TARGET_NR_epoll_pwait)
12697 case TARGET_NR_epoll_pwait
:
12700 struct target_epoll_event
*target_ep
;
12701 struct epoll_event
*ep
;
12703 int maxevents
= arg3
;
12704 int timeout
= arg4
;
12706 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12707 return -TARGET_EINVAL
;
12710 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12711 maxevents
* sizeof(struct target_epoll_event
), 1);
12713 return -TARGET_EFAULT
;
12716 ep
= g_try_new(struct epoll_event
, maxevents
);
12718 unlock_user(target_ep
, arg2
, 0);
12719 return -TARGET_ENOMEM
;
12723 #if defined(TARGET_NR_epoll_pwait)
12724 case TARGET_NR_epoll_pwait
:
12726 target_sigset_t
*target_set
;
12727 sigset_t _set
, *set
= &_set
;
12730 if (arg6
!= sizeof(target_sigset_t
)) {
12731 ret
= -TARGET_EINVAL
;
12735 target_set
= lock_user(VERIFY_READ
, arg5
,
12736 sizeof(target_sigset_t
), 1);
12738 ret
= -TARGET_EFAULT
;
12741 target_to_host_sigset(set
, target_set
);
12742 unlock_user(target_set
, arg5
, 0);
12747 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12748 set
, SIGSET_T_SIZE
));
12752 #if defined(TARGET_NR_epoll_wait)
12753 case TARGET_NR_epoll_wait
:
12754 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12759 ret
= -TARGET_ENOSYS
;
12761 if (!is_error(ret
)) {
12763 for (i
= 0; i
< ret
; i
++) {
12764 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12765 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12767 unlock_user(target_ep
, arg2
,
12768 ret
* sizeof(struct target_epoll_event
));
12770 unlock_user(target_ep
, arg2
, 0);
12777 #ifdef TARGET_NR_prlimit64
12778 case TARGET_NR_prlimit64
:
12780 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12781 struct target_rlimit64
*target_rnew
, *target_rold
;
12782 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12783 int resource
= target_to_host_resource(arg2
);
12785 if (arg3
&& (resource
!= RLIMIT_AS
&&
12786 resource
!= RLIMIT_DATA
&&
12787 resource
!= RLIMIT_STACK
)) {
12788 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12789 return -TARGET_EFAULT
;
12791 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12792 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12793 unlock_user_struct(target_rnew
, arg3
, 0);
12797 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12798 if (!is_error(ret
) && arg4
) {
12799 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12800 return -TARGET_EFAULT
;
12802 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12803 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12804 unlock_user_struct(target_rold
, arg4
, 1);
12809 #ifdef TARGET_NR_gethostname
12810 case TARGET_NR_gethostname
:
12812 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12814 ret
= get_errno(gethostname(name
, arg2
));
12815 unlock_user(name
, arg1
, arg2
);
12817 ret
= -TARGET_EFAULT
;
12822 #ifdef TARGET_NR_atomic_cmpxchg_32
12823 case TARGET_NR_atomic_cmpxchg_32
:
12825 /* should use start_exclusive from main.c */
12826 abi_ulong mem_value
;
12827 if (get_user_u32(mem_value
, arg6
)) {
12828 target_siginfo_t info
;
12829 info
.si_signo
= SIGSEGV
;
12831 info
.si_code
= TARGET_SEGV_MAPERR
;
12832 info
._sifields
._sigfault
._addr
= arg6
;
12833 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12834 QEMU_SI_FAULT
, &info
);
12838 if (mem_value
== arg2
)
12839 put_user_u32(arg1
, arg6
);
12843 #ifdef TARGET_NR_atomic_barrier
12844 case TARGET_NR_atomic_barrier
:
12845 /* Like the kernel implementation and the
12846 qemu arm barrier, no-op this? */
12850 #ifdef TARGET_NR_timer_create
12851 case TARGET_NR_timer_create
:
12853 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12855 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12858 int timer_index
= next_free_host_timer();
12860 if (timer_index
< 0) {
12861 ret
= -TARGET_EAGAIN
;
12863 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12866 phost_sevp
= &host_sevp
;
12867 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12873 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12877 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12878 return -TARGET_EFAULT
;
12886 #ifdef TARGET_NR_timer_settime
12887 case TARGET_NR_timer_settime
:
12889 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12890 * struct itimerspec * old_value */
12891 target_timer_t timerid
= get_timer_id(arg1
);
12895 } else if (arg3
== 0) {
12896 ret
= -TARGET_EINVAL
;
12898 timer_t htimer
= g_posix_timers
[timerid
];
12899 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12901 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12902 return -TARGET_EFAULT
;
12905 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12906 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12907 return -TARGET_EFAULT
;
12914 #ifdef TARGET_NR_timer_settime64
12915 case TARGET_NR_timer_settime64
:
12917 target_timer_t timerid
= get_timer_id(arg1
);
12921 } else if (arg3
== 0) {
12922 ret
= -TARGET_EINVAL
;
12924 timer_t htimer
= g_posix_timers
[timerid
];
12925 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12927 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12928 return -TARGET_EFAULT
;
12931 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12932 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12933 return -TARGET_EFAULT
;
12940 #ifdef TARGET_NR_timer_gettime
12941 case TARGET_NR_timer_gettime
:
12943 /* args: timer_t timerid, struct itimerspec *curr_value */
12944 target_timer_t timerid
= get_timer_id(arg1
);
12948 } else if (!arg2
) {
12949 ret
= -TARGET_EFAULT
;
12951 timer_t htimer
= g_posix_timers
[timerid
];
12952 struct itimerspec hspec
;
12953 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12955 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12956 ret
= -TARGET_EFAULT
;
12963 #ifdef TARGET_NR_timer_gettime64
12964 case TARGET_NR_timer_gettime64
:
12966 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12967 target_timer_t timerid
= get_timer_id(arg1
);
12971 } else if (!arg2
) {
12972 ret
= -TARGET_EFAULT
;
12974 timer_t htimer
= g_posix_timers
[timerid
];
12975 struct itimerspec hspec
;
12976 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12978 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12979 ret
= -TARGET_EFAULT
;
12986 #ifdef TARGET_NR_timer_getoverrun
12987 case TARGET_NR_timer_getoverrun
:
12989 /* args: timer_t timerid */
12990 target_timer_t timerid
= get_timer_id(arg1
);
12995 timer_t htimer
= g_posix_timers
[timerid
];
12996 ret
= get_errno(timer_getoverrun(htimer
));
13002 #ifdef TARGET_NR_timer_delete
13003 case TARGET_NR_timer_delete
:
13005 /* args: timer_t timerid */
13006 target_timer_t timerid
= get_timer_id(arg1
);
13011 timer_t htimer
= g_posix_timers
[timerid
];
13012 ret
= get_errno(timer_delete(htimer
));
13013 g_posix_timers
[timerid
] = 0;
13019 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13020 case TARGET_NR_timerfd_create
:
13021 return get_errno(timerfd_create(arg1
,
13022 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
13025 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13026 case TARGET_NR_timerfd_gettime
:
13028 struct itimerspec its_curr
;
13030 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13032 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13033 return -TARGET_EFAULT
;
13039 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13040 case TARGET_NR_timerfd_gettime64
:
13042 struct itimerspec its_curr
;
13044 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13046 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13047 return -TARGET_EFAULT
;
13053 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13054 case TARGET_NR_timerfd_settime
:
13056 struct itimerspec its_new
, its_old
, *p_new
;
13059 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13060 return -TARGET_EFAULT
;
13067 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13069 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13070 return -TARGET_EFAULT
;
13076 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13077 case TARGET_NR_timerfd_settime64
:
13079 struct itimerspec its_new
, its_old
, *p_new
;
13082 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13083 return -TARGET_EFAULT
;
13090 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13092 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13093 return -TARGET_EFAULT
;
13099 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13100 case TARGET_NR_ioprio_get
:
13101 return get_errno(ioprio_get(arg1
, arg2
));
13104 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13105 case TARGET_NR_ioprio_set
:
13106 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13109 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13110 case TARGET_NR_setns
:
13111 return get_errno(setns(arg1
, arg2
));
13113 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13114 case TARGET_NR_unshare
:
13115 return get_errno(unshare(arg1
));
13117 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13118 case TARGET_NR_kcmp
:
13119 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13121 #ifdef TARGET_NR_swapcontext
13122 case TARGET_NR_swapcontext
:
13123 /* PowerPC specific. */
13124 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13126 #ifdef TARGET_NR_memfd_create
13127 case TARGET_NR_memfd_create
:
13128 p
= lock_user_string(arg1
);
13130 return -TARGET_EFAULT
;
13132 ret
= get_errno(memfd_create(p
, arg2
));
13133 fd_trans_unregister(ret
);
13134 unlock_user(p
, arg1
, 0);
13137 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13138 case TARGET_NR_membarrier
:
13139 return get_errno(membarrier(arg1
, arg2
));
13142 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13143 case TARGET_NR_copy_file_range
:
13145 loff_t inoff
, outoff
;
13146 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13149 if (get_user_u64(inoff
, arg2
)) {
13150 return -TARGET_EFAULT
;
13155 if (get_user_u64(outoff
, arg4
)) {
13156 return -TARGET_EFAULT
;
13160 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13162 if (!is_error(ret
) && ret
> 0) {
13164 if (put_user_u64(inoff
, arg2
)) {
13165 return -TARGET_EFAULT
;
13169 if (put_user_u64(outoff
, arg4
)) {
13170 return -TARGET_EFAULT
;
13179 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13180 return -TARGET_ENOSYS
;
13185 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13186 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13187 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13190 CPUState
*cpu
= env_cpu(cpu_env
);
13193 #ifdef DEBUG_ERESTARTSYS
13194 /* Debug-only code for exercising the syscall-restart code paths
13195 * in the per-architecture cpu main loops: restart every syscall
13196 * the guest makes once before letting it through.
13202 return -TARGET_ERESTARTSYS
;
13207 record_syscall_start(cpu
, num
, arg1
,
13208 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13210 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13211 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13214 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13215 arg5
, arg6
, arg7
, arg8
);
13217 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13218 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13219 arg3
, arg4
, arg5
, arg6
);
13222 record_syscall_return(cpu
, num
, ret
);