4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
120 #include <linux/btrfs.h>
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
126 #include "linux_loop.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
138 #define CLONE_IO 0x80000000 /* Clone io context */
141 /* We can't directly call the host clone syscall, because this will
142 * badly confuse libc (breaking mutexes, for example). So we must
143 * divide clone flags into:
144 * * flag combinations that look like pthread_create()
145 * * flag combinations that look like fork()
146 * * flags we can implement within QEMU itself
147 * * flags we can't support and will return an error for
149 /* For thread creation, all these flags must be present; for
150 * fork, none must be present.
152 #define CLONE_THREAD_FLAGS \
153 (CLONE_VM | CLONE_FS | CLONE_FILES | \
154 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
156 /* These flags are ignored:
157 * CLONE_DETACHED is now ignored by the kernel;
158 * CLONE_IO is just an optimisation hint to the I/O scheduler
160 #define CLONE_IGNORED_FLAGS \
161 (CLONE_DETACHED | CLONE_IO)
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS \
165 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
166 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS \
170 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
171 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
173 #define CLONE_INVALID_FORK_FLAGS \
174 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
176 #define CLONE_INVALID_THREAD_FLAGS \
177 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
178 CLONE_IGNORED_FLAGS))
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181 * have almost all been allocated. We cannot support any of
182 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184 * The checks against the invalid thread masks above will catch these.
185 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189 * once. This exercises the codepaths for restart.
191 //#define DEBUG_ERESTARTSYS
193 //#include <linux/msdos_fs.h>
194 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
195 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
205 #define _syscall0(type,name) \
206 static type name (void) \
208 return syscall(__NR_##name); \
211 #define _syscall1(type,name,type1,arg1) \
212 static type name (type1 arg1) \
214 return syscall(__NR_##name, arg1); \
217 #define _syscall2(type,name,type1,arg1,type2,arg2) \
218 static type name (type1 arg1,type2 arg2) \
220 return syscall(__NR_##name, arg1, arg2); \
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
224 static type name (type1 arg1,type2 arg2,type3 arg3) \
226 return syscall(__NR_##name, arg1, arg2, arg3); \
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
239 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
244 type5,arg5,type6,arg6) \
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
248 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
285 #define __NR_sys_gettid __NR_gettid
/* Direct host gettid syscall wrapper (__NR_sys_gettid is aliased to
 * __NR_gettid just above); returns the calling host thread's id. */
286 _syscall0(int, sys_gettid
)
288 /* For the 64-bit guest on 32-bit host case we must emulate
289 * getdents using getdents64, because otherwise the host
290 * might hand us back more dirent records than we can fit
291 * into the guest buffer after structure format conversion.
292 * Otherwise we emulate getdents with getdents if the host has it.
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
301 #if (defined(TARGET_NR_getdents) && \
302 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
308 loff_t
*, res
, uint
, wh
);
310 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
311 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
313 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group
,int,error_code
)
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address
,int *,tidptr
)
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
322 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
326 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
330 unsigned long *, user_mask_ptr
);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
333 unsigned long *, user_mask_ptr
);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
336 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
338 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
339 struct __user_cap_data_struct
*, data
);
340 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
341 struct __user_cap_data_struct
*, data
);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get
, int, which
, int, who
)
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
354 unsigned long, idx1
, unsigned long, idx2
)
358 * It is assumed that struct statx is architecture independent.
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
362 unsigned int, mask
, struct target_statx
*, statxbuf
)
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier
, int, cmd
, int, flags
)
368 static const bitmask_transtbl fcntl_flags_tbl
[] = {
369 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
370 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
371 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
372 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
373 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
374 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
375 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
376 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
377 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
378 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
379 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
380 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
381 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
382 #if defined(O_DIRECT)
383 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
385 #if defined(O_NOATIME)
386 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
388 #if defined(O_CLOEXEC)
389 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
392 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
394 #if defined(O_TMPFILE)
395 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
397 /* Don't terminate the list prematurely on 64-bit host+guest. */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Direct wrapper for the host getcwd syscall (__NR_sys_getcwd1 is an
 * alias for __NR_getcwd defined earlier), bypassing the libc getcwd()
 * so the raw kernel semantics are preserved for the guest. */
404 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
410 const struct timespec
*,tsp
,int,flags
)
412 static int sys_utimensat(int dirfd
, const char *pathname
,
413 const struct timespec times
[2], int flags
)
419 #endif /* TARGET_NR_utimensat */
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
425 const char *, new, unsigned int, flags
)
427 static int sys_renameat2(int oldfd
, const char *old
,
428 int newfd
, const char *new, int flags
)
431 return renameat(oldfd
, old
, newfd
, new);
437 #endif /* TARGET_NR_renameat2 */
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin shim giving the syscall dispatch code a uniform sys_* name for
 * the host's inotify_init(). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Forward an inotify_add_watch request straight to the host libc and
 * hand the resulting watch descriptor (or -1) back to the caller. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    int wd = inotify_add_watch(fd, pathname, mask);

    return wd;
}
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Forward an inotify_rm_watch request straight to the host libc. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Forward an inotify_init1 request (with its flags argument) straight
 * to the host libc. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
469 /* Userspace can usually survive runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY */
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be that used by the underlying syscall */
482 struct host_rlimit64
{
486 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
487 const struct host_rlimit64
*, new_limit
,
488 struct host_rlimit64
*, old_limit
)
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers
[32] = { 0, } ;
496 static inline int next_free_host_timer(void)
499 /* FIXME: Does finding the next free slot require a lock? */
500 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
501 if (g_posix_timers
[k
] == 0) {
502 g_posix_timers
[k
] = (timer_t
) 1;
510 #define ERRNO_TABLE_SIZE 1200
512 /* target_to_host_errno_table[] is initialized from
513 * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
518 * This list is the union of errno values overridden in asm-<arch>/errno.h
519 * minus the errnos that are not actually generic to all archs.
521 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
522 [EAGAIN
] = TARGET_EAGAIN
,
523 [EIDRM
] = TARGET_EIDRM
,
524 [ECHRNG
] = TARGET_ECHRNG
,
525 [EL2NSYNC
] = TARGET_EL2NSYNC
,
526 [EL3HLT
] = TARGET_EL3HLT
,
527 [EL3RST
] = TARGET_EL3RST
,
528 [ELNRNG
] = TARGET_ELNRNG
,
529 [EUNATCH
] = TARGET_EUNATCH
,
530 [ENOCSI
] = TARGET_ENOCSI
,
531 [EL2HLT
] = TARGET_EL2HLT
,
532 [EDEADLK
] = TARGET_EDEADLK
,
533 [ENOLCK
] = TARGET_ENOLCK
,
534 [EBADE
] = TARGET_EBADE
,
535 [EBADR
] = TARGET_EBADR
,
536 [EXFULL
] = TARGET_EXFULL
,
537 [ENOANO
] = TARGET_ENOANO
,
538 [EBADRQC
] = TARGET_EBADRQC
,
539 [EBADSLT
] = TARGET_EBADSLT
,
540 [EBFONT
] = TARGET_EBFONT
,
541 [ENOSTR
] = TARGET_ENOSTR
,
542 [ENODATA
] = TARGET_ENODATA
,
543 [ETIME
] = TARGET_ETIME
,
544 [ENOSR
] = TARGET_ENOSR
,
545 [ENONET
] = TARGET_ENONET
,
546 [ENOPKG
] = TARGET_ENOPKG
,
547 [EREMOTE
] = TARGET_EREMOTE
,
548 [ENOLINK
] = TARGET_ENOLINK
,
549 [EADV
] = TARGET_EADV
,
550 [ESRMNT
] = TARGET_ESRMNT
,
551 [ECOMM
] = TARGET_ECOMM
,
552 [EPROTO
] = TARGET_EPROTO
,
553 [EDOTDOT
] = TARGET_EDOTDOT
,
554 [EMULTIHOP
] = TARGET_EMULTIHOP
,
555 [EBADMSG
] = TARGET_EBADMSG
,
556 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
557 [EOVERFLOW
] = TARGET_EOVERFLOW
,
558 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
559 [EBADFD
] = TARGET_EBADFD
,
560 [EREMCHG
] = TARGET_EREMCHG
,
561 [ELIBACC
] = TARGET_ELIBACC
,
562 [ELIBBAD
] = TARGET_ELIBBAD
,
563 [ELIBSCN
] = TARGET_ELIBSCN
,
564 [ELIBMAX
] = TARGET_ELIBMAX
,
565 [ELIBEXEC
] = TARGET_ELIBEXEC
,
566 [EILSEQ
] = TARGET_EILSEQ
,
567 [ENOSYS
] = TARGET_ENOSYS
,
568 [ELOOP
] = TARGET_ELOOP
,
569 [ERESTART
] = TARGET_ERESTART
,
570 [ESTRPIPE
] = TARGET_ESTRPIPE
,
571 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
572 [EUSERS
] = TARGET_EUSERS
,
573 [ENOTSOCK
] = TARGET_ENOTSOCK
,
574 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
575 [EMSGSIZE
] = TARGET_EMSGSIZE
,
576 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
577 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
578 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
579 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
580 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
581 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
582 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
583 [EADDRINUSE
] = TARGET_EADDRINUSE
,
584 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
585 [ENETDOWN
] = TARGET_ENETDOWN
,
586 [ENETUNREACH
] = TARGET_ENETUNREACH
,
587 [ENETRESET
] = TARGET_ENETRESET
,
588 [ECONNABORTED
] = TARGET_ECONNABORTED
,
589 [ECONNRESET
] = TARGET_ECONNRESET
,
590 [ENOBUFS
] = TARGET_ENOBUFS
,
591 [EISCONN
] = TARGET_EISCONN
,
592 [ENOTCONN
] = TARGET_ENOTCONN
,
593 [EUCLEAN
] = TARGET_EUCLEAN
,
594 [ENOTNAM
] = TARGET_ENOTNAM
,
595 [ENAVAIL
] = TARGET_ENAVAIL
,
596 [EISNAM
] = TARGET_EISNAM
,
597 [EREMOTEIO
] = TARGET_EREMOTEIO
,
598 [EDQUOT
] = TARGET_EDQUOT
,
599 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
600 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
601 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
602 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
603 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
604 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
605 [EALREADY
] = TARGET_EALREADY
,
606 [EINPROGRESS
] = TARGET_EINPROGRESS
,
607 [ESTALE
] = TARGET_ESTALE
,
608 [ECANCELED
] = TARGET_ECANCELED
,
609 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
610 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
612 [ENOKEY
] = TARGET_ENOKEY
,
615 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
618 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
621 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
624 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
626 #ifdef ENOTRECOVERABLE
627 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
630 [ENOMSG
] = TARGET_ENOMSG
,
633 [ERFKILL
] = TARGET_ERFKILL
,
636 [EHWPOISON
] = TARGET_EHWPOISON
,
640 static inline int host_to_target_errno(int err
)
642 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
643 host_to_target_errno_table
[err
]) {
644 return host_to_target_errno_table
[err
];
649 static inline int target_to_host_errno(int err
)
651 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
652 target_to_host_errno_table
[err
]) {
653 return target_to_host_errno_table
[err
];
658 static inline abi_long
get_errno(abi_long ret
)
661 return -host_to_target_errno(errno
);
666 const char *target_strerror(int err
)
668 if (err
== TARGET_ERESTARTSYS
) {
669 return "To be restarted";
671 if (err
== TARGET_QEMU_ESIGRETURN
) {
672 return "Successful exit from sigreturn";
675 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
678 return strerror(target_to_host_errno(err
));
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
684 return safe_syscall(__NR_##name); \
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
690 return safe_syscall(__NR_##name, arg1); \
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
696 return safe_syscall(__NR_##name, arg1, arg2); \
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
702 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
709 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713 type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
717 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721 type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723 type5 arg5, type6 arg6) \
725 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
728 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
729 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
730 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
731 int, flags
, mode_t
, mode
)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
734 struct rusage
*, rusage
)
736 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
737 int, options
, struct rusage
*, rusage
)
738 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
742 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
746 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
749 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
750 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
754 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
758 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
760 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
761 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
762 safe_syscall2(int, tkill
, int, tid
, int, sig
)
763 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
764 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
765 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
766 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
767 unsigned long, pos_l
, unsigned long, pos_h
)
768 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
769 unsigned long, pos_l
, unsigned long, pos_h
)
770 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
772 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
773 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
774 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
775 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
776 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
777 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
778 safe_syscall2(int, flock
, int, fd
, int, operation
)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
781 const struct timespec
*, uts
, size_t, sigsetsize
)
783 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
787 struct timespec
*, rem
)
789 #if defined(TARGET_NR_clock_nanosleep) || \
790 defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
792 const struct timespec
*, req
, struct timespec
*, rem
)
796 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
799 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
800 void *, ptr
, long, fifth
)
804 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
808 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
809 long, msgtype
, int, flags
)
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
813 unsigned, nsops
, const struct timespec
*, timeout
)
815 #if defined(TARGET_NR_mq_timedsend) || \
816 defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
818 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
820 #if defined(TARGET_NR_mq_timedreceive) || \
821 defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
823 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
827 int, outfd
, loff_t
*, poutoff
, size_t, length
,
831 /* We do ioctl like this rather than via safe_syscall3 to preserve the
832 * "third argument might be integer or pointer or not present" behaviour of
835 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836 /* Similarly for fcntl. Note that callers must always:
837 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838 * use the flock64 struct rather than unsuffixed flock
839 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
842 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
844 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
847 static inline int host_to_target_sock_type(int host_type
)
851 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
853 target_type
= TARGET_SOCK_DGRAM
;
856 target_type
= TARGET_SOCK_STREAM
;
859 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
863 #if defined(SOCK_CLOEXEC)
864 if (host_type
& SOCK_CLOEXEC
) {
865 target_type
|= TARGET_SOCK_CLOEXEC
;
869 #if defined(SOCK_NONBLOCK)
870 if (host_type
& SOCK_NONBLOCK
) {
871 target_type
|= TARGET_SOCK_NONBLOCK
;
878 static abi_ulong target_brk
;
879 static abi_ulong target_original_brk
;
880 static abi_ulong brk_page
;
882 void target_set_brk(abi_ulong new_brk
)
884 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
885 brk_page
= HOST_PAGE_ALIGN(target_brk
);
888 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889 #define DEBUGF_BRK(message, args...)
891 /* do_brk() must return target values and target errnos. */
892 abi_long
do_brk(abi_ulong new_brk
)
894 abi_long mapped_addr
;
895 abi_ulong new_alloc_size
;
897 /* brk pointers are always untagged */
899 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
902 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
905 if (new_brk
< target_original_brk
) {
906 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
911 /* If the new brk is less than the highest page reserved to the
912 * target heap allocation, set it and we're almost done... */
913 if (new_brk
<= brk_page
) {
914 /* Heap contents are initialized to zero, as for anonymous
916 if (new_brk
> target_brk
) {
917 memset(g2h_untagged(target_brk
), 0, new_brk
- target_brk
);
919 target_brk
= new_brk
;
920 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
924 /* We need to allocate more memory after the brk... Note that
925 * we don't use MAP_FIXED because that will map over the top of
926 * any existing mapping (like the one with the host libc or qemu
927 * itself); instead we treat "mapped but at wrong address" as
928 * a failure and unmap again.
930 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
931 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
932 PROT_READ
|PROT_WRITE
,
933 MAP_ANON
|MAP_PRIVATE
, 0, 0));
935 if (mapped_addr
== brk_page
) {
936 /* Heap contents are initialized to zero, as for anonymous
937 * mapped pages. Technically the new pages are already
938 * initialized to zero since they *are* anonymous mapped
939 * pages, however we have to take care with the contents that
940 * come from the remaining part of the previous page: it may
941 * contains garbage data due to a previous heap usage (grown
943 memset(g2h_untagged(target_brk
), 0, brk_page
- target_brk
);
945 target_brk
= new_brk
;
946 brk_page
= HOST_PAGE_ALIGN(target_brk
);
947 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
950 } else if (mapped_addr
!= -1) {
951 /* Mapped but at wrong address, meaning there wasn't actually
952 * enough space for this brk.
954 target_munmap(mapped_addr
, new_alloc_size
);
956 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
959 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
962 #if defined(TARGET_ALPHA)
963 /* We (partially) emulate OSF/1 on Alpha, which requires we
964 return a proper errno, not an unchanged brk value. */
965 return -TARGET_ENOMEM
;
967 /* For everything else, return the previous break. */
971 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
972 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
973 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
974 abi_ulong target_fds_addr
,
978 abi_ulong b
, *target_fds
;
980 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
981 if (!(target_fds
= lock_user(VERIFY_READ
,
983 sizeof(abi_ulong
) * nw
,
985 return -TARGET_EFAULT
;
989 for (i
= 0; i
< nw
; i
++) {
990 /* grab the abi_ulong */
991 __get_user(b
, &target_fds
[i
]);
992 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
993 /* check the bit inside the abi_ulong */
1000 unlock_user(target_fds
, target_fds_addr
, 0);
1005 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1006 abi_ulong target_fds_addr
,
1009 if (target_fds_addr
) {
1010 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1011 return -TARGET_EFAULT
;
1019 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1025 abi_ulong
*target_fds
;
1027 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1028 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1030 sizeof(abi_ulong
) * nw
,
1032 return -TARGET_EFAULT
;
1035 for (i
= 0; i
< nw
; i
++) {
1037 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1038 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1041 __put_user(v
, &target_fds
[i
]);
1044 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1050 #if defined(__alpha__)
1051 #define HOST_HZ 1024
1056 static inline abi_long
host_to_target_clock_t(long ticks
)
1058 #if HOST_HZ == TARGET_HZ
1061 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1065 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1066 const struct rusage
*rusage
)
1068 struct target_rusage
*target_rusage
;
1070 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1071 return -TARGET_EFAULT
;
1072 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1073 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1074 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1075 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1076 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1077 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1078 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1079 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1080 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1081 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1082 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1083 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1084 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1085 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1086 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1087 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1088 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1089 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1090 unlock_user_struct(target_rusage
, target_addr
, 1);
#ifdef TARGET_NR_setrlimit
/*
 * Convert a guest rlimit value to a host rlim_t.  The guest's infinity
 * sentinel, and any value that does not round-trip through rlim_t,
 * maps to the host's RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY) {
        return RLIM_INFINITY;
    }

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result) {
        return RLIM_INFINITY;
    }

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to the guest representation.  Host infinity,
 * or any value not representable as a guest abi_long, becomes the
 * guest's TARGET_RLIM_INFINITY sentinel.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        target_rlim_swap = TARGET_RLIM_INFINITY;
    } else {
        target_rlim_swap = rlim;
    }
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
1129 static inline int target_to_host_resource(int code
)
1132 case TARGET_RLIMIT_AS
:
1134 case TARGET_RLIMIT_CORE
:
1136 case TARGET_RLIMIT_CPU
:
1138 case TARGET_RLIMIT_DATA
:
1140 case TARGET_RLIMIT_FSIZE
:
1141 return RLIMIT_FSIZE
;
1142 case TARGET_RLIMIT_LOCKS
:
1143 return RLIMIT_LOCKS
;
1144 case TARGET_RLIMIT_MEMLOCK
:
1145 return RLIMIT_MEMLOCK
;
1146 case TARGET_RLIMIT_MSGQUEUE
:
1147 return RLIMIT_MSGQUEUE
;
1148 case TARGET_RLIMIT_NICE
:
1150 case TARGET_RLIMIT_NOFILE
:
1151 return RLIMIT_NOFILE
;
1152 case TARGET_RLIMIT_NPROC
:
1153 return RLIMIT_NPROC
;
1154 case TARGET_RLIMIT_RSS
:
1156 case TARGET_RLIMIT_RTPRIO
:
1157 return RLIMIT_RTPRIO
;
1158 case TARGET_RLIMIT_SIGPENDING
:
1159 return RLIMIT_SIGPENDING
;
1160 case TARGET_RLIMIT_STACK
:
1161 return RLIMIT_STACK
;
1167 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1168 abi_ulong target_tv_addr
)
1170 struct target_timeval
*target_tv
;
1172 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1173 return -TARGET_EFAULT
;
1176 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1177 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1179 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1184 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1185 const struct timeval
*tv
)
1187 struct target_timeval
*target_tv
;
1189 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1190 return -TARGET_EFAULT
;
1193 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1194 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1196 unlock_user_struct(target_tv
, target_tv_addr
, 1);
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest 64-bit (__kernel_sock_timeval) timeval into host format.
 * Returns 0 on success, -TARGET_EFAULT on an unreadable guest address.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif
1220 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1221 const struct timeval
*tv
)
1223 struct target__kernel_sock_timeval
*target_tv
;
1225 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1226 return -TARGET_EFAULT
;
1229 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1230 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1232 unlock_user_struct(target_tv
, target_tv_addr
, 1);
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a guest struct timespec into host format.
 *
 * Guard fix: the second disjunct used to repeat
 * defined(TARGET_NR_pselect6); it must be TARGET_NR_pselect6_time64,
 * because do_pselect6() (built when either pselect6 syscall exists)
 * references target_to_host_timespec() in its !time64 path and would
 * fail to compile on a target that only defines pselect6_time64.
 *
 * Returns 0 on success, -TARGET_EFAULT on an unreadable guest address.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a guest 64-bit (__kernel_timespec) timespec into host format.
 * Returns 0 on success, -TARGET_EFAULT on an unreadable guest address.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1289 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1290 struct timespec
*host_ts
)
1292 struct target_timespec
*target_ts
;
1294 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1295 return -TARGET_EFAULT
;
1297 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1298 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1299 unlock_user_struct(target_ts
, target_addr
, 1);
1303 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1304 struct timespec
*host_ts
)
1306 struct target__kernel_timespec
*target_ts
;
1308 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1309 return -TARGET_EFAULT
;
1311 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1312 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1313 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_gettimeofday)
/*
 * Write a host struct timezone out to the guest.
 * Returns 0 on success, -TARGET_EFAULT on an unwritable guest address.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
/*
 * Read a guest struct timezone into host format.
 * Returns 0 on success, -TARGET_EFAULT on an unreadable guest address.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
1355 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1358 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1359 abi_ulong target_mq_attr_addr
)
1361 struct target_mq_attr
*target_mq_attr
;
1363 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1364 target_mq_attr_addr
, 1))
1365 return -TARGET_EFAULT
;
1367 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1368 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1369 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1370 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1372 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1377 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1378 const struct mq_attr
*attr
)
1380 struct target_mq_attr
*target_mq_attr
;
1382 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1383 target_mq_attr_addr
, 0))
1384 return -TARGET_EFAULT
;
1386 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1387 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1388 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1389 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1391 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* NULL guest addresses become NULL fd_set pointers. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* Translate the guest timeval to a timespec for safe_pselect6(). */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr)) {
            return -TARGET_EFAULT;
        }
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }

        /* Write back the remaining timeout, as select() does. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Old-style select(): the guest passes a single pointer to a struct
 * holding all five select() arguments; unpack it and defer to
 * do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6 / pselect6_time64.  time64 selects the 64-bit
 * guest timespec layout.  Returns target values and target errnos.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);

            sig.set = &set;
        } else {
            /* Just behave as if an empty sigset was passed */
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/*
 * Implement poll/ppoll/ppoll_time64.  ppoll selects the ppoll calling
 * convention (timespec + sigset); time64 selects the 64-bit guest
 * timespec layout.  Returns target values and target errnos.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
1710 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1713 return pipe2(host_pipe
, flags
);
1719 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1720 int flags
, int is_pipe2
)
1724 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1727 return get_errno(ret
);
1729 /* Several targets have special calling conventions for the original
1730 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1732 #if defined(TARGET_ALPHA)
1733 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1734 return host_pipe
[0];
1735 #elif defined(TARGET_MIPS)
1736 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1737 return host_pipe
[0];
1738 #elif defined(TARGET_SH4)
1739 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1740 return host_pipe
[0];
1741 #elif defined(TARGET_SPARC)
1742 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1743 return host_pipe
[0];
1747 if (put_user_s32(host_pipe
[0], pipedes
)
1748 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1749 return -TARGET_EFAULT
;
1750 return get_errno(ret
);
1753 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1754 abi_ulong target_addr
,
1757 struct target_ip_mreqn
*target_smreqn
;
1759 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1761 return -TARGET_EFAULT
;
1762 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1763 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1764 if (len
== sizeof(struct target_ip_mreqn
))
1765 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1766 unlock_user(target_smreqn
, target_addr
, 0);
1771 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1772 abi_ulong target_addr
,
1775 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1776 sa_family_t sa_family
;
1777 struct target_sockaddr
*target_saddr
;
1779 if (fd_trans_target_to_host_addr(fd
)) {
1780 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1783 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1785 return -TARGET_EFAULT
;
1787 sa_family
= tswap16(target_saddr
->sa_family
);
1789 /* Oops. The caller might send a incomplete sun_path; sun_path
1790 * must be terminated by \0 (see the manual page), but
1791 * unfortunately it is quite common to specify sockaddr_un
1792 * length as "strlen(x->sun_path)" while it should be
1793 * "strlen(...) + 1". We'll fix that here if needed.
1794 * Linux kernel has a similar feature.
1797 if (sa_family
== AF_UNIX
) {
1798 if (len
< unix_maxlen
&& len
> 0) {
1799 char *cp
= (char*)target_saddr
;
1801 if ( cp
[len
-1] && !cp
[len
] )
1804 if (len
> unix_maxlen
)
1808 memcpy(addr
, target_saddr
, len
);
1809 addr
->sa_family
= sa_family
;
1810 if (sa_family
== AF_NETLINK
) {
1811 struct sockaddr_nl
*nladdr
;
1813 nladdr
= (struct sockaddr_nl
*)addr
;
1814 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1815 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1816 } else if (sa_family
== AF_PACKET
) {
1817 struct target_sockaddr_ll
*lladdr
;
1819 lladdr
= (struct target_sockaddr_ll
*)addr
;
1820 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1821 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1823 unlock_user(target_saddr
, target_addr
, 0);
1828 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1829 struct sockaddr
*addr
,
1832 struct target_sockaddr
*target_saddr
;
1839 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1841 return -TARGET_EFAULT
;
1842 memcpy(target_saddr
, addr
, len
);
1843 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1844 sizeof(target_saddr
->sa_family
)) {
1845 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1847 if (addr
->sa_family
== AF_NETLINK
&&
1848 len
>= sizeof(struct target_sockaddr_nl
)) {
1849 struct target_sockaddr_nl
*target_nl
=
1850 (struct target_sockaddr_nl
*)target_saddr
;
1851 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1852 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1853 } else if (addr
->sa_family
== AF_PACKET
) {
1854 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1855 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1856 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1857 } else if (addr
->sa_family
== AF_INET6
&&
1858 len
>= sizeof(struct target_sockaddr_in6
)) {
1859 struct target_sockaddr_in6
*target_in6
=
1860 (struct target_sockaddr_in6
*)target_saddr
;
1861 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1863 unlock_user(target_saddr
, target_addr
, len
);
1868 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1869 struct target_msghdr
*target_msgh
)
1871 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1872 abi_long msg_controllen
;
1873 abi_ulong target_cmsg_addr
;
1874 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1875 socklen_t space
= 0;
1877 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1878 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1880 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1881 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1882 target_cmsg_start
= target_cmsg
;
1884 return -TARGET_EFAULT
;
1886 while (cmsg
&& target_cmsg
) {
1887 void *data
= CMSG_DATA(cmsg
);
1888 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1890 int len
= tswapal(target_cmsg
->cmsg_len
)
1891 - sizeof(struct target_cmsghdr
);
1893 space
+= CMSG_SPACE(len
);
1894 if (space
> msgh
->msg_controllen
) {
1895 space
-= CMSG_SPACE(len
);
1896 /* This is a QEMU bug, since we allocated the payload
1897 * area ourselves (unlike overflow in host-to-target
1898 * conversion, which is just the guest giving us a buffer
1899 * that's too small). It can't happen for the payload types
1900 * we currently support; if it becomes an issue in future
1901 * we would need to improve our allocation strategy to
1902 * something more intelligent than "twice the size of the
1903 * target buffer we're reading from".
1905 qemu_log_mask(LOG_UNIMP
,
1906 ("Unsupported ancillary data %d/%d: "
1907 "unhandled msg size\n"),
1908 tswap32(target_cmsg
->cmsg_level
),
1909 tswap32(target_cmsg
->cmsg_type
));
1913 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1914 cmsg
->cmsg_level
= SOL_SOCKET
;
1916 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1918 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1919 cmsg
->cmsg_len
= CMSG_LEN(len
);
1921 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1922 int *fd
= (int *)data
;
1923 int *target_fd
= (int *)target_data
;
1924 int i
, numfds
= len
/ sizeof(int);
1926 for (i
= 0; i
< numfds
; i
++) {
1927 __get_user(fd
[i
], target_fd
+ i
);
1929 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1930 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1931 struct ucred
*cred
= (struct ucred
*)data
;
1932 struct target_ucred
*target_cred
=
1933 (struct target_ucred
*)target_data
;
1935 __get_user(cred
->pid
, &target_cred
->pid
);
1936 __get_user(cred
->uid
, &target_cred
->uid
);
1937 __get_user(cred
->gid
, &target_cred
->gid
);
1939 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1940 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1941 memcpy(data
, target_data
, len
);
1944 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1945 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1948 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1950 msgh
->msg_controllen
= space
;
1954 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1955 struct msghdr
*msgh
)
1957 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1958 abi_long msg_controllen
;
1959 abi_ulong target_cmsg_addr
;
1960 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1961 socklen_t space
= 0;
1963 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1964 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1966 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1967 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1968 target_cmsg_start
= target_cmsg
;
1970 return -TARGET_EFAULT
;
1972 while (cmsg
&& target_cmsg
) {
1973 void *data
= CMSG_DATA(cmsg
);
1974 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1976 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1977 int tgt_len
, tgt_space
;
1979 /* We never copy a half-header but may copy half-data;
1980 * this is Linux's behaviour in put_cmsg(). Note that
1981 * truncation here is a guest problem (which we report
1982 * to the guest via the CTRUNC bit), unlike truncation
1983 * in target_to_host_cmsg, which is a QEMU bug.
1985 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1986 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1990 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1991 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1993 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1995 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1997 /* Payload types which need a different size of payload on
1998 * the target must adjust tgt_len here.
2001 switch (cmsg
->cmsg_level
) {
2003 switch (cmsg
->cmsg_type
) {
2005 tgt_len
= sizeof(struct target_timeval
);
2015 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
2016 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
2017 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
2020 /* We must now copy-and-convert len bytes of payload
2021 * into tgt_len bytes of destination space. Bear in mind
2022 * that in both source and destination we may be dealing
2023 * with a truncated value!
2025 switch (cmsg
->cmsg_level
) {
2027 switch (cmsg
->cmsg_type
) {
2030 int *fd
= (int *)data
;
2031 int *target_fd
= (int *)target_data
;
2032 int i
, numfds
= tgt_len
/ sizeof(int);
2034 for (i
= 0; i
< numfds
; i
++) {
2035 __put_user(fd
[i
], target_fd
+ i
);
2041 struct timeval
*tv
= (struct timeval
*)data
;
2042 struct target_timeval
*target_tv
=
2043 (struct target_timeval
*)target_data
;
2045 if (len
!= sizeof(struct timeval
) ||
2046 tgt_len
!= sizeof(struct target_timeval
)) {
2050 /* copy struct timeval to target */
2051 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
2052 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
2055 case SCM_CREDENTIALS
:
2057 struct ucred
*cred
= (struct ucred
*)data
;
2058 struct target_ucred
*target_cred
=
2059 (struct target_ucred
*)target_data
;
2061 __put_user(cred
->pid
, &target_cred
->pid
);
2062 __put_user(cred
->uid
, &target_cred
->uid
);
2063 __put_user(cred
->gid
, &target_cred
->gid
);
2072 switch (cmsg
->cmsg_type
) {
2075 uint32_t *v
= (uint32_t *)data
;
2076 uint32_t *t_int
= (uint32_t *)target_data
;
2078 if (len
!= sizeof(uint32_t) ||
2079 tgt_len
!= sizeof(uint32_t)) {
2082 __put_user(*v
, t_int
);
2088 struct sock_extended_err ee
;
2089 struct sockaddr_in offender
;
2091 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
2092 struct errhdr_t
*target_errh
=
2093 (struct errhdr_t
*)target_data
;
2095 if (len
!= sizeof(struct errhdr_t
) ||
2096 tgt_len
!= sizeof(struct errhdr_t
)) {
2099 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2100 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2101 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2102 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2103 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2104 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2105 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2106 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2107 (void *) &errh
->offender
, sizeof(errh
->offender
));
2116 switch (cmsg
->cmsg_type
) {
2119 uint32_t *v
= (uint32_t *)data
;
2120 uint32_t *t_int
= (uint32_t *)target_data
;
2122 if (len
!= sizeof(uint32_t) ||
2123 tgt_len
!= sizeof(uint32_t)) {
2126 __put_user(*v
, t_int
);
2132 struct sock_extended_err ee
;
2133 struct sockaddr_in6 offender
;
2135 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2136 struct errhdr6_t
*target_errh
=
2137 (struct errhdr6_t
*)target_data
;
2139 if (len
!= sizeof(struct errhdr6_t
) ||
2140 tgt_len
!= sizeof(struct errhdr6_t
)) {
2143 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2144 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2145 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2146 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2147 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2148 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2149 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2150 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2151 (void *) &errh
->offender
, sizeof(errh
->offender
));
2161 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2162 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2163 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2164 if (tgt_len
> len
) {
2165 memset(target_data
+ len
, 0, tgt_len
- len
);
2169 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2170 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2171 if (msg_controllen
< tgt_space
) {
2172 tgt_space
= msg_controllen
;
2174 msg_controllen
-= tgt_space
;
2176 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2177 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2180 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2182 target_msgh
->msg_controllen
= tswapal(space
);
2186 /* do_setsockopt() Must return target values and target errnos. */
2187 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2188 abi_ulong optval_addr
, socklen_t optlen
)
2192 struct ip_mreqn
*ip_mreq
;
2193 struct ip_mreq_source
*ip_mreq_source
;
2198 /* TCP and UDP options all take an 'int' value. */
2199 if (optlen
< sizeof(uint32_t))
2200 return -TARGET_EINVAL
;
2202 if (get_user_u32(val
, optval_addr
))
2203 return -TARGET_EFAULT
;
2204 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2211 case IP_ROUTER_ALERT
:
2215 case IP_MTU_DISCOVER
:
2222 case IP_MULTICAST_TTL
:
2223 case IP_MULTICAST_LOOP
:
2225 if (optlen
>= sizeof(uint32_t)) {
2226 if (get_user_u32(val
, optval_addr
))
2227 return -TARGET_EFAULT
;
2228 } else if (optlen
>= 1) {
2229 if (get_user_u8(val
, optval_addr
))
2230 return -TARGET_EFAULT
;
2232 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2234 case IP_ADD_MEMBERSHIP
:
2235 case IP_DROP_MEMBERSHIP
:
2236 if (optlen
< sizeof (struct target_ip_mreq
) ||
2237 optlen
> sizeof (struct target_ip_mreqn
))
2238 return -TARGET_EINVAL
;
2240 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2241 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2242 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2245 case IP_BLOCK_SOURCE
:
2246 case IP_UNBLOCK_SOURCE
:
2247 case IP_ADD_SOURCE_MEMBERSHIP
:
2248 case IP_DROP_SOURCE_MEMBERSHIP
:
2249 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2250 return -TARGET_EINVAL
;
2252 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2253 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2254 unlock_user (ip_mreq_source
, optval_addr
, 0);
2263 case IPV6_MTU_DISCOVER
:
2266 case IPV6_RECVPKTINFO
:
2267 case IPV6_UNICAST_HOPS
:
2268 case IPV6_MULTICAST_HOPS
:
2269 case IPV6_MULTICAST_LOOP
:
2271 case IPV6_RECVHOPLIMIT
:
2272 case IPV6_2292HOPLIMIT
:
2275 case IPV6_2292PKTINFO
:
2276 case IPV6_RECVTCLASS
:
2277 case IPV6_RECVRTHDR
:
2278 case IPV6_2292RTHDR
:
2279 case IPV6_RECVHOPOPTS
:
2280 case IPV6_2292HOPOPTS
:
2281 case IPV6_RECVDSTOPTS
:
2282 case IPV6_2292DSTOPTS
:
2284 case IPV6_ADDR_PREFERENCES
:
2285 #ifdef IPV6_RECVPATHMTU
2286 case IPV6_RECVPATHMTU
:
2288 #ifdef IPV6_TRANSPARENT
2289 case IPV6_TRANSPARENT
:
2291 #ifdef IPV6_FREEBIND
2294 #ifdef IPV6_RECVORIGDSTADDR
2295 case IPV6_RECVORIGDSTADDR
:
2298 if (optlen
< sizeof(uint32_t)) {
2299 return -TARGET_EINVAL
;
2301 if (get_user_u32(val
, optval_addr
)) {
2302 return -TARGET_EFAULT
;
2304 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2305 &val
, sizeof(val
)));
2309 struct in6_pktinfo pki
;
2311 if (optlen
< sizeof(pki
)) {
2312 return -TARGET_EINVAL
;
2315 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2316 return -TARGET_EFAULT
;
2319 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2321 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2322 &pki
, sizeof(pki
)));
2325 case IPV6_ADD_MEMBERSHIP
:
2326 case IPV6_DROP_MEMBERSHIP
:
2328 struct ipv6_mreq ipv6mreq
;
2330 if (optlen
< sizeof(ipv6mreq
)) {
2331 return -TARGET_EINVAL
;
2334 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2335 return -TARGET_EFAULT
;
2338 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2340 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2341 &ipv6mreq
, sizeof(ipv6mreq
)));
2352 struct icmp6_filter icmp6f
;
2354 if (optlen
> sizeof(icmp6f
)) {
2355 optlen
= sizeof(icmp6f
);
2358 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2359 return -TARGET_EFAULT
;
2362 for (val
= 0; val
< 8; val
++) {
2363 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2366 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2378 /* those take an u32 value */
2379 if (optlen
< sizeof(uint32_t)) {
2380 return -TARGET_EINVAL
;
2383 if (get_user_u32(val
, optval_addr
)) {
2384 return -TARGET_EFAULT
;
2386 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2387 &val
, sizeof(val
)));
2394 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2399 char *alg_key
= g_malloc(optlen
);
2402 return -TARGET_ENOMEM
;
2404 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2406 return -TARGET_EFAULT
;
2408 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2413 case ALG_SET_AEAD_AUTHSIZE
:
2415 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2424 case TARGET_SOL_SOCKET
:
2426 case TARGET_SO_RCVTIMEO
:
2430 optname
= SO_RCVTIMEO
;
2433 if (optlen
!= sizeof(struct target_timeval
)) {
2434 return -TARGET_EINVAL
;
2437 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2438 return -TARGET_EFAULT
;
2441 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2445 case TARGET_SO_SNDTIMEO
:
2446 optname
= SO_SNDTIMEO
;
2448 case TARGET_SO_ATTACH_FILTER
:
2450 struct target_sock_fprog
*tfprog
;
2451 struct target_sock_filter
*tfilter
;
2452 struct sock_fprog fprog
;
2453 struct sock_filter
*filter
;
2456 if (optlen
!= sizeof(*tfprog
)) {
2457 return -TARGET_EINVAL
;
2459 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2460 return -TARGET_EFAULT
;
2462 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2463 tswapal(tfprog
->filter
), 0)) {
2464 unlock_user_struct(tfprog
, optval_addr
, 1);
2465 return -TARGET_EFAULT
;
2468 fprog
.len
= tswap16(tfprog
->len
);
2469 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2470 if (filter
== NULL
) {
2471 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2472 unlock_user_struct(tfprog
, optval_addr
, 1);
2473 return -TARGET_ENOMEM
;
2475 for (i
= 0; i
< fprog
.len
; i
++) {
2476 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2477 filter
[i
].jt
= tfilter
[i
].jt
;
2478 filter
[i
].jf
= tfilter
[i
].jf
;
2479 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2481 fprog
.filter
= filter
;
2483 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2484 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2487 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2488 unlock_user_struct(tfprog
, optval_addr
, 1);
2491 case TARGET_SO_BINDTODEVICE
:
2493 char *dev_ifname
, *addr_ifname
;
2495 if (optlen
> IFNAMSIZ
- 1) {
2496 optlen
= IFNAMSIZ
- 1;
2498 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2500 return -TARGET_EFAULT
;
2502 optname
= SO_BINDTODEVICE
;
2503 addr_ifname
= alloca(IFNAMSIZ
);
2504 memcpy(addr_ifname
, dev_ifname
, optlen
);
2505 addr_ifname
[optlen
] = 0;
2506 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2507 addr_ifname
, optlen
));
2508 unlock_user (dev_ifname
, optval_addr
, 0);
2511 case TARGET_SO_LINGER
:
2514 struct target_linger
*tlg
;
2516 if (optlen
!= sizeof(struct target_linger
)) {
2517 return -TARGET_EINVAL
;
2519 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2520 return -TARGET_EFAULT
;
2522 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2523 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2524 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2526 unlock_user_struct(tlg
, optval_addr
, 0);
2529 /* Options with 'int' argument. */
2530 case TARGET_SO_DEBUG
:
2533 case TARGET_SO_REUSEADDR
:
2534 optname
= SO_REUSEADDR
;
2537 case TARGET_SO_REUSEPORT
:
2538 optname
= SO_REUSEPORT
;
2541 case TARGET_SO_TYPE
:
2544 case TARGET_SO_ERROR
:
2547 case TARGET_SO_DONTROUTE
:
2548 optname
= SO_DONTROUTE
;
2550 case TARGET_SO_BROADCAST
:
2551 optname
= SO_BROADCAST
;
2553 case TARGET_SO_SNDBUF
:
2554 optname
= SO_SNDBUF
;
2556 case TARGET_SO_SNDBUFFORCE
:
2557 optname
= SO_SNDBUFFORCE
;
2559 case TARGET_SO_RCVBUF
:
2560 optname
= SO_RCVBUF
;
2562 case TARGET_SO_RCVBUFFORCE
:
2563 optname
= SO_RCVBUFFORCE
;
2565 case TARGET_SO_KEEPALIVE
:
2566 optname
= SO_KEEPALIVE
;
2568 case TARGET_SO_OOBINLINE
:
2569 optname
= SO_OOBINLINE
;
2571 case TARGET_SO_NO_CHECK
:
2572 optname
= SO_NO_CHECK
;
2574 case TARGET_SO_PRIORITY
:
2575 optname
= SO_PRIORITY
;
2578 case TARGET_SO_BSDCOMPAT
:
2579 optname
= SO_BSDCOMPAT
;
2582 case TARGET_SO_PASSCRED
:
2583 optname
= SO_PASSCRED
;
2585 case TARGET_SO_PASSSEC
:
2586 optname
= SO_PASSSEC
;
2588 case TARGET_SO_TIMESTAMP
:
2589 optname
= SO_TIMESTAMP
;
2591 case TARGET_SO_RCVLOWAT
:
2592 optname
= SO_RCVLOWAT
;
2597 if (optlen
< sizeof(uint32_t))
2598 return -TARGET_EINVAL
;
2600 if (get_user_u32(val
, optval_addr
))
2601 return -TARGET_EFAULT
;
2602 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2607 case NETLINK_PKTINFO
:
2608 case NETLINK_ADD_MEMBERSHIP
:
2609 case NETLINK_DROP_MEMBERSHIP
:
2610 case NETLINK_BROADCAST_ERROR
:
2611 case NETLINK_NO_ENOBUFS
:
2612 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2613 case NETLINK_LISTEN_ALL_NSID
:
2614 case NETLINK_CAP_ACK
:
2615 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2616 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2617 case NETLINK_EXT_ACK
:
2618 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2619 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2620 case NETLINK_GET_STRICT_CHK
:
2621 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2627 if (optlen
< sizeof(uint32_t)) {
2628 return -TARGET_EINVAL
;
2630 if (get_user_u32(val
, optval_addr
)) {
2631 return -TARGET_EFAULT
;
2633 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2636 #endif /* SOL_NETLINK */
2639 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2641 ret
= -TARGET_ENOPROTOOPT
;
2646 /* do_getsockopt() Must return target values and target errnos. */
2647 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2648 abi_ulong optval_addr
, abi_ulong optlen
)
2655 case TARGET_SOL_SOCKET
:
2658 /* These don't just return a single integer */
2659 case TARGET_SO_PEERNAME
:
2661 case TARGET_SO_RCVTIMEO
: {
2665 optname
= SO_RCVTIMEO
;
2668 if (get_user_u32(len
, optlen
)) {
2669 return -TARGET_EFAULT
;
2672 return -TARGET_EINVAL
;
2676 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2681 if (len
> sizeof(struct target_timeval
)) {
2682 len
= sizeof(struct target_timeval
);
2684 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2685 return -TARGET_EFAULT
;
2687 if (put_user_u32(len
, optlen
)) {
2688 return -TARGET_EFAULT
;
2692 case TARGET_SO_SNDTIMEO
:
2693 optname
= SO_SNDTIMEO
;
2695 case TARGET_SO_PEERCRED
: {
2698 struct target_ucred
*tcr
;
2700 if (get_user_u32(len
, optlen
)) {
2701 return -TARGET_EFAULT
;
2704 return -TARGET_EINVAL
;
2708 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2716 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2717 return -TARGET_EFAULT
;
2719 __put_user(cr
.pid
, &tcr
->pid
);
2720 __put_user(cr
.uid
, &tcr
->uid
);
2721 __put_user(cr
.gid
, &tcr
->gid
);
2722 unlock_user_struct(tcr
, optval_addr
, 1);
2723 if (put_user_u32(len
, optlen
)) {
2724 return -TARGET_EFAULT
;
2728 case TARGET_SO_PEERSEC
: {
2731 if (get_user_u32(len
, optlen
)) {
2732 return -TARGET_EFAULT
;
2735 return -TARGET_EINVAL
;
2737 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2739 return -TARGET_EFAULT
;
2742 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2744 if (put_user_u32(lv
, optlen
)) {
2745 ret
= -TARGET_EFAULT
;
2747 unlock_user(name
, optval_addr
, lv
);
2750 case TARGET_SO_LINGER
:
2754 struct target_linger
*tlg
;
2756 if (get_user_u32(len
, optlen
)) {
2757 return -TARGET_EFAULT
;
2760 return -TARGET_EINVAL
;
2764 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2772 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2773 return -TARGET_EFAULT
;
2775 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2776 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2777 unlock_user_struct(tlg
, optval_addr
, 1);
2778 if (put_user_u32(len
, optlen
)) {
2779 return -TARGET_EFAULT
;
2783 /* Options with 'int' argument. */
2784 case TARGET_SO_DEBUG
:
2787 case TARGET_SO_REUSEADDR
:
2788 optname
= SO_REUSEADDR
;
2791 case TARGET_SO_REUSEPORT
:
2792 optname
= SO_REUSEPORT
;
2795 case TARGET_SO_TYPE
:
2798 case TARGET_SO_ERROR
:
2801 case TARGET_SO_DONTROUTE
:
2802 optname
= SO_DONTROUTE
;
2804 case TARGET_SO_BROADCAST
:
2805 optname
= SO_BROADCAST
;
2807 case TARGET_SO_SNDBUF
:
2808 optname
= SO_SNDBUF
;
2810 case TARGET_SO_RCVBUF
:
2811 optname
= SO_RCVBUF
;
2813 case TARGET_SO_KEEPALIVE
:
2814 optname
= SO_KEEPALIVE
;
2816 case TARGET_SO_OOBINLINE
:
2817 optname
= SO_OOBINLINE
;
2819 case TARGET_SO_NO_CHECK
:
2820 optname
= SO_NO_CHECK
;
2822 case TARGET_SO_PRIORITY
:
2823 optname
= SO_PRIORITY
;
2826 case TARGET_SO_BSDCOMPAT
:
2827 optname
= SO_BSDCOMPAT
;
2830 case TARGET_SO_PASSCRED
:
2831 optname
= SO_PASSCRED
;
2833 case TARGET_SO_TIMESTAMP
:
2834 optname
= SO_TIMESTAMP
;
2836 case TARGET_SO_RCVLOWAT
:
2837 optname
= SO_RCVLOWAT
;
2839 case TARGET_SO_ACCEPTCONN
:
2840 optname
= SO_ACCEPTCONN
;
2842 case TARGET_SO_PROTOCOL
:
2843 optname
= SO_PROTOCOL
;
2845 case TARGET_SO_DOMAIN
:
2846 optname
= SO_DOMAIN
;
2854 /* TCP and UDP options all take an 'int' value. */
2856 if (get_user_u32(len
, optlen
))
2857 return -TARGET_EFAULT
;
2859 return -TARGET_EINVAL
;
2861 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2864 if (optname
== SO_TYPE
) {
2865 val
= host_to_target_sock_type(val
);
2870 if (put_user_u32(val
, optval_addr
))
2871 return -TARGET_EFAULT
;
2873 if (put_user_u8(val
, optval_addr
))
2874 return -TARGET_EFAULT
;
2876 if (put_user_u32(len
, optlen
))
2877 return -TARGET_EFAULT
;
2884 case IP_ROUTER_ALERT
:
2888 case IP_MTU_DISCOVER
:
2894 case IP_MULTICAST_TTL
:
2895 case IP_MULTICAST_LOOP
:
2896 if (get_user_u32(len
, optlen
))
2897 return -TARGET_EFAULT
;
2899 return -TARGET_EINVAL
;
2901 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2904 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2906 if (put_user_u32(len
, optlen
)
2907 || put_user_u8(val
, optval_addr
))
2908 return -TARGET_EFAULT
;
2910 if (len
> sizeof(int))
2912 if (put_user_u32(len
, optlen
)
2913 || put_user_u32(val
, optval_addr
))
2914 return -TARGET_EFAULT
;
2918 ret
= -TARGET_ENOPROTOOPT
;
2924 case IPV6_MTU_DISCOVER
:
2927 case IPV6_RECVPKTINFO
:
2928 case IPV6_UNICAST_HOPS
:
2929 case IPV6_MULTICAST_HOPS
:
2930 case IPV6_MULTICAST_LOOP
:
2932 case IPV6_RECVHOPLIMIT
:
2933 case IPV6_2292HOPLIMIT
:
2936 case IPV6_2292PKTINFO
:
2937 case IPV6_RECVTCLASS
:
2938 case IPV6_RECVRTHDR
:
2939 case IPV6_2292RTHDR
:
2940 case IPV6_RECVHOPOPTS
:
2941 case IPV6_2292HOPOPTS
:
2942 case IPV6_RECVDSTOPTS
:
2943 case IPV6_2292DSTOPTS
:
2945 case IPV6_ADDR_PREFERENCES
:
2946 #ifdef IPV6_RECVPATHMTU
2947 case IPV6_RECVPATHMTU
:
2949 #ifdef IPV6_TRANSPARENT
2950 case IPV6_TRANSPARENT
:
2952 #ifdef IPV6_FREEBIND
2955 #ifdef IPV6_RECVORIGDSTADDR
2956 case IPV6_RECVORIGDSTADDR
:
2958 if (get_user_u32(len
, optlen
))
2959 return -TARGET_EFAULT
;
2961 return -TARGET_EINVAL
;
2963 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2966 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2968 if (put_user_u32(len
, optlen
)
2969 || put_user_u8(val
, optval_addr
))
2970 return -TARGET_EFAULT
;
2972 if (len
> sizeof(int))
2974 if (put_user_u32(len
, optlen
)
2975 || put_user_u32(val
, optval_addr
))
2976 return -TARGET_EFAULT
;
2980 ret
= -TARGET_ENOPROTOOPT
;
2987 case NETLINK_PKTINFO
:
2988 case NETLINK_BROADCAST_ERROR
:
2989 case NETLINK_NO_ENOBUFS
:
2990 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2991 case NETLINK_LISTEN_ALL_NSID
:
2992 case NETLINK_CAP_ACK
:
2993 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2994 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2995 case NETLINK_EXT_ACK
:
2996 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2997 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2998 case NETLINK_GET_STRICT_CHK
:
2999 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
3000 if (get_user_u32(len
, optlen
)) {
3001 return -TARGET_EFAULT
;
3003 if (len
!= sizeof(val
)) {
3004 return -TARGET_EINVAL
;
3007 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3011 if (put_user_u32(lv
, optlen
)
3012 || put_user_u32(val
, optval_addr
)) {
3013 return -TARGET_EFAULT
;
3016 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3017 case NETLINK_LIST_MEMBERSHIPS
:
3021 if (get_user_u32(len
, optlen
)) {
3022 return -TARGET_EFAULT
;
3025 return -TARGET_EINVAL
;
3027 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
3028 if (!results
&& len
> 0) {
3029 return -TARGET_EFAULT
;
3032 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
3034 unlock_user(results
, optval_addr
, 0);
3037 /* swap host endianess to target endianess. */
3038 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
3039 results
[i
] = tswap32(results
[i
]);
3041 if (put_user_u32(lv
, optlen
)) {
3042 return -TARGET_EFAULT
;
3044 unlock_user(results
, optval_addr
, 0);
3047 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3052 #endif /* SOL_NETLINK */
3055 qemu_log_mask(LOG_UNIMP
,
3056 "getsockopt level=%d optname=%d not yet supported\n",
3058 ret
= -TARGET_EOPNOTSUPP
;
3064 /* Convert target low/high pair representing file offset into the host
3065 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3066 * as the kernel doesn't handle them either.
3068 static void target_to_host_low_high(abi_ulong tlow
,
3070 unsigned long *hlow
,
3071 unsigned long *hhigh
)
3073 uint64_t off
= tlow
|
3074 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3075 TARGET_LONG_BITS
/ 2;
3078 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3081 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3082 abi_ulong count
, int copy
)
3084 struct target_iovec
*target_vec
;
3086 abi_ulong total_len
, max_len
;
3089 bool bad_address
= false;
3095 if (count
> IOV_MAX
) {
3100 vec
= g_try_new0(struct iovec
, count
);
3106 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3107 count
* sizeof(struct target_iovec
), 1);
3108 if (target_vec
== NULL
) {
3113 /* ??? If host page size > target page size, this will result in a
3114 value larger than what we can actually support. */
3115 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3118 for (i
= 0; i
< count
; i
++) {
3119 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3120 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3125 } else if (len
== 0) {
3126 /* Zero length pointer is ignored. */
3127 vec
[i
].iov_base
= 0;
3129 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3130 /* If the first buffer pointer is bad, this is a fault. But
3131 * subsequent bad buffers will result in a partial write; this
3132 * is realized by filling the vector with null pointers and
3134 if (!vec
[i
].iov_base
) {
3145 if (len
> max_len
- total_len
) {
3146 len
= max_len
- total_len
;
3149 vec
[i
].iov_len
= len
;
3153 unlock_user(target_vec
, target_addr
, 0);
3158 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3159 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3162 unlock_user(target_vec
, target_addr
, 0);
3169 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3170 abi_ulong count
, int copy
)
3172 struct target_iovec
*target_vec
;
3175 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3176 count
* sizeof(struct target_iovec
), 1);
3178 for (i
= 0; i
< count
; i
++) {
3179 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3180 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3184 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3186 unlock_user(target_vec
, target_addr
, 0);
3192 static inline int target_to_host_sock_type(int *type
)
3195 int target_type
= *type
;
3197 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3198 case TARGET_SOCK_DGRAM
:
3199 host_type
= SOCK_DGRAM
;
3201 case TARGET_SOCK_STREAM
:
3202 host_type
= SOCK_STREAM
;
3205 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3208 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3209 #if defined(SOCK_CLOEXEC)
3210 host_type
|= SOCK_CLOEXEC
;
3212 return -TARGET_EINVAL
;
3215 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3216 #if defined(SOCK_NONBLOCK)
3217 host_type
|= SOCK_NONBLOCK
;
3218 #elif !defined(O_NONBLOCK)
3219 return -TARGET_EINVAL
;
3226 /* Try to emulate socket type flags after socket creation. */
3227 static int sock_flags_fixup(int fd
, int target_type
)
3229 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3230 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3231 int flags
= fcntl(fd
, F_GETFL
);
3232 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3234 return -TARGET_EINVAL
;
3241 /* do_socket() Must return target values and target errnos. */
3242 static abi_long
do_socket(int domain
, int type
, int protocol
)
3244 int target_type
= type
;
3247 ret
= target_to_host_sock_type(&type
);
3252 if (domain
== PF_NETLINK
&& !(
3253 #ifdef CONFIG_RTNETLINK
3254 protocol
== NETLINK_ROUTE
||
3256 protocol
== NETLINK_KOBJECT_UEVENT
||
3257 protocol
== NETLINK_AUDIT
)) {
3258 return -TARGET_EPROTONOSUPPORT
;
3261 if (domain
== AF_PACKET
||
3262 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3263 protocol
= tswap16(protocol
);
3266 ret
= get_errno(socket(domain
, type
, protocol
));
3268 ret
= sock_flags_fixup(ret
, target_type
);
3269 if (type
== SOCK_PACKET
) {
3270 /* Manage an obsolete case :
3271 * if socket type is SOCK_PACKET, bind by name
3273 fd_trans_register(ret
, &target_packet_trans
);
3274 } else if (domain
== PF_NETLINK
) {
3276 #ifdef CONFIG_RTNETLINK
3278 fd_trans_register(ret
, &target_netlink_route_trans
);
3281 case NETLINK_KOBJECT_UEVENT
:
3282 /* nothing to do: messages are strings */
3285 fd_trans_register(ret
, &target_netlink_audit_trans
);
3288 g_assert_not_reached();
3295 /* do_bind() Must return target values and target errnos. */
3296 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3302 if ((int)addrlen
< 0) {
3303 return -TARGET_EINVAL
;
3306 addr
= alloca(addrlen
+1);
3308 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3312 return get_errno(bind(sockfd
, addr
, addrlen
));
3315 /* do_connect() Must return target values and target errnos. */
3316 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3322 if ((int)addrlen
< 0) {
3323 return -TARGET_EINVAL
;
3326 addr
= alloca(addrlen
+1);
3328 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3332 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3335 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3336 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3337 int flags
, int send
)
3343 abi_ulong target_vec
;
3345 if (msgp
->msg_name
) {
3346 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3347 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3348 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3349 tswapal(msgp
->msg_name
),
3351 if (ret
== -TARGET_EFAULT
) {
3352 /* For connected sockets msg_name and msg_namelen must
3353 * be ignored, so returning EFAULT immediately is wrong.
3354 * Instead, pass a bad msg_name to the host kernel, and
3355 * let it decide whether to return EFAULT or not.
3357 msg
.msg_name
= (void *)-1;
3362 msg
.msg_name
= NULL
;
3363 msg
.msg_namelen
= 0;
3365 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3366 msg
.msg_control
= alloca(msg
.msg_controllen
);
3367 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3369 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3371 count
= tswapal(msgp
->msg_iovlen
);
3372 target_vec
= tswapal(msgp
->msg_iov
);
3374 if (count
> IOV_MAX
) {
3375 /* sendrcvmsg returns a different errno for this condition than
3376 * readv/writev, so we must catch it here before lock_iovec() does.
3378 ret
= -TARGET_EMSGSIZE
;
3382 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3383 target_vec
, count
, send
);
3385 ret
= -host_to_target_errno(errno
);
3388 msg
.msg_iovlen
= count
;
3392 if (fd_trans_target_to_host_data(fd
)) {
3395 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3396 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3397 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3398 msg
.msg_iov
->iov_len
);
3400 msg
.msg_iov
->iov_base
= host_msg
;
3401 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3405 ret
= target_to_host_cmsg(&msg
, msgp
);
3407 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3411 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3412 if (!is_error(ret
)) {
3414 if (fd_trans_host_to_target_data(fd
)) {
3415 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3416 MIN(msg
.msg_iov
->iov_len
, len
));
3418 ret
= host_to_target_cmsg(msgp
, &msg
);
3420 if (!is_error(ret
)) {
3421 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3422 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3423 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3424 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3425 msg
.msg_name
, msg
.msg_namelen
);
3437 unlock_iovec(vec
, target_vec
, count
, !send
);
3442 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3443 int flags
, int send
)
3446 struct target_msghdr
*msgp
;
3448 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3452 return -TARGET_EFAULT
;
3454 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3455 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3459 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3460 * so it might not have this *mmsg-specific flag either.
3462 #ifndef MSG_WAITFORONE
3463 #define MSG_WAITFORONE 0x10000
3466 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3467 unsigned int vlen
, unsigned int flags
,
3470 struct target_mmsghdr
*mmsgp
;
3474 if (vlen
> UIO_MAXIOV
) {
3478 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3480 return -TARGET_EFAULT
;
3483 for (i
= 0; i
< vlen
; i
++) {
3484 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3485 if (is_error(ret
)) {
3488 mmsgp
[i
].msg_len
= tswap32(ret
);
3489 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3490 if (flags
& MSG_WAITFORONE
) {
3491 flags
|= MSG_DONTWAIT
;
3495 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3497 /* Return number of datagrams sent if we sent any at all;
3498 * otherwise return the error.
3506 /* do_accept4() Must return target values and target errnos. */
3507 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3508 abi_ulong target_addrlen_addr
, int flags
)
3510 socklen_t addrlen
, ret_addrlen
;
3515 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3517 if (target_addr
== 0) {
3518 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3521 /* linux returns EFAULT if addrlen pointer is invalid */
3522 if (get_user_u32(addrlen
, target_addrlen_addr
))
3523 return -TARGET_EFAULT
;
3525 if ((int)addrlen
< 0) {
3526 return -TARGET_EINVAL
;
3529 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3530 return -TARGET_EFAULT
;
3533 addr
= alloca(addrlen
);
3535 ret_addrlen
= addrlen
;
3536 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3537 if (!is_error(ret
)) {
3538 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3539 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3540 ret
= -TARGET_EFAULT
;
3546 /* do_getpeername() Must return target values and target errnos. */
3547 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3548 abi_ulong target_addrlen_addr
)
3550 socklen_t addrlen
, ret_addrlen
;
3554 if (get_user_u32(addrlen
, target_addrlen_addr
))
3555 return -TARGET_EFAULT
;
3557 if ((int)addrlen
< 0) {
3558 return -TARGET_EINVAL
;
3561 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3562 return -TARGET_EFAULT
;
3565 addr
= alloca(addrlen
);
3567 ret_addrlen
= addrlen
;
3568 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3569 if (!is_error(ret
)) {
3570 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3571 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3572 ret
= -TARGET_EFAULT
;
3578 /* do_getsockname() Must return target values and target errnos. */
3579 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3580 abi_ulong target_addrlen_addr
)
3582 socklen_t addrlen
, ret_addrlen
;
3586 if (get_user_u32(addrlen
, target_addrlen_addr
))
3587 return -TARGET_EFAULT
;
3589 if ((int)addrlen
< 0) {
3590 return -TARGET_EINVAL
;
3593 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3594 return -TARGET_EFAULT
;
3597 addr
= alloca(addrlen
);
3599 ret_addrlen
= addrlen
;
3600 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3601 if (!is_error(ret
)) {
3602 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3603 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3604 ret
= -TARGET_EFAULT
;
3610 /* do_socketpair() Must return target values and target errnos. */
3611 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3612 abi_ulong target_tab_addr
)
3617 target_to_host_sock_type(&type
);
3619 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3620 if (!is_error(ret
)) {
3621 if (put_user_s32(tab
[0], target_tab_addr
)
3622 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3623 ret
= -TARGET_EFAULT
;
3628 /* do_sendto() Must return target values and target errnos. */
3629 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3630 abi_ulong target_addr
, socklen_t addrlen
)
3634 void *copy_msg
= NULL
;
3637 if ((int)addrlen
< 0) {
3638 return -TARGET_EINVAL
;
3641 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3643 return -TARGET_EFAULT
;
3644 if (fd_trans_target_to_host_data(fd
)) {
3645 copy_msg
= host_msg
;
3646 host_msg
= g_malloc(len
);
3647 memcpy(host_msg
, copy_msg
, len
);
3648 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3654 addr
= alloca(addrlen
+1);
3655 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3659 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3661 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3666 host_msg
= copy_msg
;
3668 unlock_user(host_msg
, msg
, 0);
3672 /* do_recvfrom() Must return target values and target errnos. */
3673 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3674 abi_ulong target_addr
,
3675 abi_ulong target_addrlen
)
3677 socklen_t addrlen
, ret_addrlen
;
3685 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3687 return -TARGET_EFAULT
;
3691 if (get_user_u32(addrlen
, target_addrlen
)) {
3692 ret
= -TARGET_EFAULT
;
3695 if ((int)addrlen
< 0) {
3696 ret
= -TARGET_EINVAL
;
3699 addr
= alloca(addrlen
);
3700 ret_addrlen
= addrlen
;
3701 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3702 addr
, &ret_addrlen
));
3704 addr
= NULL
; /* To keep compiler quiet. */
3705 addrlen
= 0; /* To keep compiler quiet. */
3706 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3708 if (!is_error(ret
)) {
3709 if (fd_trans_host_to_target_data(fd
)) {
3711 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3712 if (is_error(trans
)) {
3718 host_to_target_sockaddr(target_addr
, addr
,
3719 MIN(addrlen
, ret_addrlen
));
3720 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3721 ret
= -TARGET_EFAULT
;
3725 unlock_user(host_msg
, msg
, len
);
3728 unlock_user(host_msg
, msg
, 0);
3733 #ifdef TARGET_NR_socketcall
3734 /* do_socketcall() must return target values and target errnos. */
3735 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3737 static const unsigned nargs
[] = { /* number of arguments per operation */
3738 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3739 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3740 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3741 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3742 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3743 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3744 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3745 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3746 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3747 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3748 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3749 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3750 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3751 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3752 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3753 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3754 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3755 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3756 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3757 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3759 abi_long a
[6]; /* max 6 args */
3762 /* check the range of the first argument num */
3763 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3764 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3765 return -TARGET_EINVAL
;
3767 /* ensure we have space for args */
3768 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3769 return -TARGET_EINVAL
;
3771 /* collect the arguments in a[] according to nargs[] */
3772 for (i
= 0; i
< nargs
[num
]; ++i
) {
3773 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3774 return -TARGET_EFAULT
;
3777 /* now when we have the args, invoke the appropriate underlying function */
3779 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3780 return do_socket(a
[0], a
[1], a
[2]);
3781 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3782 return do_bind(a
[0], a
[1], a
[2]);
3783 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3784 return do_connect(a
[0], a
[1], a
[2]);
3785 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3786 return get_errno(listen(a
[0], a
[1]));
3787 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3788 return do_accept4(a
[0], a
[1], a
[2], 0);
3789 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3790 return do_getsockname(a
[0], a
[1], a
[2]);
3791 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3792 return do_getpeername(a
[0], a
[1], a
[2]);
3793 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3794 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3795 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3796 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3797 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3798 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3799 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3800 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3801 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3802 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3803 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3804 return get_errno(shutdown(a
[0], a
[1]));
3805 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3806 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3807 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3808 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3809 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3810 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3811 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3812 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3813 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3814 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3815 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3816 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3817 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3818 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3820 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3821 return -TARGET_EINVAL
;
3826 #define N_SHM_REGIONS 32
3828 static struct shm_region
{
3832 } shm_regions
[N_SHM_REGIONS
];
3834 #ifndef TARGET_SEMID64_DS
3835 /* asm-generic version of this struct */
3836 struct target_semid64_ds
3838 struct target_ipc_perm sem_perm
;
3839 abi_ulong sem_otime
;
3840 #if TARGET_ABI_BITS == 32
3841 abi_ulong __unused1
;
3843 abi_ulong sem_ctime
;
3844 #if TARGET_ABI_BITS == 32
3845 abi_ulong __unused2
;
3847 abi_ulong sem_nsems
;
3848 abi_ulong __unused3
;
3849 abi_ulong __unused4
;
3853 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3854 abi_ulong target_addr
)
3856 struct target_ipc_perm
*target_ip
;
3857 struct target_semid64_ds
*target_sd
;
3859 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3860 return -TARGET_EFAULT
;
3861 target_ip
= &(target_sd
->sem_perm
);
3862 host_ip
->__key
= tswap32(target_ip
->__key
);
3863 host_ip
->uid
= tswap32(target_ip
->uid
);
3864 host_ip
->gid
= tswap32(target_ip
->gid
);
3865 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3866 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3867 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3868 host_ip
->mode
= tswap32(target_ip
->mode
);
3870 host_ip
->mode
= tswap16(target_ip
->mode
);
3872 #if defined(TARGET_PPC)
3873 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3875 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3877 unlock_user_struct(target_sd
, target_addr
, 0);
3881 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3882 struct ipc_perm
*host_ip
)
3884 struct target_ipc_perm
*target_ip
;
3885 struct target_semid64_ds
*target_sd
;
3887 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3888 return -TARGET_EFAULT
;
3889 target_ip
= &(target_sd
->sem_perm
);
3890 target_ip
->__key
= tswap32(host_ip
->__key
);
3891 target_ip
->uid
= tswap32(host_ip
->uid
);
3892 target_ip
->gid
= tswap32(host_ip
->gid
);
3893 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3894 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3895 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3896 target_ip
->mode
= tswap32(host_ip
->mode
);
3898 target_ip
->mode
= tswap16(host_ip
->mode
);
3900 #if defined(TARGET_PPC)
3901 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3903 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3905 unlock_user_struct(target_sd
, target_addr
, 1);
3909 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3910 abi_ulong target_addr
)
3912 struct target_semid64_ds
*target_sd
;
3914 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3915 return -TARGET_EFAULT
;
3916 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3917 return -TARGET_EFAULT
;
3918 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3919 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3920 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3921 unlock_user_struct(target_sd
, target_addr
, 0);
3925 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3926 struct semid_ds
*host_sd
)
3928 struct target_semid64_ds
*target_sd
;
3930 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3931 return -TARGET_EFAULT
;
3932 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3933 return -TARGET_EFAULT
;
3934 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3935 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3936 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3937 unlock_user_struct(target_sd
, target_addr
, 1);
/*
 * Guest-layout mirror of the kernel's struct seminfo (IPC_INFO/SEM_INFO).
 * Field set matches the __put_user copies in host_to_target_seminfo().
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3954 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3955 struct seminfo
*host_seminfo
)
3957 struct target_seminfo
*target_seminfo
;
3958 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3959 return -TARGET_EFAULT
;
3960 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3961 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3962 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3963 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3964 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3965 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3966 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3967 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3968 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3969 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3970 unlock_user_struct(target_seminfo
, target_addr
, 1);
3976 struct semid_ds
*buf
;
3977 unsigned short *array
;
3978 struct seminfo
*__buf
;
3981 union target_semun
{
3988 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3989 abi_ulong target_addr
)
3992 unsigned short *array
;
3994 struct semid_ds semid_ds
;
3997 semun
.buf
= &semid_ds
;
3999 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4001 return get_errno(ret
);
4003 nsems
= semid_ds
.sem_nsems
;
4005 *host_array
= g_try_new(unsigned short, nsems
);
4007 return -TARGET_ENOMEM
;
4009 array
= lock_user(VERIFY_READ
, target_addr
,
4010 nsems
*sizeof(unsigned short), 1);
4012 g_free(*host_array
);
4013 return -TARGET_EFAULT
;
4016 for(i
=0; i
<nsems
; i
++) {
4017 __get_user((*host_array
)[i
], &array
[i
]);
4019 unlock_user(array
, target_addr
, 0);
4024 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4025 unsigned short **host_array
)
4028 unsigned short *array
;
4030 struct semid_ds semid_ds
;
4033 semun
.buf
= &semid_ds
;
4035 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4037 return get_errno(ret
);
4039 nsems
= semid_ds
.sem_nsems
;
4041 array
= lock_user(VERIFY_WRITE
, target_addr
,
4042 nsems
*sizeof(unsigned short), 0);
4044 return -TARGET_EFAULT
;
4046 for(i
=0; i
<nsems
; i
++) {
4047 __put_user((*host_array
)[i
], &array
[i
]);
4049 g_free(*host_array
);
4050 unlock_user(array
, target_addr
, 1);
4055 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4056 abi_ulong target_arg
)
4058 union target_semun target_su
= { .buf
= target_arg
};
4060 struct semid_ds dsarg
;
4061 unsigned short *array
= NULL
;
4062 struct seminfo seminfo
;
4063 abi_long ret
= -TARGET_EINVAL
;
4070 /* In 64 bit cross-endian situations, we will erroneously pick up
4071 * the wrong half of the union for the "val" element. To rectify
4072 * this, the entire 8-byte structure is byteswapped, followed by
4073 * a swap of the 4 byte val field. In other cases, the data is
4074 * already in proper host byte order. */
4075 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4076 target_su
.buf
= tswapal(target_su
.buf
);
4077 arg
.val
= tswap32(target_su
.val
);
4079 arg
.val
= target_su
.val
;
4081 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4085 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4089 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4090 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4097 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4101 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4102 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4108 arg
.__buf
= &seminfo
;
4109 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4110 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4118 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf for semop()/semtimedop(). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: +n, -n or 0 (wait-for-zero) */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
4131 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4132 abi_ulong target_addr
,
4135 struct target_sembuf
*target_sembuf
;
4138 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4139 nsops
*sizeof(struct target_sembuf
), 1);
4141 return -TARGET_EFAULT
;
4143 for(i
=0; i
<nsops
; i
++) {
4144 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4145 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4146 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4149 unlock_user(target_sembuf
, target_addr
, 0);
4154 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4155 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4158 * This macro is required to handle the s390 variants, which passes the
4159 * arguments in a different order than default.
4162 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4163 (__nsops), (__timeout), (__sops)
4165 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4166 (__nsops), 0, (__sops), (__timeout)
4169 static inline abi_long
do_semtimedop(int semid
,
4172 abi_long timeout
, bool time64
)
4174 struct sembuf
*sops
;
4175 struct timespec ts
, *pts
= NULL
;
4181 if (target_to_host_timespec64(pts
, timeout
)) {
4182 return -TARGET_EFAULT
;
4185 if (target_to_host_timespec(pts
, timeout
)) {
4186 return -TARGET_EFAULT
;
4191 if (nsops
> TARGET_SEMOPM
) {
4192 return -TARGET_E2BIG
;
4195 sops
= g_new(struct sembuf
, nsops
);
4197 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4199 return -TARGET_EFAULT
;
4202 ret
= -TARGET_ENOSYS
;
4203 #ifdef __NR_semtimedop
4204 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4207 if (ret
== -TARGET_ENOSYS
) {
4208 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4209 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4217 struct target_msqid_ds
4219 struct target_ipc_perm msg_perm
;
4220 abi_ulong msg_stime
;
4221 #if TARGET_ABI_BITS == 32
4222 abi_ulong __unused1
;
4224 abi_ulong msg_rtime
;
4225 #if TARGET_ABI_BITS == 32
4226 abi_ulong __unused2
;
4228 abi_ulong msg_ctime
;
4229 #if TARGET_ABI_BITS == 32
4230 abi_ulong __unused3
;
4232 abi_ulong __msg_cbytes
;
4234 abi_ulong msg_qbytes
;
4235 abi_ulong msg_lspid
;
4236 abi_ulong msg_lrpid
;
4237 abi_ulong __unused4
;
4238 abi_ulong __unused5
;
4241 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4242 abi_ulong target_addr
)
4244 struct target_msqid_ds
*target_md
;
4246 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4247 return -TARGET_EFAULT
;
4248 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4249 return -TARGET_EFAULT
;
4250 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4251 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4252 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4253 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4254 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4255 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4256 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4257 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4258 unlock_user_struct(target_md
, target_addr
, 0);
4262 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4263 struct msqid_ds
*host_md
)
4265 struct target_msqid_ds
*target_md
;
4267 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4268 return -TARGET_EFAULT
;
4269 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4270 return -TARGET_EFAULT
;
4271 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4272 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4273 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4274 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4275 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4276 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4277 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4278 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4279 unlock_user_struct(target_md
, target_addr
, 1);
/*
 * Guest-layout mirror of the kernel's struct msginfo (IPC_INFO/MSG_INFO).
 * Field set matches the __put_user copies in host_to_target_msginfo().
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4294 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4295 struct msginfo
*host_msginfo
)
4297 struct target_msginfo
*target_msginfo
;
4298 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4299 return -TARGET_EFAULT
;
4300 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4301 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4302 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4303 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4304 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4305 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4306 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4307 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4308 unlock_user_struct(target_msginfo
, target_addr
, 1);
4312 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4314 struct msqid_ds dsarg
;
4315 struct msginfo msginfo
;
4316 abi_long ret
= -TARGET_EINVAL
;
4324 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4325 return -TARGET_EFAULT
;
4326 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4327 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4328 return -TARGET_EFAULT
;
4331 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4335 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4336 if (host_to_target_msginfo(ptr
, &msginfo
))
4337 return -TARGET_EFAULT
;
4344 struct target_msgbuf
{
4349 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4350 ssize_t msgsz
, int msgflg
)
4352 struct target_msgbuf
*target_mb
;
4353 struct msgbuf
*host_mb
;
4357 return -TARGET_EINVAL
;
4360 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4361 return -TARGET_EFAULT
;
4362 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4364 unlock_user_struct(target_mb
, msgp
, 0);
4365 return -TARGET_ENOMEM
;
4367 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4368 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4369 ret
= -TARGET_ENOSYS
;
4371 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4374 if (ret
== -TARGET_ENOSYS
) {
4376 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4379 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4385 unlock_user_struct(target_mb
, msgp
, 0);
4391 #if defined(__sparc__)
4392 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4393 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4394 #elif defined(__s390x__)
4395 /* The s390 sys_ipc variant has only five parameters. */
4396 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4397 ((long int[]){(long int)__msgp, __msgtyp})
4399 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4400 ((long int[]){(long int)__msgp, __msgtyp}), 0
4404 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4405 ssize_t msgsz
, abi_long msgtyp
,
4408 struct target_msgbuf
*target_mb
;
4410 struct msgbuf
*host_mb
;
4414 return -TARGET_EINVAL
;
4417 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4418 return -TARGET_EFAULT
;
4420 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4422 ret
= -TARGET_ENOMEM
;
4425 ret
= -TARGET_ENOSYS
;
4427 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4430 if (ret
== -TARGET_ENOSYS
) {
4431 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4432 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4437 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4438 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4439 if (!target_mtext
) {
4440 ret
= -TARGET_EFAULT
;
4443 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4444 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4447 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4451 unlock_user_struct(target_mb
, msgp
, 1);
4456 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4457 abi_ulong target_addr
)
4459 struct target_shmid_ds
*target_sd
;
4461 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4462 return -TARGET_EFAULT
;
4463 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4464 return -TARGET_EFAULT
;
4465 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4466 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4467 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4468 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4469 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4470 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4471 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4472 unlock_user_struct(target_sd
, target_addr
, 0);
4476 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4477 struct shmid_ds
*host_sd
)
4479 struct target_shmid_ds
*target_sd
;
4481 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4482 return -TARGET_EFAULT
;
4483 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4484 return -TARGET_EFAULT
;
4485 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4486 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4487 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4488 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4489 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4490 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4491 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4492 unlock_user_struct(target_sd
, target_addr
, 1);
4496 struct target_shminfo
{
4504 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4505 struct shminfo
*host_shminfo
)
4507 struct target_shminfo
*target_shminfo
;
4508 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4509 return -TARGET_EFAULT
;
4510 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4511 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4512 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4513 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4514 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4515 unlock_user_struct(target_shminfo
, target_addr
, 1);
4519 struct target_shm_info
{
4524 abi_ulong swap_attempts
;
4525 abi_ulong swap_successes
;
4528 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4529 struct shm_info
*host_shm_info
)
4531 struct target_shm_info
*target_shm_info
;
4532 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4533 return -TARGET_EFAULT
;
4534 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4535 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4536 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4537 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4538 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4539 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4540 unlock_user_struct(target_shm_info
, target_addr
, 1);
4544 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4546 struct shmid_ds dsarg
;
4547 struct shminfo shminfo
;
4548 struct shm_info shm_info
;
4549 abi_long ret
= -TARGET_EINVAL
;
4557 if (target_to_host_shmid_ds(&dsarg
, buf
))
4558 return -TARGET_EFAULT
;
4559 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4560 if (host_to_target_shmid_ds(buf
, &dsarg
))
4561 return -TARGET_EFAULT
;
4564 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4565 if (host_to_target_shminfo(buf
, &shminfo
))
4566 return -TARGET_EFAULT
;
4569 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4570 if (host_to_target_shm_info(buf
, &shm_info
))
4571 return -TARGET_EFAULT
;
4576 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4583 #ifndef TARGET_FORCE_SHMLBA
4584 /* For most architectures, SHMLBA is the same as the page size;
4585 * some architectures have larger values, in which case they should
4586 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4587 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4588 * and defining its own value for SHMLBA.
4590 * The kernel also permits SHMLBA to be set by the architecture to a
4591 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4592 * this means that addresses are rounded to the large size if
4593 * SHM_RND is set but addresses not aligned to that size are not rejected
4594 * as long as they are at least page-aligned. Since the only architecture
4595 * which uses this is ia64 this code doesn't provide for that oddity.
4597 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4599 return TARGET_PAGE_SIZE
;
4603 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4604 int shmid
, abi_ulong shmaddr
, int shmflg
)
4606 CPUState
*cpu
= env_cpu(cpu_env
);
4609 struct shmid_ds shm_info
;
4613 /* shmat pointers are always untagged */
4615 /* find out the length of the shared memory segment */
4616 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4617 if (is_error(ret
)) {
4618 /* can't get length, bail out */
4622 shmlba
= target_shmlba(cpu_env
);
4624 if (shmaddr
& (shmlba
- 1)) {
4625 if (shmflg
& SHM_RND
) {
4626 shmaddr
&= ~(shmlba
- 1);
4628 return -TARGET_EINVAL
;
4631 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4632 return -TARGET_EINVAL
;
4638 * We're mapping shared memory, so ensure we generate code for parallel
4639 * execution and flush old translations. This will work up to the level
4640 * supported by the host -- anything that requires EXCP_ATOMIC will not
4641 * be atomic with respect to an external process.
4643 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4644 cpu
->tcg_cflags
|= CF_PARALLEL
;
4649 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4651 abi_ulong mmap_start
;
4653 /* In order to use the host shmat, we need to honor host SHMLBA. */
4654 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4656 if (mmap_start
== -1) {
4658 host_raddr
= (void *)-1;
4660 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4661 shmflg
| SHM_REMAP
);
4664 if (host_raddr
== (void *)-1) {
4666 return get_errno((long)host_raddr
);
4668 raddr
=h2g((unsigned long)host_raddr
);
4670 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4671 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4672 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4674 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4675 if (!shm_regions
[i
].in_use
) {
4676 shm_regions
[i
].in_use
= true;
4677 shm_regions
[i
].start
= raddr
;
4678 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4688 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4693 /* shmdt pointers are always untagged */
4697 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4698 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4699 shm_regions
[i
].in_use
= false;
4700 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4704 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
4711 #ifdef TARGET_NR_ipc
4712 /* ??? This only works with linear mappings. */
4713 /* do_ipc() must return target values and target errnos. */
4714 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4715 unsigned int call
, abi_long first
,
4716 abi_long second
, abi_long third
,
4717 abi_long ptr
, abi_long fifth
)
4722 version
= call
>> 16;
4727 ret
= do_semtimedop(first
, ptr
, second
, 0, false);
4729 case IPCOP_semtimedop
:
4731 * The s390 sys_ipc variant has only five parameters instead of six
4732 * (as for default variant) and the only difference is the handling of
4733 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4734 * to a struct timespec where the generic variant uses fifth parameter.
4736 #if defined(TARGET_S390X)
4737 ret
= do_semtimedop(first
, ptr
, second
, third
, TARGET_ABI_BITS
== 64);
4739 ret
= do_semtimedop(first
, ptr
, second
, fifth
, TARGET_ABI_BITS
== 64);
4744 ret
= get_errno(semget(first
, second
, third
));
4747 case IPCOP_semctl
: {
4748 /* The semun argument to semctl is passed by value, so dereference the
4751 get_user_ual(atptr
, ptr
);
4752 ret
= do_semctl(first
, second
, third
, atptr
);
4757 ret
= get_errno(msgget(first
, second
));
4761 ret
= do_msgsnd(first
, ptr
, second
, third
);
4765 ret
= do_msgctl(first
, second
, ptr
);
4772 struct target_ipc_kludge
{
4777 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4778 ret
= -TARGET_EFAULT
;
4782 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4784 unlock_user_struct(tmp
, ptr
, 0);
4788 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4797 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4798 if (is_error(raddr
))
4799 return get_errno(raddr
);
4800 if (put_user_ual(raddr
, third
))
4801 return -TARGET_EFAULT
;
4805 ret
= -TARGET_EINVAL
;
4810 ret
= do_shmdt(ptr
);
4814 /* IPC_* flag values are the same on all linux platforms */
4815 ret
= get_errno(shmget(first
, second
, third
));
4818 /* IPC_* and SHM_* command values are the same on all linux platforms */
4820 ret
= do_shmctl(first
, second
, ptr
);
4823 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4825 ret
= -TARGET_ENOSYS
;
4832 /* kernel structure types definitions */
4834 #define STRUCT(name, ...) STRUCT_ ## name,
4835 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4837 #include "syscall_types.h"
4841 #undef STRUCT_SPECIAL
4843 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4844 #define STRUCT_SPECIAL(name)
4845 #include "syscall_types.h"
4847 #undef STRUCT_SPECIAL
4849 #define MAX_STRUCT_SIZE 4096
4851 #ifdef CONFIG_FIEMAP
4852 /* So fiemap access checks don't overflow on 32 bit systems.
4853 * This is very slightly smaller than the limit imposed by
4854 * the underlying kernel.
4856 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4857 / sizeof(struct fiemap_extent))
4859 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4860 int fd
, int cmd
, abi_long arg
)
4862 /* The parameter for this ioctl is a struct fiemap followed
4863 * by an array of struct fiemap_extent whose size is set
4864 * in fiemap->fm_extent_count. The array is filled in by the
4867 int target_size_in
, target_size_out
;
4869 const argtype
*arg_type
= ie
->arg_type
;
4870 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4873 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4877 assert(arg_type
[0] == TYPE_PTR
);
4878 assert(ie
->access
== IOC_RW
);
4880 target_size_in
= thunk_type_size(arg_type
, 0);
4881 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4883 return -TARGET_EFAULT
;
4885 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4886 unlock_user(argptr
, arg
, 0);
4887 fm
= (struct fiemap
*)buf_temp
;
4888 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4889 return -TARGET_EINVAL
;
4892 outbufsz
= sizeof (*fm
) +
4893 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4895 if (outbufsz
> MAX_STRUCT_SIZE
) {
4896 /* We can't fit all the extents into the fixed size buffer.
4897 * Allocate one that is large enough and use it instead.
4899 fm
= g_try_malloc(outbufsz
);
4901 return -TARGET_ENOMEM
;
4903 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4906 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4907 if (!is_error(ret
)) {
4908 target_size_out
= target_size_in
;
4909 /* An extent_count of 0 means we were only counting the extents
4910 * so there are no structs to copy
4912 if (fm
->fm_extent_count
!= 0) {
4913 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4915 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4917 ret
= -TARGET_EFAULT
;
4919 /* Convert the struct fiemap */
4920 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4921 if (fm
->fm_extent_count
!= 0) {
4922 p
= argptr
+ target_size_in
;
4923 /* ...and then all the struct fiemap_extents */
4924 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4925 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4930 unlock_user(argptr
, arg
, target_size_out
);
4940 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4941 int fd
, int cmd
, abi_long arg
)
4943 const argtype
*arg_type
= ie
->arg_type
;
4947 struct ifconf
*host_ifconf
;
4949 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4950 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4951 int target_ifreq_size
;
4956 abi_long target_ifc_buf
;
4960 assert(arg_type
[0] == TYPE_PTR
);
4961 assert(ie
->access
== IOC_RW
);
4964 target_size
= thunk_type_size(arg_type
, 0);
4966 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4968 return -TARGET_EFAULT
;
4969 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4970 unlock_user(argptr
, arg
, 0);
4972 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4973 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4974 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4976 if (target_ifc_buf
!= 0) {
4977 target_ifc_len
= host_ifconf
->ifc_len
;
4978 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4979 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4981 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4982 if (outbufsz
> MAX_STRUCT_SIZE
) {
4984 * We can't fit all the extents into the fixed size buffer.
4985 * Allocate one that is large enough and use it instead.
4987 host_ifconf
= malloc(outbufsz
);
4989 return -TARGET_ENOMEM
;
4991 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4994 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4996 host_ifconf
->ifc_len
= host_ifc_len
;
4998 host_ifc_buf
= NULL
;
5000 host_ifconf
->ifc_buf
= host_ifc_buf
;
5002 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5003 if (!is_error(ret
)) {
5004 /* convert host ifc_len to target ifc_len */
5006 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5007 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5008 host_ifconf
->ifc_len
= target_ifc_len
;
5010 /* restore target ifc_buf */
5012 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5014 /* copy struct ifconf to target user */
5016 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5018 return -TARGET_EFAULT
;
5019 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5020 unlock_user(argptr
, arg
, target_size
);
5022 if (target_ifc_buf
!= 0) {
5023 /* copy ifreq[] to target user */
5024 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5025 for (i
= 0; i
< nb_ifreq
; i
++) {
5026 thunk_convert(argptr
+ i
* target_ifreq_size
,
5027 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5028 ifreq_arg_type
, THUNK_TARGET
);
5030 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5041 #if defined(CONFIG_USBFS)
5042 #if HOST_LONG_BITS > 64
5043 #error USBDEVFS thunks do not support >64 bit hosts yet.
5046 uint64_t target_urb_adr
;
5047 uint64_t target_buf_adr
;
5048 char *target_buf_ptr
;
5049 struct usbdevfs_urb host_urb
;
5052 static GHashTable
*usbdevfs_urb_hashtable(void)
5054 static GHashTable
*urb_hashtable
;
5056 if (!urb_hashtable
) {
5057 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
5059 return urb_hashtable
;
5062 static void urb_hashtable_insert(struct live_urb
*urb
)
5064 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5065 g_hash_table_insert(urb_hashtable
, urb
, urb
);
5068 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
5070 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5071 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
5074 static void urb_hashtable_remove(struct live_urb
*urb
)
5076 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5077 g_hash_table_remove(urb_hashtable
, urb
);
5081 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5082 int fd
, int cmd
, abi_long arg
)
5084 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
5085 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
5086 struct live_urb
*lurb
;
5090 uintptr_t target_urb_adr
;
5093 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
5095 memset(buf_temp
, 0, sizeof(uint64_t));
5096 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5097 if (is_error(ret
)) {
5101 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
5102 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
5103 if (!lurb
->target_urb_adr
) {
5104 return -TARGET_EFAULT
;
5106 urb_hashtable_remove(lurb
);
5107 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5108 lurb
->host_urb
.buffer_length
);
5109 lurb
->target_buf_ptr
= NULL
;
5111 /* restore the guest buffer pointer */
5112 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5114 /* update the guest urb struct */
5115 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5118 return -TARGET_EFAULT
;
5120 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5121 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5123 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5124 /* write back the urb handle */
5125 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5128 return -TARGET_EFAULT
;
5131 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5132 target_urb_adr
= lurb
->target_urb_adr
;
5133 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5134 unlock_user(argptr
, arg
, target_size
);
5141 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5142 uint8_t *buf_temp
__attribute__((unused
)),
5143 int fd
, int cmd
, abi_long arg
)
5145 struct live_urb
*lurb
;
5147 /* map target address back to host URB with metadata. */
5148 lurb
= urb_hashtable_lookup(arg
);
5150 return -TARGET_EFAULT
;
5152 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5156 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5157 int fd
, int cmd
, abi_long arg
)
5159 const argtype
*arg_type
= ie
->arg_type
;
5164 struct live_urb
*lurb
;
5167 * each submitted URB needs to map to a unique ID for the
5168 * kernel, and that unique ID needs to be a pointer to
5169 * host memory. hence, we need to malloc for each URB.
5170 * isochronous transfers have a variable length struct.
5173 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5175 /* construct host copy of urb and metadata */
5176 lurb
= g_try_malloc0(sizeof(struct live_urb
));
5178 return -TARGET_ENOMEM
;
5181 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5184 return -TARGET_EFAULT
;
5186 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5187 unlock_user(argptr
, arg
, 0);
5189 lurb
->target_urb_adr
= arg
;
5190 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5192 /* buffer space used depends on endpoint type so lock the entire buffer */
5193 /* control type urbs should check the buffer contents for true direction */
5194 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5195 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5196 lurb
->host_urb
.buffer_length
, 1);
5197 if (lurb
->target_buf_ptr
== NULL
) {
5199 return -TARGET_EFAULT
;
5202 /* update buffer pointer in host copy */
5203 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5205 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5206 if (is_error(ret
)) {
5207 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5210 urb_hashtable_insert(lurb
);
5215 #endif /* CONFIG_USBFS */
5217 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5218 int cmd
, abi_long arg
)
5221 struct dm_ioctl
*host_dm
;
5222 abi_long guest_data
;
5223 uint32_t guest_data_size
;
5225 const argtype
*arg_type
= ie
->arg_type
;
5227 void *big_buf
= NULL
;
5231 target_size
= thunk_type_size(arg_type
, 0);
5232 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5234 ret
= -TARGET_EFAULT
;
5237 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5238 unlock_user(argptr
, arg
, 0);
5240 /* buf_temp is too small, so fetch things into a bigger buffer */
5241 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5242 memcpy(big_buf
, buf_temp
, target_size
);
5246 guest_data
= arg
+ host_dm
->data_start
;
5247 if ((guest_data
- arg
) < 0) {
5248 ret
= -TARGET_EINVAL
;
5251 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5252 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5254 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5256 ret
= -TARGET_EFAULT
;
5260 switch (ie
->host_cmd
) {
5262 case DM_LIST_DEVICES
:
5265 case DM_DEV_SUSPEND
:
5268 case DM_TABLE_STATUS
:
5269 case DM_TABLE_CLEAR
:
5271 case DM_LIST_VERSIONS
:
5275 case DM_DEV_SET_GEOMETRY
:
5276 /* data contains only strings */
5277 memcpy(host_data
, argptr
, guest_data_size
);
5280 memcpy(host_data
, argptr
, guest_data_size
);
5281 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5285 void *gspec
= argptr
;
5286 void *cur_data
= host_data
;
5287 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5288 int spec_size
= thunk_type_size(arg_type
, 0);
5291 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5292 struct dm_target_spec
*spec
= cur_data
;
5296 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5297 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5299 spec
->next
= sizeof(*spec
) + slen
;
5300 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5302 cur_data
+= spec
->next
;
5307 ret
= -TARGET_EINVAL
;
5308 unlock_user(argptr
, guest_data
, 0);
5311 unlock_user(argptr
, guest_data
, 0);
5313 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5314 if (!is_error(ret
)) {
5315 guest_data
= arg
+ host_dm
->data_start
;
5316 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5317 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5318 switch (ie
->host_cmd
) {
5323 case DM_DEV_SUSPEND
:
5326 case DM_TABLE_CLEAR
:
5328 case DM_DEV_SET_GEOMETRY
:
5329 /* no return data */
5331 case DM_LIST_DEVICES
:
5333 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5334 uint32_t remaining_data
= guest_data_size
;
5335 void *cur_data
= argptr
;
5336 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5337 int nl_size
= 12; /* can't use thunk_size due to alignment */
5340 uint32_t next
= nl
->next
;
5342 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5344 if (remaining_data
< nl
->next
) {
5345 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5348 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5349 strcpy(cur_data
+ nl_size
, nl
->name
);
5350 cur_data
+= nl
->next
;
5351 remaining_data
-= nl
->next
;
5355 nl
= (void*)nl
+ next
;
5360 case DM_TABLE_STATUS
:
5362 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5363 void *cur_data
= argptr
;
5364 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5365 int spec_size
= thunk_type_size(arg_type
, 0);
5368 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5369 uint32_t next
= spec
->next
;
5370 int slen
= strlen((char*)&spec
[1]) + 1;
5371 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5372 if (guest_data_size
< spec
->next
) {
5373 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5376 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5377 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5378 cur_data
= argptr
+ spec
->next
;
5379 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5385 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5386 int count
= *(uint32_t*)hdata
;
5387 uint64_t *hdev
= hdata
+ 8;
5388 uint64_t *gdev
= argptr
+ 8;
5391 *(uint32_t*)argptr
= tswap32(count
);
5392 for (i
= 0; i
< count
; i
++) {
5393 *gdev
= tswap64(*hdev
);
5399 case DM_LIST_VERSIONS
:
5401 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5402 uint32_t remaining_data
= guest_data_size
;
5403 void *cur_data
= argptr
;
5404 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5405 int vers_size
= thunk_type_size(arg_type
, 0);
5408 uint32_t next
= vers
->next
;
5410 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5412 if (remaining_data
< vers
->next
) {
5413 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5416 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5417 strcpy(cur_data
+ vers_size
, vers
->name
);
5418 cur_data
+= vers
->next
;
5419 remaining_data
-= vers
->next
;
5423 vers
= (void*)vers
+ next
;
5428 unlock_user(argptr
, guest_data
, 0);
5429 ret
= -TARGET_EINVAL
;
5432 unlock_user(argptr
, guest_data
, guest_data_size
);
5434 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5436 ret
= -TARGET_EFAULT
;
5439 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5440 unlock_user(argptr
, arg
, target_size
);
5447 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5448 int cmd
, abi_long arg
)
5452 const argtype
*arg_type
= ie
->arg_type
;
5453 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5456 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5457 struct blkpg_partition host_part
;
5459 /* Read and convert blkpg */
5461 target_size
= thunk_type_size(arg_type
, 0);
5462 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5464 ret
= -TARGET_EFAULT
;
5467 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5468 unlock_user(argptr
, arg
, 0);
5470 switch (host_blkpg
->op
) {
5471 case BLKPG_ADD_PARTITION
:
5472 case BLKPG_DEL_PARTITION
:
5473 /* payload is struct blkpg_partition */
5476 /* Unknown opcode */
5477 ret
= -TARGET_EINVAL
;
5481 /* Read and convert blkpg->data */
5482 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5483 target_size
= thunk_type_size(part_arg_type
, 0);
5484 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5486 ret
= -TARGET_EFAULT
;
5489 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5490 unlock_user(argptr
, arg
, 0);
5492 /* Swizzle the data pointer to our local copy and call! */
5493 host_blkpg
->data
= &host_part
;
5494 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5500 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5501 int fd
, int cmd
, abi_long arg
)
5503 const argtype
*arg_type
= ie
->arg_type
;
5504 const StructEntry
*se
;
5505 const argtype
*field_types
;
5506 const int *dst_offsets
, *src_offsets
;
5509 abi_ulong
*target_rt_dev_ptr
= NULL
;
5510 unsigned long *host_rt_dev_ptr
= NULL
;
5514 assert(ie
->access
== IOC_W
);
5515 assert(*arg_type
== TYPE_PTR
);
5517 assert(*arg_type
== TYPE_STRUCT
);
5518 target_size
= thunk_type_size(arg_type
, 0);
5519 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5521 return -TARGET_EFAULT
;
5524 assert(*arg_type
== (int)STRUCT_rtentry
);
5525 se
= struct_entries
+ *arg_type
++;
5526 assert(se
->convert
[0] == NULL
);
5527 /* convert struct here to be able to catch rt_dev string */
5528 field_types
= se
->field_types
;
5529 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5530 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5531 for (i
= 0; i
< se
->nb_fields
; i
++) {
5532 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5533 assert(*field_types
== TYPE_PTRVOID
);
5534 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5535 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5536 if (*target_rt_dev_ptr
!= 0) {
5537 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5538 tswapal(*target_rt_dev_ptr
));
5539 if (!*host_rt_dev_ptr
) {
5540 unlock_user(argptr
, arg
, 0);
5541 return -TARGET_EFAULT
;
5544 *host_rt_dev_ptr
= 0;
5549 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5550 argptr
+ src_offsets
[i
],
5551 field_types
, THUNK_HOST
);
5553 unlock_user(argptr
, arg
, 0);
5555 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5557 assert(host_rt_dev_ptr
!= NULL
);
5558 assert(target_rt_dev_ptr
!= NULL
);
5559 if (*host_rt_dev_ptr
!= 0) {
5560 unlock_user((void *)*host_rt_dev_ptr
,
5561 *target_rt_dev_ptr
, 0);
5566 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5567 int fd
, int cmd
, abi_long arg
)
5569 int sig
= target_to_host_signal(arg
);
5570 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5573 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5574 int fd
, int cmd
, abi_long arg
)
5579 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5580 if (is_error(ret
)) {
5584 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5585 if (copy_to_user_timeval(arg
, &tv
)) {
5586 return -TARGET_EFAULT
;
5589 if (copy_to_user_timeval64(arg
, &tv
)) {
5590 return -TARGET_EFAULT
;
5597 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5598 int fd
, int cmd
, abi_long arg
)
5603 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5604 if (is_error(ret
)) {
5608 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5609 if (host_to_target_timespec(arg
, &ts
)) {
5610 return -TARGET_EFAULT
;
5613 if (host_to_target_timespec64(arg
, &ts
)) {
5614 return -TARGET_EFAULT
;
5622 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5623 int fd
, int cmd
, abi_long arg
)
5625 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5626 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5632 static void unlock_drm_version(struct drm_version
*host_ver
,
5633 struct target_drm_version
*target_ver
,
5636 unlock_user(host_ver
->name
, target_ver
->name
,
5637 copy
? host_ver
->name_len
: 0);
5638 unlock_user(host_ver
->date
, target_ver
->date
,
5639 copy
? host_ver
->date_len
: 0);
5640 unlock_user(host_ver
->desc
, target_ver
->desc
,
5641 copy
? host_ver
->desc_len
: 0);
5644 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5645 struct target_drm_version
*target_ver
)
5647 memset(host_ver
, 0, sizeof(*host_ver
));
5649 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5650 if (host_ver
->name_len
) {
5651 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5652 target_ver
->name_len
, 0);
5653 if (!host_ver
->name
) {
5658 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5659 if (host_ver
->date_len
) {
5660 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5661 target_ver
->date_len
, 0);
5662 if (!host_ver
->date
) {
5667 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5668 if (host_ver
->desc_len
) {
5669 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5670 target_ver
->desc_len
, 0);
5671 if (!host_ver
->desc
) {
5678 unlock_drm_version(host_ver
, target_ver
, false);
5682 static inline void host_to_target_drmversion(
5683 struct target_drm_version
*target_ver
,
5684 struct drm_version
*host_ver
)
5686 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5687 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5688 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5689 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5690 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5691 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5692 unlock_drm_version(host_ver
, target_ver
, true);
5695 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5696 int fd
, int cmd
, abi_long arg
)
5698 struct drm_version
*ver
;
5699 struct target_drm_version
*target_ver
;
5702 switch (ie
->host_cmd
) {
5703 case DRM_IOCTL_VERSION
:
5704 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5705 return -TARGET_EFAULT
;
5707 ver
= (struct drm_version
*)buf_temp
;
5708 ret
= target_to_host_drmversion(ver
, target_ver
);
5709 if (!is_error(ret
)) {
5710 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5711 if (is_error(ret
)) {
5712 unlock_drm_version(ver
, target_ver
, false);
5714 host_to_target_drmversion(target_ver
, ver
);
5717 unlock_user_struct(target_ver
, arg
, 0);
5720 return -TARGET_ENOSYS
;
5723 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5724 struct drm_i915_getparam
*gparam
,
5725 int fd
, abi_long arg
)
5729 struct target_drm_i915_getparam
*target_gparam
;
5731 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5732 return -TARGET_EFAULT
;
5735 __get_user(gparam
->param
, &target_gparam
->param
);
5736 gparam
->value
= &value
;
5737 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5738 put_user_s32(value
, target_gparam
->value
);
5740 unlock_user_struct(target_gparam
, arg
, 0);
5744 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5745 int fd
, int cmd
, abi_long arg
)
5747 switch (ie
->host_cmd
) {
5748 case DRM_IOCTL_I915_GETPARAM
:
5749 return do_ioctl_drm_i915_getparam(ie
,
5750 (struct drm_i915_getparam
*)buf_temp
,
5753 return -TARGET_ENOSYS
;
5759 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5760 int fd
, int cmd
, abi_long arg
)
5762 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5763 struct tun_filter
*target_filter
;
5766 assert(ie
->access
== IOC_W
);
5768 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5769 if (!target_filter
) {
5770 return -TARGET_EFAULT
;
5772 filter
->flags
= tswap16(target_filter
->flags
);
5773 filter
->count
= tswap16(target_filter
->count
);
5774 unlock_user(target_filter
, arg
, 0);
5776 if (filter
->count
) {
5777 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5779 return -TARGET_EFAULT
;
5782 target_addr
= lock_user(VERIFY_READ
,
5783 arg
+ offsetof(struct tun_filter
, addr
),
5784 filter
->count
* ETH_ALEN
, 1);
5786 return -TARGET_EFAULT
;
5788 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5789 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5792 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5795 IOCTLEntry ioctl_entries
[] = {
5796 #define IOCTL(cmd, access, ...) \
5797 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5798 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5799 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5800 #define IOCTL_IGNORE(cmd) \
5801 { TARGET_ ## cmd, 0, #cmd },
5806 /* ??? Implement proper locking for ioctls. */
5807 /* do_ioctl() Must return target values and target errnos. */
5808 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5810 const IOCTLEntry
*ie
;
5811 const argtype
*arg_type
;
5813 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5819 if (ie
->target_cmd
== 0) {
5821 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5822 return -TARGET_ENOSYS
;
5824 if (ie
->target_cmd
== cmd
)
5828 arg_type
= ie
->arg_type
;
5830 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5831 } else if (!ie
->host_cmd
) {
5832 /* Some architectures define BSD ioctls in their headers
5833 that are not implemented in Linux. */
5834 return -TARGET_ENOSYS
;
5837 switch(arg_type
[0]) {
5840 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5846 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5850 target_size
= thunk_type_size(arg_type
, 0);
5851 switch(ie
->access
) {
5853 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5854 if (!is_error(ret
)) {
5855 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5857 return -TARGET_EFAULT
;
5858 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5859 unlock_user(argptr
, arg
, target_size
);
5863 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5865 return -TARGET_EFAULT
;
5866 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5867 unlock_user(argptr
, arg
, 0);
5868 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5872 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5874 return -TARGET_EFAULT
;
5875 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5876 unlock_user(argptr
, arg
, 0);
5877 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5878 if (!is_error(ret
)) {
5879 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5881 return -TARGET_EFAULT
;
5882 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5883 unlock_user(argptr
, arg
, target_size
);
5889 qemu_log_mask(LOG_UNIMP
,
5890 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5891 (long)cmd
, arg_type
[0]);
5892 ret
= -TARGET_ENOSYS
;
5898 static const bitmask_transtbl iflag_tbl
[] = {
5899 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5900 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5901 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5902 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5903 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5904 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5905 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5906 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5907 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5908 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5909 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5910 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5911 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5912 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5913 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5917 static const bitmask_transtbl oflag_tbl
[] = {
5918 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5919 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5920 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5921 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5922 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5923 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5924 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5925 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5926 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5927 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5928 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5929 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5930 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5931 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5932 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5933 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5934 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5935 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5936 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5937 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5938 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5939 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5940 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5941 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5945 static const bitmask_transtbl cflag_tbl
[] = {
5946 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5947 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5948 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5949 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5950 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5951 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5952 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5953 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5954 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5955 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5956 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5957 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5958 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5959 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5960 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5961 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5962 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5963 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5964 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5965 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5966 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5967 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5968 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5969 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5970 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5971 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5972 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5973 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5974 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5975 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5976 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5980 static const bitmask_transtbl lflag_tbl
[] = {
5981 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5982 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5983 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5984 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5985 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5986 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5987 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5988 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5989 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5990 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5991 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5992 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5993 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5994 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5995 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5996 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
6000 static void target_to_host_termios (void *dst
, const void *src
)
6002 struct host_termios
*host
= dst
;
6003 const struct target_termios
*target
= src
;
6006 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
6008 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
6010 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
6012 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
6013 host
->c_line
= target
->c_line
;
6015 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
6016 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
6017 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
6018 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
6019 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
6020 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
6021 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
6022 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
6023 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
6024 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
6025 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
6026 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
6027 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
6028 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
6029 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
6030 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
6031 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
6032 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
6035 static void host_to_target_termios (void *dst
, const void *src
)
6037 struct target_termios
*target
= dst
;
6038 const struct host_termios
*host
= src
;
6041 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
6043 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
6045 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
6047 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
6048 target
->c_line
= host
->c_line
;
6050 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
6051 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
6052 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
6053 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
6054 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
6055 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
6056 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
6057 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
6058 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
6059 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
6060 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6061 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6062 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6063 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6064 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6065 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6066 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6067 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6070 static const StructEntry struct_termios_def
= {
6071 .convert
= { host_to_target_termios
, target_to_host_termios
},
6072 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6073 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6074 .print
= print_termios
,
6077 static const bitmask_transtbl mmap_flags_tbl
[] = {
6078 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6079 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6080 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6081 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6082 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6083 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6084 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6085 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6086 MAP_DENYWRITE
, MAP_DENYWRITE
},
6087 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6088 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6089 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6090 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6091 MAP_NORESERVE
, MAP_NORESERVE
},
6092 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6093 /* MAP_STACK had been ignored by the kernel for quite some time.
6094 Recognize it for the target insofar as we do not want to pass
6095 it through to the host. */
6096 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6101 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6102 * TARGET_I386 is defined if TARGET_X86_64 is defined
6104 #if defined(TARGET_I386)
6106 /* NOTE: there is really one LDT for all the threads */
6107 static uint8_t *ldt_table
;
6109 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6116 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6117 if (size
> bytecount
)
6119 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6121 return -TARGET_EFAULT
;
6122 /* ??? Should this by byteswapped? */
6123 memcpy(p
, ldt_table
, size
);
6124 unlock_user(p
, ptr
, size
);
6128 /* XXX: add locking support */
6129 static abi_long
write_ldt(CPUX86State
*env
,
6130 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6132 struct target_modify_ldt_ldt_s ldt_info
;
6133 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6134 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6135 int seg_not_present
, useable
, lm
;
6136 uint32_t *lp
, entry_1
, entry_2
;
6138 if (bytecount
!= sizeof(ldt_info
))
6139 return -TARGET_EINVAL
;
6140 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6141 return -TARGET_EFAULT
;
6142 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6143 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6144 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6145 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6146 unlock_user_struct(target_ldt_info
, ptr
, 0);
6148 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6149 return -TARGET_EINVAL
;
6150 seg_32bit
= ldt_info
.flags
& 1;
6151 contents
= (ldt_info
.flags
>> 1) & 3;
6152 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6153 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6154 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6155 useable
= (ldt_info
.flags
>> 6) & 1;
6159 lm
= (ldt_info
.flags
>> 7) & 1;
6161 if (contents
== 3) {
6163 return -TARGET_EINVAL
;
6164 if (seg_not_present
== 0)
6165 return -TARGET_EINVAL
;
6167 /* allocate the LDT */
6169 env
->ldt
.base
= target_mmap(0,
6170 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6171 PROT_READ
|PROT_WRITE
,
6172 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6173 if (env
->ldt
.base
== -1)
6174 return -TARGET_ENOMEM
;
6175 memset(g2h_untagged(env
->ldt
.base
), 0,
6176 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6177 env
->ldt
.limit
= 0xffff;
6178 ldt_table
= g2h_untagged(env
->ldt
.base
);
6181 /* NOTE: same code as Linux kernel */
6182 /* Allow LDTs to be cleared by the user. */
6183 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6186 read_exec_only
== 1 &&
6188 limit_in_pages
== 0 &&
6189 seg_not_present
== 1 &&
6197 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6198 (ldt_info
.limit
& 0x0ffff);
6199 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6200 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6201 (ldt_info
.limit
& 0xf0000) |
6202 ((read_exec_only
^ 1) << 9) |
6204 ((seg_not_present
^ 1) << 15) |
6206 (limit_in_pages
<< 23) |
6210 entry_2
|= (useable
<< 20);
6212 /* Install the new entry ... */
6214 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6215 lp
[0] = tswap32(entry_1
);
6216 lp
[1] = tswap32(entry_2
);
6220 /* specific and weird i386 syscalls */
6221 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6222 unsigned long bytecount
)
6228 ret
= read_ldt(ptr
, bytecount
);
6231 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6234 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6237 ret
= -TARGET_ENOSYS
;
6243 #if defined(TARGET_ABI32)
6244 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6246 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6247 struct target_modify_ldt_ldt_s ldt_info
;
6248 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6249 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6250 int seg_not_present
, useable
, lm
;
6251 uint32_t *lp
, entry_1
, entry_2
;
6254 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6255 if (!target_ldt_info
)
6256 return -TARGET_EFAULT
;
6257 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6258 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6259 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6260 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6261 if (ldt_info
.entry_number
== -1) {
6262 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6263 if (gdt_table
[i
] == 0) {
6264 ldt_info
.entry_number
= i
;
6265 target_ldt_info
->entry_number
= tswap32(i
);
6270 unlock_user_struct(target_ldt_info
, ptr
, 1);
6272 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6273 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6274 return -TARGET_EINVAL
;
6275 seg_32bit
= ldt_info
.flags
& 1;
6276 contents
= (ldt_info
.flags
>> 1) & 3;
6277 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6278 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6279 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6280 useable
= (ldt_info
.flags
>> 6) & 1;
6284 lm
= (ldt_info
.flags
>> 7) & 1;
6287 if (contents
== 3) {
6288 if (seg_not_present
== 0)
6289 return -TARGET_EINVAL
;
6292 /* NOTE: same code as Linux kernel */
6293 /* Allow LDTs to be cleared by the user. */
6294 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6295 if ((contents
== 0 &&
6296 read_exec_only
== 1 &&
6298 limit_in_pages
== 0 &&
6299 seg_not_present
== 1 &&
6307 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6308 (ldt_info
.limit
& 0x0ffff);
6309 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6310 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6311 (ldt_info
.limit
& 0xf0000) |
6312 ((read_exec_only
^ 1) << 9) |
6314 ((seg_not_present
^ 1) << 15) |
6316 (limit_in_pages
<< 23) |
6321 /* Install the new entry ... */
6323 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6324 lp
[0] = tswap32(entry_1
);
6325 lp
[1] = tswap32(entry_2
);
6329 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6331 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6332 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6333 uint32_t base_addr
, limit
, flags
;
6334 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6335 int seg_not_present
, useable
, lm
;
6336 uint32_t *lp
, entry_1
, entry_2
;
6338 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6339 if (!target_ldt_info
)
6340 return -TARGET_EFAULT
;
6341 idx
= tswap32(target_ldt_info
->entry_number
);
6342 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6343 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6344 unlock_user_struct(target_ldt_info
, ptr
, 1);
6345 return -TARGET_EINVAL
;
6347 lp
= (uint32_t *)(gdt_table
+ idx
);
6348 entry_1
= tswap32(lp
[0]);
6349 entry_2
= tswap32(lp
[1]);
6351 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6352 contents
= (entry_2
>> 10) & 3;
6353 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6354 seg_32bit
= (entry_2
>> 22) & 1;
6355 limit_in_pages
= (entry_2
>> 23) & 1;
6356 useable
= (entry_2
>> 20) & 1;
6360 lm
= (entry_2
>> 21) & 1;
6362 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6363 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6364 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6365 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6366 base_addr
= (entry_1
>> 16) |
6367 (entry_2
& 0xff000000) |
6368 ((entry_2
& 0xff) << 16);
6369 target_ldt_info
->base_addr
= tswapal(base_addr
);
6370 target_ldt_info
->limit
= tswap32(limit
);
6371 target_ldt_info
->flags
= tswap32(flags
);
6372 unlock_user_struct(target_ldt_info
, ptr
, 1);
6376 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6378 return -TARGET_ENOSYS
;
6381 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6388 case TARGET_ARCH_SET_GS
:
6389 case TARGET_ARCH_SET_FS
:
6390 if (code
== TARGET_ARCH_SET_GS
)
6394 cpu_x86_load_seg(env
, idx
, 0);
6395 env
->segs
[idx
].base
= addr
;
6397 case TARGET_ARCH_GET_GS
:
6398 case TARGET_ARCH_GET_FS
:
6399 if (code
== TARGET_ARCH_GET_GS
)
6403 val
= env
->segs
[idx
].base
;
6404 if (put_user(val
, addr
, abi_ulong
))
6405 ret
= -TARGET_EFAULT
;
6408 ret
= -TARGET_EINVAL
;
6413 #endif /* defined(TARGET_ABI32 */
6415 #endif /* defined(TARGET_I386) */
6417 #define NEW_STACK_SIZE 0x40000
6420 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6423 pthread_mutex_t mutex
;
6424 pthread_cond_t cond
;
6427 abi_ulong child_tidptr
;
6428 abi_ulong parent_tidptr
;
6432 static void *clone_func(void *arg
)
6434 new_thread_info
*info
= arg
;
6439 rcu_register_thread();
6440 tcg_register_thread();
6444 ts
= (TaskState
*)cpu
->opaque
;
6445 info
->tid
= sys_gettid();
6447 if (info
->child_tidptr
)
6448 put_user_u32(info
->tid
, info
->child_tidptr
);
6449 if (info
->parent_tidptr
)
6450 put_user_u32(info
->tid
, info
->parent_tidptr
);
6451 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6452 /* Enable signals. */
6453 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6454 /* Signal to the parent that we're ready. */
6455 pthread_mutex_lock(&info
->mutex
);
6456 pthread_cond_broadcast(&info
->cond
);
6457 pthread_mutex_unlock(&info
->mutex
);
6458 /* Wait until the parent has finished initializing the tls state. */
6459 pthread_mutex_lock(&clone_lock
);
6460 pthread_mutex_unlock(&clone_lock
);
6466 /* do_fork() Must return host values and target errnos (unlike most
6467 do_*() functions). */
6468 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6469 abi_ulong parent_tidptr
, target_ulong newtls
,
6470 abi_ulong child_tidptr
)
6472 CPUState
*cpu
= env_cpu(env
);
6476 CPUArchState
*new_env
;
6479 flags
&= ~CLONE_IGNORED_FLAGS
;
6481 /* Emulate vfork() with fork() */
6482 if (flags
& CLONE_VFORK
)
6483 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6485 if (flags
& CLONE_VM
) {
6486 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6487 new_thread_info info
;
6488 pthread_attr_t attr
;
6490 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6491 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6492 return -TARGET_EINVAL
;
6495 ts
= g_new0(TaskState
, 1);
6496 init_task_state(ts
);
6498 /* Grab a mutex so that thread setup appears atomic. */
6499 pthread_mutex_lock(&clone_lock
);
6502 * If this is our first additional thread, we need to ensure we
6503 * generate code for parallel execution and flush old translations.
6504 * Do this now so that the copy gets CF_PARALLEL too.
6506 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6507 cpu
->tcg_cflags
|= CF_PARALLEL
;
6511 /* we create a new CPU instance. */
6512 new_env
= cpu_copy(env
);
6513 /* Init regs that differ from the parent. */
6514 cpu_clone_regs_child(new_env
, newsp
, flags
);
6515 cpu_clone_regs_parent(env
, flags
);
6516 new_cpu
= env_cpu(new_env
);
6517 new_cpu
->opaque
= ts
;
6518 ts
->bprm
= parent_ts
->bprm
;
6519 ts
->info
= parent_ts
->info
;
6520 ts
->signal_mask
= parent_ts
->signal_mask
;
6522 if (flags
& CLONE_CHILD_CLEARTID
) {
6523 ts
->child_tidptr
= child_tidptr
;
6526 if (flags
& CLONE_SETTLS
) {
6527 cpu_set_tls (new_env
, newtls
);
6530 memset(&info
, 0, sizeof(info
));
6531 pthread_mutex_init(&info
.mutex
, NULL
);
6532 pthread_mutex_lock(&info
.mutex
);
6533 pthread_cond_init(&info
.cond
, NULL
);
6535 if (flags
& CLONE_CHILD_SETTID
) {
6536 info
.child_tidptr
= child_tidptr
;
6538 if (flags
& CLONE_PARENT_SETTID
) {
6539 info
.parent_tidptr
= parent_tidptr
;
6542 ret
= pthread_attr_init(&attr
);
6543 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6544 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6545 /* It is not safe to deliver signals until the child has finished
6546 initializing, so temporarily block all signals. */
6547 sigfillset(&sigmask
);
6548 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6549 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6551 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6552 /* TODO: Free new CPU state if thread creation failed. */
6554 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6555 pthread_attr_destroy(&attr
);
6557 /* Wait for the child to initialize. */
6558 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6563 pthread_mutex_unlock(&info
.mutex
);
6564 pthread_cond_destroy(&info
.cond
);
6565 pthread_mutex_destroy(&info
.mutex
);
6566 pthread_mutex_unlock(&clone_lock
);
6568 /* if no CLONE_VM, we consider it is a fork */
6569 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6570 return -TARGET_EINVAL
;
6573 /* We can't support custom termination signals */
6574 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6575 return -TARGET_EINVAL
;
6578 if (block_signals()) {
6579 return -TARGET_ERESTARTSYS
;
6585 /* Child Process. */
6586 cpu_clone_regs_child(env
, newsp
, flags
);
6588 /* There is a race condition here. The parent process could
6589 theoretically read the TID in the child process before the child
6590 tid is set. This would require using either ptrace
6591 (not implemented) or having *_tidptr to point at a shared memory
6592 mapping. We can't repeat the spinlock hack used above because
6593 the child process gets its own copy of the lock. */
6594 if (flags
& CLONE_CHILD_SETTID
)
6595 put_user_u32(sys_gettid(), child_tidptr
);
6596 if (flags
& CLONE_PARENT_SETTID
)
6597 put_user_u32(sys_gettid(), parent_tidptr
);
6598 ts
= (TaskState
*)cpu
->opaque
;
6599 if (flags
& CLONE_SETTLS
)
6600 cpu_set_tls (env
, newtls
);
6601 if (flags
& CLONE_CHILD_CLEARTID
)
6602 ts
->child_tidptr
= child_tidptr
;
6604 cpu_clone_regs_parent(env
, flags
);
6611 /* warning : doesn't handle linux specific flags... */
6612 static int target_to_host_fcntl_cmd(int cmd
)
6617 case TARGET_F_DUPFD
:
6618 case TARGET_F_GETFD
:
6619 case TARGET_F_SETFD
:
6620 case TARGET_F_GETFL
:
6621 case TARGET_F_SETFL
:
6622 case TARGET_F_OFD_GETLK
:
6623 case TARGET_F_OFD_SETLK
:
6624 case TARGET_F_OFD_SETLKW
:
6627 case TARGET_F_GETLK
:
6630 case TARGET_F_SETLK
:
6633 case TARGET_F_SETLKW
:
6636 case TARGET_F_GETOWN
:
6639 case TARGET_F_SETOWN
:
6642 case TARGET_F_GETSIG
:
6645 case TARGET_F_SETSIG
:
6648 #if TARGET_ABI_BITS == 32
6649 case TARGET_F_GETLK64
:
6652 case TARGET_F_SETLK64
:
6655 case TARGET_F_SETLKW64
:
6659 case TARGET_F_SETLEASE
:
6662 case TARGET_F_GETLEASE
:
6665 #ifdef F_DUPFD_CLOEXEC
6666 case TARGET_F_DUPFD_CLOEXEC
:
6667 ret
= F_DUPFD_CLOEXEC
;
6670 case TARGET_F_NOTIFY
:
6674 case TARGET_F_GETOWN_EX
:
6679 case TARGET_F_SETOWN_EX
:
6684 case TARGET_F_SETPIPE_SZ
:
6687 case TARGET_F_GETPIPE_SZ
:
6692 case TARGET_F_ADD_SEALS
:
6695 case TARGET_F_GET_SEALS
:
6700 ret
= -TARGET_EINVAL
;
6704 #if defined(__powerpc64__)
6705 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6706 * is not supported by kernel. The glibc fcntl call actually adjusts
6707 * them to 5, 6 and 7 before making the syscall(). Since we make the
6708 * syscall directly, adjust to what is supported by the kernel.
6710 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6711 ret
-= F_GETLK64
- 5;
6718 #define FLOCK_TRANSTBL \
6720 TRANSTBL_CONVERT(F_RDLCK); \
6721 TRANSTBL_CONVERT(F_WRLCK); \
6722 TRANSTBL_CONVERT(F_UNLCK); \
6725 static int target_to_host_flock(int type
)
6727 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6729 #undef TRANSTBL_CONVERT
6730 return -TARGET_EINVAL
;
6733 static int host_to_target_flock(int type
)
6735 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6737 #undef TRANSTBL_CONVERT
6738 /* if we don't know how to convert the value coming
6739 * from the host we copy to the target field as-is
6744 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6745 abi_ulong target_flock_addr
)
6747 struct target_flock
*target_fl
;
6750 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6751 return -TARGET_EFAULT
;
6754 __get_user(l_type
, &target_fl
->l_type
);
6755 l_type
= target_to_host_flock(l_type
);
6759 fl
->l_type
= l_type
;
6760 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6761 __get_user(fl
->l_start
, &target_fl
->l_start
);
6762 __get_user(fl
->l_len
, &target_fl
->l_len
);
6763 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6764 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6768 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6769 const struct flock64
*fl
)
6771 struct target_flock
*target_fl
;
6774 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6775 return -TARGET_EFAULT
;
6778 l_type
= host_to_target_flock(fl
->l_type
);
6779 __put_user(l_type
, &target_fl
->l_type
);
6780 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6781 __put_user(fl
->l_start
, &target_fl
->l_start
);
6782 __put_user(fl
->l_len
, &target_fl
->l_len
);
6783 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6784 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6788 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6789 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6791 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6792 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6793 abi_ulong target_flock_addr
)
6795 struct target_oabi_flock64
*target_fl
;
6798 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6799 return -TARGET_EFAULT
;
6802 __get_user(l_type
, &target_fl
->l_type
);
6803 l_type
= target_to_host_flock(l_type
);
6807 fl
->l_type
= l_type
;
6808 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6809 __get_user(fl
->l_start
, &target_fl
->l_start
);
6810 __get_user(fl
->l_len
, &target_fl
->l_len
);
6811 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6812 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6816 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6817 const struct flock64
*fl
)
6819 struct target_oabi_flock64
*target_fl
;
6822 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6823 return -TARGET_EFAULT
;
6826 l_type
= host_to_target_flock(fl
->l_type
);
6827 __put_user(l_type
, &target_fl
->l_type
);
6828 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6829 __put_user(fl
->l_start
, &target_fl
->l_start
);
6830 __put_user(fl
->l_len
, &target_fl
->l_len
);
6831 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6832 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6837 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6838 abi_ulong target_flock_addr
)
6840 struct target_flock64
*target_fl
;
6843 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6844 return -TARGET_EFAULT
;
6847 __get_user(l_type
, &target_fl
->l_type
);
6848 l_type
= target_to_host_flock(l_type
);
6852 fl
->l_type
= l_type
;
6853 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6854 __get_user(fl
->l_start
, &target_fl
->l_start
);
6855 __get_user(fl
->l_len
, &target_fl
->l_len
);
6856 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6857 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6861 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6862 const struct flock64
*fl
)
6864 struct target_flock64
*target_fl
;
6867 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6868 return -TARGET_EFAULT
;
6871 l_type
= host_to_target_flock(fl
->l_type
);
6872 __put_user(l_type
, &target_fl
->l_type
);
6873 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6874 __put_user(fl
->l_start
, &target_fl
->l_start
);
6875 __put_user(fl
->l_len
, &target_fl
->l_len
);
6876 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6877 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6881 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6883 struct flock64 fl64
;
6885 struct f_owner_ex fox
;
6886 struct target_f_owner_ex
*target_fox
;
6889 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6891 if (host_cmd
== -TARGET_EINVAL
)
6895 case TARGET_F_GETLK
:
6896 ret
= copy_from_user_flock(&fl64
, arg
);
6900 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6902 ret
= copy_to_user_flock(arg
, &fl64
);
6906 case TARGET_F_SETLK
:
6907 case TARGET_F_SETLKW
:
6908 ret
= copy_from_user_flock(&fl64
, arg
);
6912 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6915 case TARGET_F_GETLK64
:
6916 case TARGET_F_OFD_GETLK
:
6917 ret
= copy_from_user_flock64(&fl64
, arg
);
6921 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6923 ret
= copy_to_user_flock64(arg
, &fl64
);
6926 case TARGET_F_SETLK64
:
6927 case TARGET_F_SETLKW64
:
6928 case TARGET_F_OFD_SETLK
:
6929 case TARGET_F_OFD_SETLKW
:
6930 ret
= copy_from_user_flock64(&fl64
, arg
);
6934 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6937 case TARGET_F_GETFL
:
6938 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6940 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6944 case TARGET_F_SETFL
:
6945 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6946 target_to_host_bitmask(arg
,
6951 case TARGET_F_GETOWN_EX
:
6952 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6954 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6955 return -TARGET_EFAULT
;
6956 target_fox
->type
= tswap32(fox
.type
);
6957 target_fox
->pid
= tswap32(fox
.pid
);
6958 unlock_user_struct(target_fox
, arg
, 1);
6964 case TARGET_F_SETOWN_EX
:
6965 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6966 return -TARGET_EFAULT
;
6967 fox
.type
= tswap32(target_fox
->type
);
6968 fox
.pid
= tswap32(target_fox
->pid
);
6969 unlock_user_struct(target_fox
, arg
, 0);
6970 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6974 case TARGET_F_SETSIG
:
6975 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
6978 case TARGET_F_GETSIG
:
6979 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
6982 case TARGET_F_SETOWN
:
6983 case TARGET_F_GETOWN
:
6984 case TARGET_F_SETLEASE
:
6985 case TARGET_F_GETLEASE
:
6986 case TARGET_F_SETPIPE_SZ
:
6987 case TARGET_F_GETPIPE_SZ
:
6988 case TARGET_F_ADD_SEALS
:
6989 case TARGET_F_GET_SEALS
:
6990 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6994 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
/* NOTE(review): the opening '#ifdef USE_UID16' was dropped by the
   extraction (its matching #else/#endif are visible below); restored. */
#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit ABI range (overflow -> 65534). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the -1 "unchanged" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit UID ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
7066 /* We must do direct syscalls for setting UID/GID, because we want to
7067 * implement the Linux system call semantics of "change only for this thread",
7068 * not the libc/POSIX semantics of "change for all threads in process".
7069 * (See http://ewontfix.com/17/ for more details.)
7070 * We use the 32-bit version of the syscalls if present; if it is not
7071 * then either the host architecture supports 32-bit UIDs natively with
7072 * the standard syscall, or the 16-bit UID is the best we can do.
7074 #ifdef __NR_setuid32
7075 #define __NR_sys_setuid __NR_setuid32
7077 #define __NR_sys_setuid __NR_setuid
7079 #ifdef __NR_setgid32
7080 #define __NR_sys_setgid __NR_setgid32
7082 #define __NR_sys_setgid __NR_setgid
7084 #ifdef __NR_setresuid32
7085 #define __NR_sys_setresuid __NR_setresuid32
7087 #define __NR_sys_setresuid __NR_setresuid
7089 #ifdef __NR_setresgid32
7090 #define __NR_sys_setresgid __NR_setresgid32
7092 #define __NR_sys_setresgid __NR_setresgid
7095 _syscall1(int, sys_setuid
, uid_t
, uid
)
7096 _syscall1(int, sys_setgid
, gid_t
, gid
)
7097 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7098 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7100 void syscall_init(void)
7103 const argtype
*arg_type
;
7107 thunk_init(STRUCT_MAX
);
7109 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7110 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7111 #include "syscall_types.h"
7113 #undef STRUCT_SPECIAL
7115 /* Build target_to_host_errno_table[] table from
7116 * host_to_target_errno_table[]. */
7117 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
7118 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
7121 /* we patch the ioctl size if necessary. We rely on the fact that
7122 no ioctl has all the bits at '1' in the size field */
7124 while (ie
->target_cmd
!= 0) {
7125 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7126 TARGET_IOC_SIZEMASK
) {
7127 arg_type
= ie
->arg_type
;
7128 if (arg_type
[0] != TYPE_PTR
) {
7129 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7134 size
= thunk_type_size(arg_type
, 0);
7135 ie
->target_cmd
= (ie
->target_cmd
&
7136 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7137 (size
<< TARGET_IOC_SIZESHIFT
);
7140 /* automatic consistency check if same arch */
7141 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7142 (defined(__x86_64__) && defined(TARGET_X86_64))
7143 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7144 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7145 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#ifdef TARGET_NR_truncate64
/*
 * truncate64: reassemble the 64-bit offset from the arg2/arg3 register
 * pair, skipping the alignment padding register on ABIs that need it.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair realignment as target_truncate64(),
 * but operating on a file descriptor.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Convert a guest struct itimerspec (32-bit timespecs) to host form. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* Convert a guest struct __kernel_itimerspec (64-bit timespecs) to host. */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/* Convert a host struct itimerspec to guest form (32-bit timespecs). */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/* Convert a host struct itimerspec to guest form (64-bit timespecs). */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
7254 #if defined(TARGET_NR_adjtimex) || \
7255 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7256 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7257 abi_long target_addr
)
7259 struct target_timex
*target_tx
;
7261 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7262 return -TARGET_EFAULT
;
7265 __get_user(host_tx
->modes
, &target_tx
->modes
);
7266 __get_user(host_tx
->offset
, &target_tx
->offset
);
7267 __get_user(host_tx
->freq
, &target_tx
->freq
);
7268 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7269 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7270 __get_user(host_tx
->status
, &target_tx
->status
);
7271 __get_user(host_tx
->constant
, &target_tx
->constant
);
7272 __get_user(host_tx
->precision
, &target_tx
->precision
);
7273 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7274 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7275 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7276 __get_user(host_tx
->tick
, &target_tx
->tick
);
7277 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7278 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7279 __get_user(host_tx
->shift
, &target_tx
->shift
);
7280 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7281 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7282 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7283 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7284 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7285 __get_user(host_tx
->tai
, &target_tx
->tai
);
7287 unlock_user_struct(target_tx
, target_addr
, 0);
7291 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7292 struct timex
*host_tx
)
7294 struct target_timex
*target_tx
;
7296 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7297 return -TARGET_EFAULT
;
7300 __put_user(host_tx
->modes
, &target_tx
->modes
);
7301 __put_user(host_tx
->offset
, &target_tx
->offset
);
7302 __put_user(host_tx
->freq
, &target_tx
->freq
);
7303 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7304 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7305 __put_user(host_tx
->status
, &target_tx
->status
);
7306 __put_user(host_tx
->constant
, &target_tx
->constant
);
7307 __put_user(host_tx
->precision
, &target_tx
->precision
);
7308 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7309 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7310 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7311 __put_user(host_tx
->tick
, &target_tx
->tick
);
7312 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7313 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7314 __put_user(host_tx
->shift
, &target_tx
->shift
);
7315 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7316 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7317 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7318 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7319 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7320 __put_user(host_tx
->tai
, &target_tx
->tai
);
7322 unlock_user_struct(target_tx
, target_addr
, 1);
7328 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7329 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7330 abi_long target_addr
)
7332 struct target__kernel_timex
*target_tx
;
7334 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7335 offsetof(struct target__kernel_timex
,
7337 return -TARGET_EFAULT
;
7340 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7341 return -TARGET_EFAULT
;
7344 __get_user(host_tx
->modes
, &target_tx
->modes
);
7345 __get_user(host_tx
->offset
, &target_tx
->offset
);
7346 __get_user(host_tx
->freq
, &target_tx
->freq
);
7347 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7348 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7349 __get_user(host_tx
->status
, &target_tx
->status
);
7350 __get_user(host_tx
->constant
, &target_tx
->constant
);
7351 __get_user(host_tx
->precision
, &target_tx
->precision
);
7352 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7353 __get_user(host_tx
->tick
, &target_tx
->tick
);
7354 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7355 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7356 __get_user(host_tx
->shift
, &target_tx
->shift
);
7357 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7358 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7359 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7360 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7361 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7362 __get_user(host_tx
->tai
, &target_tx
->tai
);
7364 unlock_user_struct(target_tx
, target_addr
, 0);
7368 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7369 struct timex
*host_tx
)
7371 struct target__kernel_timex
*target_tx
;
7373 if (copy_to_user_timeval64(target_addr
+
7374 offsetof(struct target__kernel_timex
, time
),
7376 return -TARGET_EFAULT
;
7379 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7380 return -TARGET_EFAULT
;
7383 __put_user(host_tx
->modes
, &target_tx
->modes
);
7384 __put_user(host_tx
->offset
, &target_tx
->offset
);
7385 __put_user(host_tx
->freq
, &target_tx
->freq
);
7386 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7387 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7388 __put_user(host_tx
->status
, &target_tx
->status
);
7389 __put_user(host_tx
->constant
, &target_tx
->constant
);
7390 __put_user(host_tx
->precision
, &target_tx
->precision
);
7391 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7392 __put_user(host_tx
->tick
, &target_tx
->tick
);
7393 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7394 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7395 __put_user(host_tx
->shift
, &target_tx
->shift
);
7396 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7397 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7398 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7399 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7400 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7401 __put_user(host_tx
->tai
, &target_tx
->tai
);
7403 unlock_user_struct(target_tx
, target_addr
, 1);
7408 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7409 #define sigev_notify_thread_id _sigev_un._tid
7412 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7413 abi_ulong target_addr
)
7415 struct target_sigevent
*target_sevp
;
7417 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7418 return -TARGET_EFAULT
;
7421 /* This union is awkward on 64 bit systems because it has a 32 bit
7422 * integer and a pointer in it; we follow the conversion approach
7423 * used for handling sigval types in signal.c so the guest should get
7424 * the correct value back even if we did a 64 bit byteswap and it's
7425 * using the 32 bit integer.
7427 host_sevp
->sigev_value
.sival_ptr
=
7428 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7429 host_sevp
->sigev_signo
=
7430 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7431 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7432 host_sevp
->sigev_notify_thread_id
= tswap32(target_sevp
->_sigev_un
._tid
);
7434 unlock_user_struct(target_sevp
, target_addr
, 1);
7438 #if defined(TARGET_NR_mlockall)
7439 static inline int target_to_host_mlockall_arg(int arg
)
7443 if (arg
& TARGET_MCL_CURRENT
) {
7444 result
|= MCL_CURRENT
;
7446 if (arg
& TARGET_MCL_FUTURE
) {
7447 result
|= MCL_FUTURE
;
7450 if (arg
& TARGET_MCL_ONFAULT
) {
7451 result
|= MCL_ONFAULT
;
7459 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7460 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7461 defined(TARGET_NR_newfstatat))
7462 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7463 abi_ulong target_addr
,
7464 struct stat
*host_st
)
7466 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7467 if (((CPUARMState
*)cpu_env
)->eabi
) {
7468 struct target_eabi_stat64
*target_st
;
7470 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7471 return -TARGET_EFAULT
;
7472 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7473 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7474 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7475 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7476 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7478 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7479 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7480 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7481 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7482 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7483 __put_user(host_st
->st_size
, &target_st
->st_size
);
7484 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7485 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7486 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7487 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7488 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7489 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7490 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7491 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7492 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7494 unlock_user_struct(target_st
, target_addr
, 1);
7498 #if defined(TARGET_HAS_STRUCT_STAT64)
7499 struct target_stat64
*target_st
;
7501 struct target_stat
*target_st
;
7504 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7505 return -TARGET_EFAULT
;
7506 memset(target_st
, 0, sizeof(*target_st
));
7507 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7508 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7509 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7510 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7512 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7513 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7514 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7515 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7516 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7517 /* XXX: better use of kernel struct */
7518 __put_user(host_st
->st_size
, &target_st
->st_size
);
7519 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7520 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7521 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7522 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7523 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7524 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7525 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7526 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7527 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7529 unlock_user_struct(target_st
, target_addr
, 1);
7536 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7537 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7538 abi_ulong target_addr
)
7540 struct target_statx
*target_stx
;
7542 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7543 return -TARGET_EFAULT
;
7545 memset(target_stx
, 0, sizeof(*target_stx
));
7547 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7548 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7549 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7550 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7551 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7552 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7553 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7554 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7555 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7556 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7557 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7558 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7559 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7560 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7561 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7562 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7563 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7564 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7565 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7566 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7567 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7568 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7569 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7571 unlock_user_struct(target_stx
, target_addr
, 1);
/*
 * Dispatch to whichever raw futex syscall the host provides:
 * __NR_futex on 64-bit hosts (time_t is already 64-bit there), and
 * __NR_futex_time64 / __NR_futex on 32-bit hosts depending on the
 * width of timespec::tv_sec.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7602 static int do_safe_futex(int *uaddr
, int op
, int val
,
7603 const struct timespec
*timeout
, int *uaddr2
,
7606 #if HOST_LONG_BITS == 64
7607 #if defined(__NR_futex)
7608 /* always a 64-bit time_t, it doesn't define _time64 version */
7609 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7611 #else /* HOST_LONG_BITS == 64 */
7612 #if defined(__NR_futex_time64)
7613 if (sizeof(timeout
->tv_sec
) == 8) {
7614 /* _time64 function on 32bit arch */
7615 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7619 #if defined(__NR_futex)
7620 /* old function on 32bit arch */
7621 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7623 #endif /* HOST_LONG_BITS == 64 */
7624 return -TARGET_ENOSYS
;
7627 /* ??? Using host futex calls even when target atomic operations
7628 are not really atomic probably breaks things. However implementing
7629 futexes locally would make futexes shared between multiple processes
7630 tricky. However they're probably useless because guest atomic
7631 operations won't work either. */
7632 #if defined(TARGET_NR_futex)
7633 static int do_futex(CPUState
*cpu
, target_ulong uaddr
, int op
, int val
,
7634 target_ulong timeout
, target_ulong uaddr2
, int val3
)
7636 struct timespec ts
, *pts
;
7639 /* ??? We assume FUTEX_* constants are the same on both host
7641 #ifdef FUTEX_CMD_MASK
7642 base_op
= op
& FUTEX_CMD_MASK
;
7648 case FUTEX_WAIT_BITSET
:
7651 target_to_host_timespec(pts
, timeout
);
7655 return do_safe_futex(g2h(cpu
, uaddr
),
7656 op
, tswap32(val
), pts
, NULL
, val3
);
7658 return do_safe_futex(g2h(cpu
, uaddr
),
7659 op
, val
, NULL
, NULL
, 0);
7661 return do_safe_futex(g2h(cpu
, uaddr
),
7662 op
, val
, NULL
, NULL
, 0);
7664 case FUTEX_CMP_REQUEUE
:
7666 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7667 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7668 But the prototype takes a `struct timespec *'; insert casts
7669 to satisfy the compiler. We do not need to tswap TIMEOUT
7670 since it's not compared to guest memory. */
7671 pts
= (struct timespec
*)(uintptr_t) timeout
;
7672 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7673 (base_op
== FUTEX_CMP_REQUEUE
7674 ? tswap32(val3
) : val3
));
7676 return -TARGET_ENOSYS
;
7681 #if defined(TARGET_NR_futex_time64)
7682 static int do_futex_time64(CPUState
*cpu
, target_ulong uaddr
, int op
,
7683 int val
, target_ulong timeout
,
7684 target_ulong uaddr2
, int val3
)
7686 struct timespec ts
, *pts
;
7689 /* ??? We assume FUTEX_* constants are the same on both host
7691 #ifdef FUTEX_CMD_MASK
7692 base_op
= op
& FUTEX_CMD_MASK
;
7698 case FUTEX_WAIT_BITSET
:
7701 if (target_to_host_timespec64(pts
, timeout
)) {
7702 return -TARGET_EFAULT
;
7707 return do_safe_futex(g2h(cpu
, uaddr
), op
,
7708 tswap32(val
), pts
, NULL
, val3
);
7710 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7712 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7714 case FUTEX_CMP_REQUEUE
:
7716 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7717 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7718 But the prototype takes a `struct timespec *'; insert casts
7719 to satisfy the compiler. We do not need to tswap TIMEOUT
7720 since it's not compared to guest memory. */
7721 pts
= (struct timespec
*)(uintptr_t) timeout
;
7722 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7723 (base_op
== FUTEX_CMP_REQUEUE
7724 ? tswap32(val3
) : val3
));
7726 return -TARGET_ENOSYS
;
7731 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7732 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7733 abi_long handle
, abi_long mount_id
,
7736 struct file_handle
*target_fh
;
7737 struct file_handle
*fh
;
7741 unsigned int size
, total_size
;
7743 if (get_user_s32(size
, handle
)) {
7744 return -TARGET_EFAULT
;
7747 name
= lock_user_string(pathname
);
7749 return -TARGET_EFAULT
;
7752 total_size
= sizeof(struct file_handle
) + size
;
7753 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7755 unlock_user(name
, pathname
, 0);
7756 return -TARGET_EFAULT
;
7759 fh
= g_malloc0(total_size
);
7760 fh
->handle_bytes
= size
;
7762 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7763 unlock_user(name
, pathname
, 0);
7765 /* man name_to_handle_at(2):
7766 * Other than the use of the handle_bytes field, the caller should treat
7767 * the file_handle structure as an opaque data type
7770 memcpy(target_fh
, fh
, total_size
);
7771 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7772 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7774 unlock_user(target_fh
, handle
, total_size
);
7776 if (put_user_s32(mid
, mount_id
)) {
7777 return -TARGET_EFAULT
;
7785 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7786 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7789 struct file_handle
*target_fh
;
7790 struct file_handle
*fh
;
7791 unsigned int size
, total_size
;
7794 if (get_user_s32(size
, handle
)) {
7795 return -TARGET_EFAULT
;
7798 total_size
= sizeof(struct file_handle
) + size
;
7799 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7801 return -TARGET_EFAULT
;
7804 fh
= g_memdup(target_fh
, total_size
);
7805 fh
->handle_bytes
= size
;
7806 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7808 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7809 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7813 unlock_user(target_fh
, handle
, total_size
);
7819 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7821 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7824 target_sigset_t
*target_mask
;
7828 if (flags
& ~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
)) {
7829 return -TARGET_EINVAL
;
7831 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7832 return -TARGET_EFAULT
;
7835 target_to_host_sigset(&host_mask
, target_mask
);
7837 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7839 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7841 fd_trans_register(ret
, &target_signalfd_trans
);
7844 unlock_user_struct(target_mask
, mask
, 0);
7850 /* Map host to target signal numbers for the wait family of syscalls.
7851 Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    /* Killed by signal: translate the signal number, keep other bits. */
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    /* Stopped by signal: the stop signal lives in bits 8..15. */
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7864 static int open_self_cmdline(void *cpu_env
, int fd
)
7866 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7867 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7870 for (i
= 0; i
< bprm
->argc
; i
++) {
7871 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7873 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7881 static int open_self_maps(void *cpu_env
, int fd
)
7883 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7884 TaskState
*ts
= cpu
->opaque
;
7885 GSList
*map_info
= read_self_maps();
7889 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7890 MapInfo
*e
= (MapInfo
*) s
->data
;
7892 if (h2g_valid(e
->start
)) {
7893 unsigned long min
= e
->start
;
7894 unsigned long max
= e
->end
;
7895 int flags
= page_get_flags(h2g(min
));
7898 max
= h2g_valid(max
- 1) ?
7899 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
7901 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7905 if (h2g(min
) == ts
->info
->stack_limit
) {
7911 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7912 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7913 h2g(min
), h2g(max
- 1) + 1,
7914 (flags
& PAGE_READ
) ? 'r' : '-',
7915 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
7916 (flags
& PAGE_EXEC
) ? 'x' : '-',
7917 e
->is_priv
? 'p' : '-',
7918 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7920 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7927 free_self_maps(map_info
);
7929 #ifdef TARGET_VSYSCALL_PAGE
7931 * We only support execution from the vsyscall page.
7932 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7934 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7935 " --xp 00000000 00:00 0",
7936 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7937 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7943 static int open_self_stat(void *cpu_env
, int fd
)
7945 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7946 TaskState
*ts
= cpu
->opaque
;
7947 g_autoptr(GString
) buf
= g_string_new(NULL
);
7950 for (i
= 0; i
< 44; i
++) {
7953 g_string_printf(buf
, FMT_pid
" ", getpid());
7954 } else if (i
== 1) {
7956 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7957 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7958 g_string_printf(buf
, "(%.15s) ", bin
);
7959 } else if (i
== 27) {
7961 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7963 /* for the rest, there is MasterCard */
7964 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7967 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7975 static int open_self_auxv(void *cpu_env
, int fd
)
7977 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7978 TaskState
*ts
= cpu
->opaque
;
7979 abi_ulong auxv
= ts
->info
->saved_auxv
;
7980 abi_ulong len
= ts
->info
->auxv_len
;
7984 * Auxiliary vector is stored in target process stack.
7985 * read in whole auxv vector and copy it to file
7987 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7991 r
= write(fd
, ptr
, len
);
7998 lseek(fd
, 0, SEEK_SET
);
7999 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if filename refers to this process's own /proc entry named
 * `entry` — i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
8029 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8030 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake /proc open table. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
8037 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8038 static int open_net_route(void *cpu_env
, int fd
)
8045 fp
= fopen("/proc/net/route", "r");
8052 read
= getline(&line
, &len
, fp
);
8053 dprintf(fd
, "%s", line
);
8057 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8059 uint32_t dest
, gw
, mask
;
8060 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8063 fields
= sscanf(line
,
8064 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8065 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8066 &mask
, &mtu
, &window
, &irtt
);
8070 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8071 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8072 metric
, tswap32(mask
), mtu
, window
, irtt
);
8082 #if defined(TARGET_SPARC)
/* Minimal fake /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
8090 #if defined(TARGET_HPPA)
/* Minimal fake /proc/cpuinfo for HPPA guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
8102 #if defined(TARGET_M68K)
/* Minimal fake /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
8110 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8113 const char *filename
;
8114 int (*fill
)(void *cpu_env
, int fd
);
8115 int (*cmp
)(const char *s1
, const char *s2
);
8117 const struct fake_open
*fake_open
;
8118 static const struct fake_open fakes
[] = {
8119 { "maps", open_self_maps
, is_proc_myself
},
8120 { "stat", open_self_stat
, is_proc_myself
},
8121 { "auxv", open_self_auxv
, is_proc_myself
},
8122 { "cmdline", open_self_cmdline
, is_proc_myself
},
8123 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8124 { "/proc/net/route", open_net_route
, is_proc
},
8126 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8127 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8129 #if defined(TARGET_M68K)
8130 { "/proc/hardware", open_hardware
, is_proc
},
8132 { NULL
, NULL
, NULL
}
8135 if (is_proc_myself(pathname
, "exe")) {
8136 int execfd
= qemu_getauxval(AT_EXECFD
);
8137 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
8140 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8141 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8146 if (fake_open
->filename
) {
8148 char filename
[PATH_MAX
];
8151 /* create temporary file to map stat to */
8152 tmpdir
= getenv("TMPDIR");
8155 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8156 fd
= mkstemp(filename
);
8162 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8168 lseek(fd
, 0, SEEK_SET
);
8173 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8176 #define TIMER_MAGIC 0x0caf0000
8177 #define TIMER_MAGIC_MASK 0xffff0000
8179 /* Convert QEMU provided timer ID back to internal 16bit index format */
8180 static target_timer_t
get_timer_id(abi_long arg
)
8182 target_timer_t timerid
= arg
;
8184 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8185 return -TARGET_EINVAL
;
8190 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8191 return -TARGET_EINVAL
;
8197 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8199 abi_ulong target_addr
,
8202 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8203 unsigned host_bits
= sizeof(*host_mask
) * 8;
8204 abi_ulong
*target_mask
;
8207 assert(host_size
>= target_size
);
8209 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8211 return -TARGET_EFAULT
;
8213 memset(host_mask
, 0, host_size
);
8215 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8216 unsigned bit
= i
* target_bits
;
8219 __get_user(val
, &target_mask
[i
]);
8220 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8221 if (val
& (1UL << j
)) {
8222 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8227 unlock_user(target_mask
, target_addr
, 0);
8231 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8233 abi_ulong target_addr
,
8236 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8237 unsigned host_bits
= sizeof(*host_mask
) * 8;
8238 abi_ulong
*target_mask
;
8241 assert(host_size
>= target_size
);
8243 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8245 return -TARGET_EFAULT
;
8248 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8249 unsigned bit
= i
* target_bits
;
8252 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8253 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8257 __put_user(val
, &target_mask
[i
]);
8260 unlock_user(target_mask
, target_addr
, target_size
);
8264 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8265 _syscall2(int, pivot_root
, const char *, new_root
, const char *, put_old
)
8268 /* This is an internal helper for do_syscall so that it is easier
8269 * to have a single return point, so that actions, such as logging
8270 * of syscall results, can be performed.
8271 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8273 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8274 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8275 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8278 CPUState
*cpu
= env_cpu(cpu_env
);
8280 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8281 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8282 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8283 || defined(TARGET_NR_statx)
8286 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8287 || defined(TARGET_NR_fstatfs)
8293 case TARGET_NR_exit
:
8294 /* In old applications this may be used to implement _exit(2).
8295 However in threaded applications it is used for thread termination,
8296 and _exit_group is used for application termination.
8297 Do thread termination if we have more then one thread. */
8299 if (block_signals()) {
8300 return -TARGET_ERESTARTSYS
;
8303 pthread_mutex_lock(&clone_lock
);
8305 if (CPU_NEXT(first_cpu
)) {
8306 TaskState
*ts
= cpu
->opaque
;
8308 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8309 object_unref(OBJECT(cpu
));
8311 * At this point the CPU should be unrealized and removed
8312 * from cpu lists. We can clean-up the rest of the thread
8313 * data without the lock held.
8316 pthread_mutex_unlock(&clone_lock
);
8318 if (ts
->child_tidptr
) {
8319 put_user_u32(0, ts
->child_tidptr
);
8320 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8321 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8325 rcu_unregister_thread();
8329 pthread_mutex_unlock(&clone_lock
);
8330 preexit_cleanup(cpu_env
, arg1
);
8332 return 0; /* avoid warning */
8333 case TARGET_NR_read
:
8334 if (arg2
== 0 && arg3
== 0) {
8335 return get_errno(safe_read(arg1
, 0, 0));
8337 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8338 return -TARGET_EFAULT
;
8339 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8341 fd_trans_host_to_target_data(arg1
)) {
8342 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8344 unlock_user(p
, arg2
, ret
);
8347 case TARGET_NR_write
:
8348 if (arg2
== 0 && arg3
== 0) {
8349 return get_errno(safe_write(arg1
, 0, 0));
8351 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8352 return -TARGET_EFAULT
;
8353 if (fd_trans_target_to_host_data(arg1
)) {
8354 void *copy
= g_malloc(arg3
);
8355 memcpy(copy
, p
, arg3
);
8356 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8358 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8362 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8364 unlock_user(p
, arg2
, 0);
8367 #ifdef TARGET_NR_open
8368 case TARGET_NR_open
:
8369 if (!(p
= lock_user_string(arg1
)))
8370 return -TARGET_EFAULT
;
8371 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8372 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8374 fd_trans_unregister(ret
);
8375 unlock_user(p
, arg1
, 0);
8378 case TARGET_NR_openat
:
8379 if (!(p
= lock_user_string(arg2
)))
8380 return -TARGET_EFAULT
;
8381 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8382 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8384 fd_trans_unregister(ret
);
8385 unlock_user(p
, arg2
, 0);
8387 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8388 case TARGET_NR_name_to_handle_at
:
8389 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8392 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8393 case TARGET_NR_open_by_handle_at
:
8394 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8395 fd_trans_unregister(ret
);
8398 case TARGET_NR_close
:
8399 fd_trans_unregister(arg1
);
8400 return get_errno(close(arg1
));
8403 return do_brk(arg1
);
8404 #ifdef TARGET_NR_fork
8405 case TARGET_NR_fork
:
8406 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8408 #ifdef TARGET_NR_waitpid
8409 case TARGET_NR_waitpid
:
8412 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8413 if (!is_error(ret
) && arg2
&& ret
8414 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8415 return -TARGET_EFAULT
;
8419 #ifdef TARGET_NR_waitid
8420 case TARGET_NR_waitid
:
8424 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8425 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8426 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8427 return -TARGET_EFAULT
;
8428 host_to_target_siginfo(p
, &info
);
8429 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8434 #ifdef TARGET_NR_creat /* not on alpha */
8435 case TARGET_NR_creat
:
8436 if (!(p
= lock_user_string(arg1
)))
8437 return -TARGET_EFAULT
;
8438 ret
= get_errno(creat(p
, arg2
));
8439 fd_trans_unregister(ret
);
8440 unlock_user(p
, arg1
, 0);
8443 #ifdef TARGET_NR_link
8444 case TARGET_NR_link
:
8447 p
= lock_user_string(arg1
);
8448 p2
= lock_user_string(arg2
);
8450 ret
= -TARGET_EFAULT
;
8452 ret
= get_errno(link(p
, p2
));
8453 unlock_user(p2
, arg2
, 0);
8454 unlock_user(p
, arg1
, 0);
8458 #if defined(TARGET_NR_linkat)
8459 case TARGET_NR_linkat
:
8463 return -TARGET_EFAULT
;
8464 p
= lock_user_string(arg2
);
8465 p2
= lock_user_string(arg4
);
8467 ret
= -TARGET_EFAULT
;
8469 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8470 unlock_user(p
, arg2
, 0);
8471 unlock_user(p2
, arg4
, 0);
8475 #ifdef TARGET_NR_unlink
8476 case TARGET_NR_unlink
:
8477 if (!(p
= lock_user_string(arg1
)))
8478 return -TARGET_EFAULT
;
8479 ret
= get_errno(unlink(p
));
8480 unlock_user(p
, arg1
, 0);
8483 #if defined(TARGET_NR_unlinkat)
8484 case TARGET_NR_unlinkat
:
8485 if (!(p
= lock_user_string(arg2
)))
8486 return -TARGET_EFAULT
;
8487 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8488 unlock_user(p
, arg2
, 0);
8491 case TARGET_NR_execve
:
8493 char **argp
, **envp
;
8496 abi_ulong guest_argp
;
8497 abi_ulong guest_envp
;
8504 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8505 if (get_user_ual(addr
, gp
))
8506 return -TARGET_EFAULT
;
8513 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8514 if (get_user_ual(addr
, gp
))
8515 return -TARGET_EFAULT
;
8521 argp
= g_new0(char *, argc
+ 1);
8522 envp
= g_new0(char *, envc
+ 1);
8524 for (gp
= guest_argp
, q
= argp
; gp
;
8525 gp
+= sizeof(abi_ulong
), q
++) {
8526 if (get_user_ual(addr
, gp
))
8530 if (!(*q
= lock_user_string(addr
)))
8532 total_size
+= strlen(*q
) + 1;
8536 for (gp
= guest_envp
, q
= envp
; gp
;
8537 gp
+= sizeof(abi_ulong
), q
++) {
8538 if (get_user_ual(addr
, gp
))
8542 if (!(*q
= lock_user_string(addr
)))
8544 total_size
+= strlen(*q
) + 1;
8548 if (!(p
= lock_user_string(arg1
)))
8550 /* Although execve() is not an interruptible syscall it is
8551 * a special case where we must use the safe_syscall wrapper:
8552 * if we allow a signal to happen before we make the host
8553 * syscall then we will 'lose' it, because at the point of
8554 * execve the process leaves QEMU's control. So we use the
8555 * safe syscall wrapper to ensure that we either take the
8556 * signal as a guest signal, or else it does not happen
8557 * before the execve completes and makes it the other
8558 * program's problem.
8560 ret
= get_errno(safe_execve(p
, argp
, envp
));
8561 unlock_user(p
, arg1
, 0);
8566 ret
= -TARGET_EFAULT
;
8569 for (gp
= guest_argp
, q
= argp
; *q
;
8570 gp
+= sizeof(abi_ulong
), q
++) {
8571 if (get_user_ual(addr
, gp
)
8574 unlock_user(*q
, addr
, 0);
8576 for (gp
= guest_envp
, q
= envp
; *q
;
8577 gp
+= sizeof(abi_ulong
), q
++) {
8578 if (get_user_ual(addr
, gp
)
8581 unlock_user(*q
, addr
, 0);
8588 case TARGET_NR_chdir
:
8589 if (!(p
= lock_user_string(arg1
)))
8590 return -TARGET_EFAULT
;
8591 ret
= get_errno(chdir(p
));
8592 unlock_user(p
, arg1
, 0);
8594 #ifdef TARGET_NR_time
8595 case TARGET_NR_time
:
8598 ret
= get_errno(time(&host_time
));
8601 && put_user_sal(host_time
, arg1
))
8602 return -TARGET_EFAULT
;
8606 #ifdef TARGET_NR_mknod
8607 case TARGET_NR_mknod
:
8608 if (!(p
= lock_user_string(arg1
)))
8609 return -TARGET_EFAULT
;
8610 ret
= get_errno(mknod(p
, arg2
, arg3
));
8611 unlock_user(p
, arg1
, 0);
8614 #if defined(TARGET_NR_mknodat)
8615 case TARGET_NR_mknodat
:
8616 if (!(p
= lock_user_string(arg2
)))
8617 return -TARGET_EFAULT
;
8618 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8619 unlock_user(p
, arg2
, 0);
8622 #ifdef TARGET_NR_chmod
8623 case TARGET_NR_chmod
:
8624 if (!(p
= lock_user_string(arg1
)))
8625 return -TARGET_EFAULT
;
8626 ret
= get_errno(chmod(p
, arg2
));
8627 unlock_user(p
, arg1
, 0);
8630 #ifdef TARGET_NR_lseek
8631 case TARGET_NR_lseek
:
8632 return get_errno(lseek(arg1
, arg2
, arg3
));
8634 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8635 /* Alpha specific */
8636 case TARGET_NR_getxpid
:
8637 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8638 return get_errno(getpid());
8640 #ifdef TARGET_NR_getpid
8641 case TARGET_NR_getpid
:
8642 return get_errno(getpid());
8644 case TARGET_NR_mount
:
8646 /* need to look at the data field */
8650 p
= lock_user_string(arg1
);
8652 return -TARGET_EFAULT
;
8658 p2
= lock_user_string(arg2
);
8661 unlock_user(p
, arg1
, 0);
8663 return -TARGET_EFAULT
;
8667 p3
= lock_user_string(arg3
);
8670 unlock_user(p
, arg1
, 0);
8672 unlock_user(p2
, arg2
, 0);
8673 return -TARGET_EFAULT
;
8679 /* FIXME - arg5 should be locked, but it isn't clear how to
8680 * do that since it's not guaranteed to be a NULL-terminated
8684 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8686 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8688 ret
= get_errno(ret
);
8691 unlock_user(p
, arg1
, 0);
8693 unlock_user(p2
, arg2
, 0);
8695 unlock_user(p3
, arg3
, 0);
8699 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8700 #if defined(TARGET_NR_umount)
8701 case TARGET_NR_umount
:
8703 #if defined(TARGET_NR_oldumount)
8704 case TARGET_NR_oldumount
:
8706 if (!(p
= lock_user_string(arg1
)))
8707 return -TARGET_EFAULT
;
8708 ret
= get_errno(umount(p
));
8709 unlock_user(p
, arg1
, 0);
8712 #ifdef TARGET_NR_stime /* not on alpha */
8713 case TARGET_NR_stime
:
8717 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8718 return -TARGET_EFAULT
;
8720 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8723 #ifdef TARGET_NR_alarm /* not on alpha */
8724 case TARGET_NR_alarm
:
8727 #ifdef TARGET_NR_pause /* not on alpha */
8728 case TARGET_NR_pause
:
8729 if (!block_signals()) {
8730 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8732 return -TARGET_EINTR
;
8734 #ifdef TARGET_NR_utime
8735 case TARGET_NR_utime
:
8737 struct utimbuf tbuf
, *host_tbuf
;
8738 struct target_utimbuf
*target_tbuf
;
8740 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8741 return -TARGET_EFAULT
;
8742 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8743 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8744 unlock_user_struct(target_tbuf
, arg2
, 0);
8749 if (!(p
= lock_user_string(arg1
)))
8750 return -TARGET_EFAULT
;
8751 ret
= get_errno(utime(p
, host_tbuf
));
8752 unlock_user(p
, arg1
, 0);
8756 #ifdef TARGET_NR_utimes
8757 case TARGET_NR_utimes
:
8759 struct timeval
*tvp
, tv
[2];
8761 if (copy_from_user_timeval(&tv
[0], arg2
)
8762 || copy_from_user_timeval(&tv
[1],
8763 arg2
+ sizeof(struct target_timeval
)))
8764 return -TARGET_EFAULT
;
8769 if (!(p
= lock_user_string(arg1
)))
8770 return -TARGET_EFAULT
;
8771 ret
= get_errno(utimes(p
, tvp
));
8772 unlock_user(p
, arg1
, 0);
8776 #if defined(TARGET_NR_futimesat)
8777 case TARGET_NR_futimesat
:
8779 struct timeval
*tvp
, tv
[2];
8781 if (copy_from_user_timeval(&tv
[0], arg3
)
8782 || copy_from_user_timeval(&tv
[1],
8783 arg3
+ sizeof(struct target_timeval
)))
8784 return -TARGET_EFAULT
;
8789 if (!(p
= lock_user_string(arg2
))) {
8790 return -TARGET_EFAULT
;
8792 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8793 unlock_user(p
, arg2
, 0);
8797 #ifdef TARGET_NR_access
8798 case TARGET_NR_access
:
8799 if (!(p
= lock_user_string(arg1
))) {
8800 return -TARGET_EFAULT
;
8802 ret
= get_errno(access(path(p
), arg2
));
8803 unlock_user(p
, arg1
, 0);
8806 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8807 case TARGET_NR_faccessat
:
8808 if (!(p
= lock_user_string(arg2
))) {
8809 return -TARGET_EFAULT
;
8811 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8812 unlock_user(p
, arg2
, 0);
8815 #ifdef TARGET_NR_nice /* not on alpha */
8816 case TARGET_NR_nice
:
8817 return get_errno(nice(arg1
));
8819 case TARGET_NR_sync
:
8822 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8823 case TARGET_NR_syncfs
:
8824 return get_errno(syncfs(arg1
));
8826 case TARGET_NR_kill
:
8827 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8828 #ifdef TARGET_NR_rename
8829 case TARGET_NR_rename
:
8832 p
= lock_user_string(arg1
);
8833 p2
= lock_user_string(arg2
);
8835 ret
= -TARGET_EFAULT
;
8837 ret
= get_errno(rename(p
, p2
));
8838 unlock_user(p2
, arg2
, 0);
8839 unlock_user(p
, arg1
, 0);
8843 #if defined(TARGET_NR_renameat)
8844 case TARGET_NR_renameat
:
8847 p
= lock_user_string(arg2
);
8848 p2
= lock_user_string(arg4
);
8850 ret
= -TARGET_EFAULT
;
8852 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8853 unlock_user(p2
, arg4
, 0);
8854 unlock_user(p
, arg2
, 0);
8858 #if defined(TARGET_NR_renameat2)
8859 case TARGET_NR_renameat2
:
8862 p
= lock_user_string(arg2
);
8863 p2
= lock_user_string(arg4
);
8865 ret
= -TARGET_EFAULT
;
8867 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8869 unlock_user(p2
, arg4
, 0);
8870 unlock_user(p
, arg2
, 0);
8874 #ifdef TARGET_NR_mkdir
8875 case TARGET_NR_mkdir
:
8876 if (!(p
= lock_user_string(arg1
)))
8877 return -TARGET_EFAULT
;
8878 ret
= get_errno(mkdir(p
, arg2
));
8879 unlock_user(p
, arg1
, 0);
8882 #if defined(TARGET_NR_mkdirat)
8883 case TARGET_NR_mkdirat
:
8884 if (!(p
= lock_user_string(arg2
)))
8885 return -TARGET_EFAULT
;
8886 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8887 unlock_user(p
, arg2
, 0);
8890 #ifdef TARGET_NR_rmdir
8891 case TARGET_NR_rmdir
:
8892 if (!(p
= lock_user_string(arg1
)))
8893 return -TARGET_EFAULT
;
8894 ret
= get_errno(rmdir(p
));
8895 unlock_user(p
, arg1
, 0);
8899 ret
= get_errno(dup(arg1
));
8901 fd_trans_dup(arg1
, ret
);
8904 #ifdef TARGET_NR_pipe
8905 case TARGET_NR_pipe
:
8906 return do_pipe(cpu_env
, arg1
, 0, 0);
8908 #ifdef TARGET_NR_pipe2
8909 case TARGET_NR_pipe2
:
8910 return do_pipe(cpu_env
, arg1
,
8911 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8913 case TARGET_NR_times
:
8915 struct target_tms
*tmsp
;
8917 ret
= get_errno(times(&tms
));
8919 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8921 return -TARGET_EFAULT
;
8922 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8923 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8924 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8925 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8928 ret
= host_to_target_clock_t(ret
);
8931 case TARGET_NR_acct
:
8933 ret
= get_errno(acct(NULL
));
8935 if (!(p
= lock_user_string(arg1
))) {
8936 return -TARGET_EFAULT
;
8938 ret
= get_errno(acct(path(p
)));
8939 unlock_user(p
, arg1
, 0);
8942 #ifdef TARGET_NR_umount2
8943 case TARGET_NR_umount2
:
8944 if (!(p
= lock_user_string(arg1
)))
8945 return -TARGET_EFAULT
;
8946 ret
= get_errno(umount2(p
, arg2
));
8947 unlock_user(p
, arg1
, 0);
8950 case TARGET_NR_ioctl
:
8951 return do_ioctl(arg1
, arg2
, arg3
);
8952 #ifdef TARGET_NR_fcntl
8953 case TARGET_NR_fcntl
:
8954 return do_fcntl(arg1
, arg2
, arg3
);
8956 case TARGET_NR_setpgid
:
8957 return get_errno(setpgid(arg1
, arg2
));
8958 case TARGET_NR_umask
:
8959 return get_errno(umask(arg1
));
8960 case TARGET_NR_chroot
:
8961 if (!(p
= lock_user_string(arg1
)))
8962 return -TARGET_EFAULT
;
8963 ret
= get_errno(chroot(p
));
8964 unlock_user(p
, arg1
, 0);
8966 #ifdef TARGET_NR_dup2
8967 case TARGET_NR_dup2
:
8968 ret
= get_errno(dup2(arg1
, arg2
));
8970 fd_trans_dup(arg1
, arg2
);
8974 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8975 case TARGET_NR_dup3
:
8979 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8982 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8983 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8985 fd_trans_dup(arg1
, arg2
);
8990 #ifdef TARGET_NR_getppid /* not on alpha */
8991 case TARGET_NR_getppid
:
8992 return get_errno(getppid());
8994 #ifdef TARGET_NR_getpgrp
8995 case TARGET_NR_getpgrp
:
8996 return get_errno(getpgrp());
8998 case TARGET_NR_setsid
:
8999 return get_errno(setsid());
9000 #ifdef TARGET_NR_sigaction
9001 case TARGET_NR_sigaction
:
9003 #if defined(TARGET_MIPS)
9004 struct target_sigaction act
, oact
, *pact
, *old_act
;
9007 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9008 return -TARGET_EFAULT
;
9009 act
._sa_handler
= old_act
->_sa_handler
;
9010 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9011 act
.sa_flags
= old_act
->sa_flags
;
9012 unlock_user_struct(old_act
, arg2
, 0);
9018 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9020 if (!is_error(ret
) && arg3
) {
9021 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9022 return -TARGET_EFAULT
;
9023 old_act
->_sa_handler
= oact
._sa_handler
;
9024 old_act
->sa_flags
= oact
.sa_flags
;
9025 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9026 old_act
->sa_mask
.sig
[1] = 0;
9027 old_act
->sa_mask
.sig
[2] = 0;
9028 old_act
->sa_mask
.sig
[3] = 0;
9029 unlock_user_struct(old_act
, arg3
, 1);
9032 struct target_old_sigaction
*old_act
;
9033 struct target_sigaction act
, oact
, *pact
;
9035 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9036 return -TARGET_EFAULT
;
9037 act
._sa_handler
= old_act
->_sa_handler
;
9038 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9039 act
.sa_flags
= old_act
->sa_flags
;
9040 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9041 act
.sa_restorer
= old_act
->sa_restorer
;
9043 unlock_user_struct(old_act
, arg2
, 0);
9048 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9049 if (!is_error(ret
) && arg3
) {
9050 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9051 return -TARGET_EFAULT
;
9052 old_act
->_sa_handler
= oact
._sa_handler
;
9053 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9054 old_act
->sa_flags
= oact
.sa_flags
;
9055 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9056 old_act
->sa_restorer
= oact
.sa_restorer
;
9058 unlock_user_struct(old_act
, arg3
, 1);
9064 case TARGET_NR_rt_sigaction
:
9067 * For Alpha and SPARC this is a 5 argument syscall, with
9068 * a 'restorer' parameter which must be copied into the
9069 * sa_restorer field of the sigaction struct.
9070 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9071 * and arg5 is the sigsetsize.
9073 #if defined(TARGET_ALPHA)
9074 target_ulong sigsetsize
= arg4
;
9075 target_ulong restorer
= arg5
;
9076 #elif defined(TARGET_SPARC)
9077 target_ulong restorer
= arg4
;
9078 target_ulong sigsetsize
= arg5
;
9080 target_ulong sigsetsize
= arg4
;
9081 target_ulong restorer
= 0;
9083 struct target_sigaction
*act
= NULL
;
9084 struct target_sigaction
*oact
= NULL
;
9086 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9087 return -TARGET_EINVAL
;
9089 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9090 return -TARGET_EFAULT
;
9092 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9093 ret
= -TARGET_EFAULT
;
9095 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9097 unlock_user_struct(oact
, arg3
, 1);
9101 unlock_user_struct(act
, arg2
, 0);
9105 #ifdef TARGET_NR_sgetmask /* not on alpha */
9106 case TARGET_NR_sgetmask
:
9109 abi_ulong target_set
;
9110 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9112 host_to_target_old_sigset(&target_set
, &cur_set
);
9118 #ifdef TARGET_NR_ssetmask /* not on alpha */
9119 case TARGET_NR_ssetmask
:
9122 abi_ulong target_set
= arg1
;
9123 target_to_host_old_sigset(&set
, &target_set
);
9124 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9126 host_to_target_old_sigset(&target_set
, &oset
);
9132 #ifdef TARGET_NR_sigprocmask
9133 case TARGET_NR_sigprocmask
:
9135 #if defined(TARGET_ALPHA)
9136 sigset_t set
, oldset
;
9141 case TARGET_SIG_BLOCK
:
9144 case TARGET_SIG_UNBLOCK
:
9147 case TARGET_SIG_SETMASK
:
9151 return -TARGET_EINVAL
;
9154 target_to_host_old_sigset(&set
, &mask
);
9156 ret
= do_sigprocmask(how
, &set
, &oldset
);
9157 if (!is_error(ret
)) {
9158 host_to_target_old_sigset(&mask
, &oldset
);
9160 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9163 sigset_t set
, oldset
, *set_ptr
;
9168 case TARGET_SIG_BLOCK
:
9171 case TARGET_SIG_UNBLOCK
:
9174 case TARGET_SIG_SETMASK
:
9178 return -TARGET_EINVAL
;
9180 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9181 return -TARGET_EFAULT
;
9182 target_to_host_old_sigset(&set
, p
);
9183 unlock_user(p
, arg2
, 0);
9189 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9190 if (!is_error(ret
) && arg3
) {
9191 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9192 return -TARGET_EFAULT
;
9193 host_to_target_old_sigset(p
, &oldset
);
9194 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9200 case TARGET_NR_rt_sigprocmask
:
9203 sigset_t set
, oldset
, *set_ptr
;
9205 if (arg4
!= sizeof(target_sigset_t
)) {
9206 return -TARGET_EINVAL
;
9211 case TARGET_SIG_BLOCK
:
9214 case TARGET_SIG_UNBLOCK
:
9217 case TARGET_SIG_SETMASK
:
9221 return -TARGET_EINVAL
;
9223 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9224 return -TARGET_EFAULT
;
9225 target_to_host_sigset(&set
, p
);
9226 unlock_user(p
, arg2
, 0);
9232 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9233 if (!is_error(ret
) && arg3
) {
9234 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9235 return -TARGET_EFAULT
;
9236 host_to_target_sigset(p
, &oldset
);
9237 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9241 #ifdef TARGET_NR_sigpending
9242 case TARGET_NR_sigpending
:
9245 ret
= get_errno(sigpending(&set
));
9246 if (!is_error(ret
)) {
9247 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9248 return -TARGET_EFAULT
;
9249 host_to_target_old_sigset(p
, &set
);
9250 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9255 case TARGET_NR_rt_sigpending
:
9259 /* Yes, this check is >, not != like most. We follow the kernel's
9260 * logic and it does it like this because it implements
9261 * NR_sigpending through the same code path, and in that case
9262 * the old_sigset_t is smaller in size.
9264 if (arg2
> sizeof(target_sigset_t
)) {
9265 return -TARGET_EINVAL
;
9268 ret
= get_errno(sigpending(&set
));
9269 if (!is_error(ret
)) {
9270 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9271 return -TARGET_EFAULT
;
9272 host_to_target_sigset(p
, &set
);
9273 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9277 #ifdef TARGET_NR_sigsuspend
9278 case TARGET_NR_sigsuspend
:
9280 TaskState
*ts
= cpu
->opaque
;
9281 #if defined(TARGET_ALPHA)
9282 abi_ulong mask
= arg1
;
9283 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9285 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9286 return -TARGET_EFAULT
;
9287 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9288 unlock_user(p
, arg1
, 0);
9290 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9292 if (ret
!= -TARGET_ERESTARTSYS
) {
9293 ts
->in_sigsuspend
= 1;
9298 case TARGET_NR_rt_sigsuspend
:
9300 TaskState
*ts
= cpu
->opaque
;
9302 if (arg2
!= sizeof(target_sigset_t
)) {
9303 return -TARGET_EINVAL
;
9305 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9306 return -TARGET_EFAULT
;
9307 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9308 unlock_user(p
, arg1
, 0);
9309 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9311 if (ret
!= -TARGET_ERESTARTSYS
) {
9312 ts
->in_sigsuspend
= 1;
9316 #ifdef TARGET_NR_rt_sigtimedwait
9317 case TARGET_NR_rt_sigtimedwait
:
9320 struct timespec uts
, *puts
;
9323 if (arg4
!= sizeof(target_sigset_t
)) {
9324 return -TARGET_EINVAL
;
9327 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9328 return -TARGET_EFAULT
;
9329 target_to_host_sigset(&set
, p
);
9330 unlock_user(p
, arg1
, 0);
9333 if (target_to_host_timespec(puts
, arg3
)) {
9334 return -TARGET_EFAULT
;
9339 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9341 if (!is_error(ret
)) {
9343 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9346 return -TARGET_EFAULT
;
9348 host_to_target_siginfo(p
, &uinfo
);
9349 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9351 ret
= host_to_target_signal(ret
);
9356 #ifdef TARGET_NR_rt_sigtimedwait_time64
9357 case TARGET_NR_rt_sigtimedwait_time64
:
9360 struct timespec uts
, *puts
;
9363 if (arg4
!= sizeof(target_sigset_t
)) {
9364 return -TARGET_EINVAL
;
9367 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9369 return -TARGET_EFAULT
;
9371 target_to_host_sigset(&set
, p
);
9372 unlock_user(p
, arg1
, 0);
9375 if (target_to_host_timespec64(puts
, arg3
)) {
9376 return -TARGET_EFAULT
;
9381 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9383 if (!is_error(ret
)) {
9385 p
= lock_user(VERIFY_WRITE
, arg2
,
9386 sizeof(target_siginfo_t
), 0);
9388 return -TARGET_EFAULT
;
9390 host_to_target_siginfo(p
, &uinfo
);
9391 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9393 ret
= host_to_target_signal(ret
);
9398 case TARGET_NR_rt_sigqueueinfo
:
9402 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9404 return -TARGET_EFAULT
;
9406 target_to_host_siginfo(&uinfo
, p
);
9407 unlock_user(p
, arg3
, 0);
9408 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9411 case TARGET_NR_rt_tgsigqueueinfo
:
9415 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9417 return -TARGET_EFAULT
;
9419 target_to_host_siginfo(&uinfo
, p
);
9420 unlock_user(p
, arg4
, 0);
9421 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9424 #ifdef TARGET_NR_sigreturn
9425 case TARGET_NR_sigreturn
:
9426 if (block_signals()) {
9427 return -TARGET_ERESTARTSYS
;
9429 return do_sigreturn(cpu_env
);
9431 case TARGET_NR_rt_sigreturn
:
9432 if (block_signals()) {
9433 return -TARGET_ERESTARTSYS
;
9435 return do_rt_sigreturn(cpu_env
);
9436 case TARGET_NR_sethostname
:
9437 if (!(p
= lock_user_string(arg1
)))
9438 return -TARGET_EFAULT
;
9439 ret
= get_errno(sethostname(p
, arg2
));
9440 unlock_user(p
, arg1
, 0);
9442 #ifdef TARGET_NR_setrlimit
9443 case TARGET_NR_setrlimit
:
9445 int resource
= target_to_host_resource(arg1
);
9446 struct target_rlimit
*target_rlim
;
9448 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9449 return -TARGET_EFAULT
;
9450 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9451 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9452 unlock_user_struct(target_rlim
, arg2
, 0);
9454 * If we just passed through resource limit settings for memory then
9455 * they would also apply to QEMU's own allocations, and QEMU will
9456 * crash or hang or die if its allocations fail. Ideally we would
9457 * track the guest allocations in QEMU and apply the limits ourselves.
9458 * For now, just tell the guest the call succeeded but don't actually
9461 if (resource
!= RLIMIT_AS
&&
9462 resource
!= RLIMIT_DATA
&&
9463 resource
!= RLIMIT_STACK
) {
9464 return get_errno(setrlimit(resource
, &rlim
));
9470 #ifdef TARGET_NR_getrlimit
9471 case TARGET_NR_getrlimit
:
9473 int resource
= target_to_host_resource(arg1
);
9474 struct target_rlimit
*target_rlim
;
9477 ret
= get_errno(getrlimit(resource
, &rlim
));
9478 if (!is_error(ret
)) {
9479 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9480 return -TARGET_EFAULT
;
9481 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9482 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9483 unlock_user_struct(target_rlim
, arg2
, 1);
9488 case TARGET_NR_getrusage
:
9490 struct rusage rusage
;
9491 ret
= get_errno(getrusage(arg1
, &rusage
));
9492 if (!is_error(ret
)) {
9493 ret
= host_to_target_rusage(arg2
, &rusage
);
9497 #if defined(TARGET_NR_gettimeofday)
9498 case TARGET_NR_gettimeofday
:
9503 ret
= get_errno(gettimeofday(&tv
, &tz
));
9504 if (!is_error(ret
)) {
9505 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9506 return -TARGET_EFAULT
;
9508 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9509 return -TARGET_EFAULT
;
9515 #if defined(TARGET_NR_settimeofday)
9516 case TARGET_NR_settimeofday
:
9518 struct timeval tv
, *ptv
= NULL
;
9519 struct timezone tz
, *ptz
= NULL
;
9522 if (copy_from_user_timeval(&tv
, arg1
)) {
9523 return -TARGET_EFAULT
;
9529 if (copy_from_user_timezone(&tz
, arg2
)) {
9530 return -TARGET_EFAULT
;
9535 return get_errno(settimeofday(ptv
, ptz
));
9538 #if defined(TARGET_NR_select)
9539 case TARGET_NR_select
:
9540 #if defined(TARGET_WANT_NI_OLD_SELECT)
9541 /* some architectures used to have old_select here
9542 * but now ENOSYS it.
9544 ret
= -TARGET_ENOSYS
;
9545 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9546 ret
= do_old_select(arg1
);
9548 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9552 #ifdef TARGET_NR_pselect6
9553 case TARGET_NR_pselect6
:
9554 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9556 #ifdef TARGET_NR_pselect6_time64
9557 case TARGET_NR_pselect6_time64
:
9558 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9560 #ifdef TARGET_NR_symlink
9561 case TARGET_NR_symlink
:
9564 p
= lock_user_string(arg1
);
9565 p2
= lock_user_string(arg2
);
9567 ret
= -TARGET_EFAULT
;
9569 ret
= get_errno(symlink(p
, p2
));
9570 unlock_user(p2
, arg2
, 0);
9571 unlock_user(p
, arg1
, 0);
9575 #if defined(TARGET_NR_symlinkat)
9576 case TARGET_NR_symlinkat
:
9579 p
= lock_user_string(arg1
);
9580 p2
= lock_user_string(arg3
);
9582 ret
= -TARGET_EFAULT
;
9584 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9585 unlock_user(p2
, arg3
, 0);
9586 unlock_user(p
, arg1
, 0);
9590 #ifdef TARGET_NR_readlink
9591 case TARGET_NR_readlink
:
9594 p
= lock_user_string(arg1
);
9595 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9597 ret
= -TARGET_EFAULT
;
9599 /* Short circuit this for the magic exe check. */
9600 ret
= -TARGET_EINVAL
;
9601 } else if (is_proc_myself((const char *)p
, "exe")) {
9602 char real
[PATH_MAX
], *temp
;
9603 temp
= realpath(exec_path
, real
);
9604 /* Return value is # of bytes that we wrote to the buffer. */
9606 ret
= get_errno(-1);
9608 /* Don't worry about sign mismatch as earlier mapping
9609 * logic would have thrown a bad address error. */
9610 ret
= MIN(strlen(real
), arg3
);
9611 /* We cannot NUL terminate the string. */
9612 memcpy(p2
, real
, ret
);
9615 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9617 unlock_user(p2
, arg2
, ret
);
9618 unlock_user(p
, arg1
, 0);
9622 #if defined(TARGET_NR_readlinkat)
9623 case TARGET_NR_readlinkat
:
9626 p
= lock_user_string(arg2
);
9627 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9629 ret
= -TARGET_EFAULT
;
9630 } else if (is_proc_myself((const char *)p
, "exe")) {
9631 char real
[PATH_MAX
], *temp
;
9632 temp
= realpath(exec_path
, real
);
9633 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9634 snprintf((char *)p2
, arg4
, "%s", real
);
9636 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9638 unlock_user(p2
, arg3
, ret
);
9639 unlock_user(p
, arg2
, 0);
9643 #ifdef TARGET_NR_swapon
9644 case TARGET_NR_swapon
:
9645 if (!(p
= lock_user_string(arg1
)))
9646 return -TARGET_EFAULT
;
9647 ret
= get_errno(swapon(p
, arg2
));
9648 unlock_user(p
, arg1
, 0);
9651 case TARGET_NR_reboot
:
9652 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9653 /* arg4 must be ignored in all other cases */
9654 p
= lock_user_string(arg4
);
9656 return -TARGET_EFAULT
;
9658 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9659 unlock_user(p
, arg4
, 0);
9661 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9664 #ifdef TARGET_NR_mmap
9665 case TARGET_NR_mmap
:
9666 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9667 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9668 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9669 || defined(TARGET_S390X)
9672 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9673 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9674 return -TARGET_EFAULT
;
9681 unlock_user(v
, arg1
, 0);
9682 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9683 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9687 /* mmap pointers are always untagged */
9688 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9689 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9695 #ifdef TARGET_NR_mmap2
9696 case TARGET_NR_mmap2
:
9698 #define MMAP_SHIFT 12
9700 ret
= target_mmap(arg1
, arg2
, arg3
,
9701 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9702 arg5
, arg6
<< MMAP_SHIFT
);
9703 return get_errno(ret
);
9705 case TARGET_NR_munmap
:
9706 arg1
= cpu_untagged_addr(cpu
, arg1
);
9707 return get_errno(target_munmap(arg1
, arg2
));
9708 case TARGET_NR_mprotect
:
9709 arg1
= cpu_untagged_addr(cpu
, arg1
);
9711 TaskState
*ts
= cpu
->opaque
;
9712 /* Special hack to detect libc making the stack executable. */
9713 if ((arg3
& PROT_GROWSDOWN
)
9714 && arg1
>= ts
->info
->stack_limit
9715 && arg1
<= ts
->info
->start_stack
) {
9716 arg3
&= ~PROT_GROWSDOWN
;
9717 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9718 arg1
= ts
->info
->stack_limit
;
9721 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9722 #ifdef TARGET_NR_mremap
9723 case TARGET_NR_mremap
:
9724 arg1
= cpu_untagged_addr(cpu
, arg1
);
9725 /* mremap new_addr (arg5) is always untagged */
9726 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9728 /* ??? msync/mlock/munlock are broken for softmmu. */
9729 #ifdef TARGET_NR_msync
9730 case TARGET_NR_msync
:
9731 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
9733 #ifdef TARGET_NR_mlock
9734 case TARGET_NR_mlock
:
9735 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
9737 #ifdef TARGET_NR_munlock
9738 case TARGET_NR_munlock
:
9739 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
9741 #ifdef TARGET_NR_mlockall
9742 case TARGET_NR_mlockall
:
9743 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9745 #ifdef TARGET_NR_munlockall
9746 case TARGET_NR_munlockall
:
9747 return get_errno(munlockall());
9749 #ifdef TARGET_NR_truncate
9750 case TARGET_NR_truncate
:
9751 if (!(p
= lock_user_string(arg1
)))
9752 return -TARGET_EFAULT
;
9753 ret
= get_errno(truncate(p
, arg2
));
9754 unlock_user(p
, arg1
, 0);
9757 #ifdef TARGET_NR_ftruncate
9758 case TARGET_NR_ftruncate
:
9759 return get_errno(ftruncate(arg1
, arg2
));
9761 case TARGET_NR_fchmod
:
9762 return get_errno(fchmod(arg1
, arg2
));
9763 #if defined(TARGET_NR_fchmodat)
9764 case TARGET_NR_fchmodat
:
9765 if (!(p
= lock_user_string(arg2
)))
9766 return -TARGET_EFAULT
;
9767 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9768 unlock_user(p
, arg2
, 0);
9771 case TARGET_NR_getpriority
:
9772 /* Note that negative values are valid for getpriority, so we must
9773 differentiate based on errno settings. */
9775 ret
= getpriority(arg1
, arg2
);
9776 if (ret
== -1 && errno
!= 0) {
9777 return -host_to_target_errno(errno
);
9780 /* Return value is the unbiased priority. Signal no error. */
9781 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9783 /* Return value is a biased priority to avoid negative numbers. */
9787 case TARGET_NR_setpriority
:
9788 return get_errno(setpriority(arg1
, arg2
, arg3
));
9789 #ifdef TARGET_NR_statfs
9790 case TARGET_NR_statfs
:
9791 if (!(p
= lock_user_string(arg1
))) {
9792 return -TARGET_EFAULT
;
9794 ret
= get_errno(statfs(path(p
), &stfs
));
9795 unlock_user(p
, arg1
, 0);
9797 if (!is_error(ret
)) {
9798 struct target_statfs
*target_stfs
;
9800 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9801 return -TARGET_EFAULT
;
9802 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9803 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9804 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9805 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9806 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9807 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9808 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9809 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9810 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9811 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9812 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9813 #ifdef _STATFS_F_FLAGS
9814 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9816 __put_user(0, &target_stfs
->f_flags
);
9818 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9819 unlock_user_struct(target_stfs
, arg2
, 1);
9823 #ifdef TARGET_NR_fstatfs
9824 case TARGET_NR_fstatfs
:
9825 ret
= get_errno(fstatfs(arg1
, &stfs
));
9826 goto convert_statfs
;
9828 #ifdef TARGET_NR_statfs64
9829 case TARGET_NR_statfs64
:
9830 if (!(p
= lock_user_string(arg1
))) {
9831 return -TARGET_EFAULT
;
9833 ret
= get_errno(statfs(path(p
), &stfs
));
9834 unlock_user(p
, arg1
, 0);
9836 if (!is_error(ret
)) {
9837 struct target_statfs64
*target_stfs
;
9839 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9840 return -TARGET_EFAULT
;
9841 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9842 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9843 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9844 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9845 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9846 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9847 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9848 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9849 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9850 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9851 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9852 #ifdef _STATFS_F_FLAGS
9853 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9855 __put_user(0, &target_stfs
->f_flags
);
9857 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9858 unlock_user_struct(target_stfs
, arg3
, 1);
9861 case TARGET_NR_fstatfs64
:
9862 ret
= get_errno(fstatfs(arg1
, &stfs
));
9863 goto convert_statfs64
;
9865 #ifdef TARGET_NR_socketcall
9866 case TARGET_NR_socketcall
:
9867 return do_socketcall(arg1
, arg2
);
9869 #ifdef TARGET_NR_accept
9870 case TARGET_NR_accept
:
9871 return do_accept4(arg1
, arg2
, arg3
, 0);
9873 #ifdef TARGET_NR_accept4
9874 case TARGET_NR_accept4
:
9875 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9877 #ifdef TARGET_NR_bind
9878 case TARGET_NR_bind
:
9879 return do_bind(arg1
, arg2
, arg3
);
9881 #ifdef TARGET_NR_connect
9882 case TARGET_NR_connect
:
9883 return do_connect(arg1
, arg2
, arg3
);
9885 #ifdef TARGET_NR_getpeername
9886 case TARGET_NR_getpeername
:
9887 return do_getpeername(arg1
, arg2
, arg3
);
9889 #ifdef TARGET_NR_getsockname
9890 case TARGET_NR_getsockname
:
9891 return do_getsockname(arg1
, arg2
, arg3
);
9893 #ifdef TARGET_NR_getsockopt
9894 case TARGET_NR_getsockopt
:
9895 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9897 #ifdef TARGET_NR_listen
9898 case TARGET_NR_listen
:
9899 return get_errno(listen(arg1
, arg2
));
9901 #ifdef TARGET_NR_recv
9902 case TARGET_NR_recv
:
9903 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9905 #ifdef TARGET_NR_recvfrom
9906 case TARGET_NR_recvfrom
:
9907 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9909 #ifdef TARGET_NR_recvmsg
9910 case TARGET_NR_recvmsg
:
9911 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9913 #ifdef TARGET_NR_send
9914 case TARGET_NR_send
:
9915 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9917 #ifdef TARGET_NR_sendmsg
9918 case TARGET_NR_sendmsg
:
9919 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9921 #ifdef TARGET_NR_sendmmsg
9922 case TARGET_NR_sendmmsg
:
9923 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9925 #ifdef TARGET_NR_recvmmsg
9926 case TARGET_NR_recvmmsg
:
9927 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9929 #ifdef TARGET_NR_sendto
9930 case TARGET_NR_sendto
:
9931 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9933 #ifdef TARGET_NR_shutdown
9934 case TARGET_NR_shutdown
:
9935 return get_errno(shutdown(arg1
, arg2
));
9937 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9938 case TARGET_NR_getrandom
:
9939 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9941 return -TARGET_EFAULT
;
9943 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9944 unlock_user(p
, arg1
, ret
);
9947 #ifdef TARGET_NR_socket
9948 case TARGET_NR_socket
:
9949 return do_socket(arg1
, arg2
, arg3
);
9951 #ifdef TARGET_NR_socketpair
9952 case TARGET_NR_socketpair
:
9953 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9955 #ifdef TARGET_NR_setsockopt
9956 case TARGET_NR_setsockopt
:
9957 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9959 #if defined(TARGET_NR_syslog)
9960 case TARGET_NR_syslog
:
9965 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9966 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9967 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9968 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9969 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9970 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9971 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9972 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9973 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9974 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9975 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9976 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9979 return -TARGET_EINVAL
;
9984 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9986 return -TARGET_EFAULT
;
9988 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9989 unlock_user(p
, arg2
, arg3
);
9993 return -TARGET_EINVAL
;
9998 case TARGET_NR_setitimer
:
10000 struct itimerval value
, ovalue
, *pvalue
;
10004 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10005 || copy_from_user_timeval(&pvalue
->it_value
,
10006 arg2
+ sizeof(struct target_timeval
)))
10007 return -TARGET_EFAULT
;
10011 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10012 if (!is_error(ret
) && arg3
) {
10013 if (copy_to_user_timeval(arg3
,
10014 &ovalue
.it_interval
)
10015 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10017 return -TARGET_EFAULT
;
10021 case TARGET_NR_getitimer
:
10023 struct itimerval value
;
10025 ret
= get_errno(getitimer(arg1
, &value
));
10026 if (!is_error(ret
) && arg2
) {
10027 if (copy_to_user_timeval(arg2
,
10028 &value
.it_interval
)
10029 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10031 return -TARGET_EFAULT
;
10035 #ifdef TARGET_NR_stat
10036 case TARGET_NR_stat
:
10037 if (!(p
= lock_user_string(arg1
))) {
10038 return -TARGET_EFAULT
;
10040 ret
= get_errno(stat(path(p
), &st
));
10041 unlock_user(p
, arg1
, 0);
10044 #ifdef TARGET_NR_lstat
10045 case TARGET_NR_lstat
:
10046 if (!(p
= lock_user_string(arg1
))) {
10047 return -TARGET_EFAULT
;
10049 ret
= get_errno(lstat(path(p
), &st
));
10050 unlock_user(p
, arg1
, 0);
10053 #ifdef TARGET_NR_fstat
10054 case TARGET_NR_fstat
:
10056 ret
= get_errno(fstat(arg1
, &st
));
10057 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10060 if (!is_error(ret
)) {
10061 struct target_stat
*target_st
;
10063 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10064 return -TARGET_EFAULT
;
10065 memset(target_st
, 0, sizeof(*target_st
));
10066 __put_user(st
.st_dev
, &target_st
->st_dev
);
10067 __put_user(st
.st_ino
, &target_st
->st_ino
);
10068 __put_user(st
.st_mode
, &target_st
->st_mode
);
10069 __put_user(st
.st_uid
, &target_st
->st_uid
);
10070 __put_user(st
.st_gid
, &target_st
->st_gid
);
10071 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10072 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10073 __put_user(st
.st_size
, &target_st
->st_size
);
10074 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10075 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10076 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10077 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10078 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10079 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10080 __put_user(st
.st_atim
.tv_nsec
,
10081 &target_st
->target_st_atime_nsec
);
10082 __put_user(st
.st_mtim
.tv_nsec
,
10083 &target_st
->target_st_mtime_nsec
);
10084 __put_user(st
.st_ctim
.tv_nsec
,
10085 &target_st
->target_st_ctime_nsec
);
10087 unlock_user_struct(target_st
, arg2
, 1);
10092 case TARGET_NR_vhangup
:
10093 return get_errno(vhangup());
10094 #ifdef TARGET_NR_syscall
10095 case TARGET_NR_syscall
:
10096 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10097 arg6
, arg7
, arg8
, 0);
10099 #if defined(TARGET_NR_wait4)
10100 case TARGET_NR_wait4
:
10103 abi_long status_ptr
= arg2
;
10104 struct rusage rusage
, *rusage_ptr
;
10105 abi_ulong target_rusage
= arg4
;
10106 abi_long rusage_err
;
10108 rusage_ptr
= &rusage
;
10111 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10112 if (!is_error(ret
)) {
10113 if (status_ptr
&& ret
) {
10114 status
= host_to_target_waitstatus(status
);
10115 if (put_user_s32(status
, status_ptr
))
10116 return -TARGET_EFAULT
;
10118 if (target_rusage
) {
10119 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10128 #ifdef TARGET_NR_swapoff
10129 case TARGET_NR_swapoff
:
10130 if (!(p
= lock_user_string(arg1
)))
10131 return -TARGET_EFAULT
;
10132 ret
= get_errno(swapoff(p
));
10133 unlock_user(p
, arg1
, 0);
10136 case TARGET_NR_sysinfo
:
10138 struct target_sysinfo
*target_value
;
10139 struct sysinfo value
;
10140 ret
= get_errno(sysinfo(&value
));
10141 if (!is_error(ret
) && arg1
)
10143 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10144 return -TARGET_EFAULT
;
10145 __put_user(value
.uptime
, &target_value
->uptime
);
10146 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10147 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10148 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10149 __put_user(value
.totalram
, &target_value
->totalram
);
10150 __put_user(value
.freeram
, &target_value
->freeram
);
10151 __put_user(value
.sharedram
, &target_value
->sharedram
);
10152 __put_user(value
.bufferram
, &target_value
->bufferram
);
10153 __put_user(value
.totalswap
, &target_value
->totalswap
);
10154 __put_user(value
.freeswap
, &target_value
->freeswap
);
10155 __put_user(value
.procs
, &target_value
->procs
);
10156 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10157 __put_user(value
.freehigh
, &target_value
->freehigh
);
10158 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10159 unlock_user_struct(target_value
, arg1
, 1);
10163 #ifdef TARGET_NR_ipc
10164 case TARGET_NR_ipc
:
10165 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10167 #ifdef TARGET_NR_semget
10168 case TARGET_NR_semget
:
10169 return get_errno(semget(arg1
, arg2
, arg3
));
10171 #ifdef TARGET_NR_semop
10172 case TARGET_NR_semop
:
10173 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10175 #ifdef TARGET_NR_semtimedop
10176 case TARGET_NR_semtimedop
:
10177 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10179 #ifdef TARGET_NR_semtimedop_time64
10180 case TARGET_NR_semtimedop_time64
:
10181 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10183 #ifdef TARGET_NR_semctl
10184 case TARGET_NR_semctl
:
10185 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10187 #ifdef TARGET_NR_msgctl
10188 case TARGET_NR_msgctl
:
10189 return do_msgctl(arg1
, arg2
, arg3
);
10191 #ifdef TARGET_NR_msgget
10192 case TARGET_NR_msgget
:
10193 return get_errno(msgget(arg1
, arg2
));
10195 #ifdef TARGET_NR_msgrcv
10196 case TARGET_NR_msgrcv
:
10197 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10199 #ifdef TARGET_NR_msgsnd
10200 case TARGET_NR_msgsnd
:
10201 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10203 #ifdef TARGET_NR_shmget
10204 case TARGET_NR_shmget
:
10205 return get_errno(shmget(arg1
, arg2
, arg3
));
10207 #ifdef TARGET_NR_shmctl
10208 case TARGET_NR_shmctl
:
10209 return do_shmctl(arg1
, arg2
, arg3
);
10211 #ifdef TARGET_NR_shmat
10212 case TARGET_NR_shmat
:
10213 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10215 #ifdef TARGET_NR_shmdt
10216 case TARGET_NR_shmdt
:
10217 return do_shmdt(arg1
);
10219 case TARGET_NR_fsync
:
10220 return get_errno(fsync(arg1
));
10221 case TARGET_NR_clone
:
10222 /* Linux manages to have three different orderings for its
10223 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10224 * match the kernel's CONFIG_CLONE_* settings.
10225 * Microblaze is further special in that it uses a sixth
10226 * implicit argument to clone for the TLS pointer.
10228 #if defined(TARGET_MICROBLAZE)
10229 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10230 #elif defined(TARGET_CLONE_BACKWARDS)
10231 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10232 #elif defined(TARGET_CLONE_BACKWARDS2)
10233 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10235 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10238 #ifdef __NR_exit_group
10239 /* new thread calls */
10240 case TARGET_NR_exit_group
:
10241 preexit_cleanup(cpu_env
, arg1
);
10242 return get_errno(exit_group(arg1
));
10244 case TARGET_NR_setdomainname
:
10245 if (!(p
= lock_user_string(arg1
)))
10246 return -TARGET_EFAULT
;
10247 ret
= get_errno(setdomainname(p
, arg2
));
10248 unlock_user(p
, arg1
, 0);
10250 case TARGET_NR_uname
:
10251 /* no need to transcode because we use the linux syscall */
10253 struct new_utsname
* buf
;
10255 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10256 return -TARGET_EFAULT
;
10257 ret
= get_errno(sys_uname(buf
));
10258 if (!is_error(ret
)) {
10259 /* Overwrite the native machine name with whatever is being
10261 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10262 sizeof(buf
->machine
));
10263 /* Allow the user to override the reported release. */
10264 if (qemu_uname_release
&& *qemu_uname_release
) {
10265 g_strlcpy(buf
->release
, qemu_uname_release
,
10266 sizeof(buf
->release
));
10269 unlock_user_struct(buf
, arg1
, 1);
10273 case TARGET_NR_modify_ldt
:
10274 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10275 #if !defined(TARGET_X86_64)
10276 case TARGET_NR_vm86
:
10277 return do_vm86(cpu_env
, arg1
, arg2
);
10280 #if defined(TARGET_NR_adjtimex)
10281 case TARGET_NR_adjtimex
:
10283 struct timex host_buf
;
10285 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10286 return -TARGET_EFAULT
;
10288 ret
= get_errno(adjtimex(&host_buf
));
10289 if (!is_error(ret
)) {
10290 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10291 return -TARGET_EFAULT
;
10297 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10298 case TARGET_NR_clock_adjtime
:
10300 struct timex htx
, *phtx
= &htx
;
10302 if (target_to_host_timex(phtx
, arg2
) != 0) {
10303 return -TARGET_EFAULT
;
10305 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10306 if (!is_error(ret
) && phtx
) {
10307 if (host_to_target_timex(arg2
, phtx
) != 0) {
10308 return -TARGET_EFAULT
;
10314 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10315 case TARGET_NR_clock_adjtime64
:
10319 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10320 return -TARGET_EFAULT
;
10322 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10323 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10324 return -TARGET_EFAULT
;
10329 case TARGET_NR_getpgid
:
10330 return get_errno(getpgid(arg1
));
10331 case TARGET_NR_fchdir
:
10332 return get_errno(fchdir(arg1
));
10333 case TARGET_NR_personality
:
10334 return get_errno(personality(arg1
));
10335 #ifdef TARGET_NR__llseek /* Not on alpha */
10336 case TARGET_NR__llseek
:
10339 #if !defined(__NR_llseek)
10340 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10342 ret
= get_errno(res
);
10347 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10349 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10350 return -TARGET_EFAULT
;
10355 #ifdef TARGET_NR_getdents
10356 case TARGET_NR_getdents
:
10357 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10358 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10360 struct target_dirent
*target_dirp
;
10361 struct linux_dirent
*dirp
;
10362 abi_long count
= arg3
;
10364 dirp
= g_try_malloc(count
);
10366 return -TARGET_ENOMEM
;
10369 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10370 if (!is_error(ret
)) {
10371 struct linux_dirent
*de
;
10372 struct target_dirent
*tde
;
10374 int reclen
, treclen
;
10375 int count1
, tnamelen
;
10379 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10380 return -TARGET_EFAULT
;
10383 reclen
= de
->d_reclen
;
10384 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10385 assert(tnamelen
>= 0);
10386 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10387 assert(count1
+ treclen
<= count
);
10388 tde
->d_reclen
= tswap16(treclen
);
10389 tde
->d_ino
= tswapal(de
->d_ino
);
10390 tde
->d_off
= tswapal(de
->d_off
);
10391 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10392 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10394 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10398 unlock_user(target_dirp
, arg2
, ret
);
10404 struct linux_dirent
*dirp
;
10405 abi_long count
= arg3
;
10407 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10408 return -TARGET_EFAULT
;
10409 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10410 if (!is_error(ret
)) {
10411 struct linux_dirent
*de
;
10416 reclen
= de
->d_reclen
;
10419 de
->d_reclen
= tswap16(reclen
);
10420 tswapls(&de
->d_ino
);
10421 tswapls(&de
->d_off
);
10422 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10426 unlock_user(dirp
, arg2
, ret
);
10430 /* Implement getdents in terms of getdents64 */
10432 struct linux_dirent64
*dirp
;
10433 abi_long count
= arg3
;
10435 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10437 return -TARGET_EFAULT
;
10439 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10440 if (!is_error(ret
)) {
10441 /* Convert the dirent64 structs to target dirent. We do this
10442 * in-place, since we can guarantee that a target_dirent is no
10443 * larger than a dirent64; however this means we have to be
10444 * careful to read everything before writing in the new format.
10446 struct linux_dirent64
*de
;
10447 struct target_dirent
*tde
;
10452 tde
= (struct target_dirent
*)dirp
;
10454 int namelen
, treclen
;
10455 int reclen
= de
->d_reclen
;
10456 uint64_t ino
= de
->d_ino
;
10457 int64_t off
= de
->d_off
;
10458 uint8_t type
= de
->d_type
;
10460 namelen
= strlen(de
->d_name
);
10461 treclen
= offsetof(struct target_dirent
, d_name
)
10463 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10465 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10466 tde
->d_ino
= tswapal(ino
);
10467 tde
->d_off
= tswapal(off
);
10468 tde
->d_reclen
= tswap16(treclen
);
10469 /* The target_dirent type is in what was formerly a padding
10470 * byte at the end of the structure:
10472 *(((char *)tde
) + treclen
- 1) = type
;
10474 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10475 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10481 unlock_user(dirp
, arg2
, ret
);
10485 #endif /* TARGET_NR_getdents */
10486 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10487 case TARGET_NR_getdents64
:
10489 struct linux_dirent64
*dirp
;
10490 abi_long count
= arg3
;
10491 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10492 return -TARGET_EFAULT
;
10493 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10494 if (!is_error(ret
)) {
10495 struct linux_dirent64
*de
;
10500 reclen
= de
->d_reclen
;
10503 de
->d_reclen
= tswap16(reclen
);
10504 tswap64s((uint64_t *)&de
->d_ino
);
10505 tswap64s((uint64_t *)&de
->d_off
);
10506 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10510 unlock_user(dirp
, arg2
, ret
);
10513 #endif /* TARGET_NR_getdents64 */
10514 #if defined(TARGET_NR__newselect)
10515 case TARGET_NR__newselect
:
10516 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10518 #ifdef TARGET_NR_poll
10519 case TARGET_NR_poll
:
10520 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10522 #ifdef TARGET_NR_ppoll
10523 case TARGET_NR_ppoll
:
10524 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10526 #ifdef TARGET_NR_ppoll_time64
10527 case TARGET_NR_ppoll_time64
:
10528 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10530 case TARGET_NR_flock
:
10531 /* NOTE: the flock constant seems to be the same for every
10533 return get_errno(safe_flock(arg1
, arg2
));
10534 case TARGET_NR_readv
:
10536 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10538 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10539 unlock_iovec(vec
, arg2
, arg3
, 1);
10541 ret
= -host_to_target_errno(errno
);
10545 case TARGET_NR_writev
:
10547 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10549 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10550 unlock_iovec(vec
, arg2
, arg3
, 0);
10552 ret
= -host_to_target_errno(errno
);
10556 #if defined(TARGET_NR_preadv)
10557 case TARGET_NR_preadv
:
10559 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10561 unsigned long low
, high
;
10563 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10564 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10565 unlock_iovec(vec
, arg2
, arg3
, 1);
10567 ret
= -host_to_target_errno(errno
);
10572 #if defined(TARGET_NR_pwritev)
10573 case TARGET_NR_pwritev
:
10575 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10577 unsigned long low
, high
;
10579 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10580 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10581 unlock_iovec(vec
, arg2
, arg3
, 0);
10583 ret
= -host_to_target_errno(errno
);
10588 case TARGET_NR_getsid
:
10589 return get_errno(getsid(arg1
));
10590 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10591 case TARGET_NR_fdatasync
:
10592 return get_errno(fdatasync(arg1
));
10594 case TARGET_NR_sched_getaffinity
:
10596 unsigned int mask_size
;
10597 unsigned long *mask
;
10600 * sched_getaffinity needs multiples of ulong, so need to take
10601 * care of mismatches between target ulong and host ulong sizes.
10603 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10604 return -TARGET_EINVAL
;
10606 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10608 mask
= alloca(mask_size
);
10609 memset(mask
, 0, mask_size
);
10610 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10612 if (!is_error(ret
)) {
10614 /* More data returned than the caller's buffer will fit.
10615 * This only happens if sizeof(abi_long) < sizeof(long)
10616 * and the caller passed us a buffer holding an odd number
10617 * of abi_longs. If the host kernel is actually using the
10618 * extra 4 bytes then fail EINVAL; otherwise we can just
10619 * ignore them and only copy the interesting part.
10621 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10622 if (numcpus
> arg2
* 8) {
10623 return -TARGET_EINVAL
;
10628 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10629 return -TARGET_EFAULT
;
10634 case TARGET_NR_sched_setaffinity
:
10636 unsigned int mask_size
;
10637 unsigned long *mask
;
10640 * sched_setaffinity needs multiples of ulong, so need to take
10641 * care of mismatches between target ulong and host ulong sizes.
10643 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10644 return -TARGET_EINVAL
;
10646 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10647 mask
= alloca(mask_size
);
10649 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10654 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10656 case TARGET_NR_getcpu
:
10658 unsigned cpu
, node
;
10659 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10660 arg2
? &node
: NULL
,
10662 if (is_error(ret
)) {
10665 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10666 return -TARGET_EFAULT
;
10668 if (arg2
&& put_user_u32(node
, arg2
)) {
10669 return -TARGET_EFAULT
;
10673 case TARGET_NR_sched_setparam
:
10675 struct sched_param
*target_schp
;
10676 struct sched_param schp
;
10679 return -TARGET_EINVAL
;
10681 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10682 return -TARGET_EFAULT
;
10683 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10684 unlock_user_struct(target_schp
, arg2
, 0);
10685 return get_errno(sched_setparam(arg1
, &schp
));
10687 case TARGET_NR_sched_getparam
:
10689 struct sched_param
*target_schp
;
10690 struct sched_param schp
;
10693 return -TARGET_EINVAL
;
10695 ret
= get_errno(sched_getparam(arg1
, &schp
));
10696 if (!is_error(ret
)) {
10697 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10698 return -TARGET_EFAULT
;
10699 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10700 unlock_user_struct(target_schp
, arg2
, 1);
10704 case TARGET_NR_sched_setscheduler
:
10706 struct sched_param
*target_schp
;
10707 struct sched_param schp
;
10709 return -TARGET_EINVAL
;
10711 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10712 return -TARGET_EFAULT
;
10713 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10714 unlock_user_struct(target_schp
, arg3
, 0);
10715 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10717 case TARGET_NR_sched_getscheduler
:
10718 return get_errno(sched_getscheduler(arg1
));
10719 case TARGET_NR_sched_yield
:
10720 return get_errno(sched_yield());
10721 case TARGET_NR_sched_get_priority_max
:
10722 return get_errno(sched_get_priority_max(arg1
));
10723 case TARGET_NR_sched_get_priority_min
:
10724 return get_errno(sched_get_priority_min(arg1
));
10725 #ifdef TARGET_NR_sched_rr_get_interval
10726 case TARGET_NR_sched_rr_get_interval
:
10728 struct timespec ts
;
10729 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10730 if (!is_error(ret
)) {
10731 ret
= host_to_target_timespec(arg2
, &ts
);
10736 #ifdef TARGET_NR_sched_rr_get_interval_time64
10737 case TARGET_NR_sched_rr_get_interval_time64
:
10739 struct timespec ts
;
10740 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10741 if (!is_error(ret
)) {
10742 ret
= host_to_target_timespec64(arg2
, &ts
);
10747 #if defined(TARGET_NR_nanosleep)
10748 case TARGET_NR_nanosleep
:
10750 struct timespec req
, rem
;
10751 target_to_host_timespec(&req
, arg1
);
10752 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10753 if (is_error(ret
) && arg2
) {
10754 host_to_target_timespec(arg2
, &rem
);
10759 case TARGET_NR_prctl
:
10761 case PR_GET_PDEATHSIG
:
10764 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10765 if (!is_error(ret
) && arg2
10766 && put_user_s32(deathsig
, arg2
)) {
10767 return -TARGET_EFAULT
;
10774 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10776 return -TARGET_EFAULT
;
10778 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10779 arg3
, arg4
, arg5
));
10780 unlock_user(name
, arg2
, 16);
10785 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10787 return -TARGET_EFAULT
;
10789 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10790 arg3
, arg4
, arg5
));
10791 unlock_user(name
, arg2
, 0);
10796 case TARGET_PR_GET_FP_MODE
:
10798 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10800 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10801 ret
|= TARGET_PR_FP_MODE_FR
;
10803 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10804 ret
|= TARGET_PR_FP_MODE_FRE
;
10808 case TARGET_PR_SET_FP_MODE
:
10810 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10811 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10812 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10813 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10814 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10816 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10817 TARGET_PR_FP_MODE_FRE
;
10819 /* If nothing to change, return right away, successfully. */
10820 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10823 /* Check the value is valid */
10824 if (arg2
& ~known_bits
) {
10825 return -TARGET_EOPNOTSUPP
;
10827 /* Setting FRE without FR is not supported. */
10828 if (new_fre
&& !new_fr
) {
10829 return -TARGET_EOPNOTSUPP
;
10831 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10832 /* FR1 is not supported */
10833 return -TARGET_EOPNOTSUPP
;
10835 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10836 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10837 /* cannot set FR=0 */
10838 return -TARGET_EOPNOTSUPP
;
10840 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10841 /* Cannot set FRE=1 */
10842 return -TARGET_EOPNOTSUPP
;
10846 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10847 for (i
= 0; i
< 32 ; i
+= 2) {
10848 if (!old_fr
&& new_fr
) {
10849 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10850 } else if (old_fr
&& !new_fr
) {
10851 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10856 env
->CP0_Status
|= (1 << CP0St_FR
);
10857 env
->hflags
|= MIPS_HFLAG_F64
;
10859 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10860 env
->hflags
&= ~MIPS_HFLAG_F64
;
10863 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10864 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10865 env
->hflags
|= MIPS_HFLAG_FRE
;
10868 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10869 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10875 #ifdef TARGET_AARCH64
10876 case TARGET_PR_SVE_SET_VL
:
10878 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10879 * PR_SVE_VL_INHERIT. Note the kernel definition
10880 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10881 * even though the current architectural maximum is VQ=16.
10883 ret
= -TARGET_EINVAL
;
10884 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10885 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10886 CPUARMState
*env
= cpu_env
;
10887 ARMCPU
*cpu
= env_archcpu(env
);
10888 uint32_t vq
, old_vq
;
10890 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10891 vq
= MAX(arg2
/ 16, 1);
10892 vq
= MIN(vq
, cpu
->sve_max_vq
);
10895 aarch64_sve_narrow_vq(env
, vq
);
10897 env
->vfp
.zcr_el
[1] = vq
- 1;
10898 arm_rebuild_hflags(env
);
10902 case TARGET_PR_SVE_GET_VL
:
10903 ret
= -TARGET_EINVAL
;
10905 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10906 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10907 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10911 case TARGET_PR_PAC_RESET_KEYS
:
10913 CPUARMState
*env
= cpu_env
;
10914 ARMCPU
*cpu
= env_archcpu(env
);
10916 if (arg3
|| arg4
|| arg5
) {
10917 return -TARGET_EINVAL
;
10919 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10920 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10921 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10922 TARGET_PR_PAC_APGAKEY
);
10928 } else if (arg2
& ~all
) {
10929 return -TARGET_EINVAL
;
10931 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10932 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10933 sizeof(ARMPACKey
), &err
);
10935 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10936 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10937 sizeof(ARMPACKey
), &err
);
10939 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10940 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10941 sizeof(ARMPACKey
), &err
);
10943 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10944 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10945 sizeof(ARMPACKey
), &err
);
10947 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10948 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10949 sizeof(ARMPACKey
), &err
);
10953 * Some unknown failure in the crypto. The best
10954 * we can do is log it and fail the syscall.
10955 * The real syscall cannot fail this way.
10957 qemu_log_mask(LOG_UNIMP
,
10958 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10959 error_get_pretty(err
));
10961 return -TARGET_EIO
;
10966 return -TARGET_EINVAL
;
10967 case TARGET_PR_SET_TAGGED_ADDR_CTRL
:
10969 abi_ulong valid_mask
= TARGET_PR_TAGGED_ADDR_ENABLE
;
10970 CPUARMState
*env
= cpu_env
;
10971 ARMCPU
*cpu
= env_archcpu(env
);
10973 if (cpu_isar_feature(aa64_mte
, cpu
)) {
10974 valid_mask
|= TARGET_PR_MTE_TCF_MASK
;
10975 valid_mask
|= TARGET_PR_MTE_TAG_MASK
;
10978 if ((arg2
& ~valid_mask
) || arg3
|| arg4
|| arg5
) {
10979 return -TARGET_EINVAL
;
10981 env
->tagged_addr_enable
= arg2
& TARGET_PR_TAGGED_ADDR_ENABLE
;
10983 if (cpu_isar_feature(aa64_mte
, cpu
)) {
10984 switch (arg2
& TARGET_PR_MTE_TCF_MASK
) {
10985 case TARGET_PR_MTE_TCF_NONE
:
10986 case TARGET_PR_MTE_TCF_SYNC
:
10987 case TARGET_PR_MTE_TCF_ASYNC
:
10994 * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10995 * Note that the syscall values are consistent with hw.
10997 env
->cp15
.sctlr_el
[1] =
10998 deposit64(env
->cp15
.sctlr_el
[1], 38, 2,
10999 arg2
>> TARGET_PR_MTE_TCF_SHIFT
);
11002 * Write PR_MTE_TAG to GCR_EL1[Exclude].
11003 * Note that the syscall uses an include mask,
11004 * and hardware uses an exclude mask -- invert.
11006 env
->cp15
.gcr_el1
=
11007 deposit64(env
->cp15
.gcr_el1
, 0, 16,
11008 ~arg2
>> TARGET_PR_MTE_TAG_SHIFT
);
11009 arm_rebuild_hflags(env
);
11013 case TARGET_PR_GET_TAGGED_ADDR_CTRL
:
11016 CPUARMState
*env
= cpu_env
;
11017 ARMCPU
*cpu
= env_archcpu(env
);
11019 if (arg2
|| arg3
|| arg4
|| arg5
) {
11020 return -TARGET_EINVAL
;
11022 if (env
->tagged_addr_enable
) {
11023 ret
|= TARGET_PR_TAGGED_ADDR_ENABLE
;
11025 if (cpu_isar_feature(aa64_mte
, cpu
)) {
11027 ret
|= (extract64(env
->cp15
.sctlr_el
[1], 38, 2)
11028 << TARGET_PR_MTE_TCF_SHIFT
);
11029 ret
= deposit64(ret
, TARGET_PR_MTE_TAG_SHIFT
, 16,
11030 ~env
->cp15
.gcr_el1
);
11034 #endif /* AARCH64 */
11035 case PR_GET_SECCOMP
:
11036 case PR_SET_SECCOMP
:
11037 /* Disable seccomp to prevent the target disabling syscalls we
11039 return -TARGET_EINVAL
;
11041 /* Most prctl options have no pointer arguments */
11042 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
11045 #ifdef TARGET_NR_arch_prctl
11046 case TARGET_NR_arch_prctl
:
11047 return do_arch_prctl(cpu_env
, arg1
, arg2
);
11049 #ifdef TARGET_NR_pread64
11050 case TARGET_NR_pread64
:
11051 if (regpairs_aligned(cpu_env
, num
)) {
11055 if (arg2
== 0 && arg3
== 0) {
11056 /* Special-case NULL buffer and zero length, which should succeed */
11059 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11061 return -TARGET_EFAULT
;
11064 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11065 unlock_user(p
, arg2
, ret
);
11067 case TARGET_NR_pwrite64
:
11068 if (regpairs_aligned(cpu_env
, num
)) {
11072 if (arg2
== 0 && arg3
== 0) {
11073 /* Special-case NULL buffer and zero length, which should succeed */
11076 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
11078 return -TARGET_EFAULT
;
11081 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11082 unlock_user(p
, arg2
, 0);
11085 case TARGET_NR_getcwd
:
11086 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11087 return -TARGET_EFAULT
;
11088 ret
= get_errno(sys_getcwd1(p
, arg2
));
11089 unlock_user(p
, arg1
, ret
);
11091 case TARGET_NR_capget
:
11092 case TARGET_NR_capset
:
11094 struct target_user_cap_header
*target_header
;
11095 struct target_user_cap_data
*target_data
= NULL
;
11096 struct __user_cap_header_struct header
;
11097 struct __user_cap_data_struct data
[2];
11098 struct __user_cap_data_struct
*dataptr
= NULL
;
11099 int i
, target_datalen
;
11100 int data_items
= 1;
11102 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11103 return -TARGET_EFAULT
;
11105 header
.version
= tswap32(target_header
->version
);
11106 header
.pid
= tswap32(target_header
->pid
);
11108 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11109 /* Version 2 and up takes pointer to two user_data structs */
11113 target_datalen
= sizeof(*target_data
) * data_items
;
11116 if (num
== TARGET_NR_capget
) {
11117 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11119 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11121 if (!target_data
) {
11122 unlock_user_struct(target_header
, arg1
, 0);
11123 return -TARGET_EFAULT
;
11126 if (num
== TARGET_NR_capset
) {
11127 for (i
= 0; i
< data_items
; i
++) {
11128 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11129 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11130 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11137 if (num
== TARGET_NR_capget
) {
11138 ret
= get_errno(capget(&header
, dataptr
));
11140 ret
= get_errno(capset(&header
, dataptr
));
11143 /* The kernel always updates version for both capget and capset */
11144 target_header
->version
= tswap32(header
.version
);
11145 unlock_user_struct(target_header
, arg1
, 1);
11148 if (num
== TARGET_NR_capget
) {
11149 for (i
= 0; i
< data_items
; i
++) {
11150 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11151 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11152 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11154 unlock_user(target_data
, arg2
, target_datalen
);
11156 unlock_user(target_data
, arg2
, 0);
11161 case TARGET_NR_sigaltstack
:
11162 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11164 #ifdef CONFIG_SENDFILE
11165 #ifdef TARGET_NR_sendfile
11166 case TARGET_NR_sendfile
:
11168 off_t
*offp
= NULL
;
11171 ret
= get_user_sal(off
, arg3
);
11172 if (is_error(ret
)) {
11177 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11178 if (!is_error(ret
) && arg3
) {
11179 abi_long ret2
= put_user_sal(off
, arg3
);
11180 if (is_error(ret2
)) {
11187 #ifdef TARGET_NR_sendfile64
11188 case TARGET_NR_sendfile64
:
11190 off_t
*offp
= NULL
;
11193 ret
= get_user_s64(off
, arg3
);
11194 if (is_error(ret
)) {
11199 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11200 if (!is_error(ret
) && arg3
) {
11201 abi_long ret2
= put_user_s64(off
, arg3
);
11202 if (is_error(ret2
)) {
11210 #ifdef TARGET_NR_vfork
11211 case TARGET_NR_vfork
:
11212 return get_errno(do_fork(cpu_env
,
11213 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11216 #ifdef TARGET_NR_ugetrlimit
11217 case TARGET_NR_ugetrlimit
:
11219 struct rlimit rlim
;
11220 int resource
= target_to_host_resource(arg1
);
11221 ret
= get_errno(getrlimit(resource
, &rlim
));
11222 if (!is_error(ret
)) {
11223 struct target_rlimit
*target_rlim
;
11224 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11225 return -TARGET_EFAULT
;
11226 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11227 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11228 unlock_user_struct(target_rlim
, arg2
, 1);
11233 #ifdef TARGET_NR_truncate64
11234 case TARGET_NR_truncate64
:
11235 if (!(p
= lock_user_string(arg1
)))
11236 return -TARGET_EFAULT
;
11237 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11238 unlock_user(p
, arg1
, 0);
11241 #ifdef TARGET_NR_ftruncate64
11242 case TARGET_NR_ftruncate64
:
11243 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11245 #ifdef TARGET_NR_stat64
11246 case TARGET_NR_stat64
:
11247 if (!(p
= lock_user_string(arg1
))) {
11248 return -TARGET_EFAULT
;
11250 ret
= get_errno(stat(path(p
), &st
));
11251 unlock_user(p
, arg1
, 0);
11252 if (!is_error(ret
))
11253 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11256 #ifdef TARGET_NR_lstat64
11257 case TARGET_NR_lstat64
:
11258 if (!(p
= lock_user_string(arg1
))) {
11259 return -TARGET_EFAULT
;
11261 ret
= get_errno(lstat(path(p
), &st
));
11262 unlock_user(p
, arg1
, 0);
11263 if (!is_error(ret
))
11264 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11267 #ifdef TARGET_NR_fstat64
11268 case TARGET_NR_fstat64
:
11269 ret
= get_errno(fstat(arg1
, &st
));
11270 if (!is_error(ret
))
11271 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11274 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11275 #ifdef TARGET_NR_fstatat64
11276 case TARGET_NR_fstatat64
:
11278 #ifdef TARGET_NR_newfstatat
11279 case TARGET_NR_newfstatat
:
11281 if (!(p
= lock_user_string(arg2
))) {
11282 return -TARGET_EFAULT
;
11284 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11285 unlock_user(p
, arg2
, 0);
11286 if (!is_error(ret
))
11287 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11290 #if defined(TARGET_NR_statx)
11291 case TARGET_NR_statx
:
11293 struct target_statx
*target_stx
;
11297 p
= lock_user_string(arg2
);
11299 return -TARGET_EFAULT
;
11301 #if defined(__NR_statx)
11304 * It is assumed that struct statx is architecture independent.
11306 struct target_statx host_stx
;
11309 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11310 if (!is_error(ret
)) {
11311 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11312 unlock_user(p
, arg2
, 0);
11313 return -TARGET_EFAULT
;
11317 if (ret
!= -TARGET_ENOSYS
) {
11318 unlock_user(p
, arg2
, 0);
11323 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11324 unlock_user(p
, arg2
, 0);
11326 if (!is_error(ret
)) {
11327 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11328 return -TARGET_EFAULT
;
11330 memset(target_stx
, 0, sizeof(*target_stx
));
11331 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11332 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11333 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11334 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11335 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11336 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11337 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11338 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11339 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11340 __put_user(st
.st_size
, &target_stx
->stx_size
);
11341 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11342 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11343 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11344 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11345 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11346 unlock_user_struct(target_stx
, arg5
, 1);
11351 #ifdef TARGET_NR_lchown
11352 case TARGET_NR_lchown
:
11353 if (!(p
= lock_user_string(arg1
)))
11354 return -TARGET_EFAULT
;
11355 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11356 unlock_user(p
, arg1
, 0);
11359 #ifdef TARGET_NR_getuid
11360 case TARGET_NR_getuid
:
11361 return get_errno(high2lowuid(getuid()));
11363 #ifdef TARGET_NR_getgid
11364 case TARGET_NR_getgid
:
11365 return get_errno(high2lowgid(getgid()));
11367 #ifdef TARGET_NR_geteuid
11368 case TARGET_NR_geteuid
:
11369 return get_errno(high2lowuid(geteuid()));
11371 #ifdef TARGET_NR_getegid
11372 case TARGET_NR_getegid
:
11373 return get_errno(high2lowgid(getegid()));
11375 case TARGET_NR_setreuid
:
11376 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11377 case TARGET_NR_setregid
:
11378 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11379 case TARGET_NR_getgroups
:
11381 int gidsetsize
= arg1
;
11382 target_id
*target_grouplist
;
11386 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11387 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11388 if (gidsetsize
== 0)
11390 if (!is_error(ret
)) {
11391 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11392 if (!target_grouplist
)
11393 return -TARGET_EFAULT
;
11394 for(i
= 0;i
< ret
; i
++)
11395 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11396 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11400 case TARGET_NR_setgroups
:
11402 int gidsetsize
= arg1
;
11403 target_id
*target_grouplist
;
11404 gid_t
*grouplist
= NULL
;
11407 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11408 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11409 if (!target_grouplist
) {
11410 return -TARGET_EFAULT
;
11412 for (i
= 0; i
< gidsetsize
; i
++) {
11413 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11415 unlock_user(target_grouplist
, arg2
, 0);
11417 return get_errno(setgroups(gidsetsize
, grouplist
));
11419 case TARGET_NR_fchown
:
11420 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11421 #if defined(TARGET_NR_fchownat)
11422 case TARGET_NR_fchownat
:
11423 if (!(p
= lock_user_string(arg2
)))
11424 return -TARGET_EFAULT
;
11425 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11426 low2highgid(arg4
), arg5
));
11427 unlock_user(p
, arg2
, 0);
11430 #ifdef TARGET_NR_setresuid
11431 case TARGET_NR_setresuid
:
11432 return get_errno(sys_setresuid(low2highuid(arg1
),
11434 low2highuid(arg3
)));
11436 #ifdef TARGET_NR_getresuid
11437 case TARGET_NR_getresuid
:
11439 uid_t ruid
, euid
, suid
;
11440 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11441 if (!is_error(ret
)) {
11442 if (put_user_id(high2lowuid(ruid
), arg1
)
11443 || put_user_id(high2lowuid(euid
), arg2
)
11444 || put_user_id(high2lowuid(suid
), arg3
))
11445 return -TARGET_EFAULT
;
11450 #ifdef TARGET_NR_getresgid
11451 case TARGET_NR_setresgid
:
11452 return get_errno(sys_setresgid(low2highgid(arg1
),
11454 low2highgid(arg3
)));
11456 #ifdef TARGET_NR_getresgid
11457 case TARGET_NR_getresgid
:
11459 gid_t rgid
, egid
, sgid
;
11460 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11461 if (!is_error(ret
)) {
11462 if (put_user_id(high2lowgid(rgid
), arg1
)
11463 || put_user_id(high2lowgid(egid
), arg2
)
11464 || put_user_id(high2lowgid(sgid
), arg3
))
11465 return -TARGET_EFAULT
;
11470 #ifdef TARGET_NR_chown
11471 case TARGET_NR_chown
:
11472 if (!(p
= lock_user_string(arg1
)))
11473 return -TARGET_EFAULT
;
11474 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11475 unlock_user(p
, arg1
, 0);
11478 case TARGET_NR_setuid
:
11479 return get_errno(sys_setuid(low2highuid(arg1
)));
11480 case TARGET_NR_setgid
:
11481 return get_errno(sys_setgid(low2highgid(arg1
)));
11482 case TARGET_NR_setfsuid
:
11483 return get_errno(setfsuid(arg1
));
11484 case TARGET_NR_setfsgid
:
11485 return get_errno(setfsgid(arg1
));
11487 #ifdef TARGET_NR_lchown32
11488 case TARGET_NR_lchown32
:
11489 if (!(p
= lock_user_string(arg1
)))
11490 return -TARGET_EFAULT
;
11491 ret
= get_errno(lchown(p
, arg2
, arg3
));
11492 unlock_user(p
, arg1
, 0);
11495 #ifdef TARGET_NR_getuid32
11496 case TARGET_NR_getuid32
:
11497 return get_errno(getuid());
11500 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11501 /* Alpha specific */
11502 case TARGET_NR_getxuid
:
11506 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11508 return get_errno(getuid());
11510 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11511 /* Alpha specific */
11512 case TARGET_NR_getxgid
:
11516 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11518 return get_errno(getgid());
11520 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11521 /* Alpha specific */
11522 case TARGET_NR_osf_getsysinfo
:
11523 ret
= -TARGET_EOPNOTSUPP
;
11525 case TARGET_GSI_IEEE_FP_CONTROL
:
11527 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11528 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11530 swcr
&= ~SWCR_STATUS_MASK
;
11531 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11533 if (put_user_u64 (swcr
, arg2
))
11534 return -TARGET_EFAULT
;
11539 /* case GSI_IEEE_STATE_AT_SIGNAL:
11540 -- Not implemented in linux kernel.
11542 -- Retrieves current unaligned access state; not much used.
11543 case GSI_PROC_TYPE:
11544 -- Retrieves implver information; surely not used.
11545 case GSI_GET_HWRPB:
11546 -- Grabs a copy of the HWRPB; surely not used.
11551 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11552 /* Alpha specific */
11553 case TARGET_NR_osf_setsysinfo
:
11554 ret
= -TARGET_EOPNOTSUPP
;
11556 case TARGET_SSI_IEEE_FP_CONTROL
:
11558 uint64_t swcr
, fpcr
;
11560 if (get_user_u64 (swcr
, arg2
)) {
11561 return -TARGET_EFAULT
;
11565 * The kernel calls swcr_update_status to update the
11566 * status bits from the fpcr at every point that it
11567 * could be queried. Therefore, we store the status
11568 * bits only in FPCR.
11570 ((CPUAlphaState
*)cpu_env
)->swcr
11571 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11573 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11574 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11575 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11576 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11581 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11583 uint64_t exc
, fpcr
, fex
;
11585 if (get_user_u64(exc
, arg2
)) {
11586 return -TARGET_EFAULT
;
11588 exc
&= SWCR_STATUS_MASK
;
11589 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11591 /* Old exceptions are not signaled. */
11592 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11594 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11595 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11597 /* Update the hardware fpcr. */
11598 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11599 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11602 int si_code
= TARGET_FPE_FLTUNK
;
11603 target_siginfo_t info
;
11605 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11606 si_code
= TARGET_FPE_FLTUND
;
11608 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11609 si_code
= TARGET_FPE_FLTRES
;
11611 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11612 si_code
= TARGET_FPE_FLTUND
;
11614 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11615 si_code
= TARGET_FPE_FLTOVF
;
11617 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11618 si_code
= TARGET_FPE_FLTDIV
;
11620 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11621 si_code
= TARGET_FPE_FLTINV
;
11624 info
.si_signo
= SIGFPE
;
11626 info
.si_code
= si_code
;
11627 info
._sifields
._sigfault
._addr
11628 = ((CPUArchState
*)cpu_env
)->pc
;
11629 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11630 QEMU_SI_FAULT
, &info
);
11636 /* case SSI_NVPAIRS:
11637 -- Used with SSIN_UACPROC to enable unaligned accesses.
11638 case SSI_IEEE_STATE_AT_SIGNAL:
11639 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11640 -- Not implemented in linux kernel
11645 #ifdef TARGET_NR_osf_sigprocmask
11646 /* Alpha specific. */
11647 case TARGET_NR_osf_sigprocmask
:
11651 sigset_t set
, oldset
;
11654 case TARGET_SIG_BLOCK
:
11657 case TARGET_SIG_UNBLOCK
:
11660 case TARGET_SIG_SETMASK
:
11664 return -TARGET_EINVAL
;
11667 target_to_host_old_sigset(&set
, &mask
);
11668 ret
= do_sigprocmask(how
, &set
, &oldset
);
11670 host_to_target_old_sigset(&mask
, &oldset
);
11677 #ifdef TARGET_NR_getgid32
11678 case TARGET_NR_getgid32
:
11679 return get_errno(getgid());
11681 #ifdef TARGET_NR_geteuid32
11682 case TARGET_NR_geteuid32
:
11683 return get_errno(geteuid());
11685 #ifdef TARGET_NR_getegid32
11686 case TARGET_NR_getegid32
:
11687 return get_errno(getegid());
11689 #ifdef TARGET_NR_setreuid32
11690 case TARGET_NR_setreuid32
:
11691 return get_errno(setreuid(arg1
, arg2
));
11693 #ifdef TARGET_NR_setregid32
11694 case TARGET_NR_setregid32
:
11695 return get_errno(setregid(arg1
, arg2
));
11697 #ifdef TARGET_NR_getgroups32
11698 case TARGET_NR_getgroups32
:
11700 int gidsetsize
= arg1
;
11701 uint32_t *target_grouplist
;
11705 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11706 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11707 if (gidsetsize
== 0)
11709 if (!is_error(ret
)) {
11710 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11711 if (!target_grouplist
) {
11712 return -TARGET_EFAULT
;
11714 for(i
= 0;i
< ret
; i
++)
11715 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11716 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11721 #ifdef TARGET_NR_setgroups32
11722 case TARGET_NR_setgroups32
:
11724 int gidsetsize
= arg1
;
11725 uint32_t *target_grouplist
;
11729 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11730 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11731 if (!target_grouplist
) {
11732 return -TARGET_EFAULT
;
11734 for(i
= 0;i
< gidsetsize
; i
++)
11735 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11736 unlock_user(target_grouplist
, arg2
, 0);
11737 return get_errno(setgroups(gidsetsize
, grouplist
));
11740 #ifdef TARGET_NR_fchown32
11741 case TARGET_NR_fchown32
:
11742 return get_errno(fchown(arg1
, arg2
, arg3
));
11744 #ifdef TARGET_NR_setresuid32
11745 case TARGET_NR_setresuid32
:
11746 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11748 #ifdef TARGET_NR_getresuid32
11749 case TARGET_NR_getresuid32
:
11751 uid_t ruid
, euid
, suid
;
11752 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11753 if (!is_error(ret
)) {
11754 if (put_user_u32(ruid
, arg1
)
11755 || put_user_u32(euid
, arg2
)
11756 || put_user_u32(suid
, arg3
))
11757 return -TARGET_EFAULT
;
11762 #ifdef TARGET_NR_setresgid32
11763 case TARGET_NR_setresgid32
:
11764 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11766 #ifdef TARGET_NR_getresgid32
11767 case TARGET_NR_getresgid32
:
11769 gid_t rgid
, egid
, sgid
;
11770 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11771 if (!is_error(ret
)) {
11772 if (put_user_u32(rgid
, arg1
)
11773 || put_user_u32(egid
, arg2
)
11774 || put_user_u32(sgid
, arg3
))
11775 return -TARGET_EFAULT
;
11780 #ifdef TARGET_NR_chown32
11781 case TARGET_NR_chown32
:
11782 if (!(p
= lock_user_string(arg1
)))
11783 return -TARGET_EFAULT
;
11784 ret
= get_errno(chown(p
, arg2
, arg3
));
11785 unlock_user(p
, arg1
, 0);
11788 #ifdef TARGET_NR_setuid32
11789 case TARGET_NR_setuid32
:
11790 return get_errno(sys_setuid(arg1
));
11792 #ifdef TARGET_NR_setgid32
11793 case TARGET_NR_setgid32
:
11794 return get_errno(sys_setgid(arg1
));
11796 #ifdef TARGET_NR_setfsuid32
11797 case TARGET_NR_setfsuid32
:
11798 return get_errno(setfsuid(arg1
));
11800 #ifdef TARGET_NR_setfsgid32
11801 case TARGET_NR_setfsgid32
:
11802 return get_errno(setfsgid(arg1
));
11804 #ifdef TARGET_NR_mincore
11805 case TARGET_NR_mincore
:
11807 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11809 return -TARGET_ENOMEM
;
11811 p
= lock_user_string(arg3
);
11813 ret
= -TARGET_EFAULT
;
11815 ret
= get_errno(mincore(a
, arg2
, p
));
11816 unlock_user(p
, arg3
, ret
);
11818 unlock_user(a
, arg1
, 0);
11822 #ifdef TARGET_NR_arm_fadvise64_64
11823 case TARGET_NR_arm_fadvise64_64
:
11824 /* arm_fadvise64_64 looks like fadvise64_64 but
11825 * with different argument order: fd, advice, offset, len
11826 * rather than the usual fd, offset, len, advice.
11827 * Note that offset and len are both 64-bit so appear as
11828 * pairs of 32-bit registers.
11830 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11831 target_offset64(arg5
, arg6
), arg2
);
11832 return -host_to_target_errno(ret
);
11835 #if TARGET_ABI_BITS == 32
11837 #ifdef TARGET_NR_fadvise64_64
11838 case TARGET_NR_fadvise64_64
:
11839 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11840 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11848 /* 6 args: fd, offset (high, low), len (high, low), advice */
11849 if (regpairs_aligned(cpu_env
, num
)) {
11850 /* offset is in (3,4), len in (5,6) and advice in 7 */
11858 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11859 target_offset64(arg4
, arg5
), arg6
);
11860 return -host_to_target_errno(ret
);
11863 #ifdef TARGET_NR_fadvise64
11864 case TARGET_NR_fadvise64
:
11865 /* 5 args: fd, offset (high, low), len, advice */
11866 if (regpairs_aligned(cpu_env
, num
)) {
11867 /* offset is in (3,4), len in 5 and advice in 6 */
11873 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11874 return -host_to_target_errno(ret
);
11877 #else /* not a 32-bit ABI */
11878 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11879 #ifdef TARGET_NR_fadvise64_64
11880 case TARGET_NR_fadvise64_64
:
11882 #ifdef TARGET_NR_fadvise64
11883 case TARGET_NR_fadvise64
:
11885 #ifdef TARGET_S390X
11887 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11888 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11889 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11890 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11894 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11896 #endif /* end of 64-bit ABI fadvise handling */
11898 #ifdef TARGET_NR_madvise
11899 case TARGET_NR_madvise
:
11900 /* A straight passthrough may not be safe because qemu sometimes
11901 turns private file-backed mappings into anonymous mappings.
11902 This will break MADV_DONTNEED.
11903 This is a hint, so ignoring and returning success is ok. */
11906 #ifdef TARGET_NR_fcntl64
11907 case TARGET_NR_fcntl64
:
11911 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11912 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11915 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11916 copyfrom
= copy_from_user_oabi_flock64
;
11917 copyto
= copy_to_user_oabi_flock64
;
11921 cmd
= target_to_host_fcntl_cmd(arg2
);
11922 if (cmd
== -TARGET_EINVAL
) {
11927 case TARGET_F_GETLK64
:
11928 ret
= copyfrom(&fl
, arg3
);
11932 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11934 ret
= copyto(arg3
, &fl
);
11938 case TARGET_F_SETLK64
:
11939 case TARGET_F_SETLKW64
:
11940 ret
= copyfrom(&fl
, arg3
);
11944 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11947 ret
= do_fcntl(arg1
, arg2
, arg3
);
11953 #ifdef TARGET_NR_cacheflush
11954 case TARGET_NR_cacheflush
:
11955 /* self-modifying code is handled automatically, so nothing needed */
11958 #ifdef TARGET_NR_getpagesize
11959 case TARGET_NR_getpagesize
:
11960 return TARGET_PAGE_SIZE
;
11962 case TARGET_NR_gettid
:
11963 return get_errno(sys_gettid());
11964 #ifdef TARGET_NR_readahead
11965 case TARGET_NR_readahead
:
11966 #if TARGET_ABI_BITS == 32
11967 if (regpairs_aligned(cpu_env
, num
)) {
11972 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11974 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11979 #ifdef TARGET_NR_setxattr
11980 case TARGET_NR_listxattr
:
11981 case TARGET_NR_llistxattr
:
11985 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11987 return -TARGET_EFAULT
;
11990 p
= lock_user_string(arg1
);
11992 if (num
== TARGET_NR_listxattr
) {
11993 ret
= get_errno(listxattr(p
, b
, arg3
));
11995 ret
= get_errno(llistxattr(p
, b
, arg3
));
11998 ret
= -TARGET_EFAULT
;
12000 unlock_user(p
, arg1
, 0);
12001 unlock_user(b
, arg2
, arg3
);
12004 case TARGET_NR_flistxattr
:
12008 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
12010 return -TARGET_EFAULT
;
12013 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
12014 unlock_user(b
, arg2
, arg3
);
12017 case TARGET_NR_setxattr
:
12018 case TARGET_NR_lsetxattr
:
12020 void *p
, *n
, *v
= 0;
12022 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12024 return -TARGET_EFAULT
;
12027 p
= lock_user_string(arg1
);
12028 n
= lock_user_string(arg2
);
12030 if (num
== TARGET_NR_setxattr
) {
12031 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
12033 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
12036 ret
= -TARGET_EFAULT
;
12038 unlock_user(p
, arg1
, 0);
12039 unlock_user(n
, arg2
, 0);
12040 unlock_user(v
, arg3
, 0);
12043 case TARGET_NR_fsetxattr
:
12047 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12049 return -TARGET_EFAULT
;
12052 n
= lock_user_string(arg2
);
12054 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
12056 ret
= -TARGET_EFAULT
;
12058 unlock_user(n
, arg2
, 0);
12059 unlock_user(v
, arg3
, 0);
12062 case TARGET_NR_getxattr
:
12063 case TARGET_NR_lgetxattr
:
12065 void *p
, *n
, *v
= 0;
12067 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12069 return -TARGET_EFAULT
;
12072 p
= lock_user_string(arg1
);
12073 n
= lock_user_string(arg2
);
12075 if (num
== TARGET_NR_getxattr
) {
12076 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12078 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12081 ret
= -TARGET_EFAULT
;
12083 unlock_user(p
, arg1
, 0);
12084 unlock_user(n
, arg2
, 0);
12085 unlock_user(v
, arg3
, arg4
);
12088 case TARGET_NR_fgetxattr
:
12092 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12094 return -TARGET_EFAULT
;
12097 n
= lock_user_string(arg2
);
12099 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12101 ret
= -TARGET_EFAULT
;
12103 unlock_user(n
, arg2
, 0);
12104 unlock_user(v
, arg3
, arg4
);
12107 case TARGET_NR_removexattr
:
12108 case TARGET_NR_lremovexattr
:
12111 p
= lock_user_string(arg1
);
12112 n
= lock_user_string(arg2
);
12114 if (num
== TARGET_NR_removexattr
) {
12115 ret
= get_errno(removexattr(p
, n
));
12117 ret
= get_errno(lremovexattr(p
, n
));
12120 ret
= -TARGET_EFAULT
;
12122 unlock_user(p
, arg1
, 0);
12123 unlock_user(n
, arg2
, 0);
12126 case TARGET_NR_fremovexattr
:
12129 n
= lock_user_string(arg2
);
12131 ret
= get_errno(fremovexattr(arg1
, n
));
12133 ret
= -TARGET_EFAULT
;
12135 unlock_user(n
, arg2
, 0);
12139 #endif /* CONFIG_ATTR */
12140 #ifdef TARGET_NR_set_thread_area
12141 case TARGET_NR_set_thread_area
:
12142 #if defined(TARGET_MIPS)
12143 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12145 #elif defined(TARGET_CRIS)
12147 ret
= -TARGET_EINVAL
;
12149 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12153 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12154 return do_set_thread_area(cpu_env
, arg1
);
12155 #elif defined(TARGET_M68K)
12157 TaskState
*ts
= cpu
->opaque
;
12158 ts
->tp_value
= arg1
;
12162 return -TARGET_ENOSYS
;
12165 #ifdef TARGET_NR_get_thread_area
12166 case TARGET_NR_get_thread_area
:
12167 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12168 return do_get_thread_area(cpu_env
, arg1
);
12169 #elif defined(TARGET_M68K)
12171 TaskState
*ts
= cpu
->opaque
;
12172 return ts
->tp_value
;
12175 return -TARGET_ENOSYS
;
12178 #ifdef TARGET_NR_getdomainname
12179 case TARGET_NR_getdomainname
:
12180 return -TARGET_ENOSYS
;
12183 #ifdef TARGET_NR_clock_settime
12184 case TARGET_NR_clock_settime
:
12186 struct timespec ts
;
12188 ret
= target_to_host_timespec(&ts
, arg2
);
12189 if (!is_error(ret
)) {
12190 ret
= get_errno(clock_settime(arg1
, &ts
));
12195 #ifdef TARGET_NR_clock_settime64
12196 case TARGET_NR_clock_settime64
:
12198 struct timespec ts
;
12200 ret
= target_to_host_timespec64(&ts
, arg2
);
12201 if (!is_error(ret
)) {
12202 ret
= get_errno(clock_settime(arg1
, &ts
));
12207 #ifdef TARGET_NR_clock_gettime
12208 case TARGET_NR_clock_gettime
:
12210 struct timespec ts
;
12211 ret
= get_errno(clock_gettime(arg1
, &ts
));
12212 if (!is_error(ret
)) {
12213 ret
= host_to_target_timespec(arg2
, &ts
);
12218 #ifdef TARGET_NR_clock_gettime64
12219 case TARGET_NR_clock_gettime64
:
12221 struct timespec ts
;
12222 ret
= get_errno(clock_gettime(arg1
, &ts
));
12223 if (!is_error(ret
)) {
12224 ret
= host_to_target_timespec64(arg2
, &ts
);
12229 #ifdef TARGET_NR_clock_getres
12230 case TARGET_NR_clock_getres
:
12232 struct timespec ts
;
12233 ret
= get_errno(clock_getres(arg1
, &ts
));
12234 if (!is_error(ret
)) {
12235 host_to_target_timespec(arg2
, &ts
);
12240 #ifdef TARGET_NR_clock_getres_time64
12241 case TARGET_NR_clock_getres_time64
:
12243 struct timespec ts
;
12244 ret
= get_errno(clock_getres(arg1
, &ts
));
12245 if (!is_error(ret
)) {
12246 host_to_target_timespec64(arg2
, &ts
);
12251 #ifdef TARGET_NR_clock_nanosleep
12252 case TARGET_NR_clock_nanosleep
:
12254 struct timespec ts
;
12255 if (target_to_host_timespec(&ts
, arg3
)) {
12256 return -TARGET_EFAULT
;
12258 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12259 &ts
, arg4
? &ts
: NULL
));
12261 * if the call is interrupted by a signal handler, it fails
12262 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12263 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12265 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12266 host_to_target_timespec(arg4
, &ts
)) {
12267 return -TARGET_EFAULT
;
12273 #ifdef TARGET_NR_clock_nanosleep_time64
12274 case TARGET_NR_clock_nanosleep_time64
:
12276 struct timespec ts
;
12278 if (target_to_host_timespec64(&ts
, arg3
)) {
12279 return -TARGET_EFAULT
;
12282 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12283 &ts
, arg4
? &ts
: NULL
));
12285 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12286 host_to_target_timespec64(arg4
, &ts
)) {
12287 return -TARGET_EFAULT
;
12293 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12294 case TARGET_NR_set_tid_address
:
12295 return get_errno(set_tid_address((int *)g2h(cpu
, arg1
)));
12298 case TARGET_NR_tkill
:
12299 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12301 case TARGET_NR_tgkill
:
12302 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12303 target_to_host_signal(arg3
)));
12305 #ifdef TARGET_NR_set_robust_list
12306 case TARGET_NR_set_robust_list
:
12307 case TARGET_NR_get_robust_list
:
12308 /* The ABI for supporting robust futexes has userspace pass
12309 * the kernel a pointer to a linked list which is updated by
12310 * userspace after the syscall; the list is walked by the kernel
12311 * when the thread exits. Since the linked list in QEMU guest
12312 * memory isn't a valid linked list for the host and we have
12313 * no way to reliably intercept the thread-death event, we can't
12314 * support these. Silently return ENOSYS so that guest userspace
12315 * falls back to a non-robust futex implementation (which should
12316 * be OK except in the corner case of the guest crashing while
12317 * holding a mutex that is shared with another process via
12320 return -TARGET_ENOSYS
;
12323 #if defined(TARGET_NR_utimensat)
12324 case TARGET_NR_utimensat
:
12326 struct timespec
*tsp
, ts
[2];
12330 if (target_to_host_timespec(ts
, arg3
)) {
12331 return -TARGET_EFAULT
;
12333 if (target_to_host_timespec(ts
+ 1, arg3
+
12334 sizeof(struct target_timespec
))) {
12335 return -TARGET_EFAULT
;
12340 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12342 if (!(p
= lock_user_string(arg2
))) {
12343 return -TARGET_EFAULT
;
12345 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12346 unlock_user(p
, arg2
, 0);
12351 #ifdef TARGET_NR_utimensat_time64
12352 case TARGET_NR_utimensat_time64
:
12354 struct timespec
*tsp
, ts
[2];
12358 if (target_to_host_timespec64(ts
, arg3
)) {
12359 return -TARGET_EFAULT
;
12361 if (target_to_host_timespec64(ts
+ 1, arg3
+
12362 sizeof(struct target__kernel_timespec
))) {
12363 return -TARGET_EFAULT
;
12368 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12370 p
= lock_user_string(arg2
);
12372 return -TARGET_EFAULT
;
12374 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12375 unlock_user(p
, arg2
, 0);
12380 #ifdef TARGET_NR_futex
12381 case TARGET_NR_futex
:
12382 return do_futex(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12384 #ifdef TARGET_NR_futex_time64
12385 case TARGET_NR_futex_time64
:
12386 return do_futex_time64(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12388 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12389 case TARGET_NR_inotify_init
:
12390 ret
= get_errno(sys_inotify_init());
12392 fd_trans_register(ret
, &target_inotify_trans
);
12396 #ifdef CONFIG_INOTIFY1
12397 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12398 case TARGET_NR_inotify_init1
:
12399 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12400 fcntl_flags_tbl
)));
12402 fd_trans_register(ret
, &target_inotify_trans
);
12407 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12408 case TARGET_NR_inotify_add_watch
:
12409 p
= lock_user_string(arg2
);
12410 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12411 unlock_user(p
, arg2
, 0);
12414 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12415 case TARGET_NR_inotify_rm_watch
:
12416 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12419 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12420 case TARGET_NR_mq_open
:
12422 struct mq_attr posix_mq_attr
;
12423 struct mq_attr
*pposix_mq_attr
;
12426 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12427 pposix_mq_attr
= NULL
;
12429 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12430 return -TARGET_EFAULT
;
12432 pposix_mq_attr
= &posix_mq_attr
;
12434 p
= lock_user_string(arg1
- 1);
12436 return -TARGET_EFAULT
;
12438 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12439 unlock_user (p
, arg1
, 0);
12443 case TARGET_NR_mq_unlink
:
12444 p
= lock_user_string(arg1
- 1);
12446 return -TARGET_EFAULT
;
12448 ret
= get_errno(mq_unlink(p
));
12449 unlock_user (p
, arg1
, 0);
12452 #ifdef TARGET_NR_mq_timedsend
12453 case TARGET_NR_mq_timedsend
:
12455 struct timespec ts
;
12457 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12459 if (target_to_host_timespec(&ts
, arg5
)) {
12460 return -TARGET_EFAULT
;
12462 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12463 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12464 return -TARGET_EFAULT
;
12467 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12469 unlock_user (p
, arg2
, arg3
);
12473 #ifdef TARGET_NR_mq_timedsend_time64
12474 case TARGET_NR_mq_timedsend_time64
:
12476 struct timespec ts
;
12478 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12480 if (target_to_host_timespec64(&ts
, arg5
)) {
12481 return -TARGET_EFAULT
;
12483 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12484 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12485 return -TARGET_EFAULT
;
12488 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12490 unlock_user(p
, arg2
, arg3
);
12495 #ifdef TARGET_NR_mq_timedreceive
12496 case TARGET_NR_mq_timedreceive
:
12498 struct timespec ts
;
12501 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12503 if (target_to_host_timespec(&ts
, arg5
)) {
12504 return -TARGET_EFAULT
;
12506 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12508 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12509 return -TARGET_EFAULT
;
12512 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12515 unlock_user (p
, arg2
, arg3
);
12517 put_user_u32(prio
, arg4
);
12521 #ifdef TARGET_NR_mq_timedreceive_time64
12522 case TARGET_NR_mq_timedreceive_time64
:
12524 struct timespec ts
;
12527 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12529 if (target_to_host_timespec64(&ts
, arg5
)) {
12530 return -TARGET_EFAULT
;
12532 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12534 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12535 return -TARGET_EFAULT
;
12538 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12541 unlock_user(p
, arg2
, arg3
);
12543 put_user_u32(prio
, arg4
);
12549 /* Not implemented for now... */
12550 /* case TARGET_NR_mq_notify: */
12553 case TARGET_NR_mq_getsetattr
:
12555 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12558 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12559 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12560 &posix_mq_attr_out
));
12561 } else if (arg3
!= 0) {
12562 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12564 if (ret
== 0 && arg3
!= 0) {
12565 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12571 #ifdef CONFIG_SPLICE
12572 #ifdef TARGET_NR_tee
12573 case TARGET_NR_tee
:
12575 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12579 #ifdef TARGET_NR_splice
12580 case TARGET_NR_splice
:
12582 loff_t loff_in
, loff_out
;
12583 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12585 if (get_user_u64(loff_in
, arg2
)) {
12586 return -TARGET_EFAULT
;
12588 ploff_in
= &loff_in
;
12591 if (get_user_u64(loff_out
, arg4
)) {
12592 return -TARGET_EFAULT
;
12594 ploff_out
= &loff_out
;
12596 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12598 if (put_user_u64(loff_in
, arg2
)) {
12599 return -TARGET_EFAULT
;
12603 if (put_user_u64(loff_out
, arg4
)) {
12604 return -TARGET_EFAULT
;
12610 #ifdef TARGET_NR_vmsplice
12611 case TARGET_NR_vmsplice
:
12613 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12615 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12616 unlock_iovec(vec
, arg2
, arg3
, 0);
12618 ret
= -host_to_target_errno(errno
);
12623 #endif /* CONFIG_SPLICE */
12624 #ifdef CONFIG_EVENTFD
12625 #if defined(TARGET_NR_eventfd)
12626 case TARGET_NR_eventfd
:
12627 ret
= get_errno(eventfd(arg1
, 0));
12629 fd_trans_register(ret
, &target_eventfd_trans
);
12633 #if defined(TARGET_NR_eventfd2)
12634 case TARGET_NR_eventfd2
:
12636 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12637 if (arg2
& TARGET_O_NONBLOCK
) {
12638 host_flags
|= O_NONBLOCK
;
12640 if (arg2
& TARGET_O_CLOEXEC
) {
12641 host_flags
|= O_CLOEXEC
;
12643 ret
= get_errno(eventfd(arg1
, host_flags
));
12645 fd_trans_register(ret
, &target_eventfd_trans
);
12650 #endif /* CONFIG_EVENTFD */
12651 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12652 case TARGET_NR_fallocate
:
12653 #if TARGET_ABI_BITS == 32
12654 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12655 target_offset64(arg5
, arg6
)));
12657 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12661 #if defined(CONFIG_SYNC_FILE_RANGE)
12662 #if defined(TARGET_NR_sync_file_range)
12663 case TARGET_NR_sync_file_range
:
12664 #if TARGET_ABI_BITS == 32
12665 #if defined(TARGET_MIPS)
12666 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12667 target_offset64(arg5
, arg6
), arg7
));
12669 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12670 target_offset64(arg4
, arg5
), arg6
));
12671 #endif /* !TARGET_MIPS */
12673 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12677 #if defined(TARGET_NR_sync_file_range2) || \
12678 defined(TARGET_NR_arm_sync_file_range)
12679 #if defined(TARGET_NR_sync_file_range2)
12680 case TARGET_NR_sync_file_range2
:
12682 #if defined(TARGET_NR_arm_sync_file_range)
12683 case TARGET_NR_arm_sync_file_range
:
12685 /* This is like sync_file_range but the arguments are reordered */
12686 #if TARGET_ABI_BITS == 32
12687 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12688 target_offset64(arg5
, arg6
), arg2
));
12690 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12695 #if defined(TARGET_NR_signalfd4)
12696 case TARGET_NR_signalfd4
:
12697 return do_signalfd4(arg1
, arg2
, arg4
);
12699 #if defined(TARGET_NR_signalfd)
12700 case TARGET_NR_signalfd
:
12701 return do_signalfd4(arg1
, arg2
, 0);
12703 #if defined(CONFIG_EPOLL)
12704 #if defined(TARGET_NR_epoll_create)
12705 case TARGET_NR_epoll_create
:
12706 return get_errno(epoll_create(arg1
));
12708 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12709 case TARGET_NR_epoll_create1
:
12710 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12712 #if defined(TARGET_NR_epoll_ctl)
12713 case TARGET_NR_epoll_ctl
:
12715 struct epoll_event ep
;
12716 struct epoll_event
*epp
= 0;
12718 if (arg2
!= EPOLL_CTL_DEL
) {
12719 struct target_epoll_event
*target_ep
;
12720 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12721 return -TARGET_EFAULT
;
12723 ep
.events
= tswap32(target_ep
->events
);
12725 * The epoll_data_t union is just opaque data to the kernel,
12726 * so we transfer all 64 bits across and need not worry what
12727 * actual data type it is.
12729 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12730 unlock_user_struct(target_ep
, arg4
, 0);
12733 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12734 * non-null pointer, even though this argument is ignored.
12739 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12743 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12744 #if defined(TARGET_NR_epoll_wait)
12745 case TARGET_NR_epoll_wait
:
12747 #if defined(TARGET_NR_epoll_pwait)
12748 case TARGET_NR_epoll_pwait
:
12751 struct target_epoll_event
*target_ep
;
12752 struct epoll_event
*ep
;
12754 int maxevents
= arg3
;
12755 int timeout
= arg4
;
12757 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12758 return -TARGET_EINVAL
;
12761 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12762 maxevents
* sizeof(struct target_epoll_event
), 1);
12764 return -TARGET_EFAULT
;
12767 ep
= g_try_new(struct epoll_event
, maxevents
);
12769 unlock_user(target_ep
, arg2
, 0);
12770 return -TARGET_ENOMEM
;
12774 #if defined(TARGET_NR_epoll_pwait)
12775 case TARGET_NR_epoll_pwait
:
12777 target_sigset_t
*target_set
;
12778 sigset_t _set
, *set
= &_set
;
12781 if (arg6
!= sizeof(target_sigset_t
)) {
12782 ret
= -TARGET_EINVAL
;
12786 target_set
= lock_user(VERIFY_READ
, arg5
,
12787 sizeof(target_sigset_t
), 1);
12789 ret
= -TARGET_EFAULT
;
12792 target_to_host_sigset(set
, target_set
);
12793 unlock_user(target_set
, arg5
, 0);
12798 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12799 set
, SIGSET_T_SIZE
));
12803 #if defined(TARGET_NR_epoll_wait)
12804 case TARGET_NR_epoll_wait
:
12805 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12810 ret
= -TARGET_ENOSYS
;
12812 if (!is_error(ret
)) {
12814 for (i
= 0; i
< ret
; i
++) {
12815 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12816 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12818 unlock_user(target_ep
, arg2
,
12819 ret
* sizeof(struct target_epoll_event
));
12821 unlock_user(target_ep
, arg2
, 0);
12828 #ifdef TARGET_NR_prlimit64
12829 case TARGET_NR_prlimit64
:
12831 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12832 struct target_rlimit64
*target_rnew
, *target_rold
;
12833 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12834 int resource
= target_to_host_resource(arg2
);
12836 if (arg3
&& (resource
!= RLIMIT_AS
&&
12837 resource
!= RLIMIT_DATA
&&
12838 resource
!= RLIMIT_STACK
)) {
12839 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12840 return -TARGET_EFAULT
;
12842 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12843 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12844 unlock_user_struct(target_rnew
, arg3
, 0);
12848 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12849 if (!is_error(ret
) && arg4
) {
12850 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12851 return -TARGET_EFAULT
;
12853 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12854 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12855 unlock_user_struct(target_rold
, arg4
, 1);
12860 #ifdef TARGET_NR_gethostname
12861 case TARGET_NR_gethostname
:
12863 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12865 ret
= get_errno(gethostname(name
, arg2
));
12866 unlock_user(name
, arg1
, arg2
);
12868 ret
= -TARGET_EFAULT
;
12873 #ifdef TARGET_NR_atomic_cmpxchg_32
12874 case TARGET_NR_atomic_cmpxchg_32
:
12876 /* should use start_exclusive from main.c */
12877 abi_ulong mem_value
;
12878 if (get_user_u32(mem_value
, arg6
)) {
12879 target_siginfo_t info
;
12880 info
.si_signo
= SIGSEGV
;
12882 info
.si_code
= TARGET_SEGV_MAPERR
;
12883 info
._sifields
._sigfault
._addr
= arg6
;
12884 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12885 QEMU_SI_FAULT
, &info
);
12889 if (mem_value
== arg2
)
12890 put_user_u32(arg1
, arg6
);
12894 #ifdef TARGET_NR_atomic_barrier
12895 case TARGET_NR_atomic_barrier
:
12896 /* Like the kernel implementation and the
12897 qemu arm barrier, no-op this? */
12901 #ifdef TARGET_NR_timer_create
12902 case TARGET_NR_timer_create
:
12904 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12906 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12909 int timer_index
= next_free_host_timer();
12911 if (timer_index
< 0) {
12912 ret
= -TARGET_EAGAIN
;
12914 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12917 phost_sevp
= &host_sevp
;
12918 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12924 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12928 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12929 return -TARGET_EFAULT
;
12937 #ifdef TARGET_NR_timer_settime
12938 case TARGET_NR_timer_settime
:
12940 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12941 * struct itimerspec * old_value */
12942 target_timer_t timerid
= get_timer_id(arg1
);
12946 } else if (arg3
== 0) {
12947 ret
= -TARGET_EINVAL
;
12949 timer_t htimer
= g_posix_timers
[timerid
];
12950 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12952 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12953 return -TARGET_EFAULT
;
12956 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12957 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12958 return -TARGET_EFAULT
;
12965 #ifdef TARGET_NR_timer_settime64
12966 case TARGET_NR_timer_settime64
:
12968 target_timer_t timerid
= get_timer_id(arg1
);
12972 } else if (arg3
== 0) {
12973 ret
= -TARGET_EINVAL
;
12975 timer_t htimer
= g_posix_timers
[timerid
];
12976 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12978 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12979 return -TARGET_EFAULT
;
12982 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12983 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12984 return -TARGET_EFAULT
;
12991 #ifdef TARGET_NR_timer_gettime
12992 case TARGET_NR_timer_gettime
:
12994 /* args: timer_t timerid, struct itimerspec *curr_value */
12995 target_timer_t timerid
= get_timer_id(arg1
);
12999 } else if (!arg2
) {
13000 ret
= -TARGET_EFAULT
;
13002 timer_t htimer
= g_posix_timers
[timerid
];
13003 struct itimerspec hspec
;
13004 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13006 if (host_to_target_itimerspec(arg2
, &hspec
)) {
13007 ret
= -TARGET_EFAULT
;
13014 #ifdef TARGET_NR_timer_gettime64
13015 case TARGET_NR_timer_gettime64
:
13017 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13018 target_timer_t timerid
= get_timer_id(arg1
);
13022 } else if (!arg2
) {
13023 ret
= -TARGET_EFAULT
;
13025 timer_t htimer
= g_posix_timers
[timerid
];
13026 struct itimerspec hspec
;
13027 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13029 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
13030 ret
= -TARGET_EFAULT
;
13037 #ifdef TARGET_NR_timer_getoverrun
13038 case TARGET_NR_timer_getoverrun
:
13040 /* args: timer_t timerid */
13041 target_timer_t timerid
= get_timer_id(arg1
);
13046 timer_t htimer
= g_posix_timers
[timerid
];
13047 ret
= get_errno(timer_getoverrun(htimer
));
13053 #ifdef TARGET_NR_timer_delete
13054 case TARGET_NR_timer_delete
:
13056 /* args: timer_t timerid */
13057 target_timer_t timerid
= get_timer_id(arg1
);
13062 timer_t htimer
= g_posix_timers
[timerid
];
13063 ret
= get_errno(timer_delete(htimer
));
13064 g_posix_timers
[timerid
] = 0;
13070 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13071 case TARGET_NR_timerfd_create
:
13072 return get_errno(timerfd_create(arg1
,
13073 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
13076 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13077 case TARGET_NR_timerfd_gettime
:
13079 struct itimerspec its_curr
;
13081 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13083 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13084 return -TARGET_EFAULT
;
13090 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13091 case TARGET_NR_timerfd_gettime64
:
13093 struct itimerspec its_curr
;
13095 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13097 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13098 return -TARGET_EFAULT
;
13104 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13105 case TARGET_NR_timerfd_settime
:
13107 struct itimerspec its_new
, its_old
, *p_new
;
13110 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13111 return -TARGET_EFAULT
;
13118 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13120 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13121 return -TARGET_EFAULT
;
13127 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13128 case TARGET_NR_timerfd_settime64
:
13130 struct itimerspec its_new
, its_old
, *p_new
;
13133 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13134 return -TARGET_EFAULT
;
13141 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13143 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13144 return -TARGET_EFAULT
;
13150 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13151 case TARGET_NR_ioprio_get
:
13152 return get_errno(ioprio_get(arg1
, arg2
));
13155 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13156 case TARGET_NR_ioprio_set
:
13157 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13160 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13161 case TARGET_NR_setns
:
13162 return get_errno(setns(arg1
, arg2
));
13164 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13165 case TARGET_NR_unshare
:
13166 return get_errno(unshare(arg1
));
13168 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13169 case TARGET_NR_kcmp
:
13170 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13172 #ifdef TARGET_NR_swapcontext
13173 case TARGET_NR_swapcontext
:
13174 /* PowerPC specific. */
13175 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13177 #ifdef TARGET_NR_memfd_create
13178 case TARGET_NR_memfd_create
:
13179 p
= lock_user_string(arg1
);
13181 return -TARGET_EFAULT
;
13183 ret
= get_errno(memfd_create(p
, arg2
));
13184 fd_trans_unregister(ret
);
13185 unlock_user(p
, arg1
, 0);
13188 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13189 case TARGET_NR_membarrier
:
13190 return get_errno(membarrier(arg1
, arg2
));
13193 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13194 case TARGET_NR_copy_file_range
:
13196 loff_t inoff
, outoff
;
13197 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13200 if (get_user_u64(inoff
, arg2
)) {
13201 return -TARGET_EFAULT
;
13206 if (get_user_u64(outoff
, arg4
)) {
13207 return -TARGET_EFAULT
;
13211 /* Do not sign-extend the count parameter. */
13212 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13213 (abi_ulong
)arg5
, arg6
));
13214 if (!is_error(ret
) && ret
> 0) {
13216 if (put_user_u64(inoff
, arg2
)) {
13217 return -TARGET_EFAULT
;
13221 if (put_user_u64(outoff
, arg4
)) {
13222 return -TARGET_EFAULT
;
13230 #if defined(TARGET_NR_pivot_root)
13231 case TARGET_NR_pivot_root
:
13234 p
= lock_user_string(arg1
); /* new_root */
13235 p2
= lock_user_string(arg2
); /* put_old */
13237 ret
= -TARGET_EFAULT
;
13239 ret
= get_errno(pivot_root(p
, p2
));
13241 unlock_user(p2
, arg2
, 0);
13242 unlock_user(p
, arg1
, 0);
13248 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13249 return -TARGET_ENOSYS
;
13254 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13255 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13256 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13259 CPUState
*cpu
= env_cpu(cpu_env
);
13262 #ifdef DEBUG_ERESTARTSYS
13263 /* Debug-only code for exercising the syscall-restart code paths
13264 * in the per-architecture cpu main loops: restart every syscall
13265 * the guest makes once before letting it through.
13271 return -TARGET_ERESTARTSYS
;
13276 record_syscall_start(cpu
, num
, arg1
,
13277 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13279 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13280 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13283 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13284 arg5
, arg6
, arg7
, arg8
);
13286 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13287 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13288 arg3
, arg4
, arg5
, arg6
);
13291 record_syscall_return(cpu
, num
, ret
);