/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
116 #include <libdrm/drm.h>
118 #include "linux_loop.h"
122 #include "qemu/guest-random.h"
123 #include "qemu/selfmap.h"
124 #include "user/syscall-trace.h"
125 #include "qapi/error.h"
126 #include "fd-trans.h"
130 #define CLONE_IO 0x80000000 /* Clone io context */
133 /* We can't directly call the host clone syscall, because this will
134 * badly confuse libc (breaking mutexes, for example). So we must
135 * divide clone flags into:
136 * * flag combinations that look like pthread_create()
137 * * flag combinations that look like fork()
138 * * flags we can implement within QEMU itself
139 * * flags we can't support and will return an error for
141 /* For thread creation, all these flags must be present; for
142 * fork, none must be present.
144 #define CLONE_THREAD_FLAGS \
145 (CLONE_VM | CLONE_FS | CLONE_FILES | \
146 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
148 /* These flags are ignored:
149 * CLONE_DETACHED is now ignored by the kernel;
150 * CLONE_IO is just an optimisation hint to the I/O scheduler
152 #define CLONE_IGNORED_FLAGS \
153 (CLONE_DETACHED | CLONE_IO)
155 /* Flags for fork which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_FORK_FLAGS \
157 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
158 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
160 /* Flags for thread creation which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_THREAD_FLAGS \
162 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
163 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
165 #define CLONE_INVALID_FORK_FLAGS \
166 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
168 #define CLONE_INVALID_THREAD_FLAGS \
169 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
170 CLONE_IGNORED_FLAGS))
172 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
173 * have almost all been allocated. We cannot support any of
174 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
175 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
176 * The checks against the invalid thread masks above will catch these.
177 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
180 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
181 * once. This exercises the codepaths for restart.
183 //#define DEBUG_ERESTARTSYS
185 //#include <linux/msdos_fs.h>
186 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
187 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
197 #define _syscall0(type,name) \
198 static type name (void) \
200 return syscall(__NR_##name); \
203 #define _syscall1(type,name,type1,arg1) \
204 static type name (type1 arg1) \
206 return syscall(__NR_##name, arg1); \
209 #define _syscall2(type,name,type1,arg1,type2,arg2) \
210 static type name (type1 arg1,type2 arg2) \
212 return syscall(__NR_##name, arg1, arg2); \
215 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
216 static type name (type1 arg1,type2 arg2,type3 arg3) \
218 return syscall(__NR_##name, arg1, arg2, arg3); \
221 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
224 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
227 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
231 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
235 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
236 type5,arg5,type6,arg6) \
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
240 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
244 #define __NR_sys_uname __NR_uname
245 #define __NR_sys_getcwd1 __NR_getcwd
246 #define __NR_sys_getdents __NR_getdents
247 #define __NR_sys_getdents64 __NR_getdents64
248 #define __NR_sys_getpriority __NR_getpriority
249 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
250 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
251 #define __NR_sys_syslog __NR_syslog
252 #if defined(__NR_futex)
253 # define __NR_sys_futex __NR_futex
255 #if defined(__NR_futex_time64)
256 # define __NR_sys_futex_time64 __NR_futex_time64
258 #define __NR_sys_inotify_init __NR_inotify_init
259 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
260 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
261 #define __NR_sys_statx __NR_statx
263 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
264 #define __NR__llseek __NR_lseek
267 /* Newer kernel ports have llseek() instead of _llseek() */
268 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
269 #define TARGET_NR__llseek TARGET_NR_llseek
272 #define __NR_sys_gettid __NR_gettid
273 _syscall0(int, sys_gettid
)
275 /* For the 64-bit guest on 32-bit host case we must emulate
276 * getdents using getdents64, because otherwise the host
277 * might hand us back more dirent records than we can fit
278 * into the guest buffer after structure format conversion.
279 * Otherwise we emulate getdents with getdents if the host has it.
281 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
282 #define EMULATE_GETDENTS_WITH_GETDENTS
285 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
286 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
288 #if (defined(TARGET_NR_getdents) && \
289 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
290 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
291 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
293 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
294 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
295 loff_t
*, res
, uint
, wh
);
297 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
298 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
300 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
301 #ifdef __NR_exit_group
302 _syscall1(int,exit_group
,int,error_code
)
304 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
305 _syscall1(int,set_tid_address
,int *,tidptr
)
307 #if defined(__NR_futex)
308 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
309 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
311 #if defined(__NR_futex_time64)
312 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
313 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
315 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
316 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
317 unsigned long *, user_mask_ptr
);
318 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
319 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
320 unsigned long *, user_mask_ptr
);
321 #define __NR_sys_getcpu __NR_getcpu
322 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
323 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
325 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
326 struct __user_cap_data_struct
*, data
);
327 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
328 struct __user_cap_data_struct
*, data
);
329 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
330 _syscall2(int, ioprio_get
, int, which
, int, who
)
332 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
333 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
335 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
336 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
339 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
340 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
341 unsigned long, idx1
, unsigned long, idx2
)
345 * It is assumed that struct statx is architecture independent.
347 #if defined(TARGET_NR_statx) && defined(__NR_statx)
348 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
349 unsigned int, mask
, struct target_statx
*, statxbuf
)
351 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
352 _syscall2(int, membarrier
, int, cmd
, int, flags
)
355 static bitmask_transtbl fcntl_flags_tbl
[] = {
356 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
357 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
358 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
359 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
360 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
361 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
362 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
363 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
364 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
365 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
366 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
367 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
368 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
369 #if defined(O_DIRECT)
370 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
372 #if defined(O_NOATIME)
373 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
375 #if defined(O_CLOEXEC)
376 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
379 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
381 #if defined(O_TMPFILE)
382 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
384 /* Don't terminate the list prematurely on 64-bit host+guest. */
385 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
386 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
391 static int sys_getcwd1(char *buf
, size_t size
)
393 if (getcwd(buf
, size
) == NULL
) {
394 /* getcwd() sets errno */
397 return strlen(buf
)+1;
400 #ifdef TARGET_NR_utimensat
401 #if defined(__NR_utimensat)
402 #define __NR_sys_utimensat __NR_utimensat
403 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
404 const struct timespec
*,tsp
,int,flags
)
406 static int sys_utimensat(int dirfd
, const char *pathname
,
407 const struct timespec times
[2], int flags
)
413 #endif /* TARGET_NR_utimensat */
415 #ifdef TARGET_NR_renameat2
416 #if defined(__NR_renameat2)
417 #define __NR_sys_renameat2 __NR_renameat2
418 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
419 const char *, new, unsigned int, flags
)
421 static int sys_renameat2(int oldfd
, const char *old
,
422 int newfd
, const char *new, int flags
)
425 return renameat(oldfd
, old
, newfd
, new);
431 #endif /* TARGET_NR_renameat2 */
433 #ifdef CONFIG_INOTIFY
434 #include <sys/inotify.h>
436 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
437 static int sys_inotify_init(void)
439 return (inotify_init());
442 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
443 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
445 return (inotify_add_watch(fd
, pathname
, mask
));
448 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
449 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
451 return (inotify_rm_watch(fd
, wd
));
454 #ifdef CONFIG_INOTIFY1
455 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
456 static int sys_inotify_init1(int flags
)
458 return (inotify_init1(flags
));
463 /* Userspace can usually survive runtime without inotify */
464 #undef TARGET_NR_inotify_init
465 #undef TARGET_NR_inotify_init1
466 #undef TARGET_NR_inotify_add_watch
467 #undef TARGET_NR_inotify_rm_watch
468 #endif /* CONFIG_INOTIFY */
470 #if defined(TARGET_NR_prlimit64)
471 #ifndef __NR_prlimit64
472 # define __NR_prlimit64 -1
474 #define __NR_sys_prlimit64 __NR_prlimit64
475 /* The glibc rlimit structure may not be that used by the underlying syscall */
476 struct host_rlimit64
{
480 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
481 const struct host_rlimit64
*, new_limit
,
482 struct host_rlimit64
*, old_limit
)
486 #if defined(TARGET_NR_timer_create)
487 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
488 static timer_t g_posix_timers
[32] = { 0, } ;
490 static inline int next_free_host_timer(void)
493 /* FIXME: Does finding the next free slot require a lock? */
494 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
495 if (g_posix_timers
[k
] == 0) {
496 g_posix_timers
[k
] = (timer_t
) 1;
504 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
506 static inline int regpairs_aligned(void *cpu_env
, int num
)
508 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
510 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
511 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
512 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
516 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
517 #elif defined(TARGET_SH4)
518 /* SH4 doesn't align register pairs, except for p{read,write}64 */
519 static inline int regpairs_aligned(void *cpu_env
, int num
)
522 case TARGET_NR_pread64
:
523 case TARGET_NR_pwrite64
:
530 #elif defined(TARGET_XTENSA)
531 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
533 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
536 #define ERRNO_TABLE_SIZE 1200
538 /* target_to_host_errno_table[] is initialized from
539 * host_to_target_errno_table[] in syscall_init(). */
540 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
544 * This list is the union of errno values overridden in asm-<arch>/errno.h
545 * minus the errnos that are not actually generic to all archs.
547 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
548 [EAGAIN
] = TARGET_EAGAIN
,
549 [EIDRM
] = TARGET_EIDRM
,
550 [ECHRNG
] = TARGET_ECHRNG
,
551 [EL2NSYNC
] = TARGET_EL2NSYNC
,
552 [EL3HLT
] = TARGET_EL3HLT
,
553 [EL3RST
] = TARGET_EL3RST
,
554 [ELNRNG
] = TARGET_ELNRNG
,
555 [EUNATCH
] = TARGET_EUNATCH
,
556 [ENOCSI
] = TARGET_ENOCSI
,
557 [EL2HLT
] = TARGET_EL2HLT
,
558 [EDEADLK
] = TARGET_EDEADLK
,
559 [ENOLCK
] = TARGET_ENOLCK
,
560 [EBADE
] = TARGET_EBADE
,
561 [EBADR
] = TARGET_EBADR
,
562 [EXFULL
] = TARGET_EXFULL
,
563 [ENOANO
] = TARGET_ENOANO
,
564 [EBADRQC
] = TARGET_EBADRQC
,
565 [EBADSLT
] = TARGET_EBADSLT
,
566 [EBFONT
] = TARGET_EBFONT
,
567 [ENOSTR
] = TARGET_ENOSTR
,
568 [ENODATA
] = TARGET_ENODATA
,
569 [ETIME
] = TARGET_ETIME
,
570 [ENOSR
] = TARGET_ENOSR
,
571 [ENONET
] = TARGET_ENONET
,
572 [ENOPKG
] = TARGET_ENOPKG
,
573 [EREMOTE
] = TARGET_EREMOTE
,
574 [ENOLINK
] = TARGET_ENOLINK
,
575 [EADV
] = TARGET_EADV
,
576 [ESRMNT
] = TARGET_ESRMNT
,
577 [ECOMM
] = TARGET_ECOMM
,
578 [EPROTO
] = TARGET_EPROTO
,
579 [EDOTDOT
] = TARGET_EDOTDOT
,
580 [EMULTIHOP
] = TARGET_EMULTIHOP
,
581 [EBADMSG
] = TARGET_EBADMSG
,
582 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
583 [EOVERFLOW
] = TARGET_EOVERFLOW
,
584 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
585 [EBADFD
] = TARGET_EBADFD
,
586 [EREMCHG
] = TARGET_EREMCHG
,
587 [ELIBACC
] = TARGET_ELIBACC
,
588 [ELIBBAD
] = TARGET_ELIBBAD
,
589 [ELIBSCN
] = TARGET_ELIBSCN
,
590 [ELIBMAX
] = TARGET_ELIBMAX
,
591 [ELIBEXEC
] = TARGET_ELIBEXEC
,
592 [EILSEQ
] = TARGET_EILSEQ
,
593 [ENOSYS
] = TARGET_ENOSYS
,
594 [ELOOP
] = TARGET_ELOOP
,
595 [ERESTART
] = TARGET_ERESTART
,
596 [ESTRPIPE
] = TARGET_ESTRPIPE
,
597 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
598 [EUSERS
] = TARGET_EUSERS
,
599 [ENOTSOCK
] = TARGET_ENOTSOCK
,
600 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
601 [EMSGSIZE
] = TARGET_EMSGSIZE
,
602 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
603 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
604 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
605 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
606 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
607 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
608 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
609 [EADDRINUSE
] = TARGET_EADDRINUSE
,
610 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
611 [ENETDOWN
] = TARGET_ENETDOWN
,
612 [ENETUNREACH
] = TARGET_ENETUNREACH
,
613 [ENETRESET
] = TARGET_ENETRESET
,
614 [ECONNABORTED
] = TARGET_ECONNABORTED
,
615 [ECONNRESET
] = TARGET_ECONNRESET
,
616 [ENOBUFS
] = TARGET_ENOBUFS
,
617 [EISCONN
] = TARGET_EISCONN
,
618 [ENOTCONN
] = TARGET_ENOTCONN
,
619 [EUCLEAN
] = TARGET_EUCLEAN
,
620 [ENOTNAM
] = TARGET_ENOTNAM
,
621 [ENAVAIL
] = TARGET_ENAVAIL
,
622 [EISNAM
] = TARGET_EISNAM
,
623 [EREMOTEIO
] = TARGET_EREMOTEIO
,
624 [EDQUOT
] = TARGET_EDQUOT
,
625 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
626 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
627 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
628 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
629 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
630 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
631 [EALREADY
] = TARGET_EALREADY
,
632 [EINPROGRESS
] = TARGET_EINPROGRESS
,
633 [ESTALE
] = TARGET_ESTALE
,
634 [ECANCELED
] = TARGET_ECANCELED
,
635 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
636 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
638 [ENOKEY
] = TARGET_ENOKEY
,
641 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
644 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
647 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
650 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
652 #ifdef ENOTRECOVERABLE
653 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
656 [ENOMSG
] = TARGET_ENOMSG
,
659 [ERFKILL
] = TARGET_ERFKILL
,
662 [EHWPOISON
] = TARGET_EHWPOISON
,
666 static inline int host_to_target_errno(int err
)
668 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
669 host_to_target_errno_table
[err
]) {
670 return host_to_target_errno_table
[err
];
675 static inline int target_to_host_errno(int err
)
677 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
678 target_to_host_errno_table
[err
]) {
679 return target_to_host_errno_table
[err
];
684 static inline abi_long
get_errno(abi_long ret
)
687 return -host_to_target_errno(errno
);
692 const char *target_strerror(int err
)
694 if (err
== TARGET_ERESTARTSYS
) {
695 return "To be restarted";
697 if (err
== TARGET_QEMU_ESIGRETURN
) {
698 return "Successful exit from sigreturn";
701 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
704 return strerror(target_to_host_errno(err
));
707 #define safe_syscall0(type, name) \
708 static type safe_##name(void) \
710 return safe_syscall(__NR_##name); \
713 #define safe_syscall1(type, name, type1, arg1) \
714 static type safe_##name(type1 arg1) \
716 return safe_syscall(__NR_##name, arg1); \
719 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
720 static type safe_##name(type1 arg1, type2 arg2) \
722 return safe_syscall(__NR_##name, arg1, arg2); \
725 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
728 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
731 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
733 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
735 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
738 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
739 type4, arg4, type5, arg5) \
740 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
743 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
746 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
747 type4, arg4, type5, arg5, type6, arg6) \
748 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
749 type5 arg5, type6 arg6) \
751 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
754 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
755 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
756 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
757 int, flags
, mode_t
, mode
)
758 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
759 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
760 struct rusage
*, rusage
)
762 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
763 int, options
, struct rusage
*, rusage
)
764 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
765 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
766 defined(TARGET_NR_pselect6)
767 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
768 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
770 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
771 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
772 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
775 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
776 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
778 #if defined(__NR_futex)
779 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
780 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
782 #if defined(__NR_futex_time64)
783 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
784 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
786 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
787 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
788 safe_syscall2(int, tkill
, int, tid
, int, sig
)
789 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
790 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
791 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
792 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
793 unsigned long, pos_l
, unsigned long, pos_h
)
794 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
795 unsigned long, pos_l
, unsigned long, pos_h
)
796 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
798 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
799 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
800 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
801 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
802 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
803 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
804 safe_syscall2(int, flock
, int, fd
, int, operation
)
805 #ifdef TARGET_NR_rt_sigtimedwait
806 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
807 const struct timespec
*, uts
, size_t, sigsetsize
)
809 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
811 #if defined(TARGET_NR_nanosleep)
812 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
813 struct timespec
*, rem
)
815 #ifdef TARGET_NR_clock_nanosleep
816 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
817 const struct timespec
*, req
, struct timespec
*, rem
)
821 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
824 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
825 void *, ptr
, long, fifth
)
829 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
833 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
834 long, msgtype
, int, flags
)
836 #ifdef __NR_semtimedop
837 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
838 unsigned, nsops
, const struct timespec
*, timeout
)
840 #ifdef TARGET_NR_mq_timedsend
841 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
842 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
844 #ifdef TARGET_NR_mq_timedreceive
845 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
846 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
848 /* We do ioctl like this rather than via safe_syscall3 to preserve the
849 * "third argument might be integer or pointer or not present" behaviour of
852 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
853 /* Similarly for fcntl. Note that callers must always:
854 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
855 * use the flock64 struct rather than unsuffixed flock
856 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
859 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
861 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
864 static inline int host_to_target_sock_type(int host_type
)
868 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
870 target_type
= TARGET_SOCK_DGRAM
;
873 target_type
= TARGET_SOCK_STREAM
;
876 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
880 #if defined(SOCK_CLOEXEC)
881 if (host_type
& SOCK_CLOEXEC
) {
882 target_type
|= TARGET_SOCK_CLOEXEC
;
886 #if defined(SOCK_NONBLOCK)
887 if (host_type
& SOCK_NONBLOCK
) {
888 target_type
|= TARGET_SOCK_NONBLOCK
;
895 static abi_ulong target_brk
;
896 static abi_ulong target_original_brk
;
897 static abi_ulong brk_page
;
899 void target_set_brk(abi_ulong new_brk
)
901 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
902 brk_page
= HOST_PAGE_ALIGN(target_brk
);
905 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
906 #define DEBUGF_BRK(message, args...)
908 /* do_brk() must return target values and target errnos. */
909 abi_long
do_brk(abi_ulong new_brk
)
911 abi_long mapped_addr
;
912 abi_ulong new_alloc_size
;
914 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
917 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
920 if (new_brk
< target_original_brk
) {
921 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
926 /* If the new brk is less than the highest page reserved to the
927 * target heap allocation, set it and we're almost done... */
928 if (new_brk
<= brk_page
) {
929 /* Heap contents are initialized to zero, as for anonymous
931 if (new_brk
> target_brk
) {
932 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
934 target_brk
= new_brk
;
935 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
939 /* We need to allocate more memory after the brk... Note that
940 * we don't use MAP_FIXED because that will map over the top of
941 * any existing mapping (like the one with the host libc or qemu
942 * itself); instead we treat "mapped but at wrong address" as
943 * a failure and unmap again.
945 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
946 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
947 PROT_READ
|PROT_WRITE
,
948 MAP_ANON
|MAP_PRIVATE
, 0, 0));
950 if (mapped_addr
== brk_page
) {
951 /* Heap contents are initialized to zero, as for anonymous
952 * mapped pages. Technically the new pages are already
953 * initialized to zero since they *are* anonymous mapped
954 * pages, however we have to take care with the contents that
955 * come from the remaining part of the previous page: it may
956 * contains garbage data due to a previous heap usage (grown
958 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
960 target_brk
= new_brk
;
961 brk_page
= HOST_PAGE_ALIGN(target_brk
);
962 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
965 } else if (mapped_addr
!= -1) {
966 /* Mapped but at wrong address, meaning there wasn't actually
967 * enough space for this brk.
969 target_munmap(mapped_addr
, new_alloc_size
);
971 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
974 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
977 #if defined(TARGET_ALPHA)
978 /* We (partially) emulate OSF/1 on Alpha, which requires we
979 return a proper errno, not an unchanged brk value. */
980 return -TARGET_ENOMEM
;
982 /* For everything else, return the previous break. */
986 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
987 defined(TARGET_NR_pselect6)
988 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
989 abi_ulong target_fds_addr
,
993 abi_ulong b
, *target_fds
;
995 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
996 if (!(target_fds
= lock_user(VERIFY_READ
,
998 sizeof(abi_ulong
) * nw
,
1000 return -TARGET_EFAULT
;
1004 for (i
= 0; i
< nw
; i
++) {
1005 /* grab the abi_ulong */
1006 __get_user(b
, &target_fds
[i
]);
1007 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1008 /* check the bit inside the abi_ulong */
1015 unlock_user(target_fds
, target_fds_addr
, 0);
1020 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1021 abi_ulong target_fds_addr
,
1024 if (target_fds_addr
) {
1025 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1026 return -TARGET_EFAULT
;
1034 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1040 abi_ulong
*target_fds
;
1042 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1043 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1045 sizeof(abi_ulong
) * nw
,
1047 return -TARGET_EFAULT
;
1050 for (i
= 0; i
< nw
; i
++) {
1052 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1053 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1056 __put_user(v
, &target_fds
[i
]);
1059 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1065 #if defined(__alpha__)
1066 #define HOST_HZ 1024
1071 static inline abi_long
host_to_target_clock_t(long ticks
)
1073 #if HOST_HZ == TARGET_HZ
1076 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1080 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1081 const struct rusage
*rusage
)
1083 struct target_rusage
*target_rusage
;
1085 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1086 return -TARGET_EFAULT
;
1087 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1088 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1089 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1090 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1091 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1092 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1093 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1094 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1095 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1096 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1097 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1098 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1099 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1100 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1101 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1102 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1103 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1104 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1105 unlock_user_struct(target_rusage
, target_addr
, 1);
1110 #ifdef TARGET_NR_setrlimit
1111 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1113 abi_ulong target_rlim_swap
;
1116 target_rlim_swap
= tswapal(target_rlim
);
1117 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1118 return RLIM_INFINITY
;
1120 result
= target_rlim_swap
;
1121 if (target_rlim_swap
!= (rlim_t
)result
)
1122 return RLIM_INFINITY
;
1128 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1129 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1131 abi_ulong target_rlim_swap
;
1134 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1135 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1137 target_rlim_swap
= rlim
;
1138 result
= tswapal(target_rlim_swap
);
1144 static inline int target_to_host_resource(int code
)
1147 case TARGET_RLIMIT_AS
:
1149 case TARGET_RLIMIT_CORE
:
1151 case TARGET_RLIMIT_CPU
:
1153 case TARGET_RLIMIT_DATA
:
1155 case TARGET_RLIMIT_FSIZE
:
1156 return RLIMIT_FSIZE
;
1157 case TARGET_RLIMIT_LOCKS
:
1158 return RLIMIT_LOCKS
;
1159 case TARGET_RLIMIT_MEMLOCK
:
1160 return RLIMIT_MEMLOCK
;
1161 case TARGET_RLIMIT_MSGQUEUE
:
1162 return RLIMIT_MSGQUEUE
;
1163 case TARGET_RLIMIT_NICE
:
1165 case TARGET_RLIMIT_NOFILE
:
1166 return RLIMIT_NOFILE
;
1167 case TARGET_RLIMIT_NPROC
:
1168 return RLIMIT_NPROC
;
1169 case TARGET_RLIMIT_RSS
:
1171 case TARGET_RLIMIT_RTPRIO
:
1172 return RLIMIT_RTPRIO
;
1173 case TARGET_RLIMIT_SIGPENDING
:
1174 return RLIMIT_SIGPENDING
;
1175 case TARGET_RLIMIT_STACK
:
1176 return RLIMIT_STACK
;
1182 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1183 abi_ulong target_tv_addr
)
1185 struct target_timeval
*target_tv
;
1187 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1188 return -TARGET_EFAULT
;
1191 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1192 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1194 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1199 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1200 const struct timeval
*tv
)
1202 struct target_timeval
*target_tv
;
1204 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1205 return -TARGET_EFAULT
;
1208 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1209 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1211 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1216 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1217 const struct timeval
*tv
)
1219 struct target__kernel_sock_timeval
*target_tv
;
1221 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1222 return -TARGET_EFAULT
;
1225 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1226 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1228 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1233 #if defined(TARGET_NR_futex) || \
1234 defined(TARGET_NR_rt_sigtimedwait) || \
1235 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1236 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1237 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1238 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1239 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop)
1240 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1241 abi_ulong target_addr
)
1243 struct target_timespec
*target_ts
;
1245 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1246 return -TARGET_EFAULT
;
1248 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1249 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1250 unlock_user_struct(target_ts
, target_addr
, 0);
1255 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
1256 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1257 abi_ulong target_addr
)
1259 struct target__kernel_timespec
*target_ts
;
1261 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1262 return -TARGET_EFAULT
;
1264 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1265 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1266 unlock_user_struct(target_ts
, target_addr
, 0);
1271 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1272 struct timespec
*host_ts
)
1274 struct target_timespec
*target_ts
;
1276 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1277 return -TARGET_EFAULT
;
1279 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1280 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1281 unlock_user_struct(target_ts
, target_addr
, 1);
1285 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1286 struct timespec
*host_ts
)
1288 struct target__kernel_timespec
*target_ts
;
1290 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1291 return -TARGET_EFAULT
;
1293 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1294 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1295 unlock_user_struct(target_ts
, target_addr
, 1);
1299 #if defined(TARGET_NR_gettimeofday)
1300 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1301 struct timezone
*tz
)
1303 struct target_timezone
*target_tz
;
1305 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1306 return -TARGET_EFAULT
;
1309 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1310 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1312 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1318 #if defined(TARGET_NR_settimeofday)
1319 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1320 abi_ulong target_tz_addr
)
1322 struct target_timezone
*target_tz
;
1324 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1325 return -TARGET_EFAULT
;
1328 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1329 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1331 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1337 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1340 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1341 abi_ulong target_mq_attr_addr
)
1343 struct target_mq_attr
*target_mq_attr
;
1345 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1346 target_mq_attr_addr
, 1))
1347 return -TARGET_EFAULT
;
1349 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1350 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1351 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1352 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1354 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1359 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1360 const struct mq_attr
*attr
)
1362 struct target_mq_attr
*target_mq_attr
;
1364 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1365 target_mq_attr_addr
, 0))
1366 return -TARGET_EFAULT
;
1368 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1369 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1370 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1371 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1373 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1379 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1380 /* do_select() must return target values and target errnos. */
1381 static abi_long
do_select(int n
,
1382 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1383 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1385 fd_set rfds
, wfds
, efds
;
1386 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1388 struct timespec ts
, *ts_ptr
;
1391 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1395 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1399 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1404 if (target_tv_addr
) {
1405 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1406 return -TARGET_EFAULT
;
1407 ts
.tv_sec
= tv
.tv_sec
;
1408 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1414 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1417 if (!is_error(ret
)) {
1418 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1419 return -TARGET_EFAULT
;
1420 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1421 return -TARGET_EFAULT
;
1422 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1423 return -TARGET_EFAULT
;
1425 if (target_tv_addr
) {
1426 tv
.tv_sec
= ts
.tv_sec
;
1427 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1428 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1429 return -TARGET_EFAULT
;
1437 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1438 static abi_long
do_old_select(abi_ulong arg1
)
1440 struct target_sel_arg_struct
*sel
;
1441 abi_ulong inp
, outp
, exp
, tvp
;
1444 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1445 return -TARGET_EFAULT
;
1448 nsel
= tswapal(sel
->n
);
1449 inp
= tswapal(sel
->inp
);
1450 outp
= tswapal(sel
->outp
);
1451 exp
= tswapal(sel
->exp
);
1452 tvp
= tswapal(sel
->tvp
);
1454 unlock_user_struct(sel
, arg1
, 0);
1456 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1461 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1464 return pipe2(host_pipe
, flags
);
1470 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1471 int flags
, int is_pipe2
)
1475 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1478 return get_errno(ret
);
1480 /* Several targets have special calling conventions for the original
1481 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1483 #if defined(TARGET_ALPHA)
1484 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1485 return host_pipe
[0];
1486 #elif defined(TARGET_MIPS)
1487 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1488 return host_pipe
[0];
1489 #elif defined(TARGET_SH4)
1490 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1491 return host_pipe
[0];
1492 #elif defined(TARGET_SPARC)
1493 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1494 return host_pipe
[0];
1498 if (put_user_s32(host_pipe
[0], pipedes
)
1499 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1500 return -TARGET_EFAULT
;
1501 return get_errno(ret
);
1504 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1505 abi_ulong target_addr
,
1508 struct target_ip_mreqn
*target_smreqn
;
1510 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1512 return -TARGET_EFAULT
;
1513 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1514 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1515 if (len
== sizeof(struct target_ip_mreqn
))
1516 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1517 unlock_user(target_smreqn
, target_addr
, 0);
1522 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1523 abi_ulong target_addr
,
1526 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1527 sa_family_t sa_family
;
1528 struct target_sockaddr
*target_saddr
;
1530 if (fd_trans_target_to_host_addr(fd
)) {
1531 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1534 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1536 return -TARGET_EFAULT
;
1538 sa_family
= tswap16(target_saddr
->sa_family
);
1540 /* Oops. The caller might send a incomplete sun_path; sun_path
1541 * must be terminated by \0 (see the manual page), but
1542 * unfortunately it is quite common to specify sockaddr_un
1543 * length as "strlen(x->sun_path)" while it should be
1544 * "strlen(...) + 1". We'll fix that here if needed.
1545 * Linux kernel has a similar feature.
1548 if (sa_family
== AF_UNIX
) {
1549 if (len
< unix_maxlen
&& len
> 0) {
1550 char *cp
= (char*)target_saddr
;
1552 if ( cp
[len
-1] && !cp
[len
] )
1555 if (len
> unix_maxlen
)
1559 memcpy(addr
, target_saddr
, len
);
1560 addr
->sa_family
= sa_family
;
1561 if (sa_family
== AF_NETLINK
) {
1562 struct sockaddr_nl
*nladdr
;
1564 nladdr
= (struct sockaddr_nl
*)addr
;
1565 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1566 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1567 } else if (sa_family
== AF_PACKET
) {
1568 struct target_sockaddr_ll
*lladdr
;
1570 lladdr
= (struct target_sockaddr_ll
*)addr
;
1571 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1572 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1574 unlock_user(target_saddr
, target_addr
, 0);
1579 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1580 struct sockaddr
*addr
,
1583 struct target_sockaddr
*target_saddr
;
1590 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1592 return -TARGET_EFAULT
;
1593 memcpy(target_saddr
, addr
, len
);
1594 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1595 sizeof(target_saddr
->sa_family
)) {
1596 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1598 if (addr
->sa_family
== AF_NETLINK
&&
1599 len
>= sizeof(struct target_sockaddr_nl
)) {
1600 struct target_sockaddr_nl
*target_nl
=
1601 (struct target_sockaddr_nl
*)target_saddr
;
1602 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1603 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1604 } else if (addr
->sa_family
== AF_PACKET
) {
1605 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1606 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1607 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1608 } else if (addr
->sa_family
== AF_INET6
&&
1609 len
>= sizeof(struct target_sockaddr_in6
)) {
1610 struct target_sockaddr_in6
*target_in6
=
1611 (struct target_sockaddr_in6
*)target_saddr
;
1612 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1614 unlock_user(target_saddr
, target_addr
, len
);
1619 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1620 struct target_msghdr
*target_msgh
)
1622 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1623 abi_long msg_controllen
;
1624 abi_ulong target_cmsg_addr
;
1625 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1626 socklen_t space
= 0;
1628 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1629 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1631 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1632 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1633 target_cmsg_start
= target_cmsg
;
1635 return -TARGET_EFAULT
;
1637 while (cmsg
&& target_cmsg
) {
1638 void *data
= CMSG_DATA(cmsg
);
1639 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1641 int len
= tswapal(target_cmsg
->cmsg_len
)
1642 - sizeof(struct target_cmsghdr
);
1644 space
+= CMSG_SPACE(len
);
1645 if (space
> msgh
->msg_controllen
) {
1646 space
-= CMSG_SPACE(len
);
1647 /* This is a QEMU bug, since we allocated the payload
1648 * area ourselves (unlike overflow in host-to-target
1649 * conversion, which is just the guest giving us a buffer
1650 * that's too small). It can't happen for the payload types
1651 * we currently support; if it becomes an issue in future
1652 * we would need to improve our allocation strategy to
1653 * something more intelligent than "twice the size of the
1654 * target buffer we're reading from".
1656 qemu_log_mask(LOG_UNIMP
,
1657 ("Unsupported ancillary data %d/%d: "
1658 "unhandled msg size\n"),
1659 tswap32(target_cmsg
->cmsg_level
),
1660 tswap32(target_cmsg
->cmsg_type
));
1664 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1665 cmsg
->cmsg_level
= SOL_SOCKET
;
1667 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1669 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1670 cmsg
->cmsg_len
= CMSG_LEN(len
);
1672 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1673 int *fd
= (int *)data
;
1674 int *target_fd
= (int *)target_data
;
1675 int i
, numfds
= len
/ sizeof(int);
1677 for (i
= 0; i
< numfds
; i
++) {
1678 __get_user(fd
[i
], target_fd
+ i
);
1680 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1681 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1682 struct ucred
*cred
= (struct ucred
*)data
;
1683 struct target_ucred
*target_cred
=
1684 (struct target_ucred
*)target_data
;
1686 __get_user(cred
->pid
, &target_cred
->pid
);
1687 __get_user(cred
->uid
, &target_cred
->uid
);
1688 __get_user(cred
->gid
, &target_cred
->gid
);
1690 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1691 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1692 memcpy(data
, target_data
, len
);
1695 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1696 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1699 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1701 msgh
->msg_controllen
= space
;
1705 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1706 struct msghdr
*msgh
)
1708 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1709 abi_long msg_controllen
;
1710 abi_ulong target_cmsg_addr
;
1711 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1712 socklen_t space
= 0;
1714 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1715 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1717 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1718 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1719 target_cmsg_start
= target_cmsg
;
1721 return -TARGET_EFAULT
;
1723 while (cmsg
&& target_cmsg
) {
1724 void *data
= CMSG_DATA(cmsg
);
1725 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1727 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1728 int tgt_len
, tgt_space
;
1730 /* We never copy a half-header but may copy half-data;
1731 * this is Linux's behaviour in put_cmsg(). Note that
1732 * truncation here is a guest problem (which we report
1733 * to the guest via the CTRUNC bit), unlike truncation
1734 * in target_to_host_cmsg, which is a QEMU bug.
1736 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1737 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1741 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1742 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1744 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1746 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1748 /* Payload types which need a different size of payload on
1749 * the target must adjust tgt_len here.
1752 switch (cmsg
->cmsg_level
) {
1754 switch (cmsg
->cmsg_type
) {
1756 tgt_len
= sizeof(struct target_timeval
);
1766 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1767 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1768 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1771 /* We must now copy-and-convert len bytes of payload
1772 * into tgt_len bytes of destination space. Bear in mind
1773 * that in both source and destination we may be dealing
1774 * with a truncated value!
1776 switch (cmsg
->cmsg_level
) {
1778 switch (cmsg
->cmsg_type
) {
1781 int *fd
= (int *)data
;
1782 int *target_fd
= (int *)target_data
;
1783 int i
, numfds
= tgt_len
/ sizeof(int);
1785 for (i
= 0; i
< numfds
; i
++) {
1786 __put_user(fd
[i
], target_fd
+ i
);
1792 struct timeval
*tv
= (struct timeval
*)data
;
1793 struct target_timeval
*target_tv
=
1794 (struct target_timeval
*)target_data
;
1796 if (len
!= sizeof(struct timeval
) ||
1797 tgt_len
!= sizeof(struct target_timeval
)) {
1801 /* copy struct timeval to target */
1802 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1803 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1806 case SCM_CREDENTIALS
:
1808 struct ucred
*cred
= (struct ucred
*)data
;
1809 struct target_ucred
*target_cred
=
1810 (struct target_ucred
*)target_data
;
1812 __put_user(cred
->pid
, &target_cred
->pid
);
1813 __put_user(cred
->uid
, &target_cred
->uid
);
1814 __put_user(cred
->gid
, &target_cred
->gid
);
1823 switch (cmsg
->cmsg_type
) {
1826 uint32_t *v
= (uint32_t *)data
;
1827 uint32_t *t_int
= (uint32_t *)target_data
;
1829 if (len
!= sizeof(uint32_t) ||
1830 tgt_len
!= sizeof(uint32_t)) {
1833 __put_user(*v
, t_int
);
1839 struct sock_extended_err ee
;
1840 struct sockaddr_in offender
;
1842 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1843 struct errhdr_t
*target_errh
=
1844 (struct errhdr_t
*)target_data
;
1846 if (len
!= sizeof(struct errhdr_t
) ||
1847 tgt_len
!= sizeof(struct errhdr_t
)) {
1850 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1851 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1852 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1853 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1854 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1855 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1856 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1857 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1858 (void *) &errh
->offender
, sizeof(errh
->offender
));
1867 switch (cmsg
->cmsg_type
) {
1870 uint32_t *v
= (uint32_t *)data
;
1871 uint32_t *t_int
= (uint32_t *)target_data
;
1873 if (len
!= sizeof(uint32_t) ||
1874 tgt_len
!= sizeof(uint32_t)) {
1877 __put_user(*v
, t_int
);
1883 struct sock_extended_err ee
;
1884 struct sockaddr_in6 offender
;
1886 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1887 struct errhdr6_t
*target_errh
=
1888 (struct errhdr6_t
*)target_data
;
1890 if (len
!= sizeof(struct errhdr6_t
) ||
1891 tgt_len
!= sizeof(struct errhdr6_t
)) {
1894 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1895 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1896 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1897 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1898 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1899 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1900 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1901 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1902 (void *) &errh
->offender
, sizeof(errh
->offender
));
1912 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1913 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1914 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1915 if (tgt_len
> len
) {
1916 memset(target_data
+ len
, 0, tgt_len
- len
);
1920 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1921 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1922 if (msg_controllen
< tgt_space
) {
1923 tgt_space
= msg_controllen
;
1925 msg_controllen
-= tgt_space
;
1927 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1928 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1931 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1933 target_msgh
->msg_controllen
= tswapal(space
);
1937 /* do_setsockopt() Must return target values and target errnos. */
1938 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1939 abi_ulong optval_addr
, socklen_t optlen
)
1943 struct ip_mreqn
*ip_mreq
;
1944 struct ip_mreq_source
*ip_mreq_source
;
1948 /* TCP options all take an 'int' value. */
1949 if (optlen
< sizeof(uint32_t))
1950 return -TARGET_EINVAL
;
1952 if (get_user_u32(val
, optval_addr
))
1953 return -TARGET_EFAULT
;
1954 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1961 case IP_ROUTER_ALERT
:
1965 case IP_MTU_DISCOVER
:
1972 case IP_MULTICAST_TTL
:
1973 case IP_MULTICAST_LOOP
:
1975 if (optlen
>= sizeof(uint32_t)) {
1976 if (get_user_u32(val
, optval_addr
))
1977 return -TARGET_EFAULT
;
1978 } else if (optlen
>= 1) {
1979 if (get_user_u8(val
, optval_addr
))
1980 return -TARGET_EFAULT
;
1982 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1984 case IP_ADD_MEMBERSHIP
:
1985 case IP_DROP_MEMBERSHIP
:
1986 if (optlen
< sizeof (struct target_ip_mreq
) ||
1987 optlen
> sizeof (struct target_ip_mreqn
))
1988 return -TARGET_EINVAL
;
1990 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1991 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1992 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1995 case IP_BLOCK_SOURCE
:
1996 case IP_UNBLOCK_SOURCE
:
1997 case IP_ADD_SOURCE_MEMBERSHIP
:
1998 case IP_DROP_SOURCE_MEMBERSHIP
:
1999 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2000 return -TARGET_EINVAL
;
2002 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2003 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2004 unlock_user (ip_mreq_source
, optval_addr
, 0);
2013 case IPV6_MTU_DISCOVER
:
2016 case IPV6_RECVPKTINFO
:
2017 case IPV6_UNICAST_HOPS
:
2018 case IPV6_MULTICAST_HOPS
:
2019 case IPV6_MULTICAST_LOOP
:
2021 case IPV6_RECVHOPLIMIT
:
2022 case IPV6_2292HOPLIMIT
:
2025 case IPV6_2292PKTINFO
:
2026 case IPV6_RECVTCLASS
:
2027 case IPV6_RECVRTHDR
:
2028 case IPV6_2292RTHDR
:
2029 case IPV6_RECVHOPOPTS
:
2030 case IPV6_2292HOPOPTS
:
2031 case IPV6_RECVDSTOPTS
:
2032 case IPV6_2292DSTOPTS
:
2034 #ifdef IPV6_RECVPATHMTU
2035 case IPV6_RECVPATHMTU
:
2037 #ifdef IPV6_TRANSPARENT
2038 case IPV6_TRANSPARENT
:
2040 #ifdef IPV6_FREEBIND
2043 #ifdef IPV6_RECVORIGDSTADDR
2044 case IPV6_RECVORIGDSTADDR
:
2047 if (optlen
< sizeof(uint32_t)) {
2048 return -TARGET_EINVAL
;
2050 if (get_user_u32(val
, optval_addr
)) {
2051 return -TARGET_EFAULT
;
2053 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2054 &val
, sizeof(val
)));
2058 struct in6_pktinfo pki
;
2060 if (optlen
< sizeof(pki
)) {
2061 return -TARGET_EINVAL
;
2064 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2065 return -TARGET_EFAULT
;
2068 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2070 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2071 &pki
, sizeof(pki
)));
2074 case IPV6_ADD_MEMBERSHIP
:
2075 case IPV6_DROP_MEMBERSHIP
:
2077 struct ipv6_mreq ipv6mreq
;
2079 if (optlen
< sizeof(ipv6mreq
)) {
2080 return -TARGET_EINVAL
;
2083 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2084 return -TARGET_EFAULT
;
2087 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2089 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2090 &ipv6mreq
, sizeof(ipv6mreq
)));
2101 struct icmp6_filter icmp6f
;
2103 if (optlen
> sizeof(icmp6f
)) {
2104 optlen
= sizeof(icmp6f
);
2107 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2108 return -TARGET_EFAULT
;
2111 for (val
= 0; val
< 8; val
++) {
2112 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2115 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2127 /* those take an u32 value */
2128 if (optlen
< sizeof(uint32_t)) {
2129 return -TARGET_EINVAL
;
2132 if (get_user_u32(val
, optval_addr
)) {
2133 return -TARGET_EFAULT
;
2135 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2136 &val
, sizeof(val
)));
2143 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2148 char *alg_key
= g_malloc(optlen
);
2151 return -TARGET_ENOMEM
;
2153 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2155 return -TARGET_EFAULT
;
2157 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2162 case ALG_SET_AEAD_AUTHSIZE
:
2164 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2173 case TARGET_SOL_SOCKET
:
2175 case TARGET_SO_RCVTIMEO
:
2179 optname
= SO_RCVTIMEO
;
2182 if (optlen
!= sizeof(struct target_timeval
)) {
2183 return -TARGET_EINVAL
;
2186 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2187 return -TARGET_EFAULT
;
2190 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2194 case TARGET_SO_SNDTIMEO
:
2195 optname
= SO_SNDTIMEO
;
2197 case TARGET_SO_ATTACH_FILTER
:
2199 struct target_sock_fprog
*tfprog
;
2200 struct target_sock_filter
*tfilter
;
2201 struct sock_fprog fprog
;
2202 struct sock_filter
*filter
;
2205 if (optlen
!= sizeof(*tfprog
)) {
2206 return -TARGET_EINVAL
;
2208 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2209 return -TARGET_EFAULT
;
2211 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2212 tswapal(tfprog
->filter
), 0)) {
2213 unlock_user_struct(tfprog
, optval_addr
, 1);
2214 return -TARGET_EFAULT
;
2217 fprog
.len
= tswap16(tfprog
->len
);
2218 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2219 if (filter
== NULL
) {
2220 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2221 unlock_user_struct(tfprog
, optval_addr
, 1);
2222 return -TARGET_ENOMEM
;
2224 for (i
= 0; i
< fprog
.len
; i
++) {
2225 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2226 filter
[i
].jt
= tfilter
[i
].jt
;
2227 filter
[i
].jf
= tfilter
[i
].jf
;
2228 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2230 fprog
.filter
= filter
;
2232 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2233 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2236 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2237 unlock_user_struct(tfprog
, optval_addr
, 1);
2240 case TARGET_SO_BINDTODEVICE
:
2242 char *dev_ifname
, *addr_ifname
;
2244 if (optlen
> IFNAMSIZ
- 1) {
2245 optlen
= IFNAMSIZ
- 1;
2247 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2249 return -TARGET_EFAULT
;
2251 optname
= SO_BINDTODEVICE
;
2252 addr_ifname
= alloca(IFNAMSIZ
);
2253 memcpy(addr_ifname
, dev_ifname
, optlen
);
2254 addr_ifname
[optlen
] = 0;
2255 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2256 addr_ifname
, optlen
));
2257 unlock_user (dev_ifname
, optval_addr
, 0);
2260 case TARGET_SO_LINGER
:
2263 struct target_linger
*tlg
;
2265 if (optlen
!= sizeof(struct target_linger
)) {
2266 return -TARGET_EINVAL
;
2268 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2269 return -TARGET_EFAULT
;
2271 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2272 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2273 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2275 unlock_user_struct(tlg
, optval_addr
, 0);
2278 /* Options with 'int' argument. */
2279 case TARGET_SO_DEBUG
:
2282 case TARGET_SO_REUSEADDR
:
2283 optname
= SO_REUSEADDR
;
2286 case TARGET_SO_REUSEPORT
:
2287 optname
= SO_REUSEPORT
;
2290 case TARGET_SO_TYPE
:
2293 case TARGET_SO_ERROR
:
2296 case TARGET_SO_DONTROUTE
:
2297 optname
= SO_DONTROUTE
;
2299 case TARGET_SO_BROADCAST
:
2300 optname
= SO_BROADCAST
;
2302 case TARGET_SO_SNDBUF
:
2303 optname
= SO_SNDBUF
;
2305 case TARGET_SO_SNDBUFFORCE
:
2306 optname
= SO_SNDBUFFORCE
;
2308 case TARGET_SO_RCVBUF
:
2309 optname
= SO_RCVBUF
;
2311 case TARGET_SO_RCVBUFFORCE
:
2312 optname
= SO_RCVBUFFORCE
;
2314 case TARGET_SO_KEEPALIVE
:
2315 optname
= SO_KEEPALIVE
;
2317 case TARGET_SO_OOBINLINE
:
2318 optname
= SO_OOBINLINE
;
2320 case TARGET_SO_NO_CHECK
:
2321 optname
= SO_NO_CHECK
;
2323 case TARGET_SO_PRIORITY
:
2324 optname
= SO_PRIORITY
;
2327 case TARGET_SO_BSDCOMPAT
:
2328 optname
= SO_BSDCOMPAT
;
2331 case TARGET_SO_PASSCRED
:
2332 optname
= SO_PASSCRED
;
2334 case TARGET_SO_PASSSEC
:
2335 optname
= SO_PASSSEC
;
2337 case TARGET_SO_TIMESTAMP
:
2338 optname
= SO_TIMESTAMP
;
2340 case TARGET_SO_RCVLOWAT
:
2341 optname
= SO_RCVLOWAT
;
2346 if (optlen
< sizeof(uint32_t))
2347 return -TARGET_EINVAL
;
2349 if (get_user_u32(val
, optval_addr
))
2350 return -TARGET_EFAULT
;
2351 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2356 case NETLINK_PKTINFO
:
2357 case NETLINK_ADD_MEMBERSHIP
:
2358 case NETLINK_DROP_MEMBERSHIP
:
2359 case NETLINK_BROADCAST_ERROR
:
2360 case NETLINK_NO_ENOBUFS
:
2361 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2362 case NETLINK_LISTEN_ALL_NSID
:
2363 case NETLINK_CAP_ACK
:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2366 case NETLINK_EXT_ACK
:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2368 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2369 case NETLINK_GET_STRICT_CHK
:
2370 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2376 if (optlen
< sizeof(uint32_t)) {
2377 return -TARGET_EINVAL
;
2379 if (get_user_u32(val
, optval_addr
)) {
2380 return -TARGET_EFAULT
;
2382 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2385 #endif /* SOL_NETLINK */
2388 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2390 ret
= -TARGET_ENOPROTOOPT
;
2395 /* do_getsockopt() Must return target values and target errnos. */
2396 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2397 abi_ulong optval_addr
, abi_ulong optlen
)
2404 case TARGET_SOL_SOCKET
:
2407 /* These don't just return a single integer */
2408 case TARGET_SO_PEERNAME
:
2410 case TARGET_SO_RCVTIMEO
: {
2414 optname
= SO_RCVTIMEO
;
2417 if (get_user_u32(len
, optlen
)) {
2418 return -TARGET_EFAULT
;
2421 return -TARGET_EINVAL
;
2425 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2430 if (len
> sizeof(struct target_timeval
)) {
2431 len
= sizeof(struct target_timeval
);
2433 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2434 return -TARGET_EFAULT
;
2436 if (put_user_u32(len
, optlen
)) {
2437 return -TARGET_EFAULT
;
2441 case TARGET_SO_SNDTIMEO
:
2442 optname
= SO_SNDTIMEO
;
2444 case TARGET_SO_PEERCRED
: {
2447 struct target_ucred
*tcr
;
2449 if (get_user_u32(len
, optlen
)) {
2450 return -TARGET_EFAULT
;
2453 return -TARGET_EINVAL
;
2457 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2465 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2466 return -TARGET_EFAULT
;
2468 __put_user(cr
.pid
, &tcr
->pid
);
2469 __put_user(cr
.uid
, &tcr
->uid
);
2470 __put_user(cr
.gid
, &tcr
->gid
);
2471 unlock_user_struct(tcr
, optval_addr
, 1);
2472 if (put_user_u32(len
, optlen
)) {
2473 return -TARGET_EFAULT
;
2477 case TARGET_SO_PEERSEC
: {
2480 if (get_user_u32(len
, optlen
)) {
2481 return -TARGET_EFAULT
;
2484 return -TARGET_EINVAL
;
2486 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2488 return -TARGET_EFAULT
;
2491 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2493 if (put_user_u32(lv
, optlen
)) {
2494 ret
= -TARGET_EFAULT
;
2496 unlock_user(name
, optval_addr
, lv
);
2499 case TARGET_SO_LINGER
:
2503 struct target_linger
*tlg
;
2505 if (get_user_u32(len
, optlen
)) {
2506 return -TARGET_EFAULT
;
2509 return -TARGET_EINVAL
;
2513 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2521 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2522 return -TARGET_EFAULT
;
2524 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2525 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2526 unlock_user_struct(tlg
, optval_addr
, 1);
2527 if (put_user_u32(len
, optlen
)) {
2528 return -TARGET_EFAULT
;
2532 /* Options with 'int' argument. */
2533 case TARGET_SO_DEBUG
:
2536 case TARGET_SO_REUSEADDR
:
2537 optname
= SO_REUSEADDR
;
2540 case TARGET_SO_REUSEPORT
:
2541 optname
= SO_REUSEPORT
;
2544 case TARGET_SO_TYPE
:
2547 case TARGET_SO_ERROR
:
2550 case TARGET_SO_DONTROUTE
:
2551 optname
= SO_DONTROUTE
;
2553 case TARGET_SO_BROADCAST
:
2554 optname
= SO_BROADCAST
;
2556 case TARGET_SO_SNDBUF
:
2557 optname
= SO_SNDBUF
;
2559 case TARGET_SO_RCVBUF
:
2560 optname
= SO_RCVBUF
;
2562 case TARGET_SO_KEEPALIVE
:
2563 optname
= SO_KEEPALIVE
;
2565 case TARGET_SO_OOBINLINE
:
2566 optname
= SO_OOBINLINE
;
2568 case TARGET_SO_NO_CHECK
:
2569 optname
= SO_NO_CHECK
;
2571 case TARGET_SO_PRIORITY
:
2572 optname
= SO_PRIORITY
;
2575 case TARGET_SO_BSDCOMPAT
:
2576 optname
= SO_BSDCOMPAT
;
2579 case TARGET_SO_PASSCRED
:
2580 optname
= SO_PASSCRED
;
2582 case TARGET_SO_TIMESTAMP
:
2583 optname
= SO_TIMESTAMP
;
2585 case TARGET_SO_RCVLOWAT
:
2586 optname
= SO_RCVLOWAT
;
2588 case TARGET_SO_ACCEPTCONN
:
2589 optname
= SO_ACCEPTCONN
;
2596 /* TCP options all take an 'int' value. */
2598 if (get_user_u32(len
, optlen
))
2599 return -TARGET_EFAULT
;
2601 return -TARGET_EINVAL
;
2603 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2606 if (optname
== SO_TYPE
) {
2607 val
= host_to_target_sock_type(val
);
2612 if (put_user_u32(val
, optval_addr
))
2613 return -TARGET_EFAULT
;
2615 if (put_user_u8(val
, optval_addr
))
2616 return -TARGET_EFAULT
;
2618 if (put_user_u32(len
, optlen
))
2619 return -TARGET_EFAULT
;
2626 case IP_ROUTER_ALERT
:
2630 case IP_MTU_DISCOVER
:
2636 case IP_MULTICAST_TTL
:
2637 case IP_MULTICAST_LOOP
:
2638 if (get_user_u32(len
, optlen
))
2639 return -TARGET_EFAULT
;
2641 return -TARGET_EINVAL
;
2643 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2646 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2648 if (put_user_u32(len
, optlen
)
2649 || put_user_u8(val
, optval_addr
))
2650 return -TARGET_EFAULT
;
2652 if (len
> sizeof(int))
2654 if (put_user_u32(len
, optlen
)
2655 || put_user_u32(val
, optval_addr
))
2656 return -TARGET_EFAULT
;
2660 ret
= -TARGET_ENOPROTOOPT
;
2666 case IPV6_MTU_DISCOVER
:
2669 case IPV6_RECVPKTINFO
:
2670 case IPV6_UNICAST_HOPS
:
2671 case IPV6_MULTICAST_HOPS
:
2672 case IPV6_MULTICAST_LOOP
:
2674 case IPV6_RECVHOPLIMIT
:
2675 case IPV6_2292HOPLIMIT
:
2678 case IPV6_2292PKTINFO
:
2679 case IPV6_RECVTCLASS
:
2680 case IPV6_RECVRTHDR
:
2681 case IPV6_2292RTHDR
:
2682 case IPV6_RECVHOPOPTS
:
2683 case IPV6_2292HOPOPTS
:
2684 case IPV6_RECVDSTOPTS
:
2685 case IPV6_2292DSTOPTS
:
2687 #ifdef IPV6_RECVPATHMTU
2688 case IPV6_RECVPATHMTU
:
2690 #ifdef IPV6_TRANSPARENT
2691 case IPV6_TRANSPARENT
:
2693 #ifdef IPV6_FREEBIND
2696 #ifdef IPV6_RECVORIGDSTADDR
2697 case IPV6_RECVORIGDSTADDR
:
2699 if (get_user_u32(len
, optlen
))
2700 return -TARGET_EFAULT
;
2702 return -TARGET_EINVAL
;
2704 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2707 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2709 if (put_user_u32(len
, optlen
)
2710 || put_user_u8(val
, optval_addr
))
2711 return -TARGET_EFAULT
;
2713 if (len
> sizeof(int))
2715 if (put_user_u32(len
, optlen
)
2716 || put_user_u32(val
, optval_addr
))
2717 return -TARGET_EFAULT
;
2721 ret
= -TARGET_ENOPROTOOPT
;
2728 case NETLINK_PKTINFO
:
2729 case NETLINK_BROADCAST_ERROR
:
2730 case NETLINK_NO_ENOBUFS
:
2731 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2732 case NETLINK_LISTEN_ALL_NSID
:
2733 case NETLINK_CAP_ACK
:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2736 case NETLINK_EXT_ACK
:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2738 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2739 case NETLINK_GET_STRICT_CHK
:
2740 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2741 if (get_user_u32(len
, optlen
)) {
2742 return -TARGET_EFAULT
;
2744 if (len
!= sizeof(val
)) {
2745 return -TARGET_EINVAL
;
2748 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2752 if (put_user_u32(lv
, optlen
)
2753 || put_user_u32(val
, optval_addr
)) {
2754 return -TARGET_EFAULT
;
2757 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2758 case NETLINK_LIST_MEMBERSHIPS
:
2762 if (get_user_u32(len
, optlen
)) {
2763 return -TARGET_EFAULT
;
2766 return -TARGET_EINVAL
;
2768 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2770 return -TARGET_EFAULT
;
2773 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2775 unlock_user(results
, optval_addr
, 0);
2778 /* swap host endianess to target endianess. */
2779 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2780 results
[i
] = tswap32(results
[i
]);
2782 if (put_user_u32(lv
, optlen
)) {
2783 return -TARGET_EFAULT
;
2785 unlock_user(results
, optval_addr
, 0);
2788 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2793 #endif /* SOL_NETLINK */
2796 qemu_log_mask(LOG_UNIMP
,
2797 "getsockopt level=%d optname=%d not yet supported\n",
2799 ret
= -TARGET_EOPNOTSUPP
;
2805 /* Convert target low/high pair representing file offset into the host
2806 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2807 * as the kernel doesn't handle them either.
2809 static void target_to_host_low_high(abi_ulong tlow
,
2811 unsigned long *hlow
,
2812 unsigned long *hhigh
)
2814 uint64_t off
= tlow
|
2815 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2816 TARGET_LONG_BITS
/ 2;
2819 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2822 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2823 abi_ulong count
, int copy
)
2825 struct target_iovec
*target_vec
;
2827 abi_ulong total_len
, max_len
;
2830 bool bad_address
= false;
2836 if (count
> IOV_MAX
) {
2841 vec
= g_try_new0(struct iovec
, count
);
2847 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2848 count
* sizeof(struct target_iovec
), 1);
2849 if (target_vec
== NULL
) {
2854 /* ??? If host page size > target page size, this will result in a
2855 value larger than what we can actually support. */
2856 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2859 for (i
= 0; i
< count
; i
++) {
2860 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2861 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2866 } else if (len
== 0) {
2867 /* Zero length pointer is ignored. */
2868 vec
[i
].iov_base
= 0;
2870 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2871 /* If the first buffer pointer is bad, this is a fault. But
2872 * subsequent bad buffers will result in a partial write; this
2873 * is realized by filling the vector with null pointers and
2875 if (!vec
[i
].iov_base
) {
2886 if (len
> max_len
- total_len
) {
2887 len
= max_len
- total_len
;
2890 vec
[i
].iov_len
= len
;
2894 unlock_user(target_vec
, target_addr
, 0);
2899 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2900 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2903 unlock_user(target_vec
, target_addr
, 0);
2910 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2911 abi_ulong count
, int copy
)
2913 struct target_iovec
*target_vec
;
2916 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2917 count
* sizeof(struct target_iovec
), 1);
2919 for (i
= 0; i
< count
; i
++) {
2920 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2921 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2925 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2927 unlock_user(target_vec
, target_addr
, 0);
2933 static inline int target_to_host_sock_type(int *type
)
2936 int target_type
= *type
;
2938 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2939 case TARGET_SOCK_DGRAM
:
2940 host_type
= SOCK_DGRAM
;
2942 case TARGET_SOCK_STREAM
:
2943 host_type
= SOCK_STREAM
;
2946 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2949 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2950 #if defined(SOCK_CLOEXEC)
2951 host_type
|= SOCK_CLOEXEC
;
2953 return -TARGET_EINVAL
;
2956 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2957 #if defined(SOCK_NONBLOCK)
2958 host_type
|= SOCK_NONBLOCK
;
2959 #elif !defined(O_NONBLOCK)
2960 return -TARGET_EINVAL
;
2967 /* Try to emulate socket type flags after socket creation. */
2968 static int sock_flags_fixup(int fd
, int target_type
)
2970 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2971 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2972 int flags
= fcntl(fd
, F_GETFL
);
2973 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2975 return -TARGET_EINVAL
;
2982 /* do_socket() Must return target values and target errnos. */
2983 static abi_long
do_socket(int domain
, int type
, int protocol
)
2985 int target_type
= type
;
2988 ret
= target_to_host_sock_type(&type
);
2993 if (domain
== PF_NETLINK
&& !(
2994 #ifdef CONFIG_RTNETLINK
2995 protocol
== NETLINK_ROUTE
||
2997 protocol
== NETLINK_KOBJECT_UEVENT
||
2998 protocol
== NETLINK_AUDIT
)) {
2999 return -TARGET_EPROTONOSUPPORT
;
3002 if (domain
== AF_PACKET
||
3003 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3004 protocol
= tswap16(protocol
);
3007 ret
= get_errno(socket(domain
, type
, protocol
));
3009 ret
= sock_flags_fixup(ret
, target_type
);
3010 if (type
== SOCK_PACKET
) {
3011 /* Manage an obsolete case :
3012 * if socket type is SOCK_PACKET, bind by name
3014 fd_trans_register(ret
, &target_packet_trans
);
3015 } else if (domain
== PF_NETLINK
) {
3017 #ifdef CONFIG_RTNETLINK
3019 fd_trans_register(ret
, &target_netlink_route_trans
);
3022 case NETLINK_KOBJECT_UEVENT
:
3023 /* nothing to do: messages are strings */
3026 fd_trans_register(ret
, &target_netlink_audit_trans
);
3029 g_assert_not_reached();
3036 /* do_bind() Must return target values and target errnos. */
3037 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3043 if ((int)addrlen
< 0) {
3044 return -TARGET_EINVAL
;
3047 addr
= alloca(addrlen
+1);
3049 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3053 return get_errno(bind(sockfd
, addr
, addrlen
));
3056 /* do_connect() Must return target values and target errnos. */
3057 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3063 if ((int)addrlen
< 0) {
3064 return -TARGET_EINVAL
;
3067 addr
= alloca(addrlen
+1);
3069 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3073 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3076 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3077 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3078 int flags
, int send
)
3084 abi_ulong target_vec
;
3086 if (msgp
->msg_name
) {
3087 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3088 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3089 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3090 tswapal(msgp
->msg_name
),
3092 if (ret
== -TARGET_EFAULT
) {
3093 /* For connected sockets msg_name and msg_namelen must
3094 * be ignored, so returning EFAULT immediately is wrong.
3095 * Instead, pass a bad msg_name to the host kernel, and
3096 * let it decide whether to return EFAULT or not.
3098 msg
.msg_name
= (void *)-1;
3103 msg
.msg_name
= NULL
;
3104 msg
.msg_namelen
= 0;
3106 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3107 msg
.msg_control
= alloca(msg
.msg_controllen
);
3108 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3110 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3112 count
= tswapal(msgp
->msg_iovlen
);
3113 target_vec
= tswapal(msgp
->msg_iov
);
3115 if (count
> IOV_MAX
) {
3116 /* sendrcvmsg returns a different errno for this condition than
3117 * readv/writev, so we must catch it here before lock_iovec() does.
3119 ret
= -TARGET_EMSGSIZE
;
3123 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3124 target_vec
, count
, send
);
3126 ret
= -host_to_target_errno(errno
);
3129 msg
.msg_iovlen
= count
;
3133 if (fd_trans_target_to_host_data(fd
)) {
3136 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3137 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3138 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3139 msg
.msg_iov
->iov_len
);
3141 msg
.msg_iov
->iov_base
= host_msg
;
3142 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3146 ret
= target_to_host_cmsg(&msg
, msgp
);
3148 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3152 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3153 if (!is_error(ret
)) {
3155 if (fd_trans_host_to_target_data(fd
)) {
3156 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3157 MIN(msg
.msg_iov
->iov_len
, len
));
3159 ret
= host_to_target_cmsg(msgp
, &msg
);
3161 if (!is_error(ret
)) {
3162 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3163 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3164 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3165 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3166 msg
.msg_name
, msg
.msg_namelen
);
3178 unlock_iovec(vec
, target_vec
, count
, !send
);
3183 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3184 int flags
, int send
)
3187 struct target_msghdr
*msgp
;
3189 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3193 return -TARGET_EFAULT
;
3195 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3196 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3200 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3201 * so it might not have this *mmsg-specific flag either.
3203 #ifndef MSG_WAITFORONE
3204 #define MSG_WAITFORONE 0x10000
3207 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3208 unsigned int vlen
, unsigned int flags
,
3211 struct target_mmsghdr
*mmsgp
;
3215 if (vlen
> UIO_MAXIOV
) {
3219 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3221 return -TARGET_EFAULT
;
3224 for (i
= 0; i
< vlen
; i
++) {
3225 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3226 if (is_error(ret
)) {
3229 mmsgp
[i
].msg_len
= tswap32(ret
);
3230 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3231 if (flags
& MSG_WAITFORONE
) {
3232 flags
|= MSG_DONTWAIT
;
3236 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3238 /* Return number of datagrams sent if we sent any at all;
3239 * otherwise return the error.
3247 /* do_accept4() Must return target values and target errnos. */
3248 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3249 abi_ulong target_addrlen_addr
, int flags
)
3251 socklen_t addrlen
, ret_addrlen
;
3256 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3258 if (target_addr
== 0) {
3259 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3262 /* linux returns EINVAL if addrlen pointer is invalid */
3263 if (get_user_u32(addrlen
, target_addrlen_addr
))
3264 return -TARGET_EINVAL
;
3266 if ((int)addrlen
< 0) {
3267 return -TARGET_EINVAL
;
3270 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3271 return -TARGET_EINVAL
;
3273 addr
= alloca(addrlen
);
3275 ret_addrlen
= addrlen
;
3276 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3277 if (!is_error(ret
)) {
3278 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3279 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3280 ret
= -TARGET_EFAULT
;
3286 /* do_getpeername() Must return target values and target errnos. */
3287 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3288 abi_ulong target_addrlen_addr
)
3290 socklen_t addrlen
, ret_addrlen
;
3294 if (get_user_u32(addrlen
, target_addrlen_addr
))
3295 return -TARGET_EFAULT
;
3297 if ((int)addrlen
< 0) {
3298 return -TARGET_EINVAL
;
3301 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3302 return -TARGET_EFAULT
;
3304 addr
= alloca(addrlen
);
3306 ret_addrlen
= addrlen
;
3307 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3308 if (!is_error(ret
)) {
3309 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3310 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3311 ret
= -TARGET_EFAULT
;
3317 /* do_getsockname() Must return target values and target errnos. */
3318 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3319 abi_ulong target_addrlen_addr
)
3321 socklen_t addrlen
, ret_addrlen
;
3325 if (get_user_u32(addrlen
, target_addrlen_addr
))
3326 return -TARGET_EFAULT
;
3328 if ((int)addrlen
< 0) {
3329 return -TARGET_EINVAL
;
3332 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3333 return -TARGET_EFAULT
;
3335 addr
= alloca(addrlen
);
3337 ret_addrlen
= addrlen
;
3338 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3339 if (!is_error(ret
)) {
3340 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3341 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3342 ret
= -TARGET_EFAULT
;
3348 /* do_socketpair() Must return target values and target errnos. */
3349 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3350 abi_ulong target_tab_addr
)
3355 target_to_host_sock_type(&type
);
3357 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3358 if (!is_error(ret
)) {
3359 if (put_user_s32(tab
[0], target_tab_addr
)
3360 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3361 ret
= -TARGET_EFAULT
;
3366 /* do_sendto() Must return target values and target errnos. */
3367 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3368 abi_ulong target_addr
, socklen_t addrlen
)
3372 void *copy_msg
= NULL
;
3375 if ((int)addrlen
< 0) {
3376 return -TARGET_EINVAL
;
3379 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3381 return -TARGET_EFAULT
;
3382 if (fd_trans_target_to_host_data(fd
)) {
3383 copy_msg
= host_msg
;
3384 host_msg
= g_malloc(len
);
3385 memcpy(host_msg
, copy_msg
, len
);
3386 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3392 addr
= alloca(addrlen
+1);
3393 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3397 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3399 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3404 host_msg
= copy_msg
;
3406 unlock_user(host_msg
, msg
, 0);
3410 /* do_recvfrom() Must return target values and target errnos. */
3411 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3412 abi_ulong target_addr
,
3413 abi_ulong target_addrlen
)
3415 socklen_t addrlen
, ret_addrlen
;
3420 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3422 return -TARGET_EFAULT
;
3424 if (get_user_u32(addrlen
, target_addrlen
)) {
3425 ret
= -TARGET_EFAULT
;
3428 if ((int)addrlen
< 0) {
3429 ret
= -TARGET_EINVAL
;
3432 addr
= alloca(addrlen
);
3433 ret_addrlen
= addrlen
;
3434 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3435 addr
, &ret_addrlen
));
3437 addr
= NULL
; /* To keep compiler quiet. */
3438 addrlen
= 0; /* To keep compiler quiet. */
3439 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3441 if (!is_error(ret
)) {
3442 if (fd_trans_host_to_target_data(fd
)) {
3444 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3445 if (is_error(trans
)) {
3451 host_to_target_sockaddr(target_addr
, addr
,
3452 MIN(addrlen
, ret_addrlen
));
3453 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3454 ret
= -TARGET_EFAULT
;
3458 unlock_user(host_msg
, msg
, len
);
3461 unlock_user(host_msg
, msg
, 0);
3466 #ifdef TARGET_NR_socketcall
3467 /* do_socketcall() must return target values and target errnos. */
3468 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3470 static const unsigned nargs
[] = { /* number of arguments per operation */
3471 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3472 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3473 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3474 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3475 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3476 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3477 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3478 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3479 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3480 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3481 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3482 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3483 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3484 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3485 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3486 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3487 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3488 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3489 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3490 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3492 abi_long a
[6]; /* max 6 args */
3495 /* check the range of the first argument num */
3496 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3497 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3498 return -TARGET_EINVAL
;
3500 /* ensure we have space for args */
3501 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3502 return -TARGET_EINVAL
;
3504 /* collect the arguments in a[] according to nargs[] */
3505 for (i
= 0; i
< nargs
[num
]; ++i
) {
3506 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3507 return -TARGET_EFAULT
;
3510 /* now when we have the args, invoke the appropriate underlying function */
3512 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3513 return do_socket(a
[0], a
[1], a
[2]);
3514 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3515 return do_bind(a
[0], a
[1], a
[2]);
3516 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3517 return do_connect(a
[0], a
[1], a
[2]);
3518 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3519 return get_errno(listen(a
[0], a
[1]));
3520 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3521 return do_accept4(a
[0], a
[1], a
[2], 0);
3522 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3523 return do_getsockname(a
[0], a
[1], a
[2]);
3524 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3525 return do_getpeername(a
[0], a
[1], a
[2]);
3526 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3527 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3528 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3529 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3530 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3531 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3532 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3533 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3534 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3535 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3536 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3537 return get_errno(shutdown(a
[0], a
[1]));
3538 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3539 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3540 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3541 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3542 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3543 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3544 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3545 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3546 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3547 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3548 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3549 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3550 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3551 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3553 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3554 return -TARGET_EINVAL
;
3559 #define N_SHM_REGIONS 32
3561 static struct shm_region
{
3565 } shm_regions
[N_SHM_REGIONS
];
3567 #ifndef TARGET_SEMID64_DS
3568 /* asm-generic version of this struct */
3569 struct target_semid64_ds
3571 struct target_ipc_perm sem_perm
;
3572 abi_ulong sem_otime
;
3573 #if TARGET_ABI_BITS == 32
3574 abi_ulong __unused1
;
3576 abi_ulong sem_ctime
;
3577 #if TARGET_ABI_BITS == 32
3578 abi_ulong __unused2
;
3580 abi_ulong sem_nsems
;
3581 abi_ulong __unused3
;
3582 abi_ulong __unused4
;
3586 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3587 abi_ulong target_addr
)
3589 struct target_ipc_perm
*target_ip
;
3590 struct target_semid64_ds
*target_sd
;
3592 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3593 return -TARGET_EFAULT
;
3594 target_ip
= &(target_sd
->sem_perm
);
3595 host_ip
->__key
= tswap32(target_ip
->__key
);
3596 host_ip
->uid
= tswap32(target_ip
->uid
);
3597 host_ip
->gid
= tswap32(target_ip
->gid
);
3598 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3599 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3600 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3601 host_ip
->mode
= tswap32(target_ip
->mode
);
3603 host_ip
->mode
= tswap16(target_ip
->mode
);
3605 #if defined(TARGET_PPC)
3606 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3608 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3610 unlock_user_struct(target_sd
, target_addr
, 0);
3614 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3615 struct ipc_perm
*host_ip
)
3617 struct target_ipc_perm
*target_ip
;
3618 struct target_semid64_ds
*target_sd
;
3620 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3621 return -TARGET_EFAULT
;
3622 target_ip
= &(target_sd
->sem_perm
);
3623 target_ip
->__key
= tswap32(host_ip
->__key
);
3624 target_ip
->uid
= tswap32(host_ip
->uid
);
3625 target_ip
->gid
= tswap32(host_ip
->gid
);
3626 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3627 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3628 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3629 target_ip
->mode
= tswap32(host_ip
->mode
);
3631 target_ip
->mode
= tswap16(host_ip
->mode
);
3633 #if defined(TARGET_PPC)
3634 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3636 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3638 unlock_user_struct(target_sd
, target_addr
, 1);
3642 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3643 abi_ulong target_addr
)
3645 struct target_semid64_ds
*target_sd
;
3647 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3648 return -TARGET_EFAULT
;
3649 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3650 return -TARGET_EFAULT
;
3651 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3652 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3653 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3654 unlock_user_struct(target_sd
, target_addr
, 0);
3658 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3659 struct semid_ds
*host_sd
)
3661 struct target_semid64_ds
*target_sd
;
3663 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3664 return -TARGET_EFAULT
;
3665 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3666 return -TARGET_EFAULT
;
3667 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3668 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3669 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3670 unlock_user_struct(target_sd
, target_addr
, 1);
3674 struct target_seminfo
{
3687 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3688 struct seminfo
*host_seminfo
)
3690 struct target_seminfo
*target_seminfo
;
3691 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3692 return -TARGET_EFAULT
;
3693 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3694 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3695 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3696 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3697 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3698 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3699 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3700 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3701 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3702 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3703 unlock_user_struct(target_seminfo
, target_addr
, 1);
3709 struct semid_ds
*buf
;
3710 unsigned short *array
;
3711 struct seminfo
*__buf
;
3714 union target_semun
{
3721 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3722 abi_ulong target_addr
)
3725 unsigned short *array
;
3727 struct semid_ds semid_ds
;
3730 semun
.buf
= &semid_ds
;
3732 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3734 return get_errno(ret
);
3736 nsems
= semid_ds
.sem_nsems
;
3738 *host_array
= g_try_new(unsigned short, nsems
);
3740 return -TARGET_ENOMEM
;
3742 array
= lock_user(VERIFY_READ
, target_addr
,
3743 nsems
*sizeof(unsigned short), 1);
3745 g_free(*host_array
);
3746 return -TARGET_EFAULT
;
3749 for(i
=0; i
<nsems
; i
++) {
3750 __get_user((*host_array
)[i
], &array
[i
]);
3752 unlock_user(array
, target_addr
, 0);
3757 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3758 unsigned short **host_array
)
3761 unsigned short *array
;
3763 struct semid_ds semid_ds
;
3766 semun
.buf
= &semid_ds
;
3768 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3770 return get_errno(ret
);
3772 nsems
= semid_ds
.sem_nsems
;
3774 array
= lock_user(VERIFY_WRITE
, target_addr
,
3775 nsems
*sizeof(unsigned short), 0);
3777 return -TARGET_EFAULT
;
3779 for(i
=0; i
<nsems
; i
++) {
3780 __put_user((*host_array
)[i
], &array
[i
]);
3782 g_free(*host_array
);
3783 unlock_user(array
, target_addr
, 1);
3788 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3789 abi_ulong target_arg
)
3791 union target_semun target_su
= { .buf
= target_arg
};
3793 struct semid_ds dsarg
;
3794 unsigned short *array
= NULL
;
3795 struct seminfo seminfo
;
3796 abi_long ret
= -TARGET_EINVAL
;
3803 /* In 64 bit cross-endian situations, we will erroneously pick up
3804 * the wrong half of the union for the "val" element. To rectify
3805 * this, the entire 8-byte structure is byteswapped, followed by
3806 * a swap of the 4 byte val field. In other cases, the data is
3807 * already in proper host byte order. */
3808 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3809 target_su
.buf
= tswapal(target_su
.buf
);
3810 arg
.val
= tswap32(target_su
.val
);
3812 arg
.val
= target_su
.val
;
3814 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3818 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3822 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3823 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3830 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3834 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3835 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3841 arg
.__buf
= &seminfo
;
3842 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3843 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3851 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout semop(2) operation descriptor. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index */
    short sem_op;            /* operation (signed delta) */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
3864 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3865 abi_ulong target_addr
,
3868 struct target_sembuf
*target_sembuf
;
3871 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3872 nsops
*sizeof(struct target_sembuf
), 1);
3874 return -TARGET_EFAULT
;
3876 for(i
=0; i
<nsops
; i
++) {
3877 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3878 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3879 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3882 unlock_user(target_sembuf
, target_addr
, 0);
3887 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3888 defined(TARGET_NR_semtimedop)
3891 * This macro is required to handle the s390 variants, which passes the
3892 * arguments in a different order than default.
3895 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3896 (__nsops), (__timeout), (__sops)
3898 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3899 (__nsops), 0, (__sops), (__timeout)
3902 static inline abi_long
do_semtimedop(int semid
,
3907 struct sembuf sops
[nsops
];
3908 struct timespec ts
, *pts
= NULL
;
3913 if (target_to_host_timespec(pts
, timeout
)) {
3914 return -TARGET_EFAULT
;
3918 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3919 return -TARGET_EFAULT
;
3921 ret
= -TARGET_ENOSYS
;
3922 #ifdef __NR_semtimedop
3923 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
3926 if (ret
== -TARGET_ENOSYS
) {
3927 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
3928 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
3935 struct target_msqid_ds
3937 struct target_ipc_perm msg_perm
;
3938 abi_ulong msg_stime
;
3939 #if TARGET_ABI_BITS == 32
3940 abi_ulong __unused1
;
3942 abi_ulong msg_rtime
;
3943 #if TARGET_ABI_BITS == 32
3944 abi_ulong __unused2
;
3946 abi_ulong msg_ctime
;
3947 #if TARGET_ABI_BITS == 32
3948 abi_ulong __unused3
;
3950 abi_ulong __msg_cbytes
;
3952 abi_ulong msg_qbytes
;
3953 abi_ulong msg_lspid
;
3954 abi_ulong msg_lrpid
;
3955 abi_ulong __unused4
;
3956 abi_ulong __unused5
;
3959 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3960 abi_ulong target_addr
)
3962 struct target_msqid_ds
*target_md
;
3964 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3965 return -TARGET_EFAULT
;
3966 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3967 return -TARGET_EFAULT
;
3968 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3969 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3970 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3971 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3972 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3973 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3974 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3975 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3976 unlock_user_struct(target_md
, target_addr
, 0);
3980 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3981 struct msqid_ds
*host_md
)
3983 struct target_msqid_ds
*target_md
;
3985 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3986 return -TARGET_EFAULT
;
3987 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3988 return -TARGET_EFAULT
;
3989 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3990 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3991 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3992 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3993 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3994 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3995 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3996 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3997 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO/MSG_INFO).
 * Field set reconstructed from the copies in host_to_target_msginfo. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4012 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4013 struct msginfo
*host_msginfo
)
4015 struct target_msginfo
*target_msginfo
;
4016 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4017 return -TARGET_EFAULT
;
4018 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4019 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4020 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4021 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4022 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4023 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4024 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4025 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4026 unlock_user_struct(target_msginfo
, target_addr
, 1);
4030 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4032 struct msqid_ds dsarg
;
4033 struct msginfo msginfo
;
4034 abi_long ret
= -TARGET_EINVAL
;
4042 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4043 return -TARGET_EFAULT
;
4044 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4045 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4046 return -TARGET_EFAULT
;
4049 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4053 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4054 if (host_to_target_msginfo(ptr
, &msginfo
))
4055 return -TARGET_EFAULT
;
4062 struct target_msgbuf
{
4067 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4068 ssize_t msgsz
, int msgflg
)
4070 struct target_msgbuf
*target_mb
;
4071 struct msgbuf
*host_mb
;
4075 return -TARGET_EINVAL
;
4078 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4079 return -TARGET_EFAULT
;
4080 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4082 unlock_user_struct(target_mb
, msgp
, 0);
4083 return -TARGET_ENOMEM
;
4085 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4086 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4087 ret
= -TARGET_ENOSYS
;
4089 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4092 if (ret
== -TARGET_ENOSYS
) {
4094 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4097 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4103 unlock_user_struct(target_mb
, msgp
, 0);
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
4122 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4123 ssize_t msgsz
, abi_long msgtyp
,
4126 struct target_msgbuf
*target_mb
;
4128 struct msgbuf
*host_mb
;
4132 return -TARGET_EINVAL
;
4135 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4136 return -TARGET_EFAULT
;
4138 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4140 ret
= -TARGET_ENOMEM
;
4143 ret
= -TARGET_ENOSYS
;
4145 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4148 if (ret
== -TARGET_ENOSYS
) {
4149 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4150 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4155 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4156 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4157 if (!target_mtext
) {
4158 ret
= -TARGET_EFAULT
;
4161 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4162 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4165 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4169 unlock_user_struct(target_mb
, msgp
, 1);
4174 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4175 abi_ulong target_addr
)
4177 struct target_shmid_ds
*target_sd
;
4179 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4180 return -TARGET_EFAULT
;
4181 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4182 return -TARGET_EFAULT
;
4183 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4184 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4185 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4186 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4187 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4188 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4189 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4190 unlock_user_struct(target_sd
, target_addr
, 0);
4194 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4195 struct shmid_ds
*host_sd
)
4197 struct target_shmid_ds
*target_sd
;
4199 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4200 return -TARGET_EFAULT
;
4201 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4202 return -TARGET_EFAULT
;
4203 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4204 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4205 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4206 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4207 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4208 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4209 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4210 unlock_user_struct(target_sd
, target_addr
, 1);
4214 struct target_shminfo
{
4222 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4223 struct shminfo
*host_shminfo
)
4225 struct target_shminfo
*target_shminfo
;
4226 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4227 return -TARGET_EFAULT
;
4228 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4229 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4230 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4231 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4232 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4233 unlock_user_struct(target_shminfo
, target_addr
, 1);
4237 struct target_shm_info
{
4242 abi_ulong swap_attempts
;
4243 abi_ulong swap_successes
;
4246 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4247 struct shm_info
*host_shm_info
)
4249 struct target_shm_info
*target_shm_info
;
4250 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4251 return -TARGET_EFAULT
;
4252 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4253 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4254 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4255 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4256 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4257 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4258 unlock_user_struct(target_shm_info
, target_addr
, 1);
4262 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4264 struct shmid_ds dsarg
;
4265 struct shminfo shminfo
;
4266 struct shm_info shm_info
;
4267 abi_long ret
= -TARGET_EINVAL
;
4275 if (target_to_host_shmid_ds(&dsarg
, buf
))
4276 return -TARGET_EFAULT
;
4277 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4278 if (host_to_target_shmid_ds(buf
, &dsarg
))
4279 return -TARGET_EFAULT
;
4282 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4283 if (host_to_target_shminfo(buf
, &shminfo
))
4284 return -TARGET_EFAULT
;
4287 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4288 if (host_to_target_shm_info(buf
, &shm_info
))
4289 return -TARGET_EFAULT
;
4294 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4301 #ifndef TARGET_FORCE_SHMLBA
4302 /* For most architectures, SHMLBA is the same as the page size;
4303 * some architectures have larger values, in which case they should
4304 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4305 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4306 * and defining its own value for SHMLBA.
4308 * The kernel also permits SHMLBA to be set by the architecture to a
4309 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4310 * this means that addresses are rounded to the large size if
4311 * SHM_RND is set but addresses not aligned to that size are not rejected
4312 * as long as they are at least page-aligned. Since the only architecture
4313 * which uses this is ia64 this code doesn't provide for that oddity.
4315 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4317 return TARGET_PAGE_SIZE
;
4321 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4322 int shmid
, abi_ulong shmaddr
, int shmflg
)
4326 struct shmid_ds shm_info
;
4330 /* find out the length of the shared memory segment */
4331 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4332 if (is_error(ret
)) {
4333 /* can't get length, bail out */
4337 shmlba
= target_shmlba(cpu_env
);
4339 if (shmaddr
& (shmlba
- 1)) {
4340 if (shmflg
& SHM_RND
) {
4341 shmaddr
&= ~(shmlba
- 1);
4343 return -TARGET_EINVAL
;
4346 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4347 return -TARGET_EINVAL
;
4353 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4355 abi_ulong mmap_start
;
4357 /* In order to use the host shmat, we need to honor host SHMLBA. */
4358 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4360 if (mmap_start
== -1) {
4362 host_raddr
= (void *)-1;
4364 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4367 if (host_raddr
== (void *)-1) {
4369 return get_errno((long)host_raddr
);
4371 raddr
=h2g((unsigned long)host_raddr
);
4373 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4374 PAGE_VALID
| PAGE_READ
|
4375 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4377 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4378 if (!shm_regions
[i
].in_use
) {
4379 shm_regions
[i
].in_use
= true;
4380 shm_regions
[i
].start
= raddr
;
4381 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4391 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4398 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4399 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4400 shm_regions
[i
].in_use
= false;
4401 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4405 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third);
#else
        ret = do_semtimedop(first, ptr, second, fifth);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4533 /* kernel structure types definitions */
4535 #define STRUCT(name, ...) STRUCT_ ## name,
4536 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4538 #include "syscall_types.h"
4542 #undef STRUCT_SPECIAL
4544 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4545 #define STRUCT_SPECIAL(name)
4546 #include "syscall_types.h"
4548 #undef STRUCT_SPECIAL
4550 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/*
 * Handle FS_IOC_FIEMAP. The parameter for this ioctl is a struct fiemap
 * followed by an array of struct fiemap_extent whose size is set in
 * fiemap->fm_extent_count. The array is filled in by the ioctl.
 *
 * If the extent array does not fit into buf_temp, a larger buffer is
 * allocated; free_fm tracks that allocation so it is released on every
 * exit path (the visible original leaked it on the result-copy paths).
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4641 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4642 int fd
, int cmd
, abi_long arg
)
4644 const argtype
*arg_type
= ie
->arg_type
;
4648 struct ifconf
*host_ifconf
;
4650 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4651 int target_ifreq_size
;
4656 abi_long target_ifc_buf
;
4660 assert(arg_type
[0] == TYPE_PTR
);
4661 assert(ie
->access
== IOC_RW
);
4664 target_size
= thunk_type_size(arg_type
, 0);
4666 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4668 return -TARGET_EFAULT
;
4669 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4670 unlock_user(argptr
, arg
, 0);
4672 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4673 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4674 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4676 if (target_ifc_buf
!= 0) {
4677 target_ifc_len
= host_ifconf
->ifc_len
;
4678 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4679 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4681 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4682 if (outbufsz
> MAX_STRUCT_SIZE
) {
4684 * We can't fit all the extents into the fixed size buffer.
4685 * Allocate one that is large enough and use it instead.
4687 host_ifconf
= malloc(outbufsz
);
4689 return -TARGET_ENOMEM
;
4691 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4694 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4696 host_ifconf
->ifc_len
= host_ifc_len
;
4698 host_ifc_buf
= NULL
;
4700 host_ifconf
->ifc_buf
= host_ifc_buf
;
4702 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4703 if (!is_error(ret
)) {
4704 /* convert host ifc_len to target ifc_len */
4706 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4707 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4708 host_ifconf
->ifc_len
= target_ifc_len
;
4710 /* restore target ifc_buf */
4712 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4714 /* copy struct ifconf to target user */
4716 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4718 return -TARGET_EFAULT
;
4719 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4720 unlock_user(argptr
, arg
, target_size
);
4722 if (target_ifc_buf
!= 0) {
4723 /* copy ifreq[] to target user */
4724 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4725 for (i
= 0; i
< nb_ifreq
; i
++) {
4726 thunk_convert(argptr
+ i
* target_ifreq_size
,
4727 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4728 ifreq_arg_type
, THUNK_TARGET
);
4730 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4741 #if defined(CONFIG_USBFS)
4742 #if HOST_LONG_BITS > 64
4743 #error USBDEVFS thunks do not support >64 bit hosts yet.
4746 uint64_t target_urb_adr
;
4747 uint64_t target_buf_adr
;
4748 char *target_buf_ptr
;
4749 struct usbdevfs_urb host_urb
;
4752 static GHashTable
*usbdevfs_urb_hashtable(void)
4754 static GHashTable
*urb_hashtable
;
4756 if (!urb_hashtable
) {
4757 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4759 return urb_hashtable
;
4762 static void urb_hashtable_insert(struct live_urb
*urb
)
4764 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4765 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4768 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4770 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4771 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4774 static void urb_hashtable_remove(struct live_urb
*urb
)
4776 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4777 g_hash_table_remove(urb_hashtable
, urb
);
4781 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4782 int fd
, int cmd
, abi_long arg
)
4784 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4785 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4786 struct live_urb
*lurb
;
4790 uintptr_t target_urb_adr
;
4793 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4795 memset(buf_temp
, 0, sizeof(uint64_t));
4796 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4797 if (is_error(ret
)) {
4801 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4802 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4803 if (!lurb
->target_urb_adr
) {
4804 return -TARGET_EFAULT
;
4806 urb_hashtable_remove(lurb
);
4807 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4808 lurb
->host_urb
.buffer_length
);
4809 lurb
->target_buf_ptr
= NULL
;
4811 /* restore the guest buffer pointer */
4812 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4814 /* update the guest urb struct */
4815 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4818 return -TARGET_EFAULT
;
4820 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4821 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4823 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4824 /* write back the urb handle */
4825 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4828 return -TARGET_EFAULT
;
4831 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4832 target_urb_adr
= lurb
->target_urb_adr
;
4833 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4834 unlock_user(argptr
, arg
, target_size
);
4841 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4842 uint8_t *buf_temp
__attribute__((unused
)),
4843 int fd
, int cmd
, abi_long arg
)
4845 struct live_urb
*lurb
;
4847 /* map target address back to host URB with metadata. */
4848 lurb
= urb_hashtable_lookup(arg
);
4850 return -TARGET_EFAULT
;
4852 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4856 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4857 int fd
, int cmd
, abi_long arg
)
4859 const argtype
*arg_type
= ie
->arg_type
;
4864 struct live_urb
*lurb
;
4867 * each submitted URB needs to map to a unique ID for the
4868 * kernel, and that unique ID needs to be a pointer to
4869 * host memory. hence, we need to malloc for each URB.
4870 * isochronous transfers have a variable length struct.
4873 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4875 /* construct host copy of urb and metadata */
4876 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4878 return -TARGET_ENOMEM
;
4881 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4884 return -TARGET_EFAULT
;
4886 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4887 unlock_user(argptr
, arg
, 0);
4889 lurb
->target_urb_adr
= arg
;
4890 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4892 /* buffer space used depends on endpoint type so lock the entire buffer */
4893 /* control type urbs should check the buffer contents for true direction */
4894 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4895 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4896 lurb
->host_urb
.buffer_length
, 1);
4897 if (lurb
->target_buf_ptr
== NULL
) {
4899 return -TARGET_EFAULT
;
4902 /* update buffer pointer in host copy */
4903 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4905 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4906 if (is_error(ret
)) {
4907 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4910 urb_hashtable_insert(lurb
);
4915 #endif /* CONFIG_USBFS */
/*
 * Translate a device-mapper ioctl (DM_*) between target and host.
 *
 * dm ioctls carry a struct dm_ioctl header followed by a variable-length
 * payload at header offset data_start; both directions need per-command
 * payload conversion, so this cannot go through the generic thunk path.
 * Returns the host ioctl result or a -TARGET_* errno.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed dm_ioctl header first. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    /* Reject a data_start that would wrap the guest address. */
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the inbound payload, command by command. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* leading 64-bit sector number needs byte swapping */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is a chain of dm_target_spec structs, each followed by
         * a NUL-terminated parameter string and linked by ->next offsets. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            /* Recompute ->next for the host layout. */
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* Convert the outbound payload back to the target layout. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of dm_name_list records linked by ->next offsets. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    /* Out of room: tell the guest to retry with more. */
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec records plus status strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* Count word (offset 0) plus 64-bit dev_t array at offset 8. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions records linked by ->next. */
            struct dm_target_versions *vers =
                (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally convert the (possibly updated) header back. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/*
 * Translate BLKPG: the argument struct contains a pointer to a
 * struct blkpg_partition payload, so the inner pointer must be followed,
 * converted into a host-side copy, and swizzled before calling the host.
 * Returns the host ioctl result or a -TARGET_* errno.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;
    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;
    void *argptr;
    int target_size;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
/*
 * Translate routing-table ioctls (struct rtentry argument).
 *
 * struct rtentry embeds a rt_dev char* which must be translated from a
 * guest address into a locked host string; all other fields go through
 * the normal field-by-field thunk conversion.  The generic converter
 * cannot do this, hence the manual walk over the StructEntry offsets.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Non-NULL device name: lock the guest string in place. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above always visits the rt_dev field, so both pointers
       must have been set by now. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5266 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5267 int fd
, int cmd
, abi_long arg
)
5269 int sig
= target_to_host_signal(arg
);
5270 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5273 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5274 int fd
, int cmd
, abi_long arg
)
5279 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5280 if (is_error(ret
)) {
5284 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5285 if (copy_to_user_timeval(arg
, &tv
)) {
5286 return -TARGET_EFAULT
;
5289 if (copy_to_user_timeval64(arg
, &tv
)) {
5290 return -TARGET_EFAULT
;
5297 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5298 int fd
, int cmd
, abi_long arg
)
5303 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5304 if (is_error(ret
)) {
5308 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5309 if (host_to_target_timespec(arg
, &ts
)) {
5310 return -TARGET_EFAULT
;
5313 if (host_to_target_timespec64(arg
, &ts
)) {
5314 return -TARGET_EFAULT
;
5322 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5323 int fd
, int cmd
, abi_long arg
)
5325 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5326 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5332 static void unlock_drm_version(struct drm_version
*host_ver
,
5333 struct target_drm_version
*target_ver
,
5336 unlock_user(host_ver
->name
, target_ver
->name
,
5337 copy
? host_ver
->name_len
: 0);
5338 unlock_user(host_ver
->date
, target_ver
->date
,
5339 copy
? host_ver
->date_len
: 0);
5340 unlock_user(host_ver
->desc
, target_ver
->desc
,
5341 copy
? host_ver
->desc_len
: 0);
/*
 * Prepare a host struct drm_version from the guest's request: copy the
 * three buffer lengths and lock the corresponding guest buffers so the
 * host kernel can fill them directly.  On failure all buffers locked so
 * far are released.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    /* Zero-fill so unlock_drm_version() can safely unlock NULL buffers. */
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}
/*
 * Copy the host kernel's DRM_IOCTL_VERSION results back to the guest:
 * scalar fields via __put_user, and the name/date/desc strings by
 * unlocking the guest buffers with copy-back enabled.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    /* true => write the string buffers back to the guest as well. */
    unlock_drm_version(host_ver, target_ver, true);
}
/*
 * Translate DRM ioctls.  Only DRM_IOCTL_VERSION is handled: its struct
 * carries three user-pointer/length pairs that need locking and
 * conversion.  All other DRM commands return -TARGET_ENOSYS.
 */
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                /* Failure: drop the locked buffers without copy-back. */
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
/* Table of ioctls known to the emulator.  The IOCTL* macros below build
 * one IOCTLEntry initializer each: IOCTL for thunk-converted commands,
 * IOCTL_SPECIAL for commands with a custom do_ioctl_* handler, and
 * IOCTL_IGNORE for target-only commands with no host equivalent
 * (host_cmd == 0).  The entry list itself is pulled in from a separate
 * header (not visible in this excerpt). */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher: look the target command up in
 * ioctl_entries[], then either delegate to a custom handler, pass the
 * argument through unchanged, or thunk-convert a pointed-to struct in
 * the direction(s) given by the entry's access mode.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table is terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* Command with a dedicated translation routine. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Scalar argument: pass through untranslated. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Read-only for the guest: convert host result back out. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Write-only: convert the guest struct in, no copy back. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read-write: convert in, call, convert back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag (input mode) bit translation, target <-> host.
 * Each entry: target mask, target bits, host mask, host bits;
 * table is terminated by an all-zero entry. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
/* termios c_oflag (output mode) bit translation, target <-> host.
 * The *DLY entries are multi-bit delay fields: the mask selects the
 * whole field and the value one of its settings. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
/* termios c_cflag (control mode) bit translation, target <-> host.
 * CBAUD entries map each baud-rate code within the multi-bit CBAUD
 * field; CSIZE entries map the character-size field. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
/* termios c_lflag (local mode) bit translation, target <-> host. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
/*
 * Convert a target struct termios to the host layout: translate the four
 * flag words through the bitmask tables and remap each control character
 * from its target index to the host index.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Clear first: host c_cc slots with no target counterpart stay 0. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Convert a host struct termios to the target layout: the inverse of
 * target_to_host_termios() — flag words back through the same tables,
 * control characters remapped host index -> target index.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Clear first: target c_cc slots with no host counterpart stay 0. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: custom converters (the two
 * functions above) plus the per-ABI size and alignment of the struct.
 * NOTE(review): the index convention appears to be [0] = to-target,
 * [1] = to-host, matching the initializer order — confirm against the
 * StructEntry definition. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
};
5704 static bitmask_transtbl mmap_flags_tbl
[] = {
5705 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5706 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5707 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5708 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5709 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5710 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5711 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5712 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5713 MAP_DENYWRITE
, MAP_DENYWRITE
},
5714 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5715 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5716 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5717 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5718 MAP_NORESERVE
, MAP_NORESERVE
},
5719 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5720 /* MAP_STACK had been ignored by the kernel for quite some time.
5721 Recognize it for the target insofar as we do not want to pass
5722 it through to the host. */
5723 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5728 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5729 * TARGET_I386 is defined if TARGET_X86_64 is defined
5731 #if defined(TARGET_I386)
5733 /* NOTE: there is really one LDT for all the threads */
5734 static uint8_t *ldt_table
;
/*
 * modify_ldt(func=0): copy the emulated LDT out to the guest buffer.
 * Returns the number of bytes copied (0 if no LDT has been allocated
 * yet) or -TARGET_EFAULT.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/*
 * modify_ldt(func=1/0x11): install one descriptor into the emulated LDT.
 * 'oldmode' selects the legacy modify_ldt semantics (stricter contents
 * check, 'useable' bit ignored).  The descriptor packing below mirrors
 * the Linux kernel's fill_ldt()/write_ldt() bit layout.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the user_desc-style flags word. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes one
 * entry with the old semantics, func 0x11 with the new semantics.
 */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
5870 #if defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * entry_number == -1 requests allocation of a free TLS slot, whose index
 * is written back to the guest.  Same descriptor packing as write_ldt().
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and report it back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the user_desc-style flags word. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * get_thread_area(2): read a TLS descriptor from the emulated GDT and
 * unpack it back into the guest's user_desc layout (the inverse of the
 * packing done in do_set_thread_area()).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual descriptor bits (mirror of the packing). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
/* arch_prctl(2) is a 64-bit-only syscall; the 32-bit ABI build provides
 * this stub so callers always have a definition to link against. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
/*
 * arch_prctl(2) for the 64-bit ABI: get or set the FS/GS segment base.
 * SET loads a null selector and writes the base directly; GET copies the
 * current base out to the guest address.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_ulong val;
    int idx;
    abi_long ret = 0;

    switch (code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Null selector, then set the base explicitly. */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
6040 #endif /* defined(TARGET_ABI32 */
6042 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000

/* Serializes thread creation: do_fork() holds it while setting up the
 * child's TLS state, clone_func() waits on it before entering cpu_loop. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Parent -> child handoff for a CLONE_VM clone. */
typedef struct {
    pthread_mutex_t mutex;      /* protects the ready handshake */
    pthread_cond_t cond;        /* child signals the parent it is ready */
    pthread_t thread;
    uint32_t tid;               /* filled in by the child */
    abi_ulong child_tidptr;     /* guest addresses for CHILD/PARENT_SETTID */
    abi_ulong parent_tidptr;
    sigset_t sigmask;           /* signal mask the child should adopt */
} new_thread_info;
/*
 * Entry point of a CLONE_VM child thread: register with RCU/TCG,
 * publish the tid (CLONE_CHILD_SETTID / CLONE_PARENT_SETTID), restore
 * the signal mask, signal readiness to the parent, then wait for the
 * parent to finish TLS setup before entering the CPU loop.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6093 /* do_fork() Must return host values and target errnos (unlike most
6094 do_*() functions). */
6095 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6096 abi_ulong parent_tidptr
, target_ulong newtls
,
6097 abi_ulong child_tidptr
)
6099 CPUState
*cpu
= env_cpu(env
);
6103 CPUArchState
*new_env
;
6106 flags
&= ~CLONE_IGNORED_FLAGS
;
6108 /* Emulate vfork() with fork() */
6109 if (flags
& CLONE_VFORK
)
6110 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6112 if (flags
& CLONE_VM
) {
6113 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6114 new_thread_info info
;
6115 pthread_attr_t attr
;
6117 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6118 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6119 return -TARGET_EINVAL
;
6122 ts
= g_new0(TaskState
, 1);
6123 init_task_state(ts
);
6125 /* Grab a mutex so that thread setup appears atomic. */
6126 pthread_mutex_lock(&clone_lock
);
6128 /* we create a new CPU instance. */
6129 new_env
= cpu_copy(env
);
6130 /* Init regs that differ from the parent. */
6131 cpu_clone_regs_child(new_env
, newsp
, flags
);
6132 cpu_clone_regs_parent(env
, flags
);
6133 new_cpu
= env_cpu(new_env
);
6134 new_cpu
->opaque
= ts
;
6135 ts
->bprm
= parent_ts
->bprm
;
6136 ts
->info
= parent_ts
->info
;
6137 ts
->signal_mask
= parent_ts
->signal_mask
;
6139 if (flags
& CLONE_CHILD_CLEARTID
) {
6140 ts
->child_tidptr
= child_tidptr
;
6143 if (flags
& CLONE_SETTLS
) {
6144 cpu_set_tls (new_env
, newtls
);
6147 memset(&info
, 0, sizeof(info
));
6148 pthread_mutex_init(&info
.mutex
, NULL
);
6149 pthread_mutex_lock(&info
.mutex
);
6150 pthread_cond_init(&info
.cond
, NULL
);
6152 if (flags
& CLONE_CHILD_SETTID
) {
6153 info
.child_tidptr
= child_tidptr
;
6155 if (flags
& CLONE_PARENT_SETTID
) {
6156 info
.parent_tidptr
= parent_tidptr
;
6159 ret
= pthread_attr_init(&attr
);
6160 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6161 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6162 /* It is not safe to deliver signals until the child has finished
6163 initializing, so temporarily block all signals. */
6164 sigfillset(&sigmask
);
6165 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6166 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6168 /* If this is our first additional thread, we need to ensure we
6169 * generate code for parallel execution and flush old translations.
6171 if (!parallel_cpus
) {
6172 parallel_cpus
= true;
6176 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6177 /* TODO: Free new CPU state if thread creation failed. */
6179 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6180 pthread_attr_destroy(&attr
);
6182 /* Wait for the child to initialize. */
6183 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6188 pthread_mutex_unlock(&info
.mutex
);
6189 pthread_cond_destroy(&info
.cond
);
6190 pthread_mutex_destroy(&info
.mutex
);
6191 pthread_mutex_unlock(&clone_lock
);
6193 /* if no CLONE_VM, we consider it is a fork */
6194 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6195 return -TARGET_EINVAL
;
6198 /* We can't support custom termination signals */
6199 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6200 return -TARGET_EINVAL
;
6203 if (block_signals()) {
6204 return -TARGET_ERESTARTSYS
;
6210 /* Child Process. */
6211 cpu_clone_regs_child(env
, newsp
, flags
);
6213 /* There is a race condition here. The parent process could
6214 theoretically read the TID in the child process before the child
6215 tid is set. This would require using either ptrace
6216 (not implemented) or having *_tidptr to point at a shared memory
6217 mapping. We can't repeat the spinlock hack used above because
6218 the child process gets its own copy of the lock. */
6219 if (flags
& CLONE_CHILD_SETTID
)
6220 put_user_u32(sys_gettid(), child_tidptr
);
6221 if (flags
& CLONE_PARENT_SETTID
)
6222 put_user_u32(sys_gettid(), parent_tidptr
);
6223 ts
= (TaskState
*)cpu
->opaque
;
6224 if (flags
& CLONE_SETTLS
)
6225 cpu_set_tls (env
, newtls
);
6226 if (flags
& CLONE_CHILD_CLEARTID
)
6227 ts
->child_tidptr
= child_tidptr
;
6229 cpu_clone_regs_parent(env
, flags
);
6236 /* warning : doesn't handle linux specific flags... */
6237 static int target_to_host_fcntl_cmd(int cmd
)
6242 case TARGET_F_DUPFD
:
6243 case TARGET_F_GETFD
:
6244 case TARGET_F_SETFD
:
6245 case TARGET_F_GETFL
:
6246 case TARGET_F_SETFL
:
6247 case TARGET_F_OFD_GETLK
:
6248 case TARGET_F_OFD_SETLK
:
6249 case TARGET_F_OFD_SETLKW
:
6252 case TARGET_F_GETLK
:
6255 case TARGET_F_SETLK
:
6258 case TARGET_F_SETLKW
:
6261 case TARGET_F_GETOWN
:
6264 case TARGET_F_SETOWN
:
6267 case TARGET_F_GETSIG
:
6270 case TARGET_F_SETSIG
:
6273 #if TARGET_ABI_BITS == 32
6274 case TARGET_F_GETLK64
:
6277 case TARGET_F_SETLK64
:
6280 case TARGET_F_SETLKW64
:
6284 case TARGET_F_SETLEASE
:
6287 case TARGET_F_GETLEASE
:
6290 #ifdef F_DUPFD_CLOEXEC
6291 case TARGET_F_DUPFD_CLOEXEC
:
6292 ret
= F_DUPFD_CLOEXEC
;
6295 case TARGET_F_NOTIFY
:
6299 case TARGET_F_GETOWN_EX
:
6304 case TARGET_F_SETOWN_EX
:
6309 case TARGET_F_SETPIPE_SZ
:
6312 case TARGET_F_GETPIPE_SZ
:
6317 ret
= -TARGET_EINVAL
;
6321 #if defined(__powerpc64__)
6322 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6323 * is not supported by kernel. The glibc fcntl call actually adjusts
6324 * them to 5, 6 and 7 before making the syscall(). Since we make the
6325 * syscall directly, adjust to what is supported by the kernel.
6327 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6328 ret
-= F_GETLK64
- 5;
6335 #define FLOCK_TRANSTBL \
6337 TRANSTBL_CONVERT(F_RDLCK); \
6338 TRANSTBL_CONVERT(F_WRLCK); \
6339 TRANSTBL_CONVERT(F_UNLCK); \
6340 TRANSTBL_CONVERT(F_EXLCK); \
6341 TRANSTBL_CONVERT(F_SHLCK); \
6344 static int target_to_host_flock(int type
)
6346 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6348 #undef TRANSTBL_CONVERT
6349 return -TARGET_EINVAL
;
6352 static int host_to_target_flock(int type
)
6354 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6356 #undef TRANSTBL_CONVERT
6357 /* if we don't know how to convert the value coming
6358 * from the host we copy to the target field as-is
6363 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6364 abi_ulong target_flock_addr
)
6366 struct target_flock
*target_fl
;
6369 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6370 return -TARGET_EFAULT
;
6373 __get_user(l_type
, &target_fl
->l_type
);
6374 l_type
= target_to_host_flock(l_type
);
6378 fl
->l_type
= l_type
;
6379 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6380 __get_user(fl
->l_start
, &target_fl
->l_start
);
6381 __get_user(fl
->l_len
, &target_fl
->l_len
);
6382 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6383 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6387 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6388 const struct flock64
*fl
)
6390 struct target_flock
*target_fl
;
6393 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6394 return -TARGET_EFAULT
;
6397 l_type
= host_to_target_flock(fl
->l_type
);
6398 __put_user(l_type
, &target_fl
->l_type
);
6399 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6400 __put_user(fl
->l_start
, &target_fl
->l_start
);
6401 __put_user(fl
->l_len
, &target_fl
->l_len
);
6402 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6403 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6407 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6408 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6410 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6411 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6412 abi_ulong target_flock_addr
)
6414 struct target_oabi_flock64
*target_fl
;
6417 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6418 return -TARGET_EFAULT
;
6421 __get_user(l_type
, &target_fl
->l_type
);
6422 l_type
= target_to_host_flock(l_type
);
6426 fl
->l_type
= l_type
;
6427 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6428 __get_user(fl
->l_start
, &target_fl
->l_start
);
6429 __get_user(fl
->l_len
, &target_fl
->l_len
);
6430 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6431 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6435 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6436 const struct flock64
*fl
)
6438 struct target_oabi_flock64
*target_fl
;
6441 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6442 return -TARGET_EFAULT
;
6445 l_type
= host_to_target_flock(fl
->l_type
);
6446 __put_user(l_type
, &target_fl
->l_type
);
6447 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6448 __put_user(fl
->l_start
, &target_fl
->l_start
);
6449 __put_user(fl
->l_len
, &target_fl
->l_len
);
6450 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6451 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6456 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6457 abi_ulong target_flock_addr
)
6459 struct target_flock64
*target_fl
;
6462 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6463 return -TARGET_EFAULT
;
6466 __get_user(l_type
, &target_fl
->l_type
);
6467 l_type
= target_to_host_flock(l_type
);
6471 fl
->l_type
= l_type
;
6472 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6473 __get_user(fl
->l_start
, &target_fl
->l_start
);
6474 __get_user(fl
->l_len
, &target_fl
->l_len
);
6475 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6476 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6480 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6481 const struct flock64
*fl
)
6483 struct target_flock64
*target_fl
;
6486 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6487 return -TARGET_EFAULT
;
6490 l_type
= host_to_target_flock(fl
->l_type
);
6491 __put_user(l_type
, &target_fl
->l_type
);
6492 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6493 __put_user(fl
->l_start
, &target_fl
->l_start
);
6494 __put_user(fl
->l_len
, &target_fl
->l_len
);
6495 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6496 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6500 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6502 struct flock64 fl64
;
6504 struct f_owner_ex fox
;
6505 struct target_f_owner_ex
*target_fox
;
6508 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6510 if (host_cmd
== -TARGET_EINVAL
)
6514 case TARGET_F_GETLK
:
6515 ret
= copy_from_user_flock(&fl64
, arg
);
6519 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6521 ret
= copy_to_user_flock(arg
, &fl64
);
6525 case TARGET_F_SETLK
:
6526 case TARGET_F_SETLKW
:
6527 ret
= copy_from_user_flock(&fl64
, arg
);
6531 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6534 case TARGET_F_GETLK64
:
6535 case TARGET_F_OFD_GETLK
:
6536 ret
= copy_from_user_flock64(&fl64
, arg
);
6540 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6542 ret
= copy_to_user_flock64(arg
, &fl64
);
6545 case TARGET_F_SETLK64
:
6546 case TARGET_F_SETLKW64
:
6547 case TARGET_F_OFD_SETLK
:
6548 case TARGET_F_OFD_SETLKW
:
6549 ret
= copy_from_user_flock64(&fl64
, arg
);
6553 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6556 case TARGET_F_GETFL
:
6557 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6559 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6563 case TARGET_F_SETFL
:
6564 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6565 target_to_host_bitmask(arg
,
6570 case TARGET_F_GETOWN_EX
:
6571 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6573 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6574 return -TARGET_EFAULT
;
6575 target_fox
->type
= tswap32(fox
.type
);
6576 target_fox
->pid
= tswap32(fox
.pid
);
6577 unlock_user_struct(target_fox
, arg
, 1);
6583 case TARGET_F_SETOWN_EX
:
6584 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6585 return -TARGET_EFAULT
;
6586 fox
.type
= tswap32(target_fox
->type
);
6587 fox
.pid
= tswap32(target_fox
->pid
);
6588 unlock_user_struct(target_fox
, arg
, 0);
6589 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6593 case TARGET_F_SETOWN
:
6594 case TARGET_F_GETOWN
:
6595 case TARGET_F_SETSIG
:
6596 case TARGET_F_GETSIG
:
6597 case TARGET_F_SETLEASE
:
6598 case TARGET_F_GETLEASE
:
6599 case TARGET_F_SETPIPE_SZ
:
6600 case TARGET_F_GETPIPE_SZ
:
6601 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6605 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16

/* Clamp a 32-bit uid into the legacy 16-bit range (overflow -> 65534). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* Clamp a 32-bit gid into the legacy 16-bit range (overflow -> 65534). */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* Widen a 16-bit gid, preserving the -1 "no change" sentinel. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6677 /* We must do direct syscalls for setting UID/GID, because we want to
6678 * implement the Linux system call semantics of "change only for this thread",
6679 * not the libc/POSIX semantics of "change for all threads in process".
6680 * (See http://ewontfix.com/17/ for more details.)
6681 * We use the 32-bit version of the syscalls if present; if it is not
6682 * then either the host architecture supports 32-bit UIDs natively with
6683 * the standard syscall, or the 16-bit UID is the best we can do.
6685 #ifdef __NR_setuid32
6686 #define __NR_sys_setuid __NR_setuid32
6688 #define __NR_sys_setuid __NR_setuid
6690 #ifdef __NR_setgid32
6691 #define __NR_sys_setgid __NR_setgid32
6693 #define __NR_sys_setgid __NR_setgid
6695 #ifdef __NR_setresuid32
6696 #define __NR_sys_setresuid __NR_setresuid32
6698 #define __NR_sys_setresuid __NR_setresuid
6700 #ifdef __NR_setresgid32
6701 #define __NR_sys_setresgid __NR_setresgid32
6703 #define __NR_sys_setresgid __NR_setresgid
6706 _syscall1(int, sys_setuid
, uid_t
, uid
)
6707 _syscall1(int, sys_setgid
, gid_t
, gid
)
6708 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6709 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6711 void syscall_init(void)
6714 const argtype
*arg_type
;
6718 thunk_init(STRUCT_MAX
);
6720 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6721 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6722 #include "syscall_types.h"
6724 #undef STRUCT_SPECIAL
6726 /* Build target_to_host_errno_table[] table from
6727 * host_to_target_errno_table[]. */
6728 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6729 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6732 /* we patch the ioctl size if necessary. We rely on the fact that
6733 no ioctl has all the bits at '1' in the size field */
6735 while (ie
->target_cmd
!= 0) {
6736 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6737 TARGET_IOC_SIZEMASK
) {
6738 arg_type
= ie
->arg_type
;
6739 if (arg_type
[0] != TYPE_PTR
) {
6740 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6745 size
= thunk_type_size(arg_type
, 0);
6746 ie
->target_cmd
= (ie
->target_cmd
&
6747 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6748 (size
<< TARGET_IOC_SIZESHIFT
);
6751 /* automatic consistency check if same arch */
6752 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6753 (defined(__x86_64__) && defined(TARGET_X86_64))
6754 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6755 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6756 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#ifdef TARGET_NR_truncate64
/*
 * truncate64 for 32-bit guests: the 64-bit length arrives split across two
 * registers, which on some ABIs must start on an even register pair.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        /* Skip the padding register so the pair is (arg3, arg4). */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64 for 32-bit guests: same even-register-pair handling as
 * target_truncate64() above.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        /* Skip the padding register so the pair is (arg3, arg4). */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a guest itimerspec and byte-swap it into a host itimerspec.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
        tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
        tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * Byte-swap a host itimerspec into the guest's buffer.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    /* NOTE(review): source shows unlock with copy=0 here despite
     * VERIFY_WRITE; preserved as-is — confirm intended. */
    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}
#endif
6837 #if defined(TARGET_NR_adjtimex) || \
6838 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6839 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6840 abi_long target_addr
)
6842 struct target_timex
*target_tx
;
6844 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6845 return -TARGET_EFAULT
;
6848 __get_user(host_tx
->modes
, &target_tx
->modes
);
6849 __get_user(host_tx
->offset
, &target_tx
->offset
);
6850 __get_user(host_tx
->freq
, &target_tx
->freq
);
6851 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6852 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6853 __get_user(host_tx
->status
, &target_tx
->status
);
6854 __get_user(host_tx
->constant
, &target_tx
->constant
);
6855 __get_user(host_tx
->precision
, &target_tx
->precision
);
6856 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6857 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6858 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6859 __get_user(host_tx
->tick
, &target_tx
->tick
);
6860 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6861 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6862 __get_user(host_tx
->shift
, &target_tx
->shift
);
6863 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6864 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6865 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6866 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6867 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6868 __get_user(host_tx
->tai
, &target_tx
->tai
);
6870 unlock_user_struct(target_tx
, target_addr
, 0);
6874 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6875 struct timex
*host_tx
)
6877 struct target_timex
*target_tx
;
6879 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6880 return -TARGET_EFAULT
;
6883 __put_user(host_tx
->modes
, &target_tx
->modes
);
6884 __put_user(host_tx
->offset
, &target_tx
->offset
);
6885 __put_user(host_tx
->freq
, &target_tx
->freq
);
6886 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6887 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6888 __put_user(host_tx
->status
, &target_tx
->status
);
6889 __put_user(host_tx
->constant
, &target_tx
->constant
);
6890 __put_user(host_tx
->precision
, &target_tx
->precision
);
6891 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6892 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6893 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6894 __put_user(host_tx
->tick
, &target_tx
->tick
);
6895 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6896 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6897 __put_user(host_tx
->shift
, &target_tx
->shift
);
6898 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6899 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6900 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6901 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6902 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6903 __put_user(host_tx
->tai
, &target_tx
->tai
);
6905 unlock_user_struct(target_tx
, target_addr
, 1);
6910 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6911 abi_ulong target_addr
)
6913 struct target_sigevent
*target_sevp
;
6915 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6916 return -TARGET_EFAULT
;
6919 /* This union is awkward on 64 bit systems because it has a 32 bit
6920 * integer and a pointer in it; we follow the conversion approach
6921 * used for handling sigval types in signal.c so the guest should get
6922 * the correct value back even if we did a 64 bit byteswap and it's
6923 * using the 32 bit integer.
6925 host_sevp
->sigev_value
.sival_ptr
=
6926 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6927 host_sevp
->sigev_signo
=
6928 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6929 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6930 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6932 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest MCL_* mlockall flags into the host's flag values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
/*
 * Convert a host `struct stat` into the guest's stat64 layout and write it
 * to guest memory.  On 32-bit Arm the EABI layout differs from the generic
 * one, so that case is handled separately.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec,
                   &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec,
                   &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec,
                   &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec,
                   &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec,
                   &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec,
                   &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Byte-swap a statx result (already in target_statx layout) into the
 * guest's buffer.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);
    return 0;
}
#endif
/*
 * Issue the raw futex syscall, selecting the time64 variant when the host
 * is 32-bit with a 64-bit time_t.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7094 static int do_safe_futex(int *uaddr
, int op
, int val
,
7095 const struct timespec
*timeout
, int *uaddr2
,
7098 #if HOST_LONG_BITS == 64
7099 #if defined(__NR_futex)
7100 /* always a 64-bit time_t, it doesn't define _time64 version */
7101 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7103 #else /* HOST_LONG_BITS == 64 */
7104 #if defined(__NR_futex_time64)
7105 if (sizeof(timeout
->tv_sec
) == 8) {
7106 /* _time64 function on 32bit arch */
7107 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7111 #if defined(__NR_futex)
7112 /* old function on 32bit arch */
7113 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7115 #endif /* HOST_LONG_BITS == 64 */
7116 return -TARGET_ENOSYS
;
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
/* Same as do_futex() but the guest timeout is a 64-bit timespec. */
static int do_futex_time64(target_ulong uaddr, int op, int val,
                           target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec64(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
7218 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7219 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7220 abi_long handle
, abi_long mount_id
,
7223 struct file_handle
*target_fh
;
7224 struct file_handle
*fh
;
7228 unsigned int size
, total_size
;
7230 if (get_user_s32(size
, handle
)) {
7231 return -TARGET_EFAULT
;
7234 name
= lock_user_string(pathname
);
7236 return -TARGET_EFAULT
;
7239 total_size
= sizeof(struct file_handle
) + size
;
7240 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7242 unlock_user(name
, pathname
, 0);
7243 return -TARGET_EFAULT
;
7246 fh
= g_malloc0(total_size
);
7247 fh
->handle_bytes
= size
;
7249 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7250 unlock_user(name
, pathname
, 0);
7252 /* man name_to_handle_at(2):
7253 * Other than the use of the handle_bytes field, the caller should treat
7254 * the file_handle structure as an opaque data type
7257 memcpy(target_fh
, fh
, total_size
);
7258 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7259 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7261 unlock_user(target_fh
, handle
, total_size
);
7263 if (put_user_s32(mid
, mount_id
)) {
7264 return -TARGET_EFAULT
;
7272 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7273 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7276 struct file_handle
*target_fh
;
7277 struct file_handle
*fh
;
7278 unsigned int size
, total_size
;
7281 if (get_user_s32(size
, handle
)) {
7282 return -TARGET_EFAULT
;
7285 total_size
= sizeof(struct file_handle
) + size
;
7286 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7288 return -TARGET_EFAULT
;
7291 fh
= g_memdup(target_fh
, total_size
);
7292 fh
->handle_bytes
= size
;
7293 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7295 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7296 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7300 unlock_user(target_fh
, handle
, total_size
);
7306 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7308 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7311 target_sigset_t
*target_mask
;
7315 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7316 return -TARGET_EINVAL
;
7318 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7319 return -TARGET_EFAULT
;
7322 target_to_host_sigset(&host_mask
, target_mask
);
7324 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7326 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7328 fd_trans_register(ret
, &target_signalfd_trans
);
7331 unlock_user_struct(target_mask
, mask
, 0);
/*
 * Map host to target signal numbers for the wait family of syscalls.
 * Only the signal field is translated; every other status bit is
 * assumed to use the same layout on host and target.
 */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal sits in the low 7 bits; swap it for the
           target's number, keep the core-dump flag and the rest. */
        int target_sig = host_to_target_signal(WTERMSIG(status));
        return target_sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15; the 0x7f stop marker stays
           in the low byte. */
        int target_sig = host_to_target_signal(WSTOPSIG(status));
        return (target_sig << 8) | (status & 0xff);
    }
    /* Normal exit (or unknown encoding): pass the status through. */
    return status;
}
7351 static int open_self_cmdline(void *cpu_env
, int fd
)
7353 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7354 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7357 for (i
= 0; i
< bprm
->argc
; i
++) {
7358 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7360 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7368 static int open_self_maps(void *cpu_env
, int fd
)
7370 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7371 TaskState
*ts
= cpu
->opaque
;
7372 GSList
*map_info
= read_self_maps();
7376 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7377 MapInfo
*e
= (MapInfo
*) s
->data
;
7379 if (h2g_valid(e
->start
)) {
7380 unsigned long min
= e
->start
;
7381 unsigned long max
= e
->end
;
7382 int flags
= page_get_flags(h2g(min
));
7385 max
= h2g_valid(max
- 1) ?
7386 max
: (uintptr_t) g2h(GUEST_ADDR_MAX
) + 1;
7388 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7392 if (h2g(min
) == ts
->info
->stack_limit
) {
7398 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7399 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7400 h2g(min
), h2g(max
- 1) + 1,
7401 e
->is_read
? 'r' : '-',
7402 e
->is_write
? 'w' : '-',
7403 e
->is_exec
? 'x' : '-',
7404 e
->is_priv
? 'p' : '-',
7405 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7407 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7414 free_self_maps(map_info
);
7416 #ifdef TARGET_VSYSCALL_PAGE
7418 * We only support execution from the vsyscall page.
7419 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7421 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7422 " --xp 00000000 00:00 0",
7423 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7424 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7430 static int open_self_stat(void *cpu_env
, int fd
)
7432 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7433 TaskState
*ts
= cpu
->opaque
;
7434 g_autoptr(GString
) buf
= g_string_new(NULL
);
7437 for (i
= 0; i
< 44; i
++) {
7440 g_string_printf(buf
, FMT_pid
" ", getpid());
7441 } else if (i
== 1) {
7443 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7444 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7445 g_string_printf(buf
, "(%.15s) ", bin
);
7446 } else if (i
== 27) {
7448 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7450 /* for the rest, there is MasterCard */
7451 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7454 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7462 static int open_self_auxv(void *cpu_env
, int fd
)
7464 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7465 TaskState
*ts
= cpu
->opaque
;
7466 abi_ulong auxv
= ts
->info
->saved_auxv
;
7467 abi_ulong len
= ts
->info
->auxv_len
;
7471 * Auxiliary vector is stored in target process stack.
7472 * read in whole auxv vector and copy it to file
7474 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7478 r
= write(fd
, ptr
, len
);
7485 lseek(fd
, 0, SEEK_SET
);
7486 unlock_user(ptr
, auxv
, len
);
7492 static int is_proc_myself(const char *filename
, const char *entry
)
7494 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
7495 filename
+= strlen("/proc/");
7496 if (!strncmp(filename
, "self/", strlen("self/"))) {
7497 filename
+= strlen("self/");
7498 } else if (*filename
>= '1' && *filename
<= '9') {
7500 snprintf(myself
, sizeof(myself
), "%d/", getpid());
7501 if (!strncmp(filename
, myself
, strlen(myself
))) {
7502 filename
+= strlen(myself
);
7509 if (!strcmp(filename
, entry
)) {
7516 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7517 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/*
 * Exact-match comparator for absolute /proc paths; counterpart of
 * is_proc_myself() for entries that are not per-process.
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7524 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7525 static int open_net_route(void *cpu_env
, int fd
)
7532 fp
= fopen("/proc/net/route", "r");
7539 read
= getline(&line
, &len
, fp
);
7540 dprintf(fd
, "%s", line
);
7544 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7546 uint32_t dest
, gw
, mask
;
7547 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7550 fields
= sscanf(line
,
7551 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7552 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7553 &mask
, &mtu
, &window
, &irtt
);
7557 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7558 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7559 metric
, tswap32(mask
), mtu
, window
, irtt
);
7569 #if defined(TARGET_SPARC)
7570 static int open_cpuinfo(void *cpu_env
, int fd
)
7572 dprintf(fd
, "type\t\t: sun4u\n");
7577 #if defined(TARGET_HPPA)
7578 static int open_cpuinfo(void *cpu_env
, int fd
)
7580 dprintf(fd
, "cpu family\t: PA-RISC 1.1e\n");
7581 dprintf(fd
, "cpu\t\t: PA7300LC (PCX-L2)\n");
7582 dprintf(fd
, "capabilities\t: os32\n");
7583 dprintf(fd
, "model\t\t: 9000/778/B160L\n");
7584 dprintf(fd
, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7589 #if defined(TARGET_M68K)
7590 static int open_hardware(void *cpu_env
, int fd
)
7592 dprintf(fd
, "Model:\t\tqemu-m68k\n");
7597 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7600 const char *filename
;
7601 int (*fill
)(void *cpu_env
, int fd
);
7602 int (*cmp
)(const char *s1
, const char *s2
);
7604 const struct fake_open
*fake_open
;
7605 static const struct fake_open fakes
[] = {
7606 { "maps", open_self_maps
, is_proc_myself
},
7607 { "stat", open_self_stat
, is_proc_myself
},
7608 { "auxv", open_self_auxv
, is_proc_myself
},
7609 { "cmdline", open_self_cmdline
, is_proc_myself
},
7610 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7611 { "/proc/net/route", open_net_route
, is_proc
},
7613 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7614 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
7616 #if defined(TARGET_M68K)
7617 { "/proc/hardware", open_hardware
, is_proc
},
7619 { NULL
, NULL
, NULL
}
7622 if (is_proc_myself(pathname
, "exe")) {
7623 int execfd
= qemu_getauxval(AT_EXECFD
);
7624 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7627 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7628 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7633 if (fake_open
->filename
) {
7635 char filename
[PATH_MAX
];
7638 /* create temporary file to map stat to */
7639 tmpdir
= getenv("TMPDIR");
7642 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7643 fd
= mkstemp(filename
);
7649 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7655 lseek(fd
, 0, SEEK_SET
);
7660 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7663 #define TIMER_MAGIC 0x0caf0000
7664 #define TIMER_MAGIC_MASK 0xffff0000
7666 /* Convert QEMU provided timer ID back to internal 16bit index format */
7667 static target_timer_t
get_timer_id(abi_long arg
)
7669 target_timer_t timerid
= arg
;
7671 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7672 return -TARGET_EINVAL
;
7677 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7678 return -TARGET_EINVAL
;
7684 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7686 abi_ulong target_addr
,
7689 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7690 unsigned host_bits
= sizeof(*host_mask
) * 8;
7691 abi_ulong
*target_mask
;
7694 assert(host_size
>= target_size
);
7696 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7698 return -TARGET_EFAULT
;
7700 memset(host_mask
, 0, host_size
);
7702 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7703 unsigned bit
= i
* target_bits
;
7706 __get_user(val
, &target_mask
[i
]);
7707 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7708 if (val
& (1UL << j
)) {
7709 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7714 unlock_user(target_mask
, target_addr
, 0);
7718 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7720 abi_ulong target_addr
,
7723 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7724 unsigned host_bits
= sizeof(*host_mask
) * 8;
7725 abi_ulong
*target_mask
;
7728 assert(host_size
>= target_size
);
7730 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7732 return -TARGET_EFAULT
;
7735 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7736 unsigned bit
= i
* target_bits
;
7739 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7740 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7744 __put_user(val
, &target_mask
[i
]);
7747 unlock_user(target_mask
, target_addr
, target_size
);
7751 /* This is an internal helper for do_syscall so that it is easier
7752 * to have a single return point, so that actions, such as logging
7753 * of syscall results, can be performed.
7754 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7756 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7757 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7758 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7761 CPUState
*cpu
= env_cpu(cpu_env
);
7763 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7764 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7765 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7766 || defined(TARGET_NR_statx)
7769 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7770 || defined(TARGET_NR_fstatfs)
7776 case TARGET_NR_exit
:
7777 /* In old applications this may be used to implement _exit(2).
7778 However in threaded applictions it is used for thread termination,
7779 and _exit_group is used for application termination.
7780 Do thread termination if we have more then one thread. */
7782 if (block_signals()) {
7783 return -TARGET_ERESTARTSYS
;
7786 pthread_mutex_lock(&clone_lock
);
7788 if (CPU_NEXT(first_cpu
)) {
7789 TaskState
*ts
= cpu
->opaque
;
7791 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
7792 object_unref(OBJECT(cpu
));
7794 * At this point the CPU should be unrealized and removed
7795 * from cpu lists. We can clean-up the rest of the thread
7796 * data without the lock held.
7799 pthread_mutex_unlock(&clone_lock
);
7801 if (ts
->child_tidptr
) {
7802 put_user_u32(0, ts
->child_tidptr
);
7803 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7808 rcu_unregister_thread();
7812 pthread_mutex_unlock(&clone_lock
);
7813 preexit_cleanup(cpu_env
, arg1
);
7815 return 0; /* avoid warning */
7816 case TARGET_NR_read
:
7817 if (arg2
== 0 && arg3
== 0) {
7818 return get_errno(safe_read(arg1
, 0, 0));
7820 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7821 return -TARGET_EFAULT
;
7822 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7824 fd_trans_host_to_target_data(arg1
)) {
7825 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7827 unlock_user(p
, arg2
, ret
);
7830 case TARGET_NR_write
:
7831 if (arg2
== 0 && arg3
== 0) {
7832 return get_errno(safe_write(arg1
, 0, 0));
7834 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7835 return -TARGET_EFAULT
;
7836 if (fd_trans_target_to_host_data(arg1
)) {
7837 void *copy
= g_malloc(arg3
);
7838 memcpy(copy
, p
, arg3
);
7839 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7841 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7845 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7847 unlock_user(p
, arg2
, 0);
7850 #ifdef TARGET_NR_open
7851 case TARGET_NR_open
:
7852 if (!(p
= lock_user_string(arg1
)))
7853 return -TARGET_EFAULT
;
7854 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7855 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7857 fd_trans_unregister(ret
);
7858 unlock_user(p
, arg1
, 0);
7861 case TARGET_NR_openat
:
7862 if (!(p
= lock_user_string(arg2
)))
7863 return -TARGET_EFAULT
;
7864 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7865 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7867 fd_trans_unregister(ret
);
7868 unlock_user(p
, arg2
, 0);
7870 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7871 case TARGET_NR_name_to_handle_at
:
7872 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7875 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7876 case TARGET_NR_open_by_handle_at
:
7877 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7878 fd_trans_unregister(ret
);
7881 case TARGET_NR_close
:
7882 fd_trans_unregister(arg1
);
7883 return get_errno(close(arg1
));
7886 return do_brk(arg1
);
7887 #ifdef TARGET_NR_fork
7888 case TARGET_NR_fork
:
7889 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7891 #ifdef TARGET_NR_waitpid
7892 case TARGET_NR_waitpid
:
7895 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7896 if (!is_error(ret
) && arg2
&& ret
7897 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7898 return -TARGET_EFAULT
;
7902 #ifdef TARGET_NR_waitid
7903 case TARGET_NR_waitid
:
7907 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7908 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7909 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7910 return -TARGET_EFAULT
;
7911 host_to_target_siginfo(p
, &info
);
7912 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7917 #ifdef TARGET_NR_creat /* not on alpha */
7918 case TARGET_NR_creat
:
7919 if (!(p
= lock_user_string(arg1
)))
7920 return -TARGET_EFAULT
;
7921 ret
= get_errno(creat(p
, arg2
));
7922 fd_trans_unregister(ret
);
7923 unlock_user(p
, arg1
, 0);
7926 #ifdef TARGET_NR_link
7927 case TARGET_NR_link
:
7930 p
= lock_user_string(arg1
);
7931 p2
= lock_user_string(arg2
);
7933 ret
= -TARGET_EFAULT
;
7935 ret
= get_errno(link(p
, p2
));
7936 unlock_user(p2
, arg2
, 0);
7937 unlock_user(p
, arg1
, 0);
7941 #if defined(TARGET_NR_linkat)
7942 case TARGET_NR_linkat
:
7946 return -TARGET_EFAULT
;
7947 p
= lock_user_string(arg2
);
7948 p2
= lock_user_string(arg4
);
7950 ret
= -TARGET_EFAULT
;
7952 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7953 unlock_user(p
, arg2
, 0);
7954 unlock_user(p2
, arg4
, 0);
7958 #ifdef TARGET_NR_unlink
7959 case TARGET_NR_unlink
:
7960 if (!(p
= lock_user_string(arg1
)))
7961 return -TARGET_EFAULT
;
7962 ret
= get_errno(unlink(p
));
7963 unlock_user(p
, arg1
, 0);
7966 #if defined(TARGET_NR_unlinkat)
7967 case TARGET_NR_unlinkat
:
7968 if (!(p
= lock_user_string(arg2
)))
7969 return -TARGET_EFAULT
;
7970 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7971 unlock_user(p
, arg2
, 0);
7974 case TARGET_NR_execve
:
7976 char **argp
, **envp
;
7979 abi_ulong guest_argp
;
7980 abi_ulong guest_envp
;
7987 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7988 if (get_user_ual(addr
, gp
))
7989 return -TARGET_EFAULT
;
7996 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7997 if (get_user_ual(addr
, gp
))
7998 return -TARGET_EFAULT
;
8004 argp
= g_new0(char *, argc
+ 1);
8005 envp
= g_new0(char *, envc
+ 1);
8007 for (gp
= guest_argp
, q
= argp
; gp
;
8008 gp
+= sizeof(abi_ulong
), q
++) {
8009 if (get_user_ual(addr
, gp
))
8013 if (!(*q
= lock_user_string(addr
)))
8015 total_size
+= strlen(*q
) + 1;
8019 for (gp
= guest_envp
, q
= envp
; gp
;
8020 gp
+= sizeof(abi_ulong
), q
++) {
8021 if (get_user_ual(addr
, gp
))
8025 if (!(*q
= lock_user_string(addr
)))
8027 total_size
+= strlen(*q
) + 1;
8031 if (!(p
= lock_user_string(arg1
)))
8033 /* Although execve() is not an interruptible syscall it is
8034 * a special case where we must use the safe_syscall wrapper:
8035 * if we allow a signal to happen before we make the host
8036 * syscall then we will 'lose' it, because at the point of
8037 * execve the process leaves QEMU's control. So we use the
8038 * safe syscall wrapper to ensure that we either take the
8039 * signal as a guest signal, or else it does not happen
8040 * before the execve completes and makes it the other
8041 * program's problem.
8043 ret
= get_errno(safe_execve(p
, argp
, envp
));
8044 unlock_user(p
, arg1
, 0);
8049 ret
= -TARGET_EFAULT
;
8052 for (gp
= guest_argp
, q
= argp
; *q
;
8053 gp
+= sizeof(abi_ulong
), q
++) {
8054 if (get_user_ual(addr
, gp
)
8057 unlock_user(*q
, addr
, 0);
8059 for (gp
= guest_envp
, q
= envp
; *q
;
8060 gp
+= sizeof(abi_ulong
), q
++) {
8061 if (get_user_ual(addr
, gp
)
8064 unlock_user(*q
, addr
, 0);
8071 case TARGET_NR_chdir
:
8072 if (!(p
= lock_user_string(arg1
)))
8073 return -TARGET_EFAULT
;
8074 ret
= get_errno(chdir(p
));
8075 unlock_user(p
, arg1
, 0);
8077 #ifdef TARGET_NR_time
8078 case TARGET_NR_time
:
8081 ret
= get_errno(time(&host_time
));
8084 && put_user_sal(host_time
, arg1
))
8085 return -TARGET_EFAULT
;
8089 #ifdef TARGET_NR_mknod
8090 case TARGET_NR_mknod
:
8091 if (!(p
= lock_user_string(arg1
)))
8092 return -TARGET_EFAULT
;
8093 ret
= get_errno(mknod(p
, arg2
, arg3
));
8094 unlock_user(p
, arg1
, 0);
8097 #if defined(TARGET_NR_mknodat)
8098 case TARGET_NR_mknodat
:
8099 if (!(p
= lock_user_string(arg2
)))
8100 return -TARGET_EFAULT
;
8101 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8102 unlock_user(p
, arg2
, 0);
8105 #ifdef TARGET_NR_chmod
8106 case TARGET_NR_chmod
:
8107 if (!(p
= lock_user_string(arg1
)))
8108 return -TARGET_EFAULT
;
8109 ret
= get_errno(chmod(p
, arg2
));
8110 unlock_user(p
, arg1
, 0);
8113 #ifdef TARGET_NR_lseek
8114 case TARGET_NR_lseek
:
8115 return get_errno(lseek(arg1
, arg2
, arg3
));
8117 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8118 /* Alpha specific */
8119 case TARGET_NR_getxpid
:
8120 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8121 return get_errno(getpid());
8123 #ifdef TARGET_NR_getpid
8124 case TARGET_NR_getpid
:
8125 return get_errno(getpid());
8127 case TARGET_NR_mount
:
8129 /* need to look at the data field */
8133 p
= lock_user_string(arg1
);
8135 return -TARGET_EFAULT
;
8141 p2
= lock_user_string(arg2
);
8144 unlock_user(p
, arg1
, 0);
8146 return -TARGET_EFAULT
;
8150 p3
= lock_user_string(arg3
);
8153 unlock_user(p
, arg1
, 0);
8155 unlock_user(p2
, arg2
, 0);
8156 return -TARGET_EFAULT
;
8162 /* FIXME - arg5 should be locked, but it isn't clear how to
8163 * do that since it's not guaranteed to be a NULL-terminated
8167 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8169 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8171 ret
= get_errno(ret
);
8174 unlock_user(p
, arg1
, 0);
8176 unlock_user(p2
, arg2
, 0);
8178 unlock_user(p3
, arg3
, 0);
8182 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8183 #if defined(TARGET_NR_umount)
8184 case TARGET_NR_umount
:
8186 #if defined(TARGET_NR_oldumount)
8187 case TARGET_NR_oldumount
:
8189 if (!(p
= lock_user_string(arg1
)))
8190 return -TARGET_EFAULT
;
8191 ret
= get_errno(umount(p
));
8192 unlock_user(p
, arg1
, 0);
8195 #ifdef TARGET_NR_stime /* not on alpha */
8196 case TARGET_NR_stime
:
8200 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8201 return -TARGET_EFAULT
;
8203 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8206 #ifdef TARGET_NR_alarm /* not on alpha */
8207 case TARGET_NR_alarm
:
8210 #ifdef TARGET_NR_pause /* not on alpha */
8211 case TARGET_NR_pause
:
8212 if (!block_signals()) {
8213 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8215 return -TARGET_EINTR
;
8217 #ifdef TARGET_NR_utime
8218 case TARGET_NR_utime
:
8220 struct utimbuf tbuf
, *host_tbuf
;
8221 struct target_utimbuf
*target_tbuf
;
8223 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8224 return -TARGET_EFAULT
;
8225 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8226 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8227 unlock_user_struct(target_tbuf
, arg2
, 0);
8232 if (!(p
= lock_user_string(arg1
)))
8233 return -TARGET_EFAULT
;
8234 ret
= get_errno(utime(p
, host_tbuf
));
8235 unlock_user(p
, arg1
, 0);
8239 #ifdef TARGET_NR_utimes
8240 case TARGET_NR_utimes
:
8242 struct timeval
*tvp
, tv
[2];
8244 if (copy_from_user_timeval(&tv
[0], arg2
)
8245 || copy_from_user_timeval(&tv
[1],
8246 arg2
+ sizeof(struct target_timeval
)))
8247 return -TARGET_EFAULT
;
8252 if (!(p
= lock_user_string(arg1
)))
8253 return -TARGET_EFAULT
;
8254 ret
= get_errno(utimes(p
, tvp
));
8255 unlock_user(p
, arg1
, 0);
8259 #if defined(TARGET_NR_futimesat)
8260 case TARGET_NR_futimesat
:
8262 struct timeval
*tvp
, tv
[2];
8264 if (copy_from_user_timeval(&tv
[0], arg3
)
8265 || copy_from_user_timeval(&tv
[1],
8266 arg3
+ sizeof(struct target_timeval
)))
8267 return -TARGET_EFAULT
;
8272 if (!(p
= lock_user_string(arg2
))) {
8273 return -TARGET_EFAULT
;
8275 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8276 unlock_user(p
, arg2
, 0);
8280 #ifdef TARGET_NR_access
8281 case TARGET_NR_access
:
8282 if (!(p
= lock_user_string(arg1
))) {
8283 return -TARGET_EFAULT
;
8285 ret
= get_errno(access(path(p
), arg2
));
8286 unlock_user(p
, arg1
, 0);
8289 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8290 case TARGET_NR_faccessat
:
8291 if (!(p
= lock_user_string(arg2
))) {
8292 return -TARGET_EFAULT
;
8294 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8295 unlock_user(p
, arg2
, 0);
8298 #ifdef TARGET_NR_nice /* not on alpha */
8299 case TARGET_NR_nice
:
8300 return get_errno(nice(arg1
));
8302 case TARGET_NR_sync
:
8305 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8306 case TARGET_NR_syncfs
:
8307 return get_errno(syncfs(arg1
));
8309 case TARGET_NR_kill
:
8310 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8311 #ifdef TARGET_NR_rename
8312 case TARGET_NR_rename
:
8315 p
= lock_user_string(arg1
);
8316 p2
= lock_user_string(arg2
);
8318 ret
= -TARGET_EFAULT
;
8320 ret
= get_errno(rename(p
, p2
));
8321 unlock_user(p2
, arg2
, 0);
8322 unlock_user(p
, arg1
, 0);
8326 #if defined(TARGET_NR_renameat)
8327 case TARGET_NR_renameat
:
8330 p
= lock_user_string(arg2
);
8331 p2
= lock_user_string(arg4
);
8333 ret
= -TARGET_EFAULT
;
8335 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8336 unlock_user(p2
, arg4
, 0);
8337 unlock_user(p
, arg2
, 0);
8341 #if defined(TARGET_NR_renameat2)
8342 case TARGET_NR_renameat2
:
8345 p
= lock_user_string(arg2
);
8346 p2
= lock_user_string(arg4
);
8348 ret
= -TARGET_EFAULT
;
8350 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8352 unlock_user(p2
, arg4
, 0);
8353 unlock_user(p
, arg2
, 0);
8357 #ifdef TARGET_NR_mkdir
8358 case TARGET_NR_mkdir
:
8359 if (!(p
= lock_user_string(arg1
)))
8360 return -TARGET_EFAULT
;
8361 ret
= get_errno(mkdir(p
, arg2
));
8362 unlock_user(p
, arg1
, 0);
8365 #if defined(TARGET_NR_mkdirat)
8366 case TARGET_NR_mkdirat
:
8367 if (!(p
= lock_user_string(arg2
)))
8368 return -TARGET_EFAULT
;
8369 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8370 unlock_user(p
, arg2
, 0);
8373 #ifdef TARGET_NR_rmdir
8374 case TARGET_NR_rmdir
:
8375 if (!(p
= lock_user_string(arg1
)))
8376 return -TARGET_EFAULT
;
8377 ret
= get_errno(rmdir(p
));
8378 unlock_user(p
, arg1
, 0);
8382 ret
= get_errno(dup(arg1
));
8384 fd_trans_dup(arg1
, ret
);
8387 #ifdef TARGET_NR_pipe
8388 case TARGET_NR_pipe
:
8389 return do_pipe(cpu_env
, arg1
, 0, 0);
8391 #ifdef TARGET_NR_pipe2
8392 case TARGET_NR_pipe2
:
8393 return do_pipe(cpu_env
, arg1
,
8394 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8396 case TARGET_NR_times
:
8398 struct target_tms
*tmsp
;
8400 ret
= get_errno(times(&tms
));
8402 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8404 return -TARGET_EFAULT
;
8405 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8406 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8407 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8408 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8411 ret
= host_to_target_clock_t(ret
);
8414 case TARGET_NR_acct
:
8416 ret
= get_errno(acct(NULL
));
8418 if (!(p
= lock_user_string(arg1
))) {
8419 return -TARGET_EFAULT
;
8421 ret
= get_errno(acct(path(p
)));
8422 unlock_user(p
, arg1
, 0);
8425 #ifdef TARGET_NR_umount2
8426 case TARGET_NR_umount2
:
8427 if (!(p
= lock_user_string(arg1
)))
8428 return -TARGET_EFAULT
;
8429 ret
= get_errno(umount2(p
, arg2
));
8430 unlock_user(p
, arg1
, 0);
8433 case TARGET_NR_ioctl
:
8434 return do_ioctl(arg1
, arg2
, arg3
);
8435 #ifdef TARGET_NR_fcntl
8436 case TARGET_NR_fcntl
:
8437 return do_fcntl(arg1
, arg2
, arg3
);
8439 case TARGET_NR_setpgid
:
8440 return get_errno(setpgid(arg1
, arg2
));
8441 case TARGET_NR_umask
:
8442 return get_errno(umask(arg1
));
8443 case TARGET_NR_chroot
:
8444 if (!(p
= lock_user_string(arg1
)))
8445 return -TARGET_EFAULT
;
8446 ret
= get_errno(chroot(p
));
8447 unlock_user(p
, arg1
, 0);
8449 #ifdef TARGET_NR_dup2
8450 case TARGET_NR_dup2
:
8451 ret
= get_errno(dup2(arg1
, arg2
));
8453 fd_trans_dup(arg1
, arg2
);
8457 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8458 case TARGET_NR_dup3
:
8462 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8465 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8466 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8468 fd_trans_dup(arg1
, arg2
);
8473 #ifdef TARGET_NR_getppid /* not on alpha */
8474 case TARGET_NR_getppid
:
8475 return get_errno(getppid());
8477 #ifdef TARGET_NR_getpgrp
8478 case TARGET_NR_getpgrp
:
8479 return get_errno(getpgrp());
8481 case TARGET_NR_setsid
:
8482 return get_errno(setsid());
8483 #ifdef TARGET_NR_sigaction
8484 case TARGET_NR_sigaction
:
8486 #if defined(TARGET_ALPHA)
8487 struct target_sigaction act
, oact
, *pact
= 0;
8488 struct target_old_sigaction
*old_act
;
8490 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8491 return -TARGET_EFAULT
;
8492 act
._sa_handler
= old_act
->_sa_handler
;
8493 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8494 act
.sa_flags
= old_act
->sa_flags
;
8495 act
.sa_restorer
= 0;
8496 unlock_user_struct(old_act
, arg2
, 0);
8499 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8500 if (!is_error(ret
) && arg3
) {
8501 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8502 return -TARGET_EFAULT
;
8503 old_act
->_sa_handler
= oact
._sa_handler
;
8504 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8505 old_act
->sa_flags
= oact
.sa_flags
;
8506 unlock_user_struct(old_act
, arg3
, 1);
8508 #elif defined(TARGET_MIPS)
8509 struct target_sigaction act
, oact
, *pact
, *old_act
;
8512 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8513 return -TARGET_EFAULT
;
8514 act
._sa_handler
= old_act
->_sa_handler
;
8515 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8516 act
.sa_flags
= old_act
->sa_flags
;
8517 unlock_user_struct(old_act
, arg2
, 0);
8523 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8525 if (!is_error(ret
) && arg3
) {
8526 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8527 return -TARGET_EFAULT
;
8528 old_act
->_sa_handler
= oact
._sa_handler
;
8529 old_act
->sa_flags
= oact
.sa_flags
;
8530 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8531 old_act
->sa_mask
.sig
[1] = 0;
8532 old_act
->sa_mask
.sig
[2] = 0;
8533 old_act
->sa_mask
.sig
[3] = 0;
8534 unlock_user_struct(old_act
, arg3
, 1);
8537 struct target_old_sigaction
*old_act
;
8538 struct target_sigaction act
, oact
, *pact
;
8540 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8541 return -TARGET_EFAULT
;
8542 act
._sa_handler
= old_act
->_sa_handler
;
8543 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8544 act
.sa_flags
= old_act
->sa_flags
;
8545 act
.sa_restorer
= old_act
->sa_restorer
;
8546 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8547 act
.ka_restorer
= 0;
8549 unlock_user_struct(old_act
, arg2
, 0);
8554 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8555 if (!is_error(ret
) && arg3
) {
8556 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8557 return -TARGET_EFAULT
;
8558 old_act
->_sa_handler
= oact
._sa_handler
;
8559 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8560 old_act
->sa_flags
= oact
.sa_flags
;
8561 old_act
->sa_restorer
= oact
.sa_restorer
;
8562 unlock_user_struct(old_act
, arg3
, 1);
8568 case TARGET_NR_rt_sigaction
:
8570 #if defined(TARGET_ALPHA)
8571 /* For Alpha and SPARC this is a 5 argument syscall, with
8572 * a 'restorer' parameter which must be copied into the
8573 * sa_restorer field of the sigaction struct.
8574 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8575 * and arg5 is the sigsetsize.
8576 * Alpha also has a separate rt_sigaction struct that it uses
8577 * here; SPARC uses the usual sigaction struct.
8579 struct target_rt_sigaction
*rt_act
;
8580 struct target_sigaction act
, oact
, *pact
= 0;
8582 if (arg4
!= sizeof(target_sigset_t
)) {
8583 return -TARGET_EINVAL
;
8586 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8587 return -TARGET_EFAULT
;
8588 act
._sa_handler
= rt_act
->_sa_handler
;
8589 act
.sa_mask
= rt_act
->sa_mask
;
8590 act
.sa_flags
= rt_act
->sa_flags
;
8591 act
.sa_restorer
= arg5
;
8592 unlock_user_struct(rt_act
, arg2
, 0);
8595 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8596 if (!is_error(ret
) && arg3
) {
8597 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8598 return -TARGET_EFAULT
;
8599 rt_act
->_sa_handler
= oact
._sa_handler
;
8600 rt_act
->sa_mask
= oact
.sa_mask
;
8601 rt_act
->sa_flags
= oact
.sa_flags
;
8602 unlock_user_struct(rt_act
, arg3
, 1);
8606 target_ulong restorer
= arg4
;
8607 target_ulong sigsetsize
= arg5
;
8609 target_ulong sigsetsize
= arg4
;
8611 struct target_sigaction
*act
;
8612 struct target_sigaction
*oact
;
8614 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8615 return -TARGET_EINVAL
;
8618 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8619 return -TARGET_EFAULT
;
8621 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8622 act
->ka_restorer
= restorer
;
8628 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8629 ret
= -TARGET_EFAULT
;
8630 goto rt_sigaction_fail
;
8634 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8637 unlock_user_struct(act
, arg2
, 0);
8639 unlock_user_struct(oact
, arg3
, 1);
8643 #ifdef TARGET_NR_sgetmask /* not on alpha */
8644 case TARGET_NR_sgetmask
:
8647 abi_ulong target_set
;
8648 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8650 host_to_target_old_sigset(&target_set
, &cur_set
);
8656 #ifdef TARGET_NR_ssetmask /* not on alpha */
8657 case TARGET_NR_ssetmask
:
8660 abi_ulong target_set
= arg1
;
8661 target_to_host_old_sigset(&set
, &target_set
);
8662 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8664 host_to_target_old_sigset(&target_set
, &oset
);
8670 #ifdef TARGET_NR_sigprocmask
8671 case TARGET_NR_sigprocmask
:
8673 #if defined(TARGET_ALPHA)
8674 sigset_t set
, oldset
;
8679 case TARGET_SIG_BLOCK
:
8682 case TARGET_SIG_UNBLOCK
:
8685 case TARGET_SIG_SETMASK
:
8689 return -TARGET_EINVAL
;
8692 target_to_host_old_sigset(&set
, &mask
);
8694 ret
= do_sigprocmask(how
, &set
, &oldset
);
8695 if (!is_error(ret
)) {
8696 host_to_target_old_sigset(&mask
, &oldset
);
8698 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8701 sigset_t set
, oldset
, *set_ptr
;
8706 case TARGET_SIG_BLOCK
:
8709 case TARGET_SIG_UNBLOCK
:
8712 case TARGET_SIG_SETMASK
:
8716 return -TARGET_EINVAL
;
8718 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8719 return -TARGET_EFAULT
;
8720 target_to_host_old_sigset(&set
, p
);
8721 unlock_user(p
, arg2
, 0);
8727 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8728 if (!is_error(ret
) && arg3
) {
8729 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8730 return -TARGET_EFAULT
;
8731 host_to_target_old_sigset(p
, &oldset
);
8732 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8738 case TARGET_NR_rt_sigprocmask
:
8741 sigset_t set
, oldset
, *set_ptr
;
8743 if (arg4
!= sizeof(target_sigset_t
)) {
8744 return -TARGET_EINVAL
;
8749 case TARGET_SIG_BLOCK
:
8752 case TARGET_SIG_UNBLOCK
:
8755 case TARGET_SIG_SETMASK
:
8759 return -TARGET_EINVAL
;
8761 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8762 return -TARGET_EFAULT
;
8763 target_to_host_sigset(&set
, p
);
8764 unlock_user(p
, arg2
, 0);
8770 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8771 if (!is_error(ret
) && arg3
) {
8772 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8773 return -TARGET_EFAULT
;
8774 host_to_target_sigset(p
, &oldset
);
8775 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8779 #ifdef TARGET_NR_sigpending
8780 case TARGET_NR_sigpending
:
8783 ret
= get_errno(sigpending(&set
));
8784 if (!is_error(ret
)) {
8785 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8786 return -TARGET_EFAULT
;
8787 host_to_target_old_sigset(p
, &set
);
8788 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8793 case TARGET_NR_rt_sigpending
:
8797 /* Yes, this check is >, not != like most. We follow the kernel's
8798 * logic and it does it like this because it implements
8799 * NR_sigpending through the same code path, and in that case
8800 * the old_sigset_t is smaller in size.
8802 if (arg2
> sizeof(target_sigset_t
)) {
8803 return -TARGET_EINVAL
;
8806 ret
= get_errno(sigpending(&set
));
8807 if (!is_error(ret
)) {
8808 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8809 return -TARGET_EFAULT
;
8810 host_to_target_sigset(p
, &set
);
8811 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8815 #ifdef TARGET_NR_sigsuspend
8816 case TARGET_NR_sigsuspend
:
8818 TaskState
*ts
= cpu
->opaque
;
8819 #if defined(TARGET_ALPHA)
8820 abi_ulong mask
= arg1
;
8821 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8823 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8824 return -TARGET_EFAULT
;
8825 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8826 unlock_user(p
, arg1
, 0);
8828 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8830 if (ret
!= -TARGET_ERESTARTSYS
) {
8831 ts
->in_sigsuspend
= 1;
8836 case TARGET_NR_rt_sigsuspend
:
8838 TaskState
*ts
= cpu
->opaque
;
8840 if (arg2
!= sizeof(target_sigset_t
)) {
8841 return -TARGET_EINVAL
;
8843 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8844 return -TARGET_EFAULT
;
8845 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8846 unlock_user(p
, arg1
, 0);
8847 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8849 if (ret
!= -TARGET_ERESTARTSYS
) {
8850 ts
->in_sigsuspend
= 1;
8854 #ifdef TARGET_NR_rt_sigtimedwait
8855 case TARGET_NR_rt_sigtimedwait
:
8858 struct timespec uts
, *puts
;
8861 if (arg4
!= sizeof(target_sigset_t
)) {
8862 return -TARGET_EINVAL
;
8865 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8866 return -TARGET_EFAULT
;
8867 target_to_host_sigset(&set
, p
);
8868 unlock_user(p
, arg1
, 0);
8871 target_to_host_timespec(puts
, arg3
);
8875 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8877 if (!is_error(ret
)) {
8879 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8882 return -TARGET_EFAULT
;
8884 host_to_target_siginfo(p
, &uinfo
);
8885 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8887 ret
= host_to_target_signal(ret
);
8892 case TARGET_NR_rt_sigqueueinfo
:
8896 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8898 return -TARGET_EFAULT
;
8900 target_to_host_siginfo(&uinfo
, p
);
8901 unlock_user(p
, arg3
, 0);
8902 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8905 case TARGET_NR_rt_tgsigqueueinfo
:
8909 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8911 return -TARGET_EFAULT
;
8913 target_to_host_siginfo(&uinfo
, p
);
8914 unlock_user(p
, arg4
, 0);
8915 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8918 #ifdef TARGET_NR_sigreturn
8919 case TARGET_NR_sigreturn
:
8920 if (block_signals()) {
8921 return -TARGET_ERESTARTSYS
;
8923 return do_sigreturn(cpu_env
);
8925 case TARGET_NR_rt_sigreturn
:
8926 if (block_signals()) {
8927 return -TARGET_ERESTARTSYS
;
8929 return do_rt_sigreturn(cpu_env
);
8930 case TARGET_NR_sethostname
:
8931 if (!(p
= lock_user_string(arg1
)))
8932 return -TARGET_EFAULT
;
8933 ret
= get_errno(sethostname(p
, arg2
));
8934 unlock_user(p
, arg1
, 0);
8936 #ifdef TARGET_NR_setrlimit
8937 case TARGET_NR_setrlimit
:
8939 int resource
= target_to_host_resource(arg1
);
8940 struct target_rlimit
*target_rlim
;
8942 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8943 return -TARGET_EFAULT
;
8944 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8945 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8946 unlock_user_struct(target_rlim
, arg2
, 0);
8948 * If we just passed through resource limit settings for memory then
8949 * they would also apply to QEMU's own allocations, and QEMU will
8950 * crash or hang or die if its allocations fail. Ideally we would
8951 * track the guest allocations in QEMU and apply the limits ourselves.
8952 * For now, just tell the guest the call succeeded but don't actually
8955 if (resource
!= RLIMIT_AS
&&
8956 resource
!= RLIMIT_DATA
&&
8957 resource
!= RLIMIT_STACK
) {
8958 return get_errno(setrlimit(resource
, &rlim
));
8964 #ifdef TARGET_NR_getrlimit
8965 case TARGET_NR_getrlimit
:
8967 int resource
= target_to_host_resource(arg1
);
8968 struct target_rlimit
*target_rlim
;
8971 ret
= get_errno(getrlimit(resource
, &rlim
));
8972 if (!is_error(ret
)) {
8973 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8974 return -TARGET_EFAULT
;
8975 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8976 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8977 unlock_user_struct(target_rlim
, arg2
, 1);
8982 case TARGET_NR_getrusage
:
8984 struct rusage rusage
;
8985 ret
= get_errno(getrusage(arg1
, &rusage
));
8986 if (!is_error(ret
)) {
8987 ret
= host_to_target_rusage(arg2
, &rusage
);
8991 #if defined(TARGET_NR_gettimeofday)
8992 case TARGET_NR_gettimeofday
:
8997 ret
= get_errno(gettimeofday(&tv
, &tz
));
8998 if (!is_error(ret
)) {
8999 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9000 return -TARGET_EFAULT
;
9002 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9003 return -TARGET_EFAULT
;
9009 #if defined(TARGET_NR_settimeofday)
9010 case TARGET_NR_settimeofday
:
9012 struct timeval tv
, *ptv
= NULL
;
9013 struct timezone tz
, *ptz
= NULL
;
9016 if (copy_from_user_timeval(&tv
, arg1
)) {
9017 return -TARGET_EFAULT
;
9023 if (copy_from_user_timezone(&tz
, arg2
)) {
9024 return -TARGET_EFAULT
;
9029 return get_errno(settimeofday(ptv
, ptz
));
9032 #if defined(TARGET_NR_select)
9033 case TARGET_NR_select
:
9034 #if defined(TARGET_WANT_NI_OLD_SELECT)
9035 /* some architectures used to have old_select here
9036 * but now ENOSYS it.
9038 ret
= -TARGET_ENOSYS
;
9039 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9040 ret
= do_old_select(arg1
);
9042 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9046 #ifdef TARGET_NR_pselect6
9047 case TARGET_NR_pselect6
:
9049 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9050 fd_set rfds
, wfds
, efds
;
9051 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9052 struct timespec ts
, *ts_ptr
;
9055 * The 6th arg is actually two args smashed together,
9056 * so we cannot use the C library.
9064 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9065 target_sigset_t
*target_sigset
;
9073 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9077 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9081 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9087 * This takes a timespec, and not a timeval, so we cannot
9088 * use the do_select() helper ...
9091 if (target_to_host_timespec(&ts
, ts_addr
)) {
9092 return -TARGET_EFAULT
;
9099 /* Extract the two packed args for the sigset */
9102 sig
.size
= SIGSET_T_SIZE
;
9104 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9106 return -TARGET_EFAULT
;
9108 arg_sigset
= tswapal(arg7
[0]);
9109 arg_sigsize
= tswapal(arg7
[1]);
9110 unlock_user(arg7
, arg6
, 0);
9114 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9115 /* Like the kernel, we enforce correct size sigsets */
9116 return -TARGET_EINVAL
;
9118 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9119 sizeof(*target_sigset
), 1);
9120 if (!target_sigset
) {
9121 return -TARGET_EFAULT
;
9123 target_to_host_sigset(&set
, target_sigset
);
9124 unlock_user(target_sigset
, arg_sigset
, 0);
9132 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9135 if (!is_error(ret
)) {
9136 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9137 return -TARGET_EFAULT
;
9138 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9139 return -TARGET_EFAULT
;
9140 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9141 return -TARGET_EFAULT
;
9143 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9144 return -TARGET_EFAULT
;
9149 #ifdef TARGET_NR_symlink
9150 case TARGET_NR_symlink
:
9153 p
= lock_user_string(arg1
);
9154 p2
= lock_user_string(arg2
);
9156 ret
= -TARGET_EFAULT
;
9158 ret
= get_errno(symlink(p
, p2
));
9159 unlock_user(p2
, arg2
, 0);
9160 unlock_user(p
, arg1
, 0);
9164 #if defined(TARGET_NR_symlinkat)
9165 case TARGET_NR_symlinkat
:
9168 p
= lock_user_string(arg1
);
9169 p2
= lock_user_string(arg3
);
9171 ret
= -TARGET_EFAULT
;
9173 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9174 unlock_user(p2
, arg3
, 0);
9175 unlock_user(p
, arg1
, 0);
9179 #ifdef TARGET_NR_readlink
9180 case TARGET_NR_readlink
:
9183 p
= lock_user_string(arg1
);
9184 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9186 ret
= -TARGET_EFAULT
;
9188 /* Short circuit this for the magic exe check. */
9189 ret
= -TARGET_EINVAL
;
9190 } else if (is_proc_myself((const char *)p
, "exe")) {
9191 char real
[PATH_MAX
], *temp
;
9192 temp
= realpath(exec_path
, real
);
9193 /* Return value is # of bytes that we wrote to the buffer. */
9195 ret
= get_errno(-1);
9197 /* Don't worry about sign mismatch as earlier mapping
9198 * logic would have thrown a bad address error. */
9199 ret
= MIN(strlen(real
), arg3
);
9200 /* We cannot NUL terminate the string. */
9201 memcpy(p2
, real
, ret
);
9204 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9206 unlock_user(p2
, arg2
, ret
);
9207 unlock_user(p
, arg1
, 0);
9211 #if defined(TARGET_NR_readlinkat)
9212 case TARGET_NR_readlinkat
:
9215 p
= lock_user_string(arg2
);
9216 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9218 ret
= -TARGET_EFAULT
;
9219 } else if (is_proc_myself((const char *)p
, "exe")) {
9220 char real
[PATH_MAX
], *temp
;
9221 temp
= realpath(exec_path
, real
);
9222 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9223 snprintf((char *)p2
, arg4
, "%s", real
);
9225 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9227 unlock_user(p2
, arg3
, ret
);
9228 unlock_user(p
, arg2
, 0);
9232 #ifdef TARGET_NR_swapon
9233 case TARGET_NR_swapon
:
9234 if (!(p
= lock_user_string(arg1
)))
9235 return -TARGET_EFAULT
;
9236 ret
= get_errno(swapon(p
, arg2
));
9237 unlock_user(p
, arg1
, 0);
9240 case TARGET_NR_reboot
:
9241 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9242 /* arg4 must be ignored in all other cases */
9243 p
= lock_user_string(arg4
);
9245 return -TARGET_EFAULT
;
9247 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9248 unlock_user(p
, arg4
, 0);
9250 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9253 #ifdef TARGET_NR_mmap
9254 case TARGET_NR_mmap
:
9255 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9256 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9257 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9258 || defined(TARGET_S390X)
9261 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9262 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9263 return -TARGET_EFAULT
;
9270 unlock_user(v
, arg1
, 0);
9271 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9272 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9276 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9277 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9283 #ifdef TARGET_NR_mmap2
9284 case TARGET_NR_mmap2
:
9286 #define MMAP_SHIFT 12
9288 ret
= target_mmap(arg1
, arg2
, arg3
,
9289 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9290 arg5
, arg6
<< MMAP_SHIFT
);
9291 return get_errno(ret
);
9293 case TARGET_NR_munmap
:
9294 return get_errno(target_munmap(arg1
, arg2
));
9295 case TARGET_NR_mprotect
:
9297 TaskState
*ts
= cpu
->opaque
;
9298 /* Special hack to detect libc making the stack executable. */
9299 if ((arg3
& PROT_GROWSDOWN
)
9300 && arg1
>= ts
->info
->stack_limit
9301 && arg1
<= ts
->info
->start_stack
) {
9302 arg3
&= ~PROT_GROWSDOWN
;
9303 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9304 arg1
= ts
->info
->stack_limit
;
9307 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9308 #ifdef TARGET_NR_mremap
9309 case TARGET_NR_mremap
:
9310 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9312 /* ??? msync/mlock/munlock are broken for softmmu. */
9313 #ifdef TARGET_NR_msync
9314 case TARGET_NR_msync
:
9315 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9317 #ifdef TARGET_NR_mlock
9318 case TARGET_NR_mlock
:
9319 return get_errno(mlock(g2h(arg1
), arg2
));
9321 #ifdef TARGET_NR_munlock
9322 case TARGET_NR_munlock
:
9323 return get_errno(munlock(g2h(arg1
), arg2
));
9325 #ifdef TARGET_NR_mlockall
9326 case TARGET_NR_mlockall
:
9327 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9329 #ifdef TARGET_NR_munlockall
9330 case TARGET_NR_munlockall
:
9331 return get_errno(munlockall());
9333 #ifdef TARGET_NR_truncate
9334 case TARGET_NR_truncate
:
9335 if (!(p
= lock_user_string(arg1
)))
9336 return -TARGET_EFAULT
;
9337 ret
= get_errno(truncate(p
, arg2
));
9338 unlock_user(p
, arg1
, 0);
9341 #ifdef TARGET_NR_ftruncate
9342 case TARGET_NR_ftruncate
:
9343 return get_errno(ftruncate(arg1
, arg2
));
9345 case TARGET_NR_fchmod
:
9346 return get_errno(fchmod(arg1
, arg2
));
9347 #if defined(TARGET_NR_fchmodat)
9348 case TARGET_NR_fchmodat
:
9349 if (!(p
= lock_user_string(arg2
)))
9350 return -TARGET_EFAULT
;
9351 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9352 unlock_user(p
, arg2
, 0);
9355 case TARGET_NR_getpriority
:
9356 /* Note that negative values are valid for getpriority, so we must
9357 differentiate based on errno settings. */
9359 ret
= getpriority(arg1
, arg2
);
9360 if (ret
== -1 && errno
!= 0) {
9361 return -host_to_target_errno(errno
);
9364 /* Return value is the unbiased priority. Signal no error. */
9365 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9367 /* Return value is a biased priority to avoid negative numbers. */
9371 case TARGET_NR_setpriority
:
9372 return get_errno(setpriority(arg1
, arg2
, arg3
));
9373 #ifdef TARGET_NR_statfs
9374 case TARGET_NR_statfs
:
9375 if (!(p
= lock_user_string(arg1
))) {
9376 return -TARGET_EFAULT
;
9378 ret
= get_errno(statfs(path(p
), &stfs
));
9379 unlock_user(p
, arg1
, 0);
9381 if (!is_error(ret
)) {
9382 struct target_statfs
*target_stfs
;
9384 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9385 return -TARGET_EFAULT
;
9386 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9387 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9388 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9389 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9390 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9391 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9392 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9393 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9394 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9395 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9396 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9397 #ifdef _STATFS_F_FLAGS
9398 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9400 __put_user(0, &target_stfs
->f_flags
);
9402 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9403 unlock_user_struct(target_stfs
, arg2
, 1);
9407 #ifdef TARGET_NR_fstatfs
9408 case TARGET_NR_fstatfs
:
9409 ret
= get_errno(fstatfs(arg1
, &stfs
));
9410 goto convert_statfs
;
9412 #ifdef TARGET_NR_statfs64
9413 case TARGET_NR_statfs64
:
9414 if (!(p
= lock_user_string(arg1
))) {
9415 return -TARGET_EFAULT
;
9417 ret
= get_errno(statfs(path(p
), &stfs
));
9418 unlock_user(p
, arg1
, 0);
9420 if (!is_error(ret
)) {
9421 struct target_statfs64
*target_stfs
;
9423 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9424 return -TARGET_EFAULT
;
9425 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9426 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9427 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9428 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9429 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9430 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9431 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9432 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9433 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9434 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9435 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9436 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9437 unlock_user_struct(target_stfs
, arg3
, 1);
9440 case TARGET_NR_fstatfs64
:
9441 ret
= get_errno(fstatfs(arg1
, &stfs
));
9442 goto convert_statfs64
;
9444 #ifdef TARGET_NR_socketcall
9445 case TARGET_NR_socketcall
:
9446 return do_socketcall(arg1
, arg2
);
9448 #ifdef TARGET_NR_accept
9449 case TARGET_NR_accept
:
9450 return do_accept4(arg1
, arg2
, arg3
, 0);
9452 #ifdef TARGET_NR_accept4
9453 case TARGET_NR_accept4
:
9454 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9456 #ifdef TARGET_NR_bind
9457 case TARGET_NR_bind
:
9458 return do_bind(arg1
, arg2
, arg3
);
9460 #ifdef TARGET_NR_connect
9461 case TARGET_NR_connect
:
9462 return do_connect(arg1
, arg2
, arg3
);
9464 #ifdef TARGET_NR_getpeername
9465 case TARGET_NR_getpeername
:
9466 return do_getpeername(arg1
, arg2
, arg3
);
9468 #ifdef TARGET_NR_getsockname
9469 case TARGET_NR_getsockname
:
9470 return do_getsockname(arg1
, arg2
, arg3
);
9472 #ifdef TARGET_NR_getsockopt
9473 case TARGET_NR_getsockopt
:
9474 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9476 #ifdef TARGET_NR_listen
9477 case TARGET_NR_listen
:
9478 return get_errno(listen(arg1
, arg2
));
9480 #ifdef TARGET_NR_recv
9481 case TARGET_NR_recv
:
9482 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9484 #ifdef TARGET_NR_recvfrom
9485 case TARGET_NR_recvfrom
:
9486 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9488 #ifdef TARGET_NR_recvmsg
9489 case TARGET_NR_recvmsg
:
9490 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9492 #ifdef TARGET_NR_send
9493 case TARGET_NR_send
:
9494 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9496 #ifdef TARGET_NR_sendmsg
9497 case TARGET_NR_sendmsg
:
9498 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9500 #ifdef TARGET_NR_sendmmsg
9501 case TARGET_NR_sendmmsg
:
9502 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9504 #ifdef TARGET_NR_recvmmsg
9505 case TARGET_NR_recvmmsg
:
9506 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9508 #ifdef TARGET_NR_sendto
9509 case TARGET_NR_sendto
:
9510 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9512 #ifdef TARGET_NR_shutdown
9513 case TARGET_NR_shutdown
:
9514 return get_errno(shutdown(arg1
, arg2
));
9516 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9517 case TARGET_NR_getrandom
:
9518 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9520 return -TARGET_EFAULT
;
9522 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9523 unlock_user(p
, arg1
, ret
);
9526 #ifdef TARGET_NR_socket
9527 case TARGET_NR_socket
:
9528 return do_socket(arg1
, arg2
, arg3
);
9530 #ifdef TARGET_NR_socketpair
9531 case TARGET_NR_socketpair
:
9532 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9534 #ifdef TARGET_NR_setsockopt
9535 case TARGET_NR_setsockopt
:
9536 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9538 #if defined(TARGET_NR_syslog)
9539 case TARGET_NR_syslog
:
9544 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9545 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9546 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9547 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9548 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9549 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9550 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9551 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9552 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9553 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9554 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9555 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9558 return -TARGET_EINVAL
;
9563 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9565 return -TARGET_EFAULT
;
9567 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9568 unlock_user(p
, arg2
, arg3
);
9572 return -TARGET_EINVAL
;
9577 case TARGET_NR_setitimer
:
9579 struct itimerval value
, ovalue
, *pvalue
;
9583 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9584 || copy_from_user_timeval(&pvalue
->it_value
,
9585 arg2
+ sizeof(struct target_timeval
)))
9586 return -TARGET_EFAULT
;
9590 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9591 if (!is_error(ret
) && arg3
) {
9592 if (copy_to_user_timeval(arg3
,
9593 &ovalue
.it_interval
)
9594 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9596 return -TARGET_EFAULT
;
9600 case TARGET_NR_getitimer
:
9602 struct itimerval value
;
9604 ret
= get_errno(getitimer(arg1
, &value
));
9605 if (!is_error(ret
) && arg2
) {
9606 if (copy_to_user_timeval(arg2
,
9608 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9610 return -TARGET_EFAULT
;
9614 #ifdef TARGET_NR_stat
9615 case TARGET_NR_stat
:
9616 if (!(p
= lock_user_string(arg1
))) {
9617 return -TARGET_EFAULT
;
9619 ret
= get_errno(stat(path(p
), &st
));
9620 unlock_user(p
, arg1
, 0);
9623 #ifdef TARGET_NR_lstat
9624 case TARGET_NR_lstat
:
9625 if (!(p
= lock_user_string(arg1
))) {
9626 return -TARGET_EFAULT
;
9628 ret
= get_errno(lstat(path(p
), &st
));
9629 unlock_user(p
, arg1
, 0);
9632 #ifdef TARGET_NR_fstat
9633 case TARGET_NR_fstat
:
9635 ret
= get_errno(fstat(arg1
, &st
));
9636 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9639 if (!is_error(ret
)) {
9640 struct target_stat
*target_st
;
9642 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9643 return -TARGET_EFAULT
;
9644 memset(target_st
, 0, sizeof(*target_st
));
9645 __put_user(st
.st_dev
, &target_st
->st_dev
);
9646 __put_user(st
.st_ino
, &target_st
->st_ino
);
9647 __put_user(st
.st_mode
, &target_st
->st_mode
);
9648 __put_user(st
.st_uid
, &target_st
->st_uid
);
9649 __put_user(st
.st_gid
, &target_st
->st_gid
);
9650 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9651 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9652 __put_user(st
.st_size
, &target_st
->st_size
);
9653 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9654 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9655 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9656 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9657 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9658 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9659 defined(TARGET_STAT_HAVE_NSEC)
9660 __put_user(st
.st_atim
.tv_nsec
,
9661 &target_st
->target_st_atime_nsec
);
9662 __put_user(st
.st_mtim
.tv_nsec
,
9663 &target_st
->target_st_mtime_nsec
);
9664 __put_user(st
.st_ctim
.tv_nsec
,
9665 &target_st
->target_st_ctime_nsec
);
9667 unlock_user_struct(target_st
, arg2
, 1);
9672 case TARGET_NR_vhangup
:
9673 return get_errno(vhangup());
9674 #ifdef TARGET_NR_syscall
9675 case TARGET_NR_syscall
:
9676 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9677 arg6
, arg7
, arg8
, 0);
9679 #if defined(TARGET_NR_wait4)
9680 case TARGET_NR_wait4
:
9683 abi_long status_ptr
= arg2
;
9684 struct rusage rusage
, *rusage_ptr
;
9685 abi_ulong target_rusage
= arg4
;
9686 abi_long rusage_err
;
9688 rusage_ptr
= &rusage
;
9691 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9692 if (!is_error(ret
)) {
9693 if (status_ptr
&& ret
) {
9694 status
= host_to_target_waitstatus(status
);
9695 if (put_user_s32(status
, status_ptr
))
9696 return -TARGET_EFAULT
;
9698 if (target_rusage
) {
9699 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9708 #ifdef TARGET_NR_swapoff
9709 case TARGET_NR_swapoff
:
9710 if (!(p
= lock_user_string(arg1
)))
9711 return -TARGET_EFAULT
;
9712 ret
= get_errno(swapoff(p
));
9713 unlock_user(p
, arg1
, 0);
9716 case TARGET_NR_sysinfo
:
9718 struct target_sysinfo
*target_value
;
9719 struct sysinfo value
;
9720 ret
= get_errno(sysinfo(&value
));
9721 if (!is_error(ret
) && arg1
)
9723 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9724 return -TARGET_EFAULT
;
9725 __put_user(value
.uptime
, &target_value
->uptime
);
9726 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9727 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9728 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9729 __put_user(value
.totalram
, &target_value
->totalram
);
9730 __put_user(value
.freeram
, &target_value
->freeram
);
9731 __put_user(value
.sharedram
, &target_value
->sharedram
);
9732 __put_user(value
.bufferram
, &target_value
->bufferram
);
9733 __put_user(value
.totalswap
, &target_value
->totalswap
);
9734 __put_user(value
.freeswap
, &target_value
->freeswap
);
9735 __put_user(value
.procs
, &target_value
->procs
);
9736 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9737 __put_user(value
.freehigh
, &target_value
->freehigh
);
9738 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9739 unlock_user_struct(target_value
, arg1
, 1);
9743 #ifdef TARGET_NR_ipc
9745 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9747 #ifdef TARGET_NR_semget
9748 case TARGET_NR_semget
:
9749 return get_errno(semget(arg1
, arg2
, arg3
));
9751 #ifdef TARGET_NR_semop
9752 case TARGET_NR_semop
:
9753 return do_semtimedop(arg1
, arg2
, arg3
, 0);
9755 #ifdef TARGET_NR_semtimedop
9756 case TARGET_NR_semtimedop
:
9757 return do_semtimedop(arg1
, arg2
, arg3
, arg4
);
9759 #ifdef TARGET_NR_semctl
9760 case TARGET_NR_semctl
:
9761 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9763 #ifdef TARGET_NR_msgctl
9764 case TARGET_NR_msgctl
:
9765 return do_msgctl(arg1
, arg2
, arg3
);
9767 #ifdef TARGET_NR_msgget
9768 case TARGET_NR_msgget
:
9769 return get_errno(msgget(arg1
, arg2
));
9771 #ifdef TARGET_NR_msgrcv
9772 case TARGET_NR_msgrcv
:
9773 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9775 #ifdef TARGET_NR_msgsnd
9776 case TARGET_NR_msgsnd
:
9777 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9779 #ifdef TARGET_NR_shmget
9780 case TARGET_NR_shmget
:
9781 return get_errno(shmget(arg1
, arg2
, arg3
));
9783 #ifdef TARGET_NR_shmctl
9784 case TARGET_NR_shmctl
:
9785 return do_shmctl(arg1
, arg2
, arg3
);
9787 #ifdef TARGET_NR_shmat
9788 case TARGET_NR_shmat
:
9789 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9791 #ifdef TARGET_NR_shmdt
9792 case TARGET_NR_shmdt
:
9793 return do_shmdt(arg1
);
9795 case TARGET_NR_fsync
:
9796 return get_errno(fsync(arg1
));
9797 case TARGET_NR_clone
:
9798 /* Linux manages to have three different orderings for its
9799 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9800 * match the kernel's CONFIG_CLONE_* settings.
9801 * Microblaze is further special in that it uses a sixth
9802 * implicit argument to clone for the TLS pointer.
9804 #if defined(TARGET_MICROBLAZE)
9805 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9806 #elif defined(TARGET_CLONE_BACKWARDS)
9807 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9808 #elif defined(TARGET_CLONE_BACKWARDS2)
9809 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9811 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9814 #ifdef __NR_exit_group
9815 /* new thread calls */
9816 case TARGET_NR_exit_group
:
9817 preexit_cleanup(cpu_env
, arg1
);
9818 return get_errno(exit_group(arg1
));
9820 case TARGET_NR_setdomainname
:
9821 if (!(p
= lock_user_string(arg1
)))
9822 return -TARGET_EFAULT
;
9823 ret
= get_errno(setdomainname(p
, arg2
));
9824 unlock_user(p
, arg1
, 0);
9826 case TARGET_NR_uname
:
9827 /* no need to transcode because we use the linux syscall */
9829 struct new_utsname
* buf
;
9831 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9832 return -TARGET_EFAULT
;
9833 ret
= get_errno(sys_uname(buf
));
9834 if (!is_error(ret
)) {
9835 /* Overwrite the native machine name with whatever is being
9837 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9838 sizeof(buf
->machine
));
9839 /* Allow the user to override the reported release. */
9840 if (qemu_uname_release
&& *qemu_uname_release
) {
9841 g_strlcpy(buf
->release
, qemu_uname_release
,
9842 sizeof(buf
->release
));
9845 unlock_user_struct(buf
, arg1
, 1);
9849 case TARGET_NR_modify_ldt
:
9850 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9851 #if !defined(TARGET_X86_64)
9852 case TARGET_NR_vm86
:
9853 return do_vm86(cpu_env
, arg1
, arg2
);
9856 #if defined(TARGET_NR_adjtimex)
9857 case TARGET_NR_adjtimex
:
9859 struct timex host_buf
;
9861 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9862 return -TARGET_EFAULT
;
9864 ret
= get_errno(adjtimex(&host_buf
));
9865 if (!is_error(ret
)) {
9866 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9867 return -TARGET_EFAULT
;
9873 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9874 case TARGET_NR_clock_adjtime
:
9876 struct timex htx
, *phtx
= &htx
;
9878 if (target_to_host_timex(phtx
, arg2
) != 0) {
9879 return -TARGET_EFAULT
;
9881 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9882 if (!is_error(ret
) && phtx
) {
9883 if (host_to_target_timex(arg2
, phtx
) != 0) {
9884 return -TARGET_EFAULT
;
9890 case TARGET_NR_getpgid
:
9891 return get_errno(getpgid(arg1
));
9892 case TARGET_NR_fchdir
:
9893 return get_errno(fchdir(arg1
));
9894 case TARGET_NR_personality
:
9895 return get_errno(personality(arg1
));
9896 #ifdef TARGET_NR__llseek /* Not on alpha */
9897 case TARGET_NR__llseek
:
9900 #if !defined(__NR_llseek)
9901 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9903 ret
= get_errno(res
);
9908 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9910 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9911 return -TARGET_EFAULT
;
9916 #ifdef TARGET_NR_getdents
9917 case TARGET_NR_getdents
:
9918 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9919 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9921 struct target_dirent
*target_dirp
;
9922 struct linux_dirent
*dirp
;
9923 abi_long count
= arg3
;
9925 dirp
= g_try_malloc(count
);
9927 return -TARGET_ENOMEM
;
9930 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9931 if (!is_error(ret
)) {
9932 struct linux_dirent
*de
;
9933 struct target_dirent
*tde
;
9935 int reclen
, treclen
;
9936 int count1
, tnamelen
;
9940 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9941 return -TARGET_EFAULT
;
9944 reclen
= de
->d_reclen
;
9945 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9946 assert(tnamelen
>= 0);
9947 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9948 assert(count1
+ treclen
<= count
);
9949 tde
->d_reclen
= tswap16(treclen
);
9950 tde
->d_ino
= tswapal(de
->d_ino
);
9951 tde
->d_off
= tswapal(de
->d_off
);
9952 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9953 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9955 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9959 unlock_user(target_dirp
, arg2
, ret
);
9965 struct linux_dirent
*dirp
;
9966 abi_long count
= arg3
;
9968 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9969 return -TARGET_EFAULT
;
9970 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9971 if (!is_error(ret
)) {
9972 struct linux_dirent
*de
;
9977 reclen
= de
->d_reclen
;
9980 de
->d_reclen
= tswap16(reclen
);
9981 tswapls(&de
->d_ino
);
9982 tswapls(&de
->d_off
);
9983 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9987 unlock_user(dirp
, arg2
, ret
);
9991 /* Implement getdents in terms of getdents64 */
9993 struct linux_dirent64
*dirp
;
9994 abi_long count
= arg3
;
9996 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9998 return -TARGET_EFAULT
;
10000 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10001 if (!is_error(ret
)) {
10002 /* Convert the dirent64 structs to target dirent. We do this
10003 * in-place, since we can guarantee that a target_dirent is no
10004 * larger than a dirent64; however this means we have to be
10005 * careful to read everything before writing in the new format.
10007 struct linux_dirent64
*de
;
10008 struct target_dirent
*tde
;
10013 tde
= (struct target_dirent
*)dirp
;
10015 int namelen
, treclen
;
10016 int reclen
= de
->d_reclen
;
10017 uint64_t ino
= de
->d_ino
;
10018 int64_t off
= de
->d_off
;
10019 uint8_t type
= de
->d_type
;
10021 namelen
= strlen(de
->d_name
);
10022 treclen
= offsetof(struct target_dirent
, d_name
)
10024 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10026 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10027 tde
->d_ino
= tswapal(ino
);
10028 tde
->d_off
= tswapal(off
);
10029 tde
->d_reclen
= tswap16(treclen
);
10030 /* The target_dirent type is in what was formerly a padding
10031 * byte at the end of the structure:
10033 *(((char *)tde
) + treclen
- 1) = type
;
10035 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10036 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10042 unlock_user(dirp
, arg2
, ret
);
10046 #endif /* TARGET_NR_getdents */
10047 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10048 case TARGET_NR_getdents64
:
10050 struct linux_dirent64
*dirp
;
10051 abi_long count
= arg3
;
10052 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10053 return -TARGET_EFAULT
;
10054 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10055 if (!is_error(ret
)) {
10056 struct linux_dirent64
*de
;
10061 reclen
= de
->d_reclen
;
10064 de
->d_reclen
= tswap16(reclen
);
10065 tswap64s((uint64_t *)&de
->d_ino
);
10066 tswap64s((uint64_t *)&de
->d_off
);
10067 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10071 unlock_user(dirp
, arg2
, ret
);
10074 #endif /* TARGET_NR_getdents64 */
10075 #if defined(TARGET_NR__newselect)
10076 case TARGET_NR__newselect
:
10077 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10079 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10080 # ifdef TARGET_NR_poll
10081 case TARGET_NR_poll
:
10083 # ifdef TARGET_NR_ppoll
10084 case TARGET_NR_ppoll
:
10087 struct target_pollfd
*target_pfd
;
10088 unsigned int nfds
= arg2
;
10089 struct pollfd
*pfd
;
10095 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10096 return -TARGET_EINVAL
;
10099 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10100 sizeof(struct target_pollfd
) * nfds
, 1);
10102 return -TARGET_EFAULT
;
10105 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10106 for (i
= 0; i
< nfds
; i
++) {
10107 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10108 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10113 # ifdef TARGET_NR_ppoll
10114 case TARGET_NR_ppoll
:
10116 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10117 target_sigset_t
*target_set
;
10118 sigset_t _set
, *set
= &_set
;
10121 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10122 unlock_user(target_pfd
, arg1
, 0);
10123 return -TARGET_EFAULT
;
10130 if (arg5
!= sizeof(target_sigset_t
)) {
10131 unlock_user(target_pfd
, arg1
, 0);
10132 return -TARGET_EINVAL
;
10135 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10137 unlock_user(target_pfd
, arg1
, 0);
10138 return -TARGET_EFAULT
;
10140 target_to_host_sigset(set
, target_set
);
10145 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10146 set
, SIGSET_T_SIZE
));
10148 if (!is_error(ret
) && arg3
) {
10149 host_to_target_timespec(arg3
, timeout_ts
);
10152 unlock_user(target_set
, arg4
, 0);
10157 # ifdef TARGET_NR_poll
10158 case TARGET_NR_poll
:
10160 struct timespec ts
, *pts
;
10163 /* Convert ms to secs, ns */
10164 ts
.tv_sec
= arg3
/ 1000;
10165 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10168 /* -ve poll() timeout means "infinite" */
10171 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10176 g_assert_not_reached();
10179 if (!is_error(ret
)) {
10180 for(i
= 0; i
< nfds
; i
++) {
10181 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10184 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10188 case TARGET_NR_flock
:
10189 /* NOTE: the flock constant seems to be the same for every
10191 return get_errno(safe_flock(arg1
, arg2
));
10192 case TARGET_NR_readv
:
10194 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10196 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10197 unlock_iovec(vec
, arg2
, arg3
, 1);
10199 ret
= -host_to_target_errno(errno
);
10203 case TARGET_NR_writev
:
10205 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10207 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10208 unlock_iovec(vec
, arg2
, arg3
, 0);
10210 ret
= -host_to_target_errno(errno
);
10214 #if defined(TARGET_NR_preadv)
10215 case TARGET_NR_preadv
:
10217 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10219 unsigned long low
, high
;
10221 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10222 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10223 unlock_iovec(vec
, arg2
, arg3
, 1);
10225 ret
= -host_to_target_errno(errno
);
10230 #if defined(TARGET_NR_pwritev)
10231 case TARGET_NR_pwritev
:
10233 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10235 unsigned long low
, high
;
10237 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10238 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10239 unlock_iovec(vec
, arg2
, arg3
, 0);
10241 ret
= -host_to_target_errno(errno
);
10246 case TARGET_NR_getsid
:
10247 return get_errno(getsid(arg1
));
10248 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10249 case TARGET_NR_fdatasync
:
10250 return get_errno(fdatasync(arg1
));
10252 #ifdef TARGET_NR__sysctl
10253 case TARGET_NR__sysctl
:
10254 /* We don't implement this, but ENOTDIR is always a safe
10256 return -TARGET_ENOTDIR
;
10258 case TARGET_NR_sched_getaffinity
:
10260 unsigned int mask_size
;
10261 unsigned long *mask
;
10264 * sched_getaffinity needs multiples of ulong, so need to take
10265 * care of mismatches between target ulong and host ulong sizes.
10267 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10268 return -TARGET_EINVAL
;
10270 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10272 mask
= alloca(mask_size
);
10273 memset(mask
, 0, mask_size
);
10274 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10276 if (!is_error(ret
)) {
10278 /* More data returned than the caller's buffer will fit.
10279 * This only happens if sizeof(abi_long) < sizeof(long)
10280 * and the caller passed us a buffer holding an odd number
10281 * of abi_longs. If the host kernel is actually using the
10282 * extra 4 bytes then fail EINVAL; otherwise we can just
10283 * ignore them and only copy the interesting part.
10285 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10286 if (numcpus
> arg2
* 8) {
10287 return -TARGET_EINVAL
;
10292 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10293 return -TARGET_EFAULT
;
10298 case TARGET_NR_sched_setaffinity
:
10300 unsigned int mask_size
;
10301 unsigned long *mask
;
10304 * sched_setaffinity needs multiples of ulong, so need to take
10305 * care of mismatches between target ulong and host ulong sizes.
10307 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10308 return -TARGET_EINVAL
;
10310 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10311 mask
= alloca(mask_size
);
10313 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10318 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10320 case TARGET_NR_getcpu
:
10322 unsigned cpu
, node
;
10323 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10324 arg2
? &node
: NULL
,
10326 if (is_error(ret
)) {
10329 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10330 return -TARGET_EFAULT
;
10332 if (arg2
&& put_user_u32(node
, arg2
)) {
10333 return -TARGET_EFAULT
;
10337 case TARGET_NR_sched_setparam
:
10339 struct sched_param
*target_schp
;
10340 struct sched_param schp
;
10343 return -TARGET_EINVAL
;
10345 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10346 return -TARGET_EFAULT
;
10347 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10348 unlock_user_struct(target_schp
, arg2
, 0);
10349 return get_errno(sched_setparam(arg1
, &schp
));
10351 case TARGET_NR_sched_getparam
:
10353 struct sched_param
*target_schp
;
10354 struct sched_param schp
;
10357 return -TARGET_EINVAL
;
10359 ret
= get_errno(sched_getparam(arg1
, &schp
));
10360 if (!is_error(ret
)) {
10361 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10362 return -TARGET_EFAULT
;
10363 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10364 unlock_user_struct(target_schp
, arg2
, 1);
10368 case TARGET_NR_sched_setscheduler
:
10370 struct sched_param
*target_schp
;
10371 struct sched_param schp
;
10373 return -TARGET_EINVAL
;
10375 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10376 return -TARGET_EFAULT
;
10377 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10378 unlock_user_struct(target_schp
, arg3
, 0);
10379 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10381 case TARGET_NR_sched_getscheduler
:
10382 return get_errno(sched_getscheduler(arg1
));
10383 case TARGET_NR_sched_yield
:
10384 return get_errno(sched_yield());
10385 case TARGET_NR_sched_get_priority_max
:
10386 return get_errno(sched_get_priority_max(arg1
));
10387 case TARGET_NR_sched_get_priority_min
:
10388 return get_errno(sched_get_priority_min(arg1
));
10389 #ifdef TARGET_NR_sched_rr_get_interval
10390 case TARGET_NR_sched_rr_get_interval
:
10392 struct timespec ts
;
10393 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10394 if (!is_error(ret
)) {
10395 ret
= host_to_target_timespec(arg2
, &ts
);
10400 #if defined(TARGET_NR_nanosleep)
10401 case TARGET_NR_nanosleep
:
10403 struct timespec req
, rem
;
10404 target_to_host_timespec(&req
, arg1
);
10405 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10406 if (is_error(ret
) && arg2
) {
10407 host_to_target_timespec(arg2
, &rem
);
10412 case TARGET_NR_prctl
:
10414 case PR_GET_PDEATHSIG
:
10417 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10418 if (!is_error(ret
) && arg2
10419 && put_user_ual(deathsig
, arg2
)) {
10420 return -TARGET_EFAULT
;
10427 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10429 return -TARGET_EFAULT
;
10431 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10432 arg3
, arg4
, arg5
));
10433 unlock_user(name
, arg2
, 16);
10438 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10440 return -TARGET_EFAULT
;
10442 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10443 arg3
, arg4
, arg5
));
10444 unlock_user(name
, arg2
, 0);
10449 case TARGET_PR_GET_FP_MODE
:
10451 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10453 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10454 ret
|= TARGET_PR_FP_MODE_FR
;
10456 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10457 ret
|= TARGET_PR_FP_MODE_FRE
;
10461 case TARGET_PR_SET_FP_MODE
:
10463 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10464 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10465 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10466 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10467 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10469 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10470 TARGET_PR_FP_MODE_FRE
;
10472 /* If nothing to change, return right away, successfully. */
10473 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10476 /* Check the value is valid */
10477 if (arg2
& ~known_bits
) {
10478 return -TARGET_EOPNOTSUPP
;
10480 /* Setting FRE without FR is not supported. */
10481 if (new_fre
&& !new_fr
) {
10482 return -TARGET_EOPNOTSUPP
;
10484 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10485 /* FR1 is not supported */
10486 return -TARGET_EOPNOTSUPP
;
10488 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10489 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10490 /* cannot set FR=0 */
10491 return -TARGET_EOPNOTSUPP
;
10493 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10494 /* Cannot set FRE=1 */
10495 return -TARGET_EOPNOTSUPP
;
10499 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10500 for (i
= 0; i
< 32 ; i
+= 2) {
10501 if (!old_fr
&& new_fr
) {
10502 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10503 } else if (old_fr
&& !new_fr
) {
10504 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10509 env
->CP0_Status
|= (1 << CP0St_FR
);
10510 env
->hflags
|= MIPS_HFLAG_F64
;
10512 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10513 env
->hflags
&= ~MIPS_HFLAG_F64
;
10516 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10517 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10518 env
->hflags
|= MIPS_HFLAG_FRE
;
10521 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10522 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10528 #ifdef TARGET_AARCH64
10529 case TARGET_PR_SVE_SET_VL
:
10531 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10532 * PR_SVE_VL_INHERIT. Note the kernel definition
10533 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10534 * even though the current architectural maximum is VQ=16.
10536 ret
= -TARGET_EINVAL
;
10537 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10538 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10539 CPUARMState
*env
= cpu_env
;
10540 ARMCPU
*cpu
= env_archcpu(env
);
10541 uint32_t vq
, old_vq
;
10543 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10544 vq
= MAX(arg2
/ 16, 1);
10545 vq
= MIN(vq
, cpu
->sve_max_vq
);
10548 aarch64_sve_narrow_vq(env
, vq
);
10550 env
->vfp
.zcr_el
[1] = vq
- 1;
10551 arm_rebuild_hflags(env
);
10555 case TARGET_PR_SVE_GET_VL
:
10556 ret
= -TARGET_EINVAL
;
10558 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10559 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10560 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10564 case TARGET_PR_PAC_RESET_KEYS
:
10566 CPUARMState
*env
= cpu_env
;
10567 ARMCPU
*cpu
= env_archcpu(env
);
10569 if (arg3
|| arg4
|| arg5
) {
10570 return -TARGET_EINVAL
;
10572 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10573 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10574 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10575 TARGET_PR_PAC_APGAKEY
);
10581 } else if (arg2
& ~all
) {
10582 return -TARGET_EINVAL
;
10584 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10585 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10586 sizeof(ARMPACKey
), &err
);
10588 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10589 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10590 sizeof(ARMPACKey
), &err
);
10592 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10593 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10594 sizeof(ARMPACKey
), &err
);
10596 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10597 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10598 sizeof(ARMPACKey
), &err
);
10600 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10601 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10602 sizeof(ARMPACKey
), &err
);
10606 * Some unknown failure in the crypto. The best
10607 * we can do is log it and fail the syscall.
10608 * The real syscall cannot fail this way.
10610 qemu_log_mask(LOG_UNIMP
,
10611 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10612 error_get_pretty(err
));
10614 return -TARGET_EIO
;
10619 return -TARGET_EINVAL
;
10620 #endif /* AARCH64 */
10621 case PR_GET_SECCOMP
:
10622 case PR_SET_SECCOMP
:
10623 /* Disable seccomp to prevent the target disabling syscalls we
10625 return -TARGET_EINVAL
;
10627 /* Most prctl options have no pointer arguments */
10628 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10631 #ifdef TARGET_NR_arch_prctl
10632 case TARGET_NR_arch_prctl
:
10633 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10635 #ifdef TARGET_NR_pread64
10636 case TARGET_NR_pread64
:
10637 if (regpairs_aligned(cpu_env
, num
)) {
10641 if (arg2
== 0 && arg3
== 0) {
10642 /* Special-case NULL buffer and zero length, which should succeed */
10645 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10647 return -TARGET_EFAULT
;
10650 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10651 unlock_user(p
, arg2
, ret
);
10653 case TARGET_NR_pwrite64
:
10654 if (regpairs_aligned(cpu_env
, num
)) {
10658 if (arg2
== 0 && arg3
== 0) {
10659 /* Special-case NULL buffer and zero length, which should succeed */
10662 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10664 return -TARGET_EFAULT
;
10667 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10668 unlock_user(p
, arg2
, 0);
10671 case TARGET_NR_getcwd
:
10672 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10673 return -TARGET_EFAULT
;
10674 ret
= get_errno(sys_getcwd1(p
, arg2
));
10675 unlock_user(p
, arg1
, ret
);
10677 case TARGET_NR_capget
:
10678 case TARGET_NR_capset
:
10680 struct target_user_cap_header
*target_header
;
10681 struct target_user_cap_data
*target_data
= NULL
;
10682 struct __user_cap_header_struct header
;
10683 struct __user_cap_data_struct data
[2];
10684 struct __user_cap_data_struct
*dataptr
= NULL
;
10685 int i
, target_datalen
;
10686 int data_items
= 1;
10688 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10689 return -TARGET_EFAULT
;
10691 header
.version
= tswap32(target_header
->version
);
10692 header
.pid
= tswap32(target_header
->pid
);
10694 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10695 /* Version 2 and up takes pointer to two user_data structs */
10699 target_datalen
= sizeof(*target_data
) * data_items
;
10702 if (num
== TARGET_NR_capget
) {
10703 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10705 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10707 if (!target_data
) {
10708 unlock_user_struct(target_header
, arg1
, 0);
10709 return -TARGET_EFAULT
;
10712 if (num
== TARGET_NR_capset
) {
10713 for (i
= 0; i
< data_items
; i
++) {
10714 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10715 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10716 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10723 if (num
== TARGET_NR_capget
) {
10724 ret
= get_errno(capget(&header
, dataptr
));
10726 ret
= get_errno(capset(&header
, dataptr
));
10729 /* The kernel always updates version for both capget and capset */
10730 target_header
->version
= tswap32(header
.version
);
10731 unlock_user_struct(target_header
, arg1
, 1);
10734 if (num
== TARGET_NR_capget
) {
10735 for (i
= 0; i
< data_items
; i
++) {
10736 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10737 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10738 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10740 unlock_user(target_data
, arg2
, target_datalen
);
10742 unlock_user(target_data
, arg2
, 0);
10747 case TARGET_NR_sigaltstack
:
10748 return do_sigaltstack(arg1
, arg2
,
10749 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10751 #ifdef CONFIG_SENDFILE
10752 #ifdef TARGET_NR_sendfile
10753 case TARGET_NR_sendfile
:
10755 off_t
*offp
= NULL
;
10758 ret
= get_user_sal(off
, arg3
);
10759 if (is_error(ret
)) {
10764 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10765 if (!is_error(ret
) && arg3
) {
10766 abi_long ret2
= put_user_sal(off
, arg3
);
10767 if (is_error(ret2
)) {
10774 #ifdef TARGET_NR_sendfile64
10775 case TARGET_NR_sendfile64
:
10777 off_t
*offp
= NULL
;
10780 ret
= get_user_s64(off
, arg3
);
10781 if (is_error(ret
)) {
10786 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10787 if (!is_error(ret
) && arg3
) {
10788 abi_long ret2
= put_user_s64(off
, arg3
);
10789 if (is_error(ret2
)) {
10797 #ifdef TARGET_NR_vfork
10798 case TARGET_NR_vfork
:
10799 return get_errno(do_fork(cpu_env
,
10800 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10803 #ifdef TARGET_NR_ugetrlimit
10804 case TARGET_NR_ugetrlimit
:
10806 struct rlimit rlim
;
10807 int resource
= target_to_host_resource(arg1
);
10808 ret
= get_errno(getrlimit(resource
, &rlim
));
10809 if (!is_error(ret
)) {
10810 struct target_rlimit
*target_rlim
;
10811 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10812 return -TARGET_EFAULT
;
10813 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10814 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10815 unlock_user_struct(target_rlim
, arg2
, 1);
10820 #ifdef TARGET_NR_truncate64
10821 case TARGET_NR_truncate64
:
10822 if (!(p
= lock_user_string(arg1
)))
10823 return -TARGET_EFAULT
;
10824 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10825 unlock_user(p
, arg1
, 0);
10828 #ifdef TARGET_NR_ftruncate64
10829 case TARGET_NR_ftruncate64
:
10830 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10832 #ifdef TARGET_NR_stat64
10833 case TARGET_NR_stat64
:
10834 if (!(p
= lock_user_string(arg1
))) {
10835 return -TARGET_EFAULT
;
10837 ret
= get_errno(stat(path(p
), &st
));
10838 unlock_user(p
, arg1
, 0);
10839 if (!is_error(ret
))
10840 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10843 #ifdef TARGET_NR_lstat64
10844 case TARGET_NR_lstat64
:
10845 if (!(p
= lock_user_string(arg1
))) {
10846 return -TARGET_EFAULT
;
10848 ret
= get_errno(lstat(path(p
), &st
));
10849 unlock_user(p
, arg1
, 0);
10850 if (!is_error(ret
))
10851 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10854 #ifdef TARGET_NR_fstat64
10855 case TARGET_NR_fstat64
:
10856 ret
= get_errno(fstat(arg1
, &st
));
10857 if (!is_error(ret
))
10858 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10861 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10862 #ifdef TARGET_NR_fstatat64
10863 case TARGET_NR_fstatat64
:
10865 #ifdef TARGET_NR_newfstatat
10866 case TARGET_NR_newfstatat
:
10868 if (!(p
= lock_user_string(arg2
))) {
10869 return -TARGET_EFAULT
;
10871 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10872 unlock_user(p
, arg2
, 0);
10873 if (!is_error(ret
))
10874 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10877 #if defined(TARGET_NR_statx)
10878 case TARGET_NR_statx
:
10880 struct target_statx
*target_stx
;
10884 p
= lock_user_string(arg2
);
10886 return -TARGET_EFAULT
;
10888 #if defined(__NR_statx)
10891 * It is assumed that struct statx is architecture independent.
10893 struct target_statx host_stx
;
10896 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10897 if (!is_error(ret
)) {
10898 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10899 unlock_user(p
, arg2
, 0);
10900 return -TARGET_EFAULT
;
10904 if (ret
!= -TARGET_ENOSYS
) {
10905 unlock_user(p
, arg2
, 0);
10910 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10911 unlock_user(p
, arg2
, 0);
10913 if (!is_error(ret
)) {
10914 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10915 return -TARGET_EFAULT
;
10917 memset(target_stx
, 0, sizeof(*target_stx
));
10918 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10919 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10920 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10921 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10922 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10923 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10924 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10925 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10926 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10927 __put_user(st
.st_size
, &target_stx
->stx_size
);
10928 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
10929 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
10930 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
10931 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
10932 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
10933 unlock_user_struct(target_stx
, arg5
, 1);
10938 #ifdef TARGET_NR_lchown
10939 case TARGET_NR_lchown
:
10940 if (!(p
= lock_user_string(arg1
)))
10941 return -TARGET_EFAULT
;
10942 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10943 unlock_user(p
, arg1
, 0);
10946 #ifdef TARGET_NR_getuid
10947 case TARGET_NR_getuid
:
10948 return get_errno(high2lowuid(getuid()));
10950 #ifdef TARGET_NR_getgid
10951 case TARGET_NR_getgid
:
10952 return get_errno(high2lowgid(getgid()));
10954 #ifdef TARGET_NR_geteuid
10955 case TARGET_NR_geteuid
:
10956 return get_errno(high2lowuid(geteuid()));
10958 #ifdef TARGET_NR_getegid
10959 case TARGET_NR_getegid
:
10960 return get_errno(high2lowgid(getegid()));
10962 case TARGET_NR_setreuid
:
10963 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10964 case TARGET_NR_setregid
:
10965 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10966 case TARGET_NR_getgroups
:
10968 int gidsetsize
= arg1
;
10969 target_id
*target_grouplist
;
10973 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10974 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10975 if (gidsetsize
== 0)
10977 if (!is_error(ret
)) {
10978 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10979 if (!target_grouplist
)
10980 return -TARGET_EFAULT
;
10981 for(i
= 0;i
< ret
; i
++)
10982 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10983 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10987 case TARGET_NR_setgroups
:
10989 int gidsetsize
= arg1
;
10990 target_id
*target_grouplist
;
10991 gid_t
*grouplist
= NULL
;
10994 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10995 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10996 if (!target_grouplist
) {
10997 return -TARGET_EFAULT
;
10999 for (i
= 0; i
< gidsetsize
; i
++) {
11000 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11002 unlock_user(target_grouplist
, arg2
, 0);
11004 return get_errno(setgroups(gidsetsize
, grouplist
));
11006 case TARGET_NR_fchown
:
11007 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11008 #if defined(TARGET_NR_fchownat)
11009 case TARGET_NR_fchownat
:
11010 if (!(p
= lock_user_string(arg2
)))
11011 return -TARGET_EFAULT
;
11012 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11013 low2highgid(arg4
), arg5
));
11014 unlock_user(p
, arg2
, 0);
11017 #ifdef TARGET_NR_setresuid
11018 case TARGET_NR_setresuid
:
11019 return get_errno(sys_setresuid(low2highuid(arg1
),
11021 low2highuid(arg3
)));
11023 #ifdef TARGET_NR_getresuid
11024 case TARGET_NR_getresuid
:
11026 uid_t ruid
, euid
, suid
;
11027 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11028 if (!is_error(ret
)) {
11029 if (put_user_id(high2lowuid(ruid
), arg1
)
11030 || put_user_id(high2lowuid(euid
), arg2
)
11031 || put_user_id(high2lowuid(suid
), arg3
))
11032 return -TARGET_EFAULT
;
11037 #ifdef TARGET_NR_getresgid
11038 case TARGET_NR_setresgid
:
11039 return get_errno(sys_setresgid(low2highgid(arg1
),
11041 low2highgid(arg3
)));
11043 #ifdef TARGET_NR_getresgid
11044 case TARGET_NR_getresgid
:
11046 gid_t rgid
, egid
, sgid
;
11047 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11048 if (!is_error(ret
)) {
11049 if (put_user_id(high2lowgid(rgid
), arg1
)
11050 || put_user_id(high2lowgid(egid
), arg2
)
11051 || put_user_id(high2lowgid(sgid
), arg3
))
11052 return -TARGET_EFAULT
;
11057 #ifdef TARGET_NR_chown
11058 case TARGET_NR_chown
:
11059 if (!(p
= lock_user_string(arg1
)))
11060 return -TARGET_EFAULT
;
11061 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11062 unlock_user(p
, arg1
, 0);
11065 case TARGET_NR_setuid
:
11066 return get_errno(sys_setuid(low2highuid(arg1
)));
11067 case TARGET_NR_setgid
:
11068 return get_errno(sys_setgid(low2highgid(arg1
)));
11069 case TARGET_NR_setfsuid
:
11070 return get_errno(setfsuid(arg1
));
11071 case TARGET_NR_setfsgid
:
11072 return get_errno(setfsgid(arg1
));
11074 #ifdef TARGET_NR_lchown32
11075 case TARGET_NR_lchown32
:
11076 if (!(p
= lock_user_string(arg1
)))
11077 return -TARGET_EFAULT
;
11078 ret
= get_errno(lchown(p
, arg2
, arg3
));
11079 unlock_user(p
, arg1
, 0);
11082 #ifdef TARGET_NR_getuid32
11083 case TARGET_NR_getuid32
:
11084 return get_errno(getuid());
11087 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11088 /* Alpha specific */
11089 case TARGET_NR_getxuid
:
11093 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11095 return get_errno(getuid());
11097 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11098 /* Alpha specific */
11099 case TARGET_NR_getxgid
:
11103 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11105 return get_errno(getgid());
11107 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11108 /* Alpha specific */
11109 case TARGET_NR_osf_getsysinfo
:
11110 ret
= -TARGET_EOPNOTSUPP
;
11112 case TARGET_GSI_IEEE_FP_CONTROL
:
11114 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11115 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11117 swcr
&= ~SWCR_STATUS_MASK
;
11118 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11120 if (put_user_u64 (swcr
, arg2
))
11121 return -TARGET_EFAULT
;
11126 /* case GSI_IEEE_STATE_AT_SIGNAL:
11127 -- Not implemented in linux kernel.
11129 -- Retrieves current unaligned access state; not much used.
11130 case GSI_PROC_TYPE:
11131 -- Retrieves implver information; surely not used.
11132 case GSI_GET_HWRPB:
11133 -- Grabs a copy of the HWRPB; surely not used.
11138 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11139 /* Alpha specific */
11140 case TARGET_NR_osf_setsysinfo
:
11141 ret
= -TARGET_EOPNOTSUPP
;
11143 case TARGET_SSI_IEEE_FP_CONTROL
:
11145 uint64_t swcr
, fpcr
;
11147 if (get_user_u64 (swcr
, arg2
)) {
11148 return -TARGET_EFAULT
;
11152 * The kernel calls swcr_update_status to update the
11153 * status bits from the fpcr at every point that it
11154 * could be queried. Therefore, we store the status
11155 * bits only in FPCR.
11157 ((CPUAlphaState
*)cpu_env
)->swcr
11158 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11160 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11161 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11162 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11163 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11168 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11170 uint64_t exc
, fpcr
, fex
;
11172 if (get_user_u64(exc
, arg2
)) {
11173 return -TARGET_EFAULT
;
11175 exc
&= SWCR_STATUS_MASK
;
11176 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11178 /* Old exceptions are not signaled. */
11179 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11181 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11182 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11184 /* Update the hardware fpcr. */
11185 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11186 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11189 int si_code
= TARGET_FPE_FLTUNK
;
11190 target_siginfo_t info
;
11192 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11193 si_code
= TARGET_FPE_FLTUND
;
11195 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11196 si_code
= TARGET_FPE_FLTRES
;
11198 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11199 si_code
= TARGET_FPE_FLTUND
;
11201 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11202 si_code
= TARGET_FPE_FLTOVF
;
11204 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11205 si_code
= TARGET_FPE_FLTDIV
;
11207 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11208 si_code
= TARGET_FPE_FLTINV
;
11211 info
.si_signo
= SIGFPE
;
11213 info
.si_code
= si_code
;
11214 info
._sifields
._sigfault
._addr
11215 = ((CPUArchState
*)cpu_env
)->pc
;
11216 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11217 QEMU_SI_FAULT
, &info
);
11223 /* case SSI_NVPAIRS:
11224 -- Used with SSIN_UACPROC to enable unaligned accesses.
11225 case SSI_IEEE_STATE_AT_SIGNAL:
11226 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11227 -- Not implemented in linux kernel
11232 #ifdef TARGET_NR_osf_sigprocmask
11233 /* Alpha specific. */
11234 case TARGET_NR_osf_sigprocmask
:
11238 sigset_t set
, oldset
;
11241 case TARGET_SIG_BLOCK
:
11244 case TARGET_SIG_UNBLOCK
:
11247 case TARGET_SIG_SETMASK
:
11251 return -TARGET_EINVAL
;
11254 target_to_host_old_sigset(&set
, &mask
);
11255 ret
= do_sigprocmask(how
, &set
, &oldset
);
11257 host_to_target_old_sigset(&mask
, &oldset
);
11264 #ifdef TARGET_NR_getgid32
11265 case TARGET_NR_getgid32
:
11266 return get_errno(getgid());
11268 #ifdef TARGET_NR_geteuid32
11269 case TARGET_NR_geteuid32
:
11270 return get_errno(geteuid());
11272 #ifdef TARGET_NR_getegid32
11273 case TARGET_NR_getegid32
:
11274 return get_errno(getegid());
11276 #ifdef TARGET_NR_setreuid32
11277 case TARGET_NR_setreuid32
:
11278 return get_errno(setreuid(arg1
, arg2
));
11280 #ifdef TARGET_NR_setregid32
11281 case TARGET_NR_setregid32
:
11282 return get_errno(setregid(arg1
, arg2
));
11284 #ifdef TARGET_NR_getgroups32
11285 case TARGET_NR_getgroups32
:
11287 int gidsetsize
= arg1
;
11288 uint32_t *target_grouplist
;
11292 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11293 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11294 if (gidsetsize
== 0)
11296 if (!is_error(ret
)) {
11297 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11298 if (!target_grouplist
) {
11299 return -TARGET_EFAULT
;
11301 for(i
= 0;i
< ret
; i
++)
11302 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11303 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11308 #ifdef TARGET_NR_setgroups32
11309 case TARGET_NR_setgroups32
:
11311 int gidsetsize
= arg1
;
11312 uint32_t *target_grouplist
;
11316 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11317 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11318 if (!target_grouplist
) {
11319 return -TARGET_EFAULT
;
11321 for(i
= 0;i
< gidsetsize
; i
++)
11322 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11323 unlock_user(target_grouplist
, arg2
, 0);
11324 return get_errno(setgroups(gidsetsize
, grouplist
));
11327 #ifdef TARGET_NR_fchown32
11328 case TARGET_NR_fchown32
:
11329 return get_errno(fchown(arg1
, arg2
, arg3
));
11331 #ifdef TARGET_NR_setresuid32
11332 case TARGET_NR_setresuid32
:
11333 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11335 #ifdef TARGET_NR_getresuid32
11336 case TARGET_NR_getresuid32
:
11338 uid_t ruid
, euid
, suid
;
11339 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11340 if (!is_error(ret
)) {
11341 if (put_user_u32(ruid
, arg1
)
11342 || put_user_u32(euid
, arg2
)
11343 || put_user_u32(suid
, arg3
))
11344 return -TARGET_EFAULT
;
11349 #ifdef TARGET_NR_setresgid32
11350 case TARGET_NR_setresgid32
:
11351 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11353 #ifdef TARGET_NR_getresgid32
11354 case TARGET_NR_getresgid32
:
11356 gid_t rgid
, egid
, sgid
;
11357 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11358 if (!is_error(ret
)) {
11359 if (put_user_u32(rgid
, arg1
)
11360 || put_user_u32(egid
, arg2
)
11361 || put_user_u32(sgid
, arg3
))
11362 return -TARGET_EFAULT
;
11367 #ifdef TARGET_NR_chown32
11368 case TARGET_NR_chown32
:
11369 if (!(p
= lock_user_string(arg1
)))
11370 return -TARGET_EFAULT
;
11371 ret
= get_errno(chown(p
, arg2
, arg3
));
11372 unlock_user(p
, arg1
, 0);
11375 #ifdef TARGET_NR_setuid32
11376 case TARGET_NR_setuid32
:
11377 return get_errno(sys_setuid(arg1
));
11379 #ifdef TARGET_NR_setgid32
11380 case TARGET_NR_setgid32
:
11381 return get_errno(sys_setgid(arg1
));
11383 #ifdef TARGET_NR_setfsuid32
11384 case TARGET_NR_setfsuid32
:
11385 return get_errno(setfsuid(arg1
));
11387 #ifdef TARGET_NR_setfsgid32
11388 case TARGET_NR_setfsgid32
:
11389 return get_errno(setfsgid(arg1
));
11391 #ifdef TARGET_NR_mincore
11392 case TARGET_NR_mincore
:
11394 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11396 return -TARGET_ENOMEM
;
11398 p
= lock_user_string(arg3
);
11400 ret
= -TARGET_EFAULT
;
11402 ret
= get_errno(mincore(a
, arg2
, p
));
11403 unlock_user(p
, arg3
, ret
);
11405 unlock_user(a
, arg1
, 0);
11409 #ifdef TARGET_NR_arm_fadvise64_64
11410 case TARGET_NR_arm_fadvise64_64
:
11411 /* arm_fadvise64_64 looks like fadvise64_64 but
11412 * with different argument order: fd, advice, offset, len
11413 * rather than the usual fd, offset, len, advice.
11414 * Note that offset and len are both 64-bit so appear as
11415 * pairs of 32-bit registers.
11417 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11418 target_offset64(arg5
, arg6
), arg2
);
11419 return -host_to_target_errno(ret
);
11422 #if TARGET_ABI_BITS == 32
11424 #ifdef TARGET_NR_fadvise64_64
11425 case TARGET_NR_fadvise64_64
:
11426 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11427 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11435 /* 6 args: fd, offset (high, low), len (high, low), advice */
11436 if (regpairs_aligned(cpu_env
, num
)) {
11437 /* offset is in (3,4), len in (5,6) and advice in 7 */
11445 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11446 target_offset64(arg4
, arg5
), arg6
);
11447 return -host_to_target_errno(ret
);
11450 #ifdef TARGET_NR_fadvise64
11451 case TARGET_NR_fadvise64
:
11452 /* 5 args: fd, offset (high, low), len, advice */
11453 if (regpairs_aligned(cpu_env
, num
)) {
11454 /* offset is in (3,4), len in 5 and advice in 6 */
11460 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11461 return -host_to_target_errno(ret
);
11464 #else /* not a 32-bit ABI */
11465 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11466 #ifdef TARGET_NR_fadvise64_64
11467 case TARGET_NR_fadvise64_64
:
11469 #ifdef TARGET_NR_fadvise64
11470 case TARGET_NR_fadvise64
:
11472 #ifdef TARGET_S390X
11474 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11475 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11476 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11477 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11481 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11483 #endif /* end of 64-bit ABI fadvise handling */
11485 #ifdef TARGET_NR_madvise
11486 case TARGET_NR_madvise
:
11487 /* A straight passthrough may not be safe because qemu sometimes
11488 turns private file-backed mappings into anonymous mappings.
11489 This will break MADV_DONTNEED.
11490 This is a hint, so ignoring and returning success is ok. */
11493 #ifdef TARGET_NR_fcntl64
11494 case TARGET_NR_fcntl64
:
11498 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11499 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11502 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11503 copyfrom
= copy_from_user_oabi_flock64
;
11504 copyto
= copy_to_user_oabi_flock64
;
11508 cmd
= target_to_host_fcntl_cmd(arg2
);
11509 if (cmd
== -TARGET_EINVAL
) {
11514 case TARGET_F_GETLK64
:
11515 ret
= copyfrom(&fl
, arg3
);
11519 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11521 ret
= copyto(arg3
, &fl
);
11525 case TARGET_F_SETLK64
:
11526 case TARGET_F_SETLKW64
:
11527 ret
= copyfrom(&fl
, arg3
);
11531 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11534 ret
= do_fcntl(arg1
, arg2
, arg3
);
11540 #ifdef TARGET_NR_cacheflush
11541 case TARGET_NR_cacheflush
:
11542 /* self-modifying code is handled automatically, so nothing needed */
11545 #ifdef TARGET_NR_getpagesize
11546 case TARGET_NR_getpagesize
:
11547 return TARGET_PAGE_SIZE
;
11549 case TARGET_NR_gettid
:
11550 return get_errno(sys_gettid());
11551 #ifdef TARGET_NR_readahead
11552 case TARGET_NR_readahead
:
11553 #if TARGET_ABI_BITS == 32
11554 if (regpairs_aligned(cpu_env
, num
)) {
11559 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11561 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11566 #ifdef TARGET_NR_setxattr
11567 case TARGET_NR_listxattr
:
11568 case TARGET_NR_llistxattr
:
11572 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11574 return -TARGET_EFAULT
;
11577 p
= lock_user_string(arg1
);
11579 if (num
== TARGET_NR_listxattr
) {
11580 ret
= get_errno(listxattr(p
, b
, arg3
));
11582 ret
= get_errno(llistxattr(p
, b
, arg3
));
11585 ret
= -TARGET_EFAULT
;
11587 unlock_user(p
, arg1
, 0);
11588 unlock_user(b
, arg2
, arg3
);
11591 case TARGET_NR_flistxattr
:
11595 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11597 return -TARGET_EFAULT
;
11600 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11601 unlock_user(b
, arg2
, arg3
);
11604 case TARGET_NR_setxattr
:
11605 case TARGET_NR_lsetxattr
:
11607 void *p
, *n
, *v
= 0;
11609 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11611 return -TARGET_EFAULT
;
11614 p
= lock_user_string(arg1
);
11615 n
= lock_user_string(arg2
);
11617 if (num
== TARGET_NR_setxattr
) {
11618 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11620 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11623 ret
= -TARGET_EFAULT
;
11625 unlock_user(p
, arg1
, 0);
11626 unlock_user(n
, arg2
, 0);
11627 unlock_user(v
, arg3
, 0);
11630 case TARGET_NR_fsetxattr
:
11634 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11636 return -TARGET_EFAULT
;
11639 n
= lock_user_string(arg2
);
11641 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11643 ret
= -TARGET_EFAULT
;
11645 unlock_user(n
, arg2
, 0);
11646 unlock_user(v
, arg3
, 0);
11649 case TARGET_NR_getxattr
:
11650 case TARGET_NR_lgetxattr
:
11652 void *p
, *n
, *v
= 0;
11654 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11656 return -TARGET_EFAULT
;
11659 p
= lock_user_string(arg1
);
11660 n
= lock_user_string(arg2
);
11662 if (num
== TARGET_NR_getxattr
) {
11663 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11665 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11668 ret
= -TARGET_EFAULT
;
11670 unlock_user(p
, arg1
, 0);
11671 unlock_user(n
, arg2
, 0);
11672 unlock_user(v
, arg3
, arg4
);
11675 case TARGET_NR_fgetxattr
:
11679 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11681 return -TARGET_EFAULT
;
11684 n
= lock_user_string(arg2
);
11686 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11688 ret
= -TARGET_EFAULT
;
11690 unlock_user(n
, arg2
, 0);
11691 unlock_user(v
, arg3
, arg4
);
11694 case TARGET_NR_removexattr
:
11695 case TARGET_NR_lremovexattr
:
11698 p
= lock_user_string(arg1
);
11699 n
= lock_user_string(arg2
);
11701 if (num
== TARGET_NR_removexattr
) {
11702 ret
= get_errno(removexattr(p
, n
));
11704 ret
= get_errno(lremovexattr(p
, n
));
11707 ret
= -TARGET_EFAULT
;
11709 unlock_user(p
, arg1
, 0);
11710 unlock_user(n
, arg2
, 0);
11713 case TARGET_NR_fremovexattr
:
11716 n
= lock_user_string(arg2
);
11718 ret
= get_errno(fremovexattr(arg1
, n
));
11720 ret
= -TARGET_EFAULT
;
11722 unlock_user(n
, arg2
, 0);
11726 #endif /* CONFIG_ATTR */
11727 #ifdef TARGET_NR_set_thread_area
11728 case TARGET_NR_set_thread_area
:
11729 #if defined(TARGET_MIPS)
11730 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11732 #elif defined(TARGET_CRIS)
11734 ret
= -TARGET_EINVAL
;
11736 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11740 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11741 return do_set_thread_area(cpu_env
, arg1
);
11742 #elif defined(TARGET_M68K)
11744 TaskState
*ts
= cpu
->opaque
;
11745 ts
->tp_value
= arg1
;
11749 return -TARGET_ENOSYS
;
11752 #ifdef TARGET_NR_get_thread_area
11753 case TARGET_NR_get_thread_area
:
11754 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11755 return do_get_thread_area(cpu_env
, arg1
);
11756 #elif defined(TARGET_M68K)
11758 TaskState
*ts
= cpu
->opaque
;
11759 return ts
->tp_value
;
11762 return -TARGET_ENOSYS
;
11765 #ifdef TARGET_NR_getdomainname
11766 case TARGET_NR_getdomainname
:
11767 return -TARGET_ENOSYS
;
11770 #ifdef TARGET_NR_clock_settime
11771 case TARGET_NR_clock_settime
:
11773 struct timespec ts
;
11775 ret
= target_to_host_timespec(&ts
, arg2
);
11776 if (!is_error(ret
)) {
11777 ret
= get_errno(clock_settime(arg1
, &ts
));
11782 #ifdef TARGET_NR_clock_settime64
11783 case TARGET_NR_clock_settime64
:
11785 struct timespec ts
;
11787 ret
= target_to_host_timespec64(&ts
, arg2
);
11788 if (!is_error(ret
)) {
11789 ret
= get_errno(clock_settime(arg1
, &ts
));
11794 #ifdef TARGET_NR_clock_gettime
11795 case TARGET_NR_clock_gettime
:
11797 struct timespec ts
;
11798 ret
= get_errno(clock_gettime(arg1
, &ts
));
11799 if (!is_error(ret
)) {
11800 ret
= host_to_target_timespec(arg2
, &ts
);
11805 #ifdef TARGET_NR_clock_gettime64
11806 case TARGET_NR_clock_gettime64
:
11808 struct timespec ts
;
11809 ret
= get_errno(clock_gettime(arg1
, &ts
));
11810 if (!is_error(ret
)) {
11811 ret
= host_to_target_timespec64(arg2
, &ts
);
11816 #ifdef TARGET_NR_clock_getres
11817 case TARGET_NR_clock_getres
:
11819 struct timespec ts
;
11820 ret
= get_errno(clock_getres(arg1
, &ts
));
11821 if (!is_error(ret
)) {
11822 host_to_target_timespec(arg2
, &ts
);
11827 #ifdef TARGET_NR_clock_nanosleep
11828 case TARGET_NR_clock_nanosleep
:
11830 struct timespec ts
;
11831 target_to_host_timespec(&ts
, arg3
);
11832 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11833 &ts
, arg4
? &ts
: NULL
));
11835 host_to_target_timespec(arg4
, &ts
);
11837 #if defined(TARGET_PPC)
11838 /* clock_nanosleep is odd in that it returns positive errno values.
11839 * On PPC, CR0 bit 3 should be set in such a situation. */
11840 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11841 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11848 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11849 case TARGET_NR_set_tid_address
:
11850 return get_errno(set_tid_address((int *)g2h(arg1
)));
11853 case TARGET_NR_tkill
:
11854 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11856 case TARGET_NR_tgkill
:
11857 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11858 target_to_host_signal(arg3
)));
11860 #ifdef TARGET_NR_set_robust_list
11861 case TARGET_NR_set_robust_list
:
11862 case TARGET_NR_get_robust_list
:
11863 /* The ABI for supporting robust futexes has userspace pass
11864 * the kernel a pointer to a linked list which is updated by
11865 * userspace after the syscall; the list is walked by the kernel
11866 * when the thread exits. Since the linked list in QEMU guest
11867 * memory isn't a valid linked list for the host and we have
11868 * no way to reliably intercept the thread-death event, we can't
11869 * support these. Silently return ENOSYS so that guest userspace
11870 * falls back to a non-robust futex implementation (which should
11871 * be OK except in the corner case of the guest crashing while
11872 * holding a mutex that is shared with another process via
11875 return -TARGET_ENOSYS
;
11878 #if defined(TARGET_NR_utimensat)
11879 case TARGET_NR_utimensat
:
11881 struct timespec
*tsp
, ts
[2];
11885 target_to_host_timespec(ts
, arg3
);
11886 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11890 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11892 if (!(p
= lock_user_string(arg2
))) {
11893 return -TARGET_EFAULT
;
11895 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11896 unlock_user(p
, arg2
, 0);
11901 #ifdef TARGET_NR_futex
11902 case TARGET_NR_futex
:
11903 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11905 #ifdef TARGET_NR_futex_time64
11906 case TARGET_NR_futex_time64
:
11907 return do_futex_time64(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11909 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11910 case TARGET_NR_inotify_init
:
11911 ret
= get_errno(sys_inotify_init());
11913 fd_trans_register(ret
, &target_inotify_trans
);
11917 #ifdef CONFIG_INOTIFY1
11918 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11919 case TARGET_NR_inotify_init1
:
11920 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11921 fcntl_flags_tbl
)));
11923 fd_trans_register(ret
, &target_inotify_trans
);
11928 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11929 case TARGET_NR_inotify_add_watch
:
11930 p
= lock_user_string(arg2
);
11931 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11932 unlock_user(p
, arg2
, 0);
11935 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11936 case TARGET_NR_inotify_rm_watch
:
11937 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11940 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11941 case TARGET_NR_mq_open
:
11943 struct mq_attr posix_mq_attr
;
11944 struct mq_attr
*pposix_mq_attr
;
11947 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11948 pposix_mq_attr
= NULL
;
11950 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11951 return -TARGET_EFAULT
;
11953 pposix_mq_attr
= &posix_mq_attr
;
11955 p
= lock_user_string(arg1
- 1);
11957 return -TARGET_EFAULT
;
11959 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11960 unlock_user (p
, arg1
, 0);
11964 case TARGET_NR_mq_unlink
:
11965 p
= lock_user_string(arg1
- 1);
11967 return -TARGET_EFAULT
;
11969 ret
= get_errno(mq_unlink(p
));
11970 unlock_user (p
, arg1
, 0);
11973 #ifdef TARGET_NR_mq_timedsend
11974 case TARGET_NR_mq_timedsend
:
11976 struct timespec ts
;
11978 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11980 target_to_host_timespec(&ts
, arg5
);
11981 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11982 host_to_target_timespec(arg5
, &ts
);
11984 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11986 unlock_user (p
, arg2
, arg3
);
11991 #ifdef TARGET_NR_mq_timedreceive
11992 case TARGET_NR_mq_timedreceive
:
11994 struct timespec ts
;
11997 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11999 target_to_host_timespec(&ts
, arg5
);
12000 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12002 host_to_target_timespec(arg5
, &ts
);
12004 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12007 unlock_user (p
, arg2
, arg3
);
12009 put_user_u32(prio
, arg4
);
12014 /* Not implemented for now... */
12015 /* case TARGET_NR_mq_notify: */
12018 case TARGET_NR_mq_getsetattr
:
12020 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12023 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12024 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12025 &posix_mq_attr_out
));
12026 } else if (arg3
!= 0) {
12027 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12029 if (ret
== 0 && arg3
!= 0) {
12030 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12036 #ifdef CONFIG_SPLICE
12037 #ifdef TARGET_NR_tee
12038 case TARGET_NR_tee
:
12040 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12044 #ifdef TARGET_NR_splice
12045 case TARGET_NR_splice
:
12047 loff_t loff_in
, loff_out
;
12048 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12050 if (get_user_u64(loff_in
, arg2
)) {
12051 return -TARGET_EFAULT
;
12053 ploff_in
= &loff_in
;
12056 if (get_user_u64(loff_out
, arg4
)) {
12057 return -TARGET_EFAULT
;
12059 ploff_out
= &loff_out
;
12061 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12063 if (put_user_u64(loff_in
, arg2
)) {
12064 return -TARGET_EFAULT
;
12068 if (put_user_u64(loff_out
, arg4
)) {
12069 return -TARGET_EFAULT
;
12075 #ifdef TARGET_NR_vmsplice
12076 case TARGET_NR_vmsplice
:
12078 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12080 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12081 unlock_iovec(vec
, arg2
, arg3
, 0);
12083 ret
= -host_to_target_errno(errno
);
12088 #endif /* CONFIG_SPLICE */
12089 #ifdef CONFIG_EVENTFD
12090 #if defined(TARGET_NR_eventfd)
12091 case TARGET_NR_eventfd
:
12092 ret
= get_errno(eventfd(arg1
, 0));
12094 fd_trans_register(ret
, &target_eventfd_trans
);
12098 #if defined(TARGET_NR_eventfd2)
12099 case TARGET_NR_eventfd2
:
12101 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12102 if (arg2
& TARGET_O_NONBLOCK
) {
12103 host_flags
|= O_NONBLOCK
;
12105 if (arg2
& TARGET_O_CLOEXEC
) {
12106 host_flags
|= O_CLOEXEC
;
12108 ret
= get_errno(eventfd(arg1
, host_flags
));
12110 fd_trans_register(ret
, &target_eventfd_trans
);
12115 #endif /* CONFIG_EVENTFD */
12116 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12117 case TARGET_NR_fallocate
:
12118 #if TARGET_ABI_BITS == 32
12119 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12120 target_offset64(arg5
, arg6
)));
12122 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12126 #if defined(CONFIG_SYNC_FILE_RANGE)
12127 #if defined(TARGET_NR_sync_file_range)
12128 case TARGET_NR_sync_file_range
:
12129 #if TARGET_ABI_BITS == 32
12130 #if defined(TARGET_MIPS)
12131 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12132 target_offset64(arg5
, arg6
), arg7
));
12134 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12135 target_offset64(arg4
, arg5
), arg6
));
12136 #endif /* !TARGET_MIPS */
12138 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12142 #if defined(TARGET_NR_sync_file_range2) || \
12143 defined(TARGET_NR_arm_sync_file_range)
12144 #if defined(TARGET_NR_sync_file_range2)
12145 case TARGET_NR_sync_file_range2
:
12147 #if defined(TARGET_NR_arm_sync_file_range)
12148 case TARGET_NR_arm_sync_file_range
:
12150 /* This is like sync_file_range but the arguments are reordered */
12151 #if TARGET_ABI_BITS == 32
12152 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12153 target_offset64(arg5
, arg6
), arg2
));
12155 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12160 #if defined(TARGET_NR_signalfd4)
12161 case TARGET_NR_signalfd4
:
12162 return do_signalfd4(arg1
, arg2
, arg4
);
12164 #if defined(TARGET_NR_signalfd)
12165 case TARGET_NR_signalfd
:
12166 return do_signalfd4(arg1
, arg2
, 0);
12168 #if defined(CONFIG_EPOLL)
12169 #if defined(TARGET_NR_epoll_create)
12170 case TARGET_NR_epoll_create
:
12171 return get_errno(epoll_create(arg1
));
12173 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12174 case TARGET_NR_epoll_create1
:
12175 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12177 #if defined(TARGET_NR_epoll_ctl)
12178 case TARGET_NR_epoll_ctl
:
12180 struct epoll_event ep
;
12181 struct epoll_event
*epp
= 0;
12183 struct target_epoll_event
*target_ep
;
12184 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12185 return -TARGET_EFAULT
;
12187 ep
.events
= tswap32(target_ep
->events
);
12188 /* The epoll_data_t union is just opaque data to the kernel,
12189 * so we transfer all 64 bits across and need not worry what
12190 * actual data type it is.
12192 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12193 unlock_user_struct(target_ep
, arg4
, 0);
12196 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12200 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12201 #if defined(TARGET_NR_epoll_wait)
12202 case TARGET_NR_epoll_wait
:
12204 #if defined(TARGET_NR_epoll_pwait)
12205 case TARGET_NR_epoll_pwait
:
12208 struct target_epoll_event
*target_ep
;
12209 struct epoll_event
*ep
;
12211 int maxevents
= arg3
;
12212 int timeout
= arg4
;
12214 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12215 return -TARGET_EINVAL
;
12218 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12219 maxevents
* sizeof(struct target_epoll_event
), 1);
12221 return -TARGET_EFAULT
;
12224 ep
= g_try_new(struct epoll_event
, maxevents
);
12226 unlock_user(target_ep
, arg2
, 0);
12227 return -TARGET_ENOMEM
;
12231 #if defined(TARGET_NR_epoll_pwait)
12232 case TARGET_NR_epoll_pwait
:
12234 target_sigset_t
*target_set
;
12235 sigset_t _set
, *set
= &_set
;
12238 if (arg6
!= sizeof(target_sigset_t
)) {
12239 ret
= -TARGET_EINVAL
;
12243 target_set
= lock_user(VERIFY_READ
, arg5
,
12244 sizeof(target_sigset_t
), 1);
12246 ret
= -TARGET_EFAULT
;
12249 target_to_host_sigset(set
, target_set
);
12250 unlock_user(target_set
, arg5
, 0);
12255 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12256 set
, SIGSET_T_SIZE
));
12260 #if defined(TARGET_NR_epoll_wait)
12261 case TARGET_NR_epoll_wait
:
12262 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12267 ret
= -TARGET_ENOSYS
;
12269 if (!is_error(ret
)) {
12271 for (i
= 0; i
< ret
; i
++) {
12272 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12273 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12275 unlock_user(target_ep
, arg2
,
12276 ret
* sizeof(struct target_epoll_event
));
12278 unlock_user(target_ep
, arg2
, 0);
12285 #ifdef TARGET_NR_prlimit64
12286 case TARGET_NR_prlimit64
:
12288 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12289 struct target_rlimit64
*target_rnew
, *target_rold
;
12290 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12291 int resource
= target_to_host_resource(arg2
);
12293 if (arg3
&& (resource
!= RLIMIT_AS
&&
12294 resource
!= RLIMIT_DATA
&&
12295 resource
!= RLIMIT_STACK
)) {
12296 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12297 return -TARGET_EFAULT
;
12299 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12300 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12301 unlock_user_struct(target_rnew
, arg3
, 0);
12305 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12306 if (!is_error(ret
) && arg4
) {
12307 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12308 return -TARGET_EFAULT
;
12310 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12311 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12312 unlock_user_struct(target_rold
, arg4
, 1);
12317 #ifdef TARGET_NR_gethostname
12318 case TARGET_NR_gethostname
:
12320 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12322 ret
= get_errno(gethostname(name
, arg2
));
12323 unlock_user(name
, arg1
, arg2
);
12325 ret
= -TARGET_EFAULT
;
12330 #ifdef TARGET_NR_atomic_cmpxchg_32
12331 case TARGET_NR_atomic_cmpxchg_32
:
12333 /* should use start_exclusive from main.c */
12334 abi_ulong mem_value
;
12335 if (get_user_u32(mem_value
, arg6
)) {
12336 target_siginfo_t info
;
12337 info
.si_signo
= SIGSEGV
;
12339 info
.si_code
= TARGET_SEGV_MAPERR
;
12340 info
._sifields
._sigfault
._addr
= arg6
;
12341 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12342 QEMU_SI_FAULT
, &info
);
12346 if (mem_value
== arg2
)
12347 put_user_u32(arg1
, arg6
);
12351 #ifdef TARGET_NR_atomic_barrier
12352 case TARGET_NR_atomic_barrier
:
12353 /* Like the kernel implementation and the
12354 qemu arm barrier, no-op this? */
12358 #ifdef TARGET_NR_timer_create
12359 case TARGET_NR_timer_create
:
12361 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12363 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12366 int timer_index
= next_free_host_timer();
12368 if (timer_index
< 0) {
12369 ret
= -TARGET_EAGAIN
;
12371 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12374 phost_sevp
= &host_sevp
;
12375 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12381 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12385 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12386 return -TARGET_EFAULT
;
12394 #ifdef TARGET_NR_timer_settime
12395 case TARGET_NR_timer_settime
:
12397 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12398 * struct itimerspec * old_value */
12399 target_timer_t timerid
= get_timer_id(arg1
);
12403 } else if (arg3
== 0) {
12404 ret
= -TARGET_EINVAL
;
12406 timer_t htimer
= g_posix_timers
[timerid
];
12407 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12409 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12410 return -TARGET_EFAULT
;
12413 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12414 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12415 return -TARGET_EFAULT
;
12422 #ifdef TARGET_NR_timer_gettime
12423 case TARGET_NR_timer_gettime
:
12425 /* args: timer_t timerid, struct itimerspec *curr_value */
12426 target_timer_t timerid
= get_timer_id(arg1
);
12430 } else if (!arg2
) {
12431 ret
= -TARGET_EFAULT
;
12433 timer_t htimer
= g_posix_timers
[timerid
];
12434 struct itimerspec hspec
;
12435 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12437 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12438 ret
= -TARGET_EFAULT
;
12445 #ifdef TARGET_NR_timer_getoverrun
12446 case TARGET_NR_timer_getoverrun
:
12448 /* args: timer_t timerid */
12449 target_timer_t timerid
= get_timer_id(arg1
);
12454 timer_t htimer
= g_posix_timers
[timerid
];
12455 ret
= get_errno(timer_getoverrun(htimer
));
12461 #ifdef TARGET_NR_timer_delete
12462 case TARGET_NR_timer_delete
:
12464 /* args: timer_t timerid */
12465 target_timer_t timerid
= get_timer_id(arg1
);
12470 timer_t htimer
= g_posix_timers
[timerid
];
12471 ret
= get_errno(timer_delete(htimer
));
12472 g_posix_timers
[timerid
] = 0;
12478 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12479 case TARGET_NR_timerfd_create
:
12480 return get_errno(timerfd_create(arg1
,
12481 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12484 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12485 case TARGET_NR_timerfd_gettime
:
12487 struct itimerspec its_curr
;
12489 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12491 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12492 return -TARGET_EFAULT
;
12498 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12499 case TARGET_NR_timerfd_settime
:
12501 struct itimerspec its_new
, its_old
, *p_new
;
12504 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12505 return -TARGET_EFAULT
;
12512 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12514 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12515 return -TARGET_EFAULT
;
12521 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12522 case TARGET_NR_ioprio_get
:
12523 return get_errno(ioprio_get(arg1
, arg2
));
12526 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12527 case TARGET_NR_ioprio_set
:
12528 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12531 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12532 case TARGET_NR_setns
:
12533 return get_errno(setns(arg1
, arg2
));
12535 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12536 case TARGET_NR_unshare
:
12537 return get_errno(unshare(arg1
));
12539 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12540 case TARGET_NR_kcmp
:
12541 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12543 #ifdef TARGET_NR_swapcontext
12544 case TARGET_NR_swapcontext
:
12545 /* PowerPC specific. */
12546 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12548 #ifdef TARGET_NR_memfd_create
12549 case TARGET_NR_memfd_create
:
12550 p
= lock_user_string(arg1
);
12552 return -TARGET_EFAULT
;
12554 ret
= get_errno(memfd_create(p
, arg2
));
12555 fd_trans_unregister(ret
);
12556 unlock_user(p
, arg1
, 0);
12559 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12560 case TARGET_NR_membarrier
:
12561 return get_errno(membarrier(arg1
, arg2
));
12565 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12566 return -TARGET_ENOSYS
;
12571 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
12572 abi_long arg2
, abi_long arg3
, abi_long arg4
,
12573 abi_long arg5
, abi_long arg6
, abi_long arg7
,
12576 CPUState
*cpu
= env_cpu(cpu_env
);
12579 #ifdef DEBUG_ERESTARTSYS
12580 /* Debug-only code for exercising the syscall-restart code paths
12581 * in the per-architecture cpu main loops: restart every syscall
12582 * the guest makes once before letting it through.
12588 return -TARGET_ERESTARTSYS
;
12593 record_syscall_start(cpu
, num
, arg1
,
12594 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
12596 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12597 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12600 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12601 arg5
, arg6
, arg7
, arg8
);
12603 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12604 print_syscall_ret(num
, ret
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12607 record_syscall_return(cpu
, num
, ret
);