4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
116 #include <libdrm/drm.h>
118 #include "linux_loop.h"
122 #include "qemu/guest-random.h"
123 #include "qemu/selfmap.h"
124 #include "user/syscall-trace.h"
125 #include "qapi/error.h"
126 #include "fd-trans.h"
130 #define CLONE_IO 0x80000000 /* Clone io context */
133 /* We can't directly call the host clone syscall, because this will
134 * badly confuse libc (breaking mutexes, for example). So we must
135 * divide clone flags into:
136 * * flag combinations that look like pthread_create()
137 * * flag combinations that look like fork()
138 * * flags we can implement within QEMU itself
139 * * flags we can't support and will return an error for
141 /* For thread creation, all these flags must be present; for
142 * fork, none must be present.
144 #define CLONE_THREAD_FLAGS \
145 (CLONE_VM | CLONE_FS | CLONE_FILES | \
146 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
148 /* These flags are ignored:
149 * CLONE_DETACHED is now ignored by the kernel;
150 * CLONE_IO is just an optimisation hint to the I/O scheduler
152 #define CLONE_IGNORED_FLAGS \
153 (CLONE_DETACHED | CLONE_IO)
155 /* Flags for fork which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_FORK_FLAGS \
157 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
158 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
160 /* Flags for thread creation which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_THREAD_FLAGS \
162 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
163 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
165 #define CLONE_INVALID_FORK_FLAGS \
166 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
168 #define CLONE_INVALID_THREAD_FLAGS \
169 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
170 CLONE_IGNORED_FLAGS))
172 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
173 * have almost all been allocated. We cannot support any of
174 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
175 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
176 * The checks against the invalid thread masks above will catch these.
177 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
180 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
181 * once. This exercises the codepaths for restart.
183 //#define DEBUG_ERESTARTSYS
185 //#include <linux/msdos_fs.h>
186 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
187 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
197 #define _syscall0(type,name) \
198 static type name (void) \
200 return syscall(__NR_##name); \
203 #define _syscall1(type,name,type1,arg1) \
204 static type name (type1 arg1) \
206 return syscall(__NR_##name, arg1); \
209 #define _syscall2(type,name,type1,arg1,type2,arg2) \
210 static type name (type1 arg1,type2 arg2) \
212 return syscall(__NR_##name, arg1, arg2); \
215 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
216 static type name (type1 arg1,type2 arg2,type3 arg3) \
218 return syscall(__NR_##name, arg1, arg2, arg3); \
221 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
224 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
227 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
231 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
235 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
236 type5,arg5,type6,arg6) \
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
240 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
244 #define __NR_sys_uname __NR_uname
245 #define __NR_sys_getcwd1 __NR_getcwd
246 #define __NR_sys_getdents __NR_getdents
247 #define __NR_sys_getdents64 __NR_getdents64
248 #define __NR_sys_getpriority __NR_getpriority
249 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
250 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
251 #define __NR_sys_syslog __NR_syslog
252 #if defined(__NR_futex)
253 # define __NR_sys_futex __NR_futex
255 #if defined(__NR_futex_time64)
256 # define __NR_sys_futex_time64 __NR_futex_time64
258 #define __NR_sys_inotify_init __NR_inotify_init
259 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
260 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
261 #define __NR_sys_statx __NR_statx
263 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
264 #define __NR__llseek __NR_lseek
267 /* Newer kernel ports have llseek() instead of _llseek() */
268 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
269 #define TARGET_NR__llseek TARGET_NR_llseek
272 #define __NR_sys_gettid __NR_gettid
273 _syscall0(int, sys_gettid
)
275 /* For the 64-bit guest on 32-bit host case we must emulate
276 * getdents using getdents64, because otherwise the host
277 * might hand us back more dirent records than we can fit
278 * into the guest buffer after structure format conversion.
279 * Otherwise we emulate getdents with getdents if the host has it.
281 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
282 #define EMULATE_GETDENTS_WITH_GETDENTS
285 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
286 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
288 #if (defined(TARGET_NR_getdents) && \
289 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
290 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
291 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
293 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
294 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
295 loff_t
*, res
, uint
, wh
);
297 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
298 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
300 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
301 #ifdef __NR_exit_group
302 _syscall1(int,exit_group
,int,error_code
)
304 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
305 _syscall1(int,set_tid_address
,int *,tidptr
)
307 #if defined(__NR_futex)
308 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
309 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
311 #if defined(__NR_futex_time64)
312 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
313 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
315 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
316 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
317 unsigned long *, user_mask_ptr
);
318 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
319 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
320 unsigned long *, user_mask_ptr
);
321 #define __NR_sys_getcpu __NR_getcpu
322 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
323 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
325 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
326 struct __user_cap_data_struct
*, data
);
327 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
328 struct __user_cap_data_struct
*, data
);
329 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
330 _syscall2(int, ioprio_get
, int, which
, int, who
)
332 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
333 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
335 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
336 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
339 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
340 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
341 unsigned long, idx1
, unsigned long, idx2
)
345 * It is assumed that struct statx is architecture independent.
347 #if defined(TARGET_NR_statx) && defined(__NR_statx)
348 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
349 unsigned int, mask
, struct target_statx
*, statxbuf
)
351 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
352 _syscall2(int, membarrier
, int, cmd
, int, flags
)
355 static bitmask_transtbl fcntl_flags_tbl
[] = {
356 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
357 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
358 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
359 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
360 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
361 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
362 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
363 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
364 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
365 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
366 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
367 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
368 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
369 #if defined(O_DIRECT)
370 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
372 #if defined(O_NOATIME)
373 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
375 #if defined(O_CLOEXEC)
376 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
379 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
381 #if defined(O_TMPFILE)
382 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
384 /* Don't terminate the list prematurely on 64-bit host+guest. */
385 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
386 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Wrapper for getcwd() with the kernel syscall's return convention:
 * on success return the length of the path INCLUDING the trailing NUL,
 * on failure return -1 (getcwd() has already set errno). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
400 #ifdef TARGET_NR_utimensat
401 #if defined(__NR_utimensat)
402 #define __NR_sys_utimensat __NR_utimensat
403 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
404 const struct timespec
*,tsp
,int,flags
)
406 static int sys_utimensat(int dirfd
, const char *pathname
,
407 const struct timespec times
[2], int flags
)
413 #endif /* TARGET_NR_utimensat */
415 #ifdef TARGET_NR_renameat2
416 #if defined(__NR_renameat2)
417 #define __NR_sys_renameat2 __NR_renameat2
418 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
419 const char *, new, unsigned int, flags
)
421 static int sys_renameat2(int oldfd
, const char *old
,
422 int newfd
, const char *new, int flags
)
425 return renameat(oldfd
, old
, newfd
, new);
431 #endif /* TARGET_NR_renameat2 */
433 #ifdef CONFIG_INOTIFY
434 #include <sys/inotify.h>
436 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatcher has a uniform sys_* entry point
 * for inotify_init(2); returns the new inotify fd or -1 with errno set. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
442 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper for inotify_add_watch(2); returns the watch descriptor
 * or -1 with errno set. */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
448 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper for inotify_rm_watch(2); returns 0 on success or -1
 * with errno set (e.g. EINVAL for an unknown watch descriptor). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
454 #ifdef CONFIG_INOTIFY1
455 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper for inotify_init1(2), which additionally accepts
 * IN_NONBLOCK/IN_CLOEXEC flags; returns the new fd or -1 with errno set. */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
463 /* Userspace can usually survive runtime without inotify */
464 #undef TARGET_NR_inotify_init
465 #undef TARGET_NR_inotify_init1
466 #undef TARGET_NR_inotify_add_watch
467 #undef TARGET_NR_inotify_rm_watch
468 #endif /* CONFIG_INOTIFY */
470 #if defined(TARGET_NR_prlimit64)
471 #ifndef __NR_prlimit64
472 # define __NR_prlimit64 -1
474 #define __NR_sys_prlimit64 __NR_prlimit64
475 /* The glibc rlimit structure may not be that used by the underlying syscall */
476 struct host_rlimit64
{
480 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
481 const struct host_rlimit64
*, new_limit
,
482 struct host_rlimit64
*, old_limit
)
486 #if defined(TARGET_NR_timer_create)
487 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
488 static timer_t g_posix_timers
[32] = { 0, } ;
490 static inline int next_free_host_timer(void)
493 /* FIXME: Does finding the next free slot require a lock? */
494 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
495 if (g_posix_timers
[k
] == 0) {
496 g_posix_timers
[k
] = (timer_t
) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
536 #define ERRNO_TABLE_SIZE 1200
538 /* target_to_host_errno_table[] is initialized from
539 * host_to_target_errno_table[] in syscall_init(). */
540 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
544 * This list is the union of errno values overridden in asm-<arch>/errno.h
545 * minus the errnos that are not actually generic to all archs.
547 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
548 [EAGAIN
] = TARGET_EAGAIN
,
549 [EIDRM
] = TARGET_EIDRM
,
550 [ECHRNG
] = TARGET_ECHRNG
,
551 [EL2NSYNC
] = TARGET_EL2NSYNC
,
552 [EL3HLT
] = TARGET_EL3HLT
,
553 [EL3RST
] = TARGET_EL3RST
,
554 [ELNRNG
] = TARGET_ELNRNG
,
555 [EUNATCH
] = TARGET_EUNATCH
,
556 [ENOCSI
] = TARGET_ENOCSI
,
557 [EL2HLT
] = TARGET_EL2HLT
,
558 [EDEADLK
] = TARGET_EDEADLK
,
559 [ENOLCK
] = TARGET_ENOLCK
,
560 [EBADE
] = TARGET_EBADE
,
561 [EBADR
] = TARGET_EBADR
,
562 [EXFULL
] = TARGET_EXFULL
,
563 [ENOANO
] = TARGET_ENOANO
,
564 [EBADRQC
] = TARGET_EBADRQC
,
565 [EBADSLT
] = TARGET_EBADSLT
,
566 [EBFONT
] = TARGET_EBFONT
,
567 [ENOSTR
] = TARGET_ENOSTR
,
568 [ENODATA
] = TARGET_ENODATA
,
569 [ETIME
] = TARGET_ETIME
,
570 [ENOSR
] = TARGET_ENOSR
,
571 [ENONET
] = TARGET_ENONET
,
572 [ENOPKG
] = TARGET_ENOPKG
,
573 [EREMOTE
] = TARGET_EREMOTE
,
574 [ENOLINK
] = TARGET_ENOLINK
,
575 [EADV
] = TARGET_EADV
,
576 [ESRMNT
] = TARGET_ESRMNT
,
577 [ECOMM
] = TARGET_ECOMM
,
578 [EPROTO
] = TARGET_EPROTO
,
579 [EDOTDOT
] = TARGET_EDOTDOT
,
580 [EMULTIHOP
] = TARGET_EMULTIHOP
,
581 [EBADMSG
] = TARGET_EBADMSG
,
582 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
583 [EOVERFLOW
] = TARGET_EOVERFLOW
,
584 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
585 [EBADFD
] = TARGET_EBADFD
,
586 [EREMCHG
] = TARGET_EREMCHG
,
587 [ELIBACC
] = TARGET_ELIBACC
,
588 [ELIBBAD
] = TARGET_ELIBBAD
,
589 [ELIBSCN
] = TARGET_ELIBSCN
,
590 [ELIBMAX
] = TARGET_ELIBMAX
,
591 [ELIBEXEC
] = TARGET_ELIBEXEC
,
592 [EILSEQ
] = TARGET_EILSEQ
,
593 [ENOSYS
] = TARGET_ENOSYS
,
594 [ELOOP
] = TARGET_ELOOP
,
595 [ERESTART
] = TARGET_ERESTART
,
596 [ESTRPIPE
] = TARGET_ESTRPIPE
,
597 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
598 [EUSERS
] = TARGET_EUSERS
,
599 [ENOTSOCK
] = TARGET_ENOTSOCK
,
600 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
601 [EMSGSIZE
] = TARGET_EMSGSIZE
,
602 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
603 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
604 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
605 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
606 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
607 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
608 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
609 [EADDRINUSE
] = TARGET_EADDRINUSE
,
610 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
611 [ENETDOWN
] = TARGET_ENETDOWN
,
612 [ENETUNREACH
] = TARGET_ENETUNREACH
,
613 [ENETRESET
] = TARGET_ENETRESET
,
614 [ECONNABORTED
] = TARGET_ECONNABORTED
,
615 [ECONNRESET
] = TARGET_ECONNRESET
,
616 [ENOBUFS
] = TARGET_ENOBUFS
,
617 [EISCONN
] = TARGET_EISCONN
,
618 [ENOTCONN
] = TARGET_ENOTCONN
,
619 [EUCLEAN
] = TARGET_EUCLEAN
,
620 [ENOTNAM
] = TARGET_ENOTNAM
,
621 [ENAVAIL
] = TARGET_ENAVAIL
,
622 [EISNAM
] = TARGET_EISNAM
,
623 [EREMOTEIO
] = TARGET_EREMOTEIO
,
624 [EDQUOT
] = TARGET_EDQUOT
,
625 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
626 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
627 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
628 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
629 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
630 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
631 [EALREADY
] = TARGET_EALREADY
,
632 [EINPROGRESS
] = TARGET_EINPROGRESS
,
633 [ESTALE
] = TARGET_ESTALE
,
634 [ECANCELED
] = TARGET_ECANCELED
,
635 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
636 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
638 [ENOKEY
] = TARGET_ENOKEY
,
641 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
644 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
647 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
650 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
652 #ifdef ENOTRECOVERABLE
653 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
656 [ENOMSG
] = TARGET_ENOMSG
,
659 [ERFKILL
] = TARGET_ERFKILL
,
662 [EHWPOISON
] = TARGET_EHWPOISON
,
666 static inline int host_to_target_errno(int err
)
668 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
669 host_to_target_errno_table
[err
]) {
670 return host_to_target_errno_table
[err
];
675 static inline int target_to_host_errno(int err
)
677 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
678 target_to_host_errno_table
[err
]) {
679 return target_to_host_errno_table
[err
];
684 static inline abi_long
get_errno(abi_long ret
)
687 return -host_to_target_errno(errno
);
692 const char *target_strerror(int err
)
694 if (err
== TARGET_ERESTARTSYS
) {
695 return "To be restarted";
697 if (err
== TARGET_QEMU_ESIGRETURN
) {
698 return "Successful exit from sigreturn";
701 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
704 return strerror(target_to_host_errno(err
));
707 #define safe_syscall0(type, name) \
708 static type safe_##name(void) \
710 return safe_syscall(__NR_##name); \
713 #define safe_syscall1(type, name, type1, arg1) \
714 static type safe_##name(type1 arg1) \
716 return safe_syscall(__NR_##name, arg1); \
719 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
720 static type safe_##name(type1 arg1, type2 arg2) \
722 return safe_syscall(__NR_##name, arg1, arg2); \
725 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
728 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
731 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
733 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
735 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
738 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
739 type4, arg4, type5, arg5) \
740 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
743 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
746 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
747 type4, arg4, type5, arg5, type6, arg6) \
748 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
749 type5 arg5, type6 arg6) \
751 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
754 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
755 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
756 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
757 int, flags
, mode_t
, mode
)
758 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
759 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
760 struct rusage
*, rusage
)
762 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
763 int, options
, struct rusage
*, rusage
)
764 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
765 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
766 defined(TARGET_NR_pselect6)
767 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
768 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
770 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
771 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
772 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
775 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
776 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
778 #if defined(__NR_futex)
779 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
780 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
782 #if defined(__NR_futex_time64)
783 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
784 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
786 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
787 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
788 safe_syscall2(int, tkill
, int, tid
, int, sig
)
789 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
790 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
791 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
792 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
793 unsigned long, pos_l
, unsigned long, pos_h
)
794 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
795 unsigned long, pos_l
, unsigned long, pos_h
)
796 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
798 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
799 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
800 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
801 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
802 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
803 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
804 safe_syscall2(int, flock
, int, fd
, int, operation
)
805 #ifdef TARGET_NR_rt_sigtimedwait
806 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
807 const struct timespec
*, uts
, size_t, sigsetsize
)
809 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
811 #if defined(TARGET_NR_nanosleep)
812 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
813 struct timespec
*, rem
)
815 #ifdef TARGET_NR_clock_nanosleep
816 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
817 const struct timespec
*, req
, struct timespec
*, rem
)
820 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
821 void *, ptr
, long, fifth
)
824 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
828 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
829 long, msgtype
, int, flags
)
831 #ifdef __NR_semtimedop
832 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
833 unsigned, nsops
, const struct timespec
*, timeout
)
835 #ifdef TARGET_NR_mq_timedsend
836 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
837 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
839 #ifdef TARGET_NR_mq_timedreceive
840 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
841 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
843 /* We do ioctl like this rather than via safe_syscall3 to preserve the
844 * "third argument might be integer or pointer or not present" behaviour of
847 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
848 /* Similarly for fcntl. Note that callers must always:
849 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
850 * use the flock64 struct rather than unsuffixed flock
851 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
854 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
856 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
859 static inline int host_to_target_sock_type(int host_type
)
863 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
865 target_type
= TARGET_SOCK_DGRAM
;
868 target_type
= TARGET_SOCK_STREAM
;
871 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
875 #if defined(SOCK_CLOEXEC)
876 if (host_type
& SOCK_CLOEXEC
) {
877 target_type
|= TARGET_SOCK_CLOEXEC
;
881 #if defined(SOCK_NONBLOCK)
882 if (host_type
& SOCK_NONBLOCK
) {
883 target_type
|= TARGET_SOCK_NONBLOCK
;
890 static abi_ulong target_brk
;
891 static abi_ulong target_original_brk
;
892 static abi_ulong brk_page
;
894 void target_set_brk(abi_ulong new_brk
)
896 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
897 brk_page
= HOST_PAGE_ALIGN(target_brk
);
900 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
901 #define DEBUGF_BRK(message, args...)
903 /* do_brk() must return target values and target errnos. */
904 abi_long
do_brk(abi_ulong new_brk
)
906 abi_long mapped_addr
;
907 abi_ulong new_alloc_size
;
909 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
912 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
915 if (new_brk
< target_original_brk
) {
916 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
921 /* If the new brk is less than the highest page reserved to the
922 * target heap allocation, set it and we're almost done... */
923 if (new_brk
<= brk_page
) {
924 /* Heap contents are initialized to zero, as for anonymous
926 if (new_brk
> target_brk
) {
927 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
929 target_brk
= new_brk
;
930 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
934 /* We need to allocate more memory after the brk... Note that
935 * we don't use MAP_FIXED because that will map over the top of
936 * any existing mapping (like the one with the host libc or qemu
937 * itself); instead we treat "mapped but at wrong address" as
938 * a failure and unmap again.
940 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
941 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
942 PROT_READ
|PROT_WRITE
,
943 MAP_ANON
|MAP_PRIVATE
, 0, 0));
945 if (mapped_addr
== brk_page
) {
946 /* Heap contents are initialized to zero, as for anonymous
947 * mapped pages. Technically the new pages are already
948 * initialized to zero since they *are* anonymous mapped
949 * pages, however we have to take care with the contents that
950 * come from the remaining part of the previous page: it may
951 * contains garbage data due to a previous heap usage (grown
953 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
955 target_brk
= new_brk
;
956 brk_page
= HOST_PAGE_ALIGN(target_brk
);
957 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
960 } else if (mapped_addr
!= -1) {
961 /* Mapped but at wrong address, meaning there wasn't actually
962 * enough space for this brk.
964 target_munmap(mapped_addr
, new_alloc_size
);
966 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
969 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
972 #if defined(TARGET_ALPHA)
973 /* We (partially) emulate OSF/1 on Alpha, which requires we
974 return a proper errno, not an unchanged brk value. */
975 return -TARGET_ENOMEM
;
977 /* For everything else, return the previous break. */
981 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
982 defined(TARGET_NR_pselect6)
983 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
984 abi_ulong target_fds_addr
,
988 abi_ulong b
, *target_fds
;
990 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
991 if (!(target_fds
= lock_user(VERIFY_READ
,
993 sizeof(abi_ulong
) * nw
,
995 return -TARGET_EFAULT
;
999 for (i
= 0; i
< nw
; i
++) {
1000 /* grab the abi_ulong */
1001 __get_user(b
, &target_fds
[i
]);
1002 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1003 /* check the bit inside the abi_ulong */
1010 unlock_user(target_fds
, target_fds_addr
, 0);
1015 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1016 abi_ulong target_fds_addr
,
1019 if (target_fds_addr
) {
1020 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1021 return -TARGET_EFAULT
;
1029 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1035 abi_ulong
*target_fds
;
1037 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1038 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1040 sizeof(abi_ulong
) * nw
,
1042 return -TARGET_EFAULT
;
1045 for (i
= 0; i
< nw
; i
++) {
1047 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1048 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1051 __put_user(v
, &target_fds
[i
]);
1054 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1060 #if defined(__alpha__)
1061 #define HOST_HZ 1024
1066 static inline abi_long
host_to_target_clock_t(long ticks
)
1068 #if HOST_HZ == TARGET_HZ
1071 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1075 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1076 const struct rusage
*rusage
)
1078 struct target_rusage
*target_rusage
;
1080 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1081 return -TARGET_EFAULT
;
1082 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1083 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1084 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1085 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1086 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1087 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1088 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1089 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1090 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1091 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1092 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1093 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1094 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1095 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1096 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1097 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1098 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1099 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1100 unlock_user_struct(target_rusage
, target_addr
, 1);
1105 #ifdef TARGET_NR_setrlimit
1106 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1108 abi_ulong target_rlim_swap
;
1111 target_rlim_swap
= tswapal(target_rlim
);
1112 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1113 return RLIM_INFINITY
;
1115 result
= target_rlim_swap
;
1116 if (target_rlim_swap
!= (rlim_t
)result
)
1117 return RLIM_INFINITY
;
1123 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1124 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1126 abi_ulong target_rlim_swap
;
1129 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1130 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1132 target_rlim_swap
= rlim
;
1133 result
= tswapal(target_rlim_swap
);
1139 static inline int target_to_host_resource(int code
)
1142 case TARGET_RLIMIT_AS
:
1144 case TARGET_RLIMIT_CORE
:
1146 case TARGET_RLIMIT_CPU
:
1148 case TARGET_RLIMIT_DATA
:
1150 case TARGET_RLIMIT_FSIZE
:
1151 return RLIMIT_FSIZE
;
1152 case TARGET_RLIMIT_LOCKS
:
1153 return RLIMIT_LOCKS
;
1154 case TARGET_RLIMIT_MEMLOCK
:
1155 return RLIMIT_MEMLOCK
;
1156 case TARGET_RLIMIT_MSGQUEUE
:
1157 return RLIMIT_MSGQUEUE
;
1158 case TARGET_RLIMIT_NICE
:
1160 case TARGET_RLIMIT_NOFILE
:
1161 return RLIMIT_NOFILE
;
1162 case TARGET_RLIMIT_NPROC
:
1163 return RLIMIT_NPROC
;
1164 case TARGET_RLIMIT_RSS
:
1166 case TARGET_RLIMIT_RTPRIO
:
1167 return RLIMIT_RTPRIO
;
1168 case TARGET_RLIMIT_SIGPENDING
:
1169 return RLIMIT_SIGPENDING
;
1170 case TARGET_RLIMIT_STACK
:
1171 return RLIMIT_STACK
;
1177 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1178 abi_ulong target_tv_addr
)
1180 struct target_timeval
*target_tv
;
1182 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1183 return -TARGET_EFAULT
;
1186 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1187 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1189 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1194 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1195 const struct timeval
*tv
)
1197 struct target_timeval
*target_tv
;
1199 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1200 return -TARGET_EFAULT
;
1203 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1204 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1206 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1211 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1212 const struct timeval
*tv
)
1214 struct target__kernel_sock_timeval
*target_tv
;
1216 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1217 return -TARGET_EFAULT
;
1220 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1221 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1223 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1228 #if defined(TARGET_NR_futex) || \
1229 defined(TARGET_NR_rt_sigtimedwait) || \
1230 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1231 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1232 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1233 defined(TARGET_NR_mq_timedreceive)
1234 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1235 abi_ulong target_addr
)
1237 struct target_timespec
*target_ts
;
1239 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1240 return -TARGET_EFAULT
;
1242 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1243 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1244 unlock_user_struct(target_ts
, target_addr
, 0);
1249 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
1250 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1251 abi_ulong target_addr
)
1253 struct target__kernel_timespec
*target_ts
;
1255 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1256 return -TARGET_EFAULT
;
1258 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1259 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1260 unlock_user_struct(target_ts
, target_addr
, 0);
1265 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1266 struct timespec
*host_ts
)
1268 struct target_timespec
*target_ts
;
1270 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1271 return -TARGET_EFAULT
;
1273 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1274 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1275 unlock_user_struct(target_ts
, target_addr
, 1);
1279 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1280 struct timespec
*host_ts
)
1282 struct target__kernel_timespec
*target_ts
;
1284 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1285 return -TARGET_EFAULT
;
1287 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1288 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1289 unlock_user_struct(target_ts
, target_addr
, 1);
1293 #if defined(TARGET_NR_gettimeofday)
1294 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1295 struct timezone
*tz
)
1297 struct target_timezone
*target_tz
;
1299 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1300 return -TARGET_EFAULT
;
1303 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1304 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1306 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1312 #if defined(TARGET_NR_settimeofday)
1313 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1314 abi_ulong target_tz_addr
)
1316 struct target_timezone
*target_tz
;
1318 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1319 return -TARGET_EFAULT
;
1322 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1323 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1325 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1331 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1334 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1335 abi_ulong target_mq_attr_addr
)
1337 struct target_mq_attr
*target_mq_attr
;
1339 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1340 target_mq_attr_addr
, 1))
1341 return -TARGET_EFAULT
;
1343 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1344 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1345 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1346 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1348 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1353 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1354 const struct mq_attr
*attr
)
1356 struct target_mq_attr
*target_mq_attr
;
1358 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1359 target_mq_attr_addr
, 0))
1360 return -TARGET_EFAULT
;
1362 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1363 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1364 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1365 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1367 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1373 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1374 /* do_select() must return target values and target errnos. */
1375 static abi_long
do_select(int n
,
1376 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1377 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1379 fd_set rfds
, wfds
, efds
;
1380 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1382 struct timespec ts
, *ts_ptr
;
1385 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1389 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1393 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1398 if (target_tv_addr
) {
1399 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1400 return -TARGET_EFAULT
;
1401 ts
.tv_sec
= tv
.tv_sec
;
1402 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1408 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1411 if (!is_error(ret
)) {
1412 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1413 return -TARGET_EFAULT
;
1414 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1415 return -TARGET_EFAULT
;
1416 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1417 return -TARGET_EFAULT
;
1419 if (target_tv_addr
) {
1420 tv
.tv_sec
= ts
.tv_sec
;
1421 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1422 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1423 return -TARGET_EFAULT
;
1431 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1432 static abi_long
do_old_select(abi_ulong arg1
)
1434 struct target_sel_arg_struct
*sel
;
1435 abi_ulong inp
, outp
, exp
, tvp
;
1438 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1439 return -TARGET_EFAULT
;
1442 nsel
= tswapal(sel
->n
);
1443 inp
= tswapal(sel
->inp
);
1444 outp
= tswapal(sel
->outp
);
1445 exp
= tswapal(sel
->exp
);
1446 tvp
= tswapal(sel
->tvp
);
1448 unlock_user_struct(sel
, arg1
, 0);
1450 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1455 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1458 return pipe2(host_pipe
, flags
);
1464 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1465 int flags
, int is_pipe2
)
1469 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1472 return get_errno(ret
);
1474 /* Several targets have special calling conventions for the original
1475 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1477 #if defined(TARGET_ALPHA)
1478 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1479 return host_pipe
[0];
1480 #elif defined(TARGET_MIPS)
1481 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1482 return host_pipe
[0];
1483 #elif defined(TARGET_SH4)
1484 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1485 return host_pipe
[0];
1486 #elif defined(TARGET_SPARC)
1487 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1488 return host_pipe
[0];
1492 if (put_user_s32(host_pipe
[0], pipedes
)
1493 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1494 return -TARGET_EFAULT
;
1495 return get_errno(ret
);
1498 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1499 abi_ulong target_addr
,
1502 struct target_ip_mreqn
*target_smreqn
;
1504 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1506 return -TARGET_EFAULT
;
1507 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1508 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1509 if (len
== sizeof(struct target_ip_mreqn
))
1510 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1511 unlock_user(target_smreqn
, target_addr
, 0);
1516 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1517 abi_ulong target_addr
,
1520 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1521 sa_family_t sa_family
;
1522 struct target_sockaddr
*target_saddr
;
1524 if (fd_trans_target_to_host_addr(fd
)) {
1525 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1528 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1530 return -TARGET_EFAULT
;
1532 sa_family
= tswap16(target_saddr
->sa_family
);
1534 /* Oops. The caller might send a incomplete sun_path; sun_path
1535 * must be terminated by \0 (see the manual page), but
1536 * unfortunately it is quite common to specify sockaddr_un
1537 * length as "strlen(x->sun_path)" while it should be
1538 * "strlen(...) + 1". We'll fix that here if needed.
1539 * Linux kernel has a similar feature.
1542 if (sa_family
== AF_UNIX
) {
1543 if (len
< unix_maxlen
&& len
> 0) {
1544 char *cp
= (char*)target_saddr
;
1546 if ( cp
[len
-1] && !cp
[len
] )
1549 if (len
> unix_maxlen
)
1553 memcpy(addr
, target_saddr
, len
);
1554 addr
->sa_family
= sa_family
;
1555 if (sa_family
== AF_NETLINK
) {
1556 struct sockaddr_nl
*nladdr
;
1558 nladdr
= (struct sockaddr_nl
*)addr
;
1559 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1560 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1561 } else if (sa_family
== AF_PACKET
) {
1562 struct target_sockaddr_ll
*lladdr
;
1564 lladdr
= (struct target_sockaddr_ll
*)addr
;
1565 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1566 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1568 unlock_user(target_saddr
, target_addr
, 0);
1573 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1574 struct sockaddr
*addr
,
1577 struct target_sockaddr
*target_saddr
;
1584 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1586 return -TARGET_EFAULT
;
1587 memcpy(target_saddr
, addr
, len
);
1588 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1589 sizeof(target_saddr
->sa_family
)) {
1590 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1592 if (addr
->sa_family
== AF_NETLINK
&&
1593 len
>= sizeof(struct target_sockaddr_nl
)) {
1594 struct target_sockaddr_nl
*target_nl
=
1595 (struct target_sockaddr_nl
*)target_saddr
;
1596 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1597 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1598 } else if (addr
->sa_family
== AF_PACKET
) {
1599 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1600 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1601 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1602 } else if (addr
->sa_family
== AF_INET6
&&
1603 len
>= sizeof(struct target_sockaddr_in6
)) {
1604 struct target_sockaddr_in6
*target_in6
=
1605 (struct target_sockaddr_in6
*)target_saddr
;
1606 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1608 unlock_user(target_saddr
, target_addr
, len
);
1613 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1614 struct target_msghdr
*target_msgh
)
1616 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1617 abi_long msg_controllen
;
1618 abi_ulong target_cmsg_addr
;
1619 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1620 socklen_t space
= 0;
1622 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1623 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1625 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1626 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1627 target_cmsg_start
= target_cmsg
;
1629 return -TARGET_EFAULT
;
1631 while (cmsg
&& target_cmsg
) {
1632 void *data
= CMSG_DATA(cmsg
);
1633 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1635 int len
= tswapal(target_cmsg
->cmsg_len
)
1636 - sizeof(struct target_cmsghdr
);
1638 space
+= CMSG_SPACE(len
);
1639 if (space
> msgh
->msg_controllen
) {
1640 space
-= CMSG_SPACE(len
);
1641 /* This is a QEMU bug, since we allocated the payload
1642 * area ourselves (unlike overflow in host-to-target
1643 * conversion, which is just the guest giving us a buffer
1644 * that's too small). It can't happen for the payload types
1645 * we currently support; if it becomes an issue in future
1646 * we would need to improve our allocation strategy to
1647 * something more intelligent than "twice the size of the
1648 * target buffer we're reading from".
1650 qemu_log_mask(LOG_UNIMP
,
1651 ("Unsupported ancillary data %d/%d: "
1652 "unhandled msg size\n"),
1653 tswap32(target_cmsg
->cmsg_level
),
1654 tswap32(target_cmsg
->cmsg_type
));
1658 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1659 cmsg
->cmsg_level
= SOL_SOCKET
;
1661 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1663 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1664 cmsg
->cmsg_len
= CMSG_LEN(len
);
1666 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1667 int *fd
= (int *)data
;
1668 int *target_fd
= (int *)target_data
;
1669 int i
, numfds
= len
/ sizeof(int);
1671 for (i
= 0; i
< numfds
; i
++) {
1672 __get_user(fd
[i
], target_fd
+ i
);
1674 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1675 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1676 struct ucred
*cred
= (struct ucred
*)data
;
1677 struct target_ucred
*target_cred
=
1678 (struct target_ucred
*)target_data
;
1680 __get_user(cred
->pid
, &target_cred
->pid
);
1681 __get_user(cred
->uid
, &target_cred
->uid
);
1682 __get_user(cred
->gid
, &target_cred
->gid
);
1684 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1685 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1686 memcpy(data
, target_data
, len
);
1689 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1690 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1693 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1695 msgh
->msg_controllen
= space
;
1699 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1700 struct msghdr
*msgh
)
1702 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1703 abi_long msg_controllen
;
1704 abi_ulong target_cmsg_addr
;
1705 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1706 socklen_t space
= 0;
1708 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1709 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1711 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1712 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1713 target_cmsg_start
= target_cmsg
;
1715 return -TARGET_EFAULT
;
1717 while (cmsg
&& target_cmsg
) {
1718 void *data
= CMSG_DATA(cmsg
);
1719 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1721 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1722 int tgt_len
, tgt_space
;
1724 /* We never copy a half-header but may copy half-data;
1725 * this is Linux's behaviour in put_cmsg(). Note that
1726 * truncation here is a guest problem (which we report
1727 * to the guest via the CTRUNC bit), unlike truncation
1728 * in target_to_host_cmsg, which is a QEMU bug.
1730 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1731 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1735 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1736 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1738 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1740 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1742 /* Payload types which need a different size of payload on
1743 * the target must adjust tgt_len here.
1746 switch (cmsg
->cmsg_level
) {
1748 switch (cmsg
->cmsg_type
) {
1750 tgt_len
= sizeof(struct target_timeval
);
1760 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1761 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1762 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1765 /* We must now copy-and-convert len bytes of payload
1766 * into tgt_len bytes of destination space. Bear in mind
1767 * that in both source and destination we may be dealing
1768 * with a truncated value!
1770 switch (cmsg
->cmsg_level
) {
1772 switch (cmsg
->cmsg_type
) {
1775 int *fd
= (int *)data
;
1776 int *target_fd
= (int *)target_data
;
1777 int i
, numfds
= tgt_len
/ sizeof(int);
1779 for (i
= 0; i
< numfds
; i
++) {
1780 __put_user(fd
[i
], target_fd
+ i
);
1786 struct timeval
*tv
= (struct timeval
*)data
;
1787 struct target_timeval
*target_tv
=
1788 (struct target_timeval
*)target_data
;
1790 if (len
!= sizeof(struct timeval
) ||
1791 tgt_len
!= sizeof(struct target_timeval
)) {
1795 /* copy struct timeval to target */
1796 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1797 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1800 case SCM_CREDENTIALS
:
1802 struct ucred
*cred
= (struct ucred
*)data
;
1803 struct target_ucred
*target_cred
=
1804 (struct target_ucred
*)target_data
;
1806 __put_user(cred
->pid
, &target_cred
->pid
);
1807 __put_user(cred
->uid
, &target_cred
->uid
);
1808 __put_user(cred
->gid
, &target_cred
->gid
);
1817 switch (cmsg
->cmsg_type
) {
1820 uint32_t *v
= (uint32_t *)data
;
1821 uint32_t *t_int
= (uint32_t *)target_data
;
1823 if (len
!= sizeof(uint32_t) ||
1824 tgt_len
!= sizeof(uint32_t)) {
1827 __put_user(*v
, t_int
);
1833 struct sock_extended_err ee
;
1834 struct sockaddr_in offender
;
1836 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1837 struct errhdr_t
*target_errh
=
1838 (struct errhdr_t
*)target_data
;
1840 if (len
!= sizeof(struct errhdr_t
) ||
1841 tgt_len
!= sizeof(struct errhdr_t
)) {
1844 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1845 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1846 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1847 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1848 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1849 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1850 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1851 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1852 (void *) &errh
->offender
, sizeof(errh
->offender
));
1861 switch (cmsg
->cmsg_type
) {
1864 uint32_t *v
= (uint32_t *)data
;
1865 uint32_t *t_int
= (uint32_t *)target_data
;
1867 if (len
!= sizeof(uint32_t) ||
1868 tgt_len
!= sizeof(uint32_t)) {
1871 __put_user(*v
, t_int
);
1877 struct sock_extended_err ee
;
1878 struct sockaddr_in6 offender
;
1880 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1881 struct errhdr6_t
*target_errh
=
1882 (struct errhdr6_t
*)target_data
;
1884 if (len
!= sizeof(struct errhdr6_t
) ||
1885 tgt_len
!= sizeof(struct errhdr6_t
)) {
1888 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1889 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1890 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1891 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1892 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1893 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1894 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1895 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1896 (void *) &errh
->offender
, sizeof(errh
->offender
));
1906 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1907 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1908 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1909 if (tgt_len
> len
) {
1910 memset(target_data
+ len
, 0, tgt_len
- len
);
1914 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1915 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1916 if (msg_controllen
< tgt_space
) {
1917 tgt_space
= msg_controllen
;
1919 msg_controllen
-= tgt_space
;
1921 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1922 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1925 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1927 target_msgh
->msg_controllen
= tswapal(space
);
1931 /* do_setsockopt() Must return target values and target errnos. */
1932 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1933 abi_ulong optval_addr
, socklen_t optlen
)
1937 struct ip_mreqn
*ip_mreq
;
1938 struct ip_mreq_source
*ip_mreq_source
;
1942 /* TCP options all take an 'int' value. */
1943 if (optlen
< sizeof(uint32_t))
1944 return -TARGET_EINVAL
;
1946 if (get_user_u32(val
, optval_addr
))
1947 return -TARGET_EFAULT
;
1948 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1955 case IP_ROUTER_ALERT
:
1959 case IP_MTU_DISCOVER
:
1966 case IP_MULTICAST_TTL
:
1967 case IP_MULTICAST_LOOP
:
1969 if (optlen
>= sizeof(uint32_t)) {
1970 if (get_user_u32(val
, optval_addr
))
1971 return -TARGET_EFAULT
;
1972 } else if (optlen
>= 1) {
1973 if (get_user_u8(val
, optval_addr
))
1974 return -TARGET_EFAULT
;
1976 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1978 case IP_ADD_MEMBERSHIP
:
1979 case IP_DROP_MEMBERSHIP
:
1980 if (optlen
< sizeof (struct target_ip_mreq
) ||
1981 optlen
> sizeof (struct target_ip_mreqn
))
1982 return -TARGET_EINVAL
;
1984 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1985 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1986 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1989 case IP_BLOCK_SOURCE
:
1990 case IP_UNBLOCK_SOURCE
:
1991 case IP_ADD_SOURCE_MEMBERSHIP
:
1992 case IP_DROP_SOURCE_MEMBERSHIP
:
1993 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1994 return -TARGET_EINVAL
;
1996 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1997 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1998 unlock_user (ip_mreq_source
, optval_addr
, 0);
2007 case IPV6_MTU_DISCOVER
:
2010 case IPV6_RECVPKTINFO
:
2011 case IPV6_UNICAST_HOPS
:
2012 case IPV6_MULTICAST_HOPS
:
2013 case IPV6_MULTICAST_LOOP
:
2015 case IPV6_RECVHOPLIMIT
:
2016 case IPV6_2292HOPLIMIT
:
2019 case IPV6_2292PKTINFO
:
2020 case IPV6_RECVTCLASS
:
2021 case IPV6_RECVRTHDR
:
2022 case IPV6_2292RTHDR
:
2023 case IPV6_RECVHOPOPTS
:
2024 case IPV6_2292HOPOPTS
:
2025 case IPV6_RECVDSTOPTS
:
2026 case IPV6_2292DSTOPTS
:
2028 #ifdef IPV6_RECVPATHMTU
2029 case IPV6_RECVPATHMTU
:
2031 #ifdef IPV6_TRANSPARENT
2032 case IPV6_TRANSPARENT
:
2034 #ifdef IPV6_FREEBIND
2037 #ifdef IPV6_RECVORIGDSTADDR
2038 case IPV6_RECVORIGDSTADDR
:
2041 if (optlen
< sizeof(uint32_t)) {
2042 return -TARGET_EINVAL
;
2044 if (get_user_u32(val
, optval_addr
)) {
2045 return -TARGET_EFAULT
;
2047 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2048 &val
, sizeof(val
)));
2052 struct in6_pktinfo pki
;
2054 if (optlen
< sizeof(pki
)) {
2055 return -TARGET_EINVAL
;
2058 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2059 return -TARGET_EFAULT
;
2062 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2064 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2065 &pki
, sizeof(pki
)));
2068 case IPV6_ADD_MEMBERSHIP
:
2069 case IPV6_DROP_MEMBERSHIP
:
2071 struct ipv6_mreq ipv6mreq
;
2073 if (optlen
< sizeof(ipv6mreq
)) {
2074 return -TARGET_EINVAL
;
2077 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2078 return -TARGET_EFAULT
;
2081 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2083 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2084 &ipv6mreq
, sizeof(ipv6mreq
)));
2095 struct icmp6_filter icmp6f
;
2097 if (optlen
> sizeof(icmp6f
)) {
2098 optlen
= sizeof(icmp6f
);
2101 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2102 return -TARGET_EFAULT
;
2105 for (val
= 0; val
< 8; val
++) {
2106 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2109 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2121 /* those take an u32 value */
2122 if (optlen
< sizeof(uint32_t)) {
2123 return -TARGET_EINVAL
;
2126 if (get_user_u32(val
, optval_addr
)) {
2127 return -TARGET_EFAULT
;
2129 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2130 &val
, sizeof(val
)));
2137 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2142 char *alg_key
= g_malloc(optlen
);
2145 return -TARGET_ENOMEM
;
2147 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2149 return -TARGET_EFAULT
;
2151 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2156 case ALG_SET_AEAD_AUTHSIZE
:
2158 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2167 case TARGET_SOL_SOCKET
:
2169 case TARGET_SO_RCVTIMEO
:
2173 optname
= SO_RCVTIMEO
;
2176 if (optlen
!= sizeof(struct target_timeval
)) {
2177 return -TARGET_EINVAL
;
2180 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2181 return -TARGET_EFAULT
;
2184 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2188 case TARGET_SO_SNDTIMEO
:
2189 optname
= SO_SNDTIMEO
;
2191 case TARGET_SO_ATTACH_FILTER
:
2193 struct target_sock_fprog
*tfprog
;
2194 struct target_sock_filter
*tfilter
;
2195 struct sock_fprog fprog
;
2196 struct sock_filter
*filter
;
2199 if (optlen
!= sizeof(*tfprog
)) {
2200 return -TARGET_EINVAL
;
2202 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2203 return -TARGET_EFAULT
;
2205 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2206 tswapal(tfprog
->filter
), 0)) {
2207 unlock_user_struct(tfprog
, optval_addr
, 1);
2208 return -TARGET_EFAULT
;
2211 fprog
.len
= tswap16(tfprog
->len
);
2212 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2213 if (filter
== NULL
) {
2214 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2215 unlock_user_struct(tfprog
, optval_addr
, 1);
2216 return -TARGET_ENOMEM
;
2218 for (i
= 0; i
< fprog
.len
; i
++) {
2219 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2220 filter
[i
].jt
= tfilter
[i
].jt
;
2221 filter
[i
].jf
= tfilter
[i
].jf
;
2222 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2224 fprog
.filter
= filter
;
2226 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2227 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2230 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2231 unlock_user_struct(tfprog
, optval_addr
, 1);
2234 case TARGET_SO_BINDTODEVICE
:
2236 char *dev_ifname
, *addr_ifname
;
2238 if (optlen
> IFNAMSIZ
- 1) {
2239 optlen
= IFNAMSIZ
- 1;
2241 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2243 return -TARGET_EFAULT
;
2245 optname
= SO_BINDTODEVICE
;
2246 addr_ifname
= alloca(IFNAMSIZ
);
2247 memcpy(addr_ifname
, dev_ifname
, optlen
);
2248 addr_ifname
[optlen
] = 0;
2249 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2250 addr_ifname
, optlen
));
2251 unlock_user (dev_ifname
, optval_addr
, 0);
2254 case TARGET_SO_LINGER
:
2257 struct target_linger
*tlg
;
2259 if (optlen
!= sizeof(struct target_linger
)) {
2260 return -TARGET_EINVAL
;
2262 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2263 return -TARGET_EFAULT
;
2265 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2266 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2267 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2269 unlock_user_struct(tlg
, optval_addr
, 0);
2272 /* Options with 'int' argument. */
2273 case TARGET_SO_DEBUG
:
2276 case TARGET_SO_REUSEADDR
:
2277 optname
= SO_REUSEADDR
;
2280 case TARGET_SO_REUSEPORT
:
2281 optname
= SO_REUSEPORT
;
2284 case TARGET_SO_TYPE
:
2287 case TARGET_SO_ERROR
:
2290 case TARGET_SO_DONTROUTE
:
2291 optname
= SO_DONTROUTE
;
2293 case TARGET_SO_BROADCAST
:
2294 optname
= SO_BROADCAST
;
2296 case TARGET_SO_SNDBUF
:
2297 optname
= SO_SNDBUF
;
2299 case TARGET_SO_SNDBUFFORCE
:
2300 optname
= SO_SNDBUFFORCE
;
2302 case TARGET_SO_RCVBUF
:
2303 optname
= SO_RCVBUF
;
2305 case TARGET_SO_RCVBUFFORCE
:
2306 optname
= SO_RCVBUFFORCE
;
2308 case TARGET_SO_KEEPALIVE
:
2309 optname
= SO_KEEPALIVE
;
2311 case TARGET_SO_OOBINLINE
:
2312 optname
= SO_OOBINLINE
;
2314 case TARGET_SO_NO_CHECK
:
2315 optname
= SO_NO_CHECK
;
2317 case TARGET_SO_PRIORITY
:
2318 optname
= SO_PRIORITY
;
2321 case TARGET_SO_BSDCOMPAT
:
2322 optname
= SO_BSDCOMPAT
;
2325 case TARGET_SO_PASSCRED
:
2326 optname
= SO_PASSCRED
;
2328 case TARGET_SO_PASSSEC
:
2329 optname
= SO_PASSSEC
;
2331 case TARGET_SO_TIMESTAMP
:
2332 optname
= SO_TIMESTAMP
;
2334 case TARGET_SO_RCVLOWAT
:
2335 optname
= SO_RCVLOWAT
;
2340 if (optlen
< sizeof(uint32_t))
2341 return -TARGET_EINVAL
;
2343 if (get_user_u32(val
, optval_addr
))
2344 return -TARGET_EFAULT
;
2345 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2350 case NETLINK_PKTINFO
:
2351 case NETLINK_ADD_MEMBERSHIP
:
2352 case NETLINK_DROP_MEMBERSHIP
:
2353 case NETLINK_BROADCAST_ERROR
:
2354 case NETLINK_NO_ENOBUFS
:
2355 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2356 case NETLINK_LISTEN_ALL_NSID
:
2357 case NETLINK_CAP_ACK
:
2358 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2359 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2360 case NETLINK_EXT_ACK
:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2363 case NETLINK_GET_STRICT_CHK
:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2370 if (optlen
< sizeof(uint32_t)) {
2371 return -TARGET_EINVAL
;
2373 if (get_user_u32(val
, optval_addr
)) {
2374 return -TARGET_EFAULT
;
2376 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2379 #endif /* SOL_NETLINK */
2382 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2384 ret
= -TARGET_ENOPROTOOPT
;
2389 /* do_getsockopt() Must return target values and target errnos. */
2390 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2391 abi_ulong optval_addr
, abi_ulong optlen
)
2398 case TARGET_SOL_SOCKET
:
2401 /* These don't just return a single integer */
2402 case TARGET_SO_PEERNAME
:
2404 case TARGET_SO_RCVTIMEO
: {
2408 optname
= SO_RCVTIMEO
;
2411 if (get_user_u32(len
, optlen
)) {
2412 return -TARGET_EFAULT
;
2415 return -TARGET_EINVAL
;
2419 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2424 if (len
> sizeof(struct target_timeval
)) {
2425 len
= sizeof(struct target_timeval
);
2427 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2428 return -TARGET_EFAULT
;
2430 if (put_user_u32(len
, optlen
)) {
2431 return -TARGET_EFAULT
;
2435 case TARGET_SO_SNDTIMEO
:
2436 optname
= SO_SNDTIMEO
;
2438 case TARGET_SO_PEERCRED
: {
2441 struct target_ucred
*tcr
;
2443 if (get_user_u32(len
, optlen
)) {
2444 return -TARGET_EFAULT
;
2447 return -TARGET_EINVAL
;
2451 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2459 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2460 return -TARGET_EFAULT
;
2462 __put_user(cr
.pid
, &tcr
->pid
);
2463 __put_user(cr
.uid
, &tcr
->uid
);
2464 __put_user(cr
.gid
, &tcr
->gid
);
2465 unlock_user_struct(tcr
, optval_addr
, 1);
2466 if (put_user_u32(len
, optlen
)) {
2467 return -TARGET_EFAULT
;
2471 case TARGET_SO_PEERSEC
: {
2474 if (get_user_u32(len
, optlen
)) {
2475 return -TARGET_EFAULT
;
2478 return -TARGET_EINVAL
;
2480 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2482 return -TARGET_EFAULT
;
2485 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2487 if (put_user_u32(lv
, optlen
)) {
2488 ret
= -TARGET_EFAULT
;
2490 unlock_user(name
, optval_addr
, lv
);
2493 case TARGET_SO_LINGER
:
2497 struct target_linger
*tlg
;
2499 if (get_user_u32(len
, optlen
)) {
2500 return -TARGET_EFAULT
;
2503 return -TARGET_EINVAL
;
2507 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2515 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2516 return -TARGET_EFAULT
;
2518 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2519 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2520 unlock_user_struct(tlg
, optval_addr
, 1);
2521 if (put_user_u32(len
, optlen
)) {
2522 return -TARGET_EFAULT
;
2526 /* Options with 'int' argument. */
2527 case TARGET_SO_DEBUG
:
2530 case TARGET_SO_REUSEADDR
:
2531 optname
= SO_REUSEADDR
;
2534 case TARGET_SO_REUSEPORT
:
2535 optname
= SO_REUSEPORT
;
2538 case TARGET_SO_TYPE
:
2541 case TARGET_SO_ERROR
:
2544 case TARGET_SO_DONTROUTE
:
2545 optname
= SO_DONTROUTE
;
2547 case TARGET_SO_BROADCAST
:
2548 optname
= SO_BROADCAST
;
2550 case TARGET_SO_SNDBUF
:
2551 optname
= SO_SNDBUF
;
2553 case TARGET_SO_RCVBUF
:
2554 optname
= SO_RCVBUF
;
2556 case TARGET_SO_KEEPALIVE
:
2557 optname
= SO_KEEPALIVE
;
2559 case TARGET_SO_OOBINLINE
:
2560 optname
= SO_OOBINLINE
;
2562 case TARGET_SO_NO_CHECK
:
2563 optname
= SO_NO_CHECK
;
2565 case TARGET_SO_PRIORITY
:
2566 optname
= SO_PRIORITY
;
2569 case TARGET_SO_BSDCOMPAT
:
2570 optname
= SO_BSDCOMPAT
;
2573 case TARGET_SO_PASSCRED
:
2574 optname
= SO_PASSCRED
;
2576 case TARGET_SO_TIMESTAMP
:
2577 optname
= SO_TIMESTAMP
;
2579 case TARGET_SO_RCVLOWAT
:
2580 optname
= SO_RCVLOWAT
;
2582 case TARGET_SO_ACCEPTCONN
:
2583 optname
= SO_ACCEPTCONN
;
2590 /* TCP options all take an 'int' value. */
2592 if (get_user_u32(len
, optlen
))
2593 return -TARGET_EFAULT
;
2595 return -TARGET_EINVAL
;
2597 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2600 if (optname
== SO_TYPE
) {
2601 val
= host_to_target_sock_type(val
);
2606 if (put_user_u32(val
, optval_addr
))
2607 return -TARGET_EFAULT
;
2609 if (put_user_u8(val
, optval_addr
))
2610 return -TARGET_EFAULT
;
2612 if (put_user_u32(len
, optlen
))
2613 return -TARGET_EFAULT
;
2620 case IP_ROUTER_ALERT
:
2624 case IP_MTU_DISCOVER
:
2630 case IP_MULTICAST_TTL
:
2631 case IP_MULTICAST_LOOP
:
2632 if (get_user_u32(len
, optlen
))
2633 return -TARGET_EFAULT
;
2635 return -TARGET_EINVAL
;
2637 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2640 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2642 if (put_user_u32(len
, optlen
)
2643 || put_user_u8(val
, optval_addr
))
2644 return -TARGET_EFAULT
;
2646 if (len
> sizeof(int))
2648 if (put_user_u32(len
, optlen
)
2649 || put_user_u32(val
, optval_addr
))
2650 return -TARGET_EFAULT
;
2654 ret
= -TARGET_ENOPROTOOPT
;
2660 case IPV6_MTU_DISCOVER
:
2663 case IPV6_RECVPKTINFO
:
2664 case IPV6_UNICAST_HOPS
:
2665 case IPV6_MULTICAST_HOPS
:
2666 case IPV6_MULTICAST_LOOP
:
2668 case IPV6_RECVHOPLIMIT
:
2669 case IPV6_2292HOPLIMIT
:
2672 case IPV6_2292PKTINFO
:
2673 case IPV6_RECVTCLASS
:
2674 case IPV6_RECVRTHDR
:
2675 case IPV6_2292RTHDR
:
2676 case IPV6_RECVHOPOPTS
:
2677 case IPV6_2292HOPOPTS
:
2678 case IPV6_RECVDSTOPTS
:
2679 case IPV6_2292DSTOPTS
:
2681 #ifdef IPV6_RECVPATHMTU
2682 case IPV6_RECVPATHMTU
:
2684 #ifdef IPV6_TRANSPARENT
2685 case IPV6_TRANSPARENT
:
2687 #ifdef IPV6_FREEBIND
2690 #ifdef IPV6_RECVORIGDSTADDR
2691 case IPV6_RECVORIGDSTADDR
:
2693 if (get_user_u32(len
, optlen
))
2694 return -TARGET_EFAULT
;
2696 return -TARGET_EINVAL
;
2698 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2701 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2703 if (put_user_u32(len
, optlen
)
2704 || put_user_u8(val
, optval_addr
))
2705 return -TARGET_EFAULT
;
2707 if (len
> sizeof(int))
2709 if (put_user_u32(len
, optlen
)
2710 || put_user_u32(val
, optval_addr
))
2711 return -TARGET_EFAULT
;
2715 ret
= -TARGET_ENOPROTOOPT
;
2722 case NETLINK_PKTINFO
:
2723 case NETLINK_BROADCAST_ERROR
:
2724 case NETLINK_NO_ENOBUFS
:
2725 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2726 case NETLINK_LISTEN_ALL_NSID
:
2727 case NETLINK_CAP_ACK
:
2728 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2729 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2730 case NETLINK_EXT_ACK
:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2733 case NETLINK_GET_STRICT_CHK
:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2735 if (get_user_u32(len
, optlen
)) {
2736 return -TARGET_EFAULT
;
2738 if (len
!= sizeof(val
)) {
2739 return -TARGET_EINVAL
;
2742 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2746 if (put_user_u32(lv
, optlen
)
2747 || put_user_u32(val
, optval_addr
)) {
2748 return -TARGET_EFAULT
;
2751 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2752 case NETLINK_LIST_MEMBERSHIPS
:
2756 if (get_user_u32(len
, optlen
)) {
2757 return -TARGET_EFAULT
;
2760 return -TARGET_EINVAL
;
2762 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2764 return -TARGET_EFAULT
;
2767 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2769 unlock_user(results
, optval_addr
, 0);
2772 /* swap host endianess to target endianess. */
2773 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2774 results
[i
] = tswap32(results
[i
]);
2776 if (put_user_u32(lv
, optlen
)) {
2777 return -TARGET_EFAULT
;
2779 unlock_user(results
, optval_addr
, 0);
2782 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2787 #endif /* SOL_NETLINK */
2790 qemu_log_mask(LOG_UNIMP
,
2791 "getsockopt level=%d optname=%d not yet supported\n",
2793 ret
= -TARGET_EOPNOTSUPP
;
2799 /* Convert target low/high pair representing file offset into the host
2800 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2801 * as the kernel doesn't handle them either.
2803 static void target_to_host_low_high(abi_ulong tlow
,
2805 unsigned long *hlow
,
2806 unsigned long *hhigh
)
2808 uint64_t off
= tlow
|
2809 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2810 TARGET_LONG_BITS
/ 2;
2813 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2816 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2817 abi_ulong count
, int copy
)
2819 struct target_iovec
*target_vec
;
2821 abi_ulong total_len
, max_len
;
2824 bool bad_address
= false;
2830 if (count
> IOV_MAX
) {
2835 vec
= g_try_new0(struct iovec
, count
);
2841 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2842 count
* sizeof(struct target_iovec
), 1);
2843 if (target_vec
== NULL
) {
2848 /* ??? If host page size > target page size, this will result in a
2849 value larger than what we can actually support. */
2850 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2853 for (i
= 0; i
< count
; i
++) {
2854 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2855 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2860 } else if (len
== 0) {
2861 /* Zero length pointer is ignored. */
2862 vec
[i
].iov_base
= 0;
2864 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2865 /* If the first buffer pointer is bad, this is a fault. But
2866 * subsequent bad buffers will result in a partial write; this
2867 * is realized by filling the vector with null pointers and
2869 if (!vec
[i
].iov_base
) {
2880 if (len
> max_len
- total_len
) {
2881 len
= max_len
- total_len
;
2884 vec
[i
].iov_len
= len
;
2888 unlock_user(target_vec
, target_addr
, 0);
2893 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2894 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2897 unlock_user(target_vec
, target_addr
, 0);
2904 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2905 abi_ulong count
, int copy
)
2907 struct target_iovec
*target_vec
;
2910 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2911 count
* sizeof(struct target_iovec
), 1);
2913 for (i
= 0; i
< count
; i
++) {
2914 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2915 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2919 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2921 unlock_user(target_vec
, target_addr
, 0);
2927 static inline int target_to_host_sock_type(int *type
)
2930 int target_type
= *type
;
2932 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2933 case TARGET_SOCK_DGRAM
:
2934 host_type
= SOCK_DGRAM
;
2936 case TARGET_SOCK_STREAM
:
2937 host_type
= SOCK_STREAM
;
2940 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2943 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2944 #if defined(SOCK_CLOEXEC)
2945 host_type
|= SOCK_CLOEXEC
;
2947 return -TARGET_EINVAL
;
2950 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2951 #if defined(SOCK_NONBLOCK)
2952 host_type
|= SOCK_NONBLOCK
;
2953 #elif !defined(O_NONBLOCK)
2954 return -TARGET_EINVAL
;
2961 /* Try to emulate socket type flags after socket creation. */
2962 static int sock_flags_fixup(int fd
, int target_type
)
2964 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2965 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2966 int flags
= fcntl(fd
, F_GETFL
);
2967 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2969 return -TARGET_EINVAL
;
2976 /* do_socket() Must return target values and target errnos. */
2977 static abi_long
do_socket(int domain
, int type
, int protocol
)
2979 int target_type
= type
;
2982 ret
= target_to_host_sock_type(&type
);
2987 if (domain
== PF_NETLINK
&& !(
2988 #ifdef CONFIG_RTNETLINK
2989 protocol
== NETLINK_ROUTE
||
2991 protocol
== NETLINK_KOBJECT_UEVENT
||
2992 protocol
== NETLINK_AUDIT
)) {
2993 return -TARGET_EPFNOSUPPORT
;
2996 if (domain
== AF_PACKET
||
2997 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2998 protocol
= tswap16(protocol
);
3001 ret
= get_errno(socket(domain
, type
, protocol
));
3003 ret
= sock_flags_fixup(ret
, target_type
);
3004 if (type
== SOCK_PACKET
) {
3005 /* Manage an obsolete case :
3006 * if socket type is SOCK_PACKET, bind by name
3008 fd_trans_register(ret
, &target_packet_trans
);
3009 } else if (domain
== PF_NETLINK
) {
3011 #ifdef CONFIG_RTNETLINK
3013 fd_trans_register(ret
, &target_netlink_route_trans
);
3016 case NETLINK_KOBJECT_UEVENT
:
3017 /* nothing to do: messages are strings */
3020 fd_trans_register(ret
, &target_netlink_audit_trans
);
3023 g_assert_not_reached();
3030 /* do_bind() Must return target values and target errnos. */
3031 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3037 if ((int)addrlen
< 0) {
3038 return -TARGET_EINVAL
;
3041 addr
= alloca(addrlen
+1);
3043 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3047 return get_errno(bind(sockfd
, addr
, addrlen
));
3050 /* do_connect() Must return target values and target errnos. */
3051 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3057 if ((int)addrlen
< 0) {
3058 return -TARGET_EINVAL
;
3061 addr
= alloca(addrlen
+1);
3063 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3067 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3070 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3071 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3072 int flags
, int send
)
3078 abi_ulong target_vec
;
3080 if (msgp
->msg_name
) {
3081 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3082 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3083 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3084 tswapal(msgp
->msg_name
),
3086 if (ret
== -TARGET_EFAULT
) {
3087 /* For connected sockets msg_name and msg_namelen must
3088 * be ignored, so returning EFAULT immediately is wrong.
3089 * Instead, pass a bad msg_name to the host kernel, and
3090 * let it decide whether to return EFAULT or not.
3092 msg
.msg_name
= (void *)-1;
3097 msg
.msg_name
= NULL
;
3098 msg
.msg_namelen
= 0;
3100 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3101 msg
.msg_control
= alloca(msg
.msg_controllen
);
3102 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3104 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3106 count
= tswapal(msgp
->msg_iovlen
);
3107 target_vec
= tswapal(msgp
->msg_iov
);
3109 if (count
> IOV_MAX
) {
3110 /* sendrcvmsg returns a different errno for this condition than
3111 * readv/writev, so we must catch it here before lock_iovec() does.
3113 ret
= -TARGET_EMSGSIZE
;
3117 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3118 target_vec
, count
, send
);
3120 ret
= -host_to_target_errno(errno
);
3123 msg
.msg_iovlen
= count
;
3127 if (fd_trans_target_to_host_data(fd
)) {
3130 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3131 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3132 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3133 msg
.msg_iov
->iov_len
);
3135 msg
.msg_iov
->iov_base
= host_msg
;
3136 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3140 ret
= target_to_host_cmsg(&msg
, msgp
);
3142 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3146 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3147 if (!is_error(ret
)) {
3149 if (fd_trans_host_to_target_data(fd
)) {
3150 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3151 MIN(msg
.msg_iov
->iov_len
, len
));
3153 ret
= host_to_target_cmsg(msgp
, &msg
);
3155 if (!is_error(ret
)) {
3156 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3157 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3158 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3159 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3160 msg
.msg_name
, msg
.msg_namelen
);
3172 unlock_iovec(vec
, target_vec
, count
, !send
);
3177 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3178 int flags
, int send
)
3181 struct target_msghdr
*msgp
;
3183 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3187 return -TARGET_EFAULT
;
3189 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3190 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3194 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3195 * so it might not have this *mmsg-specific flag either.
3197 #ifndef MSG_WAITFORONE
3198 #define MSG_WAITFORONE 0x10000
3201 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3202 unsigned int vlen
, unsigned int flags
,
3205 struct target_mmsghdr
*mmsgp
;
3209 if (vlen
> UIO_MAXIOV
) {
3213 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3215 return -TARGET_EFAULT
;
3218 for (i
= 0; i
< vlen
; i
++) {
3219 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3220 if (is_error(ret
)) {
3223 mmsgp
[i
].msg_len
= tswap32(ret
);
3224 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3225 if (flags
& MSG_WAITFORONE
) {
3226 flags
|= MSG_DONTWAIT
;
3230 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3232 /* Return number of datagrams sent if we sent any at all;
3233 * otherwise return the error.
3241 /* do_accept4() Must return target values and target errnos. */
3242 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3243 abi_ulong target_addrlen_addr
, int flags
)
3245 socklen_t addrlen
, ret_addrlen
;
3250 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3252 if (target_addr
== 0) {
3253 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3256 /* linux returns EINVAL if addrlen pointer is invalid */
3257 if (get_user_u32(addrlen
, target_addrlen_addr
))
3258 return -TARGET_EINVAL
;
3260 if ((int)addrlen
< 0) {
3261 return -TARGET_EINVAL
;
3264 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3265 return -TARGET_EINVAL
;
3267 addr
= alloca(addrlen
);
3269 ret_addrlen
= addrlen
;
3270 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3271 if (!is_error(ret
)) {
3272 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3273 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3274 ret
= -TARGET_EFAULT
;
3280 /* do_getpeername() Must return target values and target errnos. */
3281 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3282 abi_ulong target_addrlen_addr
)
3284 socklen_t addrlen
, ret_addrlen
;
3288 if (get_user_u32(addrlen
, target_addrlen_addr
))
3289 return -TARGET_EFAULT
;
3291 if ((int)addrlen
< 0) {
3292 return -TARGET_EINVAL
;
3295 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3296 return -TARGET_EFAULT
;
3298 addr
= alloca(addrlen
);
3300 ret_addrlen
= addrlen
;
3301 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3302 if (!is_error(ret
)) {
3303 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3304 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3305 ret
= -TARGET_EFAULT
;
3311 /* do_getsockname() Must return target values and target errnos. */
3312 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3313 abi_ulong target_addrlen_addr
)
3315 socklen_t addrlen
, ret_addrlen
;
3319 if (get_user_u32(addrlen
, target_addrlen_addr
))
3320 return -TARGET_EFAULT
;
3322 if ((int)addrlen
< 0) {
3323 return -TARGET_EINVAL
;
3326 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3327 return -TARGET_EFAULT
;
3329 addr
= alloca(addrlen
);
3331 ret_addrlen
= addrlen
;
3332 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3333 if (!is_error(ret
)) {
3334 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3335 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3336 ret
= -TARGET_EFAULT
;
3342 /* do_socketpair() Must return target values and target errnos. */
3343 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3344 abi_ulong target_tab_addr
)
3349 target_to_host_sock_type(&type
);
3351 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3352 if (!is_error(ret
)) {
3353 if (put_user_s32(tab
[0], target_tab_addr
)
3354 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3355 ret
= -TARGET_EFAULT
;
3360 /* do_sendto() Must return target values and target errnos. */
3361 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3362 abi_ulong target_addr
, socklen_t addrlen
)
3366 void *copy_msg
= NULL
;
3369 if ((int)addrlen
< 0) {
3370 return -TARGET_EINVAL
;
3373 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3375 return -TARGET_EFAULT
;
3376 if (fd_trans_target_to_host_data(fd
)) {
3377 copy_msg
= host_msg
;
3378 host_msg
= g_malloc(len
);
3379 memcpy(host_msg
, copy_msg
, len
);
3380 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3386 addr
= alloca(addrlen
+1);
3387 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3391 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3393 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3398 host_msg
= copy_msg
;
3400 unlock_user(host_msg
, msg
, 0);
3404 /* do_recvfrom() Must return target values and target errnos. */
3405 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3406 abi_ulong target_addr
,
3407 abi_ulong target_addrlen
)
3409 socklen_t addrlen
, ret_addrlen
;
3414 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3416 return -TARGET_EFAULT
;
3418 if (get_user_u32(addrlen
, target_addrlen
)) {
3419 ret
= -TARGET_EFAULT
;
3422 if ((int)addrlen
< 0) {
3423 ret
= -TARGET_EINVAL
;
3426 addr
= alloca(addrlen
);
3427 ret_addrlen
= addrlen
;
3428 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3429 addr
, &ret_addrlen
));
3431 addr
= NULL
; /* To keep compiler quiet. */
3432 addrlen
= 0; /* To keep compiler quiet. */
3433 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3435 if (!is_error(ret
)) {
3436 if (fd_trans_host_to_target_data(fd
)) {
3438 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3439 if (is_error(trans
)) {
3445 host_to_target_sockaddr(target_addr
, addr
,
3446 MIN(addrlen
, ret_addrlen
));
3447 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3448 ret
= -TARGET_EFAULT
;
3452 unlock_user(host_msg
, msg
, len
);
3455 unlock_user(host_msg
, msg
, 0);
3460 #ifdef TARGET_NR_socketcall
3461 /* do_socketcall() must return target values and target errnos. */
3462 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3464 static const unsigned nargs
[] = { /* number of arguments per operation */
3465 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3466 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3467 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3468 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3469 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3470 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3471 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3472 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3473 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3474 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3475 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3476 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3477 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3478 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3479 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3480 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3481 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3482 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3483 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3484 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3486 abi_long a
[6]; /* max 6 args */
3489 /* check the range of the first argument num */
3490 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3491 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3492 return -TARGET_EINVAL
;
3494 /* ensure we have space for args */
3495 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3496 return -TARGET_EINVAL
;
3498 /* collect the arguments in a[] according to nargs[] */
3499 for (i
= 0; i
< nargs
[num
]; ++i
) {
3500 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3501 return -TARGET_EFAULT
;
3504 /* now when we have the args, invoke the appropriate underlying function */
3506 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3507 return do_socket(a
[0], a
[1], a
[2]);
3508 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3509 return do_bind(a
[0], a
[1], a
[2]);
3510 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3511 return do_connect(a
[0], a
[1], a
[2]);
3512 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3513 return get_errno(listen(a
[0], a
[1]));
3514 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3515 return do_accept4(a
[0], a
[1], a
[2], 0);
3516 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3517 return do_getsockname(a
[0], a
[1], a
[2]);
3518 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3519 return do_getpeername(a
[0], a
[1], a
[2]);
3520 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3521 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3522 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3523 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3524 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3525 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3526 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3527 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3528 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3529 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3530 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3531 return get_errno(shutdown(a
[0], a
[1]));
3532 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3533 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3534 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3535 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3536 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3537 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3538 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3539 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3540 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3541 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3542 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3543 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3544 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3545 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3547 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3548 return -TARGET_EINVAL
;
3553 #define N_SHM_REGIONS 32
3555 static struct shm_region
{
3559 } shm_regions
[N_SHM_REGIONS
];
3561 #ifndef TARGET_SEMID64_DS
3562 /* asm-generic version of this struct */
3563 struct target_semid64_ds
3565 struct target_ipc_perm sem_perm
;
3566 abi_ulong sem_otime
;
3567 #if TARGET_ABI_BITS == 32
3568 abi_ulong __unused1
;
3570 abi_ulong sem_ctime
;
3571 #if TARGET_ABI_BITS == 32
3572 abi_ulong __unused2
;
3574 abi_ulong sem_nsems
;
3575 abi_ulong __unused3
;
3576 abi_ulong __unused4
;
3580 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3581 abi_ulong target_addr
)
3583 struct target_ipc_perm
*target_ip
;
3584 struct target_semid64_ds
*target_sd
;
3586 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3587 return -TARGET_EFAULT
;
3588 target_ip
= &(target_sd
->sem_perm
);
3589 host_ip
->__key
= tswap32(target_ip
->__key
);
3590 host_ip
->uid
= tswap32(target_ip
->uid
);
3591 host_ip
->gid
= tswap32(target_ip
->gid
);
3592 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3593 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3594 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3595 host_ip
->mode
= tswap32(target_ip
->mode
);
3597 host_ip
->mode
= tswap16(target_ip
->mode
);
3599 #if defined(TARGET_PPC)
3600 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3602 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3604 unlock_user_struct(target_sd
, target_addr
, 0);
3608 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3609 struct ipc_perm
*host_ip
)
3611 struct target_ipc_perm
*target_ip
;
3612 struct target_semid64_ds
*target_sd
;
3614 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3615 return -TARGET_EFAULT
;
3616 target_ip
= &(target_sd
->sem_perm
);
3617 target_ip
->__key
= tswap32(host_ip
->__key
);
3618 target_ip
->uid
= tswap32(host_ip
->uid
);
3619 target_ip
->gid
= tswap32(host_ip
->gid
);
3620 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3621 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3622 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3623 target_ip
->mode
= tswap32(host_ip
->mode
);
3625 target_ip
->mode
= tswap16(host_ip
->mode
);
3627 #if defined(TARGET_PPC)
3628 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3630 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3632 unlock_user_struct(target_sd
, target_addr
, 1);
3636 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3637 abi_ulong target_addr
)
3639 struct target_semid64_ds
*target_sd
;
3641 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3642 return -TARGET_EFAULT
;
3643 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3644 return -TARGET_EFAULT
;
3645 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3646 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3647 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3648 unlock_user_struct(target_sd
, target_addr
, 0);
3652 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3653 struct semid_ds
*host_sd
)
3655 struct target_semid64_ds
*target_sd
;
3657 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3658 return -TARGET_EFAULT
;
3659 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3660 return -TARGET_EFAULT
;
3661 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3662 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3663 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3664 unlock_user_struct(target_sd
, target_addr
, 1);
3668 struct target_seminfo
{
3681 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3682 struct seminfo
*host_seminfo
)
3684 struct target_seminfo
*target_seminfo
;
3685 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3686 return -TARGET_EFAULT
;
3687 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3688 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3689 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3690 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3691 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3692 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3693 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3694 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3695 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3696 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3697 unlock_user_struct(target_seminfo
, target_addr
, 1);
3703 struct semid_ds
*buf
;
3704 unsigned short *array
;
3705 struct seminfo
*__buf
;
3708 union target_semun
{
3715 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3716 abi_ulong target_addr
)
3719 unsigned short *array
;
3721 struct semid_ds semid_ds
;
3724 semun
.buf
= &semid_ds
;
3726 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3728 return get_errno(ret
);
3730 nsems
= semid_ds
.sem_nsems
;
3732 *host_array
= g_try_new(unsigned short, nsems
);
3734 return -TARGET_ENOMEM
;
3736 array
= lock_user(VERIFY_READ
, target_addr
,
3737 nsems
*sizeof(unsigned short), 1);
3739 g_free(*host_array
);
3740 return -TARGET_EFAULT
;
3743 for(i
=0; i
<nsems
; i
++) {
3744 __get_user((*host_array
)[i
], &array
[i
]);
3746 unlock_user(array
, target_addr
, 0);
3751 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3752 unsigned short **host_array
)
3755 unsigned short *array
;
3757 struct semid_ds semid_ds
;
3760 semun
.buf
= &semid_ds
;
3762 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3764 return get_errno(ret
);
3766 nsems
= semid_ds
.sem_nsems
;
3768 array
= lock_user(VERIFY_WRITE
, target_addr
,
3769 nsems
*sizeof(unsigned short), 0);
3771 return -TARGET_EFAULT
;
3773 for(i
=0; i
<nsems
; i
++) {
3774 __put_user((*host_array
)[i
], &array
[i
]);
3776 g_free(*host_array
);
3777 unlock_user(array
, target_addr
, 1);
3782 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3783 abi_ulong target_arg
)
3785 union target_semun target_su
= { .buf
= target_arg
};
3787 struct semid_ds dsarg
;
3788 unsigned short *array
= NULL
;
3789 struct seminfo seminfo
;
3790 abi_long ret
= -TARGET_EINVAL
;
3797 /* In 64 bit cross-endian situations, we will erroneously pick up
3798 * the wrong half of the union for the "val" element. To rectify
3799 * this, the entire 8-byte structure is byteswapped, followed by
3800 * a swap of the 4 byte val field. In other cases, the data is
3801 * already in proper host byte order. */
3802 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3803 target_su
.buf
= tswapal(target_su
.buf
);
3804 arg
.val
= tswap32(target_su
.val
);
3806 arg
.val
= target_su
.val
;
3808 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3812 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3816 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3817 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3824 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3828 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3829 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3835 arg
.__buf
= &seminfo
;
3836 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3837 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3845 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3852 struct target_sembuf
{
3853 unsigned short sem_num
;
3858 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3859 abi_ulong target_addr
,
3862 struct target_sembuf
*target_sembuf
;
3865 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3866 nsops
*sizeof(struct target_sembuf
), 1);
3868 return -TARGET_EFAULT
;
3870 for(i
=0; i
<nsops
; i
++) {
3871 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3872 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3873 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3876 unlock_user(target_sembuf
, target_addr
, 0);
3881 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3883 struct sembuf sops
[nsops
];
3886 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3887 return -TARGET_EFAULT
;
3889 ret
= -TARGET_ENOSYS
;
3890 #ifdef __NR_semtimedop
3891 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3894 if (ret
== -TARGET_ENOSYS
) {
3895 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
, nsops
, 0, sops
, 0));
3901 struct target_msqid_ds
3903 struct target_ipc_perm msg_perm
;
3904 abi_ulong msg_stime
;
3905 #if TARGET_ABI_BITS == 32
3906 abi_ulong __unused1
;
3908 abi_ulong msg_rtime
;
3909 #if TARGET_ABI_BITS == 32
3910 abi_ulong __unused2
;
3912 abi_ulong msg_ctime
;
3913 #if TARGET_ABI_BITS == 32
3914 abi_ulong __unused3
;
3916 abi_ulong __msg_cbytes
;
3918 abi_ulong msg_qbytes
;
3919 abi_ulong msg_lspid
;
3920 abi_ulong msg_lrpid
;
3921 abi_ulong __unused4
;
3922 abi_ulong __unused5
;
3925 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3926 abi_ulong target_addr
)
3928 struct target_msqid_ds
*target_md
;
3930 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3931 return -TARGET_EFAULT
;
3932 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3933 return -TARGET_EFAULT
;
3934 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3935 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3936 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3937 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3938 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3939 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3940 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3941 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3942 unlock_user_struct(target_md
, target_addr
, 0);
3946 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3947 struct msqid_ds
*host_md
)
3949 struct target_msqid_ds
*target_md
;
3951 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3952 return -TARGET_EFAULT
;
3953 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3954 return -TARGET_EFAULT
;
3955 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3956 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3957 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3958 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3959 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3960 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3961 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3962 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3963 unlock_user_struct(target_md
, target_addr
, 1);
3967 struct target_msginfo
{
3975 unsigned short int msgseg
;
3978 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3979 struct msginfo
*host_msginfo
)
3981 struct target_msginfo
*target_msginfo
;
3982 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3983 return -TARGET_EFAULT
;
3984 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3985 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3986 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3987 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3988 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3989 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3990 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3991 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3992 unlock_user_struct(target_msginfo
, target_addr
, 1);
3996 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3998 struct msqid_ds dsarg
;
3999 struct msginfo msginfo
;
4000 abi_long ret
= -TARGET_EINVAL
;
4008 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4009 return -TARGET_EFAULT
;
4010 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4011 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4012 return -TARGET_EFAULT
;
4015 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4019 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4020 if (host_to_target_msginfo(ptr
, &msginfo
))
4021 return -TARGET_EFAULT
;
4028 struct target_msgbuf
{
4033 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4034 ssize_t msgsz
, int msgflg
)
4036 struct target_msgbuf
*target_mb
;
4037 struct msgbuf
*host_mb
;
4041 return -TARGET_EINVAL
;
4044 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4045 return -TARGET_EFAULT
;
4046 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4048 unlock_user_struct(target_mb
, msgp
, 0);
4049 return -TARGET_ENOMEM
;
4051 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4052 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4053 ret
= -TARGET_ENOSYS
;
4055 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4058 if (ret
== -TARGET_ENOSYS
) {
4059 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4064 unlock_user_struct(target_mb
, msgp
, 0);
4069 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4070 ssize_t msgsz
, abi_long msgtyp
,
4073 struct target_msgbuf
*target_mb
;
4075 struct msgbuf
*host_mb
;
4079 return -TARGET_EINVAL
;
4082 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4083 return -TARGET_EFAULT
;
4085 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4087 ret
= -TARGET_ENOMEM
;
4090 ret
= -TARGET_ENOSYS
;
4092 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4095 if (ret
== -TARGET_ENOSYS
) {
4096 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4097 msgflg
, host_mb
, msgtyp
));
4102 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4103 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4104 if (!target_mtext
) {
4105 ret
= -TARGET_EFAULT
;
4108 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4109 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4112 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4116 unlock_user_struct(target_mb
, msgp
, 1);
4121 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4122 abi_ulong target_addr
)
4124 struct target_shmid_ds
*target_sd
;
4126 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4127 return -TARGET_EFAULT
;
4128 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4129 return -TARGET_EFAULT
;
4130 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4131 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4132 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4133 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4134 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4135 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4136 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4137 unlock_user_struct(target_sd
, target_addr
, 0);
4141 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4142 struct shmid_ds
*host_sd
)
4144 struct target_shmid_ds
*target_sd
;
4146 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4147 return -TARGET_EFAULT
;
4148 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4149 return -TARGET_EFAULT
;
4150 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4151 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4152 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4153 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4154 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4155 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4156 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4157 unlock_user_struct(target_sd
, target_addr
, 1);
4161 struct target_shminfo
{
4169 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4170 struct shminfo
*host_shminfo
)
4172 struct target_shminfo
*target_shminfo
;
4173 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4174 return -TARGET_EFAULT
;
4175 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4176 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4177 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4178 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4179 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4180 unlock_user_struct(target_shminfo
, target_addr
, 1);
4184 struct target_shm_info
{
4189 abi_ulong swap_attempts
;
4190 abi_ulong swap_successes
;
4193 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4194 struct shm_info
*host_shm_info
)
4196 struct target_shm_info
*target_shm_info
;
4197 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4198 return -TARGET_EFAULT
;
4199 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4200 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4201 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4202 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4203 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4204 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4205 unlock_user_struct(target_shm_info
, target_addr
, 1);
4209 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4211 struct shmid_ds dsarg
;
4212 struct shminfo shminfo
;
4213 struct shm_info shm_info
;
4214 abi_long ret
= -TARGET_EINVAL
;
4222 if (target_to_host_shmid_ds(&dsarg
, buf
))
4223 return -TARGET_EFAULT
;
4224 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4225 if (host_to_target_shmid_ds(buf
, &dsarg
))
4226 return -TARGET_EFAULT
;
4229 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4230 if (host_to_target_shminfo(buf
, &shminfo
))
4231 return -TARGET_EFAULT
;
4234 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4235 if (host_to_target_shm_info(buf
, &shm_info
))
4236 return -TARGET_EFAULT
;
4241 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4248 #ifndef TARGET_FORCE_SHMLBA
4249 /* For most architectures, SHMLBA is the same as the page size;
4250 * some architectures have larger values, in which case they should
4251 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4252 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4253 * and defining its own value for SHMLBA.
4255 * The kernel also permits SHMLBA to be set by the architecture to a
4256 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4257 * this means that addresses are rounded to the large size if
4258 * SHM_RND is set but addresses not aligned to that size are not rejected
4259 * as long as they are at least page-aligned. Since the only architecture
4260 * which uses this is ia64 this code doesn't provide for that oddity.
4262 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4264 return TARGET_PAGE_SIZE
;
4268 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4269 int shmid
, abi_ulong shmaddr
, int shmflg
)
4273 struct shmid_ds shm_info
;
4277 /* find out the length of the shared memory segment */
4278 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4279 if (is_error(ret
)) {
4280 /* can't get length, bail out */
4284 shmlba
= target_shmlba(cpu_env
);
4286 if (shmaddr
& (shmlba
- 1)) {
4287 if (shmflg
& SHM_RND
) {
4288 shmaddr
&= ~(shmlba
- 1);
4290 return -TARGET_EINVAL
;
4293 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4294 return -TARGET_EINVAL
;
4300 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4302 abi_ulong mmap_start
;
4304 /* In order to use the host shmat, we need to honor host SHMLBA. */
4305 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4307 if (mmap_start
== -1) {
4309 host_raddr
= (void *)-1;
4311 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4314 if (host_raddr
== (void *)-1) {
4316 return get_errno((long)host_raddr
);
4318 raddr
=h2g((unsigned long)host_raddr
);
4320 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4321 PAGE_VALID
| PAGE_READ
|
4322 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4324 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4325 if (!shm_regions
[i
].in_use
) {
4326 shm_regions
[i
].in_use
= true;
4327 shm_regions
[i
].start
= raddr
;
4328 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4338 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4345 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4346 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4347 shm_regions
[i
].in_use
= false;
4348 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4352 rv
= get_errno(shmdt(g2h(shmaddr
)));
4359 #ifdef TARGET_NR_ipc
4360 /* ??? This only works with linear mappings. */
4361 /* do_ipc() must return target values and target errnos. */
4362 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4363 unsigned int call
, abi_long first
,
4364 abi_long second
, abi_long third
,
4365 abi_long ptr
, abi_long fifth
)
4370 version
= call
>> 16;
4375 ret
= do_semop(first
, ptr
, second
);
4379 ret
= get_errno(semget(first
, second
, third
));
4382 case IPCOP_semctl
: {
4383 /* The semun argument to semctl is passed by value, so dereference the
4386 get_user_ual(atptr
, ptr
);
4387 ret
= do_semctl(first
, second
, third
, atptr
);
4392 ret
= get_errno(msgget(first
, second
));
4396 ret
= do_msgsnd(first
, ptr
, second
, third
);
4400 ret
= do_msgctl(first
, second
, ptr
);
4407 struct target_ipc_kludge
{
4412 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4413 ret
= -TARGET_EFAULT
;
4417 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4419 unlock_user_struct(tmp
, ptr
, 0);
4423 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4432 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4433 if (is_error(raddr
))
4434 return get_errno(raddr
);
4435 if (put_user_ual(raddr
, third
))
4436 return -TARGET_EFAULT
;
4440 ret
= -TARGET_EINVAL
;
4445 ret
= do_shmdt(ptr
);
4449 /* IPC_* flag values are the same on all linux platforms */
4450 ret
= get_errno(shmget(first
, second
, third
));
4453 /* IPC_* and SHM_* command values are the same on all linux platforms */
4455 ret
= do_shmctl(first
, second
, ptr
);
4458 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4460 ret
= -TARGET_ENOSYS
;
4467 /* kernel structure types definitions */
4469 #define STRUCT(name, ...) STRUCT_ ## name,
4470 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4472 #include "syscall_types.h"
4476 #undef STRUCT_SPECIAL
4478 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4479 #define STRUCT_SPECIAL(name)
4480 #include "syscall_types.h"
4482 #undef STRUCT_SPECIAL
4484 #define MAX_STRUCT_SIZE 4096
4486 #ifdef CONFIG_FIEMAP
4487 /* So fiemap access checks don't overflow on 32 bit systems.
4488 * This is very slightly smaller than the limit imposed by
4489 * the underlying kernel.
4491 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4492 / sizeof(struct fiemap_extent))
4494 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4495 int fd
, int cmd
, abi_long arg
)
4497 /* The parameter for this ioctl is a struct fiemap followed
4498 * by an array of struct fiemap_extent whose size is set
4499 * in fiemap->fm_extent_count. The array is filled in by the
4502 int target_size_in
, target_size_out
;
4504 const argtype
*arg_type
= ie
->arg_type
;
4505 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4508 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4512 assert(arg_type
[0] == TYPE_PTR
);
4513 assert(ie
->access
== IOC_RW
);
4515 target_size_in
= thunk_type_size(arg_type
, 0);
4516 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4518 return -TARGET_EFAULT
;
4520 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4521 unlock_user(argptr
, arg
, 0);
4522 fm
= (struct fiemap
*)buf_temp
;
4523 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4524 return -TARGET_EINVAL
;
4527 outbufsz
= sizeof (*fm
) +
4528 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4530 if (outbufsz
> MAX_STRUCT_SIZE
) {
4531 /* We can't fit all the extents into the fixed size buffer.
4532 * Allocate one that is large enough and use it instead.
4534 fm
= g_try_malloc(outbufsz
);
4536 return -TARGET_ENOMEM
;
4538 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4541 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4542 if (!is_error(ret
)) {
4543 target_size_out
= target_size_in
;
4544 /* An extent_count of 0 means we were only counting the extents
4545 * so there are no structs to copy
4547 if (fm
->fm_extent_count
!= 0) {
4548 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4550 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4552 ret
= -TARGET_EFAULT
;
4554 /* Convert the struct fiemap */
4555 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4556 if (fm
->fm_extent_count
!= 0) {
4557 p
= argptr
+ target_size_in
;
4558 /* ...and then all the struct fiemap_extents */
4559 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4560 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4565 unlock_user(argptr
, arg
, target_size_out
);
4575 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4576 int fd
, int cmd
, abi_long arg
)
4578 const argtype
*arg_type
= ie
->arg_type
;
4582 struct ifconf
*host_ifconf
;
4584 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4585 int target_ifreq_size
;
4590 abi_long target_ifc_buf
;
4594 assert(arg_type
[0] == TYPE_PTR
);
4595 assert(ie
->access
== IOC_RW
);
4598 target_size
= thunk_type_size(arg_type
, 0);
4600 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4602 return -TARGET_EFAULT
;
4603 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4604 unlock_user(argptr
, arg
, 0);
4606 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4607 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4608 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4610 if (target_ifc_buf
!= 0) {
4611 target_ifc_len
= host_ifconf
->ifc_len
;
4612 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4613 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4615 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4616 if (outbufsz
> MAX_STRUCT_SIZE
) {
4618 * We can't fit all the extents into the fixed size buffer.
4619 * Allocate one that is large enough and use it instead.
4621 host_ifconf
= malloc(outbufsz
);
4623 return -TARGET_ENOMEM
;
4625 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4628 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4630 host_ifconf
->ifc_len
= host_ifc_len
;
4632 host_ifc_buf
= NULL
;
4634 host_ifconf
->ifc_buf
= host_ifc_buf
;
4636 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4637 if (!is_error(ret
)) {
4638 /* convert host ifc_len to target ifc_len */
4640 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4641 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4642 host_ifconf
->ifc_len
= target_ifc_len
;
4644 /* restore target ifc_buf */
4646 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4648 /* copy struct ifconf to target user */
4650 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4652 return -TARGET_EFAULT
;
4653 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4654 unlock_user(argptr
, arg
, target_size
);
4656 if (target_ifc_buf
!= 0) {
4657 /* copy ifreq[] to target user */
4658 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4659 for (i
= 0; i
< nb_ifreq
; i
++) {
4660 thunk_convert(argptr
+ i
* target_ifreq_size
,
4661 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4662 ifreq_arg_type
, THUNK_TARGET
);
4664 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4675 #if defined(CONFIG_USBFS)
4676 #if HOST_LONG_BITS > 64
4677 #error USBDEVFS thunks do not support >64 bit hosts yet.
4680 uint64_t target_urb_adr
;
4681 uint64_t target_buf_adr
;
4682 char *target_buf_ptr
;
4683 struct usbdevfs_urb host_urb
;
4686 static GHashTable
*usbdevfs_urb_hashtable(void)
4688 static GHashTable
*urb_hashtable
;
4690 if (!urb_hashtable
) {
4691 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4693 return urb_hashtable
;
4696 static void urb_hashtable_insert(struct live_urb
*urb
)
4698 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4699 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4702 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4704 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4705 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4708 static void urb_hashtable_remove(struct live_urb
*urb
)
4710 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4711 g_hash_table_remove(urb_hashtable
, urb
);
4715 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4716 int fd
, int cmd
, abi_long arg
)
4718 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4719 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4720 struct live_urb
*lurb
;
4724 uintptr_t target_urb_adr
;
4727 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4729 memset(buf_temp
, 0, sizeof(uint64_t));
4730 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4731 if (is_error(ret
)) {
4735 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4736 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4737 if (!lurb
->target_urb_adr
) {
4738 return -TARGET_EFAULT
;
4740 urb_hashtable_remove(lurb
);
4741 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4742 lurb
->host_urb
.buffer_length
);
4743 lurb
->target_buf_ptr
= NULL
;
4745 /* restore the guest buffer pointer */
4746 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4748 /* update the guest urb struct */
4749 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4752 return -TARGET_EFAULT
;
4754 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4755 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4757 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4758 /* write back the urb handle */
4759 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4762 return -TARGET_EFAULT
;
4765 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4766 target_urb_adr
= lurb
->target_urb_adr
;
4767 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4768 unlock_user(argptr
, arg
, target_size
);
4775 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4776 uint8_t *buf_temp
__attribute__((unused
)),
4777 int fd
, int cmd
, abi_long arg
)
4779 struct live_urb
*lurb
;
4781 /* map target address back to host URB with metadata. */
4782 lurb
= urb_hashtable_lookup(arg
);
4784 return -TARGET_EFAULT
;
4786 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4790 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4791 int fd
, int cmd
, abi_long arg
)
4793 const argtype
*arg_type
= ie
->arg_type
;
4798 struct live_urb
*lurb
;
4801 * each submitted URB needs to map to a unique ID for the
4802 * kernel, and that unique ID needs to be a pointer to
4803 * host memory. hence, we need to malloc for each URB.
4804 * isochronous transfers have a variable length struct.
4807 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4809 /* construct host copy of urb and metadata */
4810 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4812 return -TARGET_ENOMEM
;
4815 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4818 return -TARGET_EFAULT
;
4820 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4821 unlock_user(argptr
, arg
, 0);
4823 lurb
->target_urb_adr
= arg
;
4824 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4826 /* buffer space used depends on endpoint type so lock the entire buffer */
4827 /* control type urbs should check the buffer contents for true direction */
4828 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4829 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4830 lurb
->host_urb
.buffer_length
, 1);
4831 if (lurb
->target_buf_ptr
== NULL
) {
4833 return -TARGET_EFAULT
;
4836 /* update buffer pointer in host copy */
4837 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4839 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4840 if (is_error(ret
)) {
4841 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4844 urb_hashtable_insert(lurb
);
4849 #endif /* CONFIG_USBFS */
4851 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4852 int cmd
, abi_long arg
)
4855 struct dm_ioctl
*host_dm
;
4856 abi_long guest_data
;
4857 uint32_t guest_data_size
;
4859 const argtype
*arg_type
= ie
->arg_type
;
4861 void *big_buf
= NULL
;
4865 target_size
= thunk_type_size(arg_type
, 0);
4866 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4868 ret
= -TARGET_EFAULT
;
4871 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4872 unlock_user(argptr
, arg
, 0);
4874 /* buf_temp is too small, so fetch things into a bigger buffer */
4875 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4876 memcpy(big_buf
, buf_temp
, target_size
);
4880 guest_data
= arg
+ host_dm
->data_start
;
4881 if ((guest_data
- arg
) < 0) {
4882 ret
= -TARGET_EINVAL
;
4885 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4886 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4888 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4890 ret
= -TARGET_EFAULT
;
4894 switch (ie
->host_cmd
) {
4896 case DM_LIST_DEVICES
:
4899 case DM_DEV_SUSPEND
:
4902 case DM_TABLE_STATUS
:
4903 case DM_TABLE_CLEAR
:
4905 case DM_LIST_VERSIONS
:
4909 case DM_DEV_SET_GEOMETRY
:
4910 /* data contains only strings */
4911 memcpy(host_data
, argptr
, guest_data_size
);
4914 memcpy(host_data
, argptr
, guest_data_size
);
4915 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4919 void *gspec
= argptr
;
4920 void *cur_data
= host_data
;
4921 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4922 int spec_size
= thunk_type_size(arg_type
, 0);
4925 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4926 struct dm_target_spec
*spec
= cur_data
;
4930 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4931 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4933 spec
->next
= sizeof(*spec
) + slen
;
4934 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4936 cur_data
+= spec
->next
;
4941 ret
= -TARGET_EINVAL
;
4942 unlock_user(argptr
, guest_data
, 0);
4945 unlock_user(argptr
, guest_data
, 0);
4947 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4948 if (!is_error(ret
)) {
4949 guest_data
= arg
+ host_dm
->data_start
;
4950 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4951 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4952 switch (ie
->host_cmd
) {
4957 case DM_DEV_SUSPEND
:
4960 case DM_TABLE_CLEAR
:
4962 case DM_DEV_SET_GEOMETRY
:
4963 /* no return data */
4965 case DM_LIST_DEVICES
:
4967 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4968 uint32_t remaining_data
= guest_data_size
;
4969 void *cur_data
= argptr
;
4970 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4971 int nl_size
= 12; /* can't use thunk_size due to alignment */
4974 uint32_t next
= nl
->next
;
4976 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4978 if (remaining_data
< nl
->next
) {
4979 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4982 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4983 strcpy(cur_data
+ nl_size
, nl
->name
);
4984 cur_data
+= nl
->next
;
4985 remaining_data
-= nl
->next
;
4989 nl
= (void*)nl
+ next
;
4994 case DM_TABLE_STATUS
:
4996 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4997 void *cur_data
= argptr
;
4998 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4999 int spec_size
= thunk_type_size(arg_type
, 0);
5002 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5003 uint32_t next
= spec
->next
;
5004 int slen
= strlen((char*)&spec
[1]) + 1;
5005 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5006 if (guest_data_size
< spec
->next
) {
5007 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5010 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5011 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5012 cur_data
= argptr
+ spec
->next
;
5013 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5019 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5020 int count
= *(uint32_t*)hdata
;
5021 uint64_t *hdev
= hdata
+ 8;
5022 uint64_t *gdev
= argptr
+ 8;
5025 *(uint32_t*)argptr
= tswap32(count
);
5026 for (i
= 0; i
< count
; i
++) {
5027 *gdev
= tswap64(*hdev
);
5033 case DM_LIST_VERSIONS
:
5035 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5036 uint32_t remaining_data
= guest_data_size
;
5037 void *cur_data
= argptr
;
5038 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5039 int vers_size
= thunk_type_size(arg_type
, 0);
5042 uint32_t next
= vers
->next
;
5044 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5046 if (remaining_data
< vers
->next
) {
5047 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5050 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5051 strcpy(cur_data
+ vers_size
, vers
->name
);
5052 cur_data
+= vers
->next
;
5053 remaining_data
-= vers
->next
;
5057 vers
= (void*)vers
+ next
;
5062 unlock_user(argptr
, guest_data
, 0);
5063 ret
= -TARGET_EINVAL
;
5066 unlock_user(argptr
, guest_data
, guest_data_size
);
5068 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5070 ret
= -TARGET_EFAULT
;
5073 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5074 unlock_user(argptr
, arg
, target_size
);
5081 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5082 int cmd
, abi_long arg
)
5086 const argtype
*arg_type
= ie
->arg_type
;
5087 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5090 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5091 struct blkpg_partition host_part
;
5093 /* Read and convert blkpg */
5095 target_size
= thunk_type_size(arg_type
, 0);
5096 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5098 ret
= -TARGET_EFAULT
;
5101 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5102 unlock_user(argptr
, arg
, 0);
5104 switch (host_blkpg
->op
) {
5105 case BLKPG_ADD_PARTITION
:
5106 case BLKPG_DEL_PARTITION
:
5107 /* payload is struct blkpg_partition */
5110 /* Unknown opcode */
5111 ret
= -TARGET_EINVAL
;
5115 /* Read and convert blkpg->data */
5116 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5117 target_size
= thunk_type_size(part_arg_type
, 0);
5118 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5120 ret
= -TARGET_EFAULT
;
5123 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5124 unlock_user(argptr
, arg
, 0);
5126 /* Swizzle the data pointer to our local copy and call! */
5127 host_blkpg
->data
= &host_part
;
5128 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5134 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5135 int fd
, int cmd
, abi_long arg
)
5137 const argtype
*arg_type
= ie
->arg_type
;
5138 const StructEntry
*se
;
5139 const argtype
*field_types
;
5140 const int *dst_offsets
, *src_offsets
;
5143 abi_ulong
*target_rt_dev_ptr
= NULL
;
5144 unsigned long *host_rt_dev_ptr
= NULL
;
5148 assert(ie
->access
== IOC_W
);
5149 assert(*arg_type
== TYPE_PTR
);
5151 assert(*arg_type
== TYPE_STRUCT
);
5152 target_size
= thunk_type_size(arg_type
, 0);
5153 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5155 return -TARGET_EFAULT
;
5158 assert(*arg_type
== (int)STRUCT_rtentry
);
5159 se
= struct_entries
+ *arg_type
++;
5160 assert(se
->convert
[0] == NULL
);
5161 /* convert struct here to be able to catch rt_dev string */
5162 field_types
= se
->field_types
;
5163 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5164 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5165 for (i
= 0; i
< se
->nb_fields
; i
++) {
5166 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5167 assert(*field_types
== TYPE_PTRVOID
);
5168 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5169 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5170 if (*target_rt_dev_ptr
!= 0) {
5171 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5172 tswapal(*target_rt_dev_ptr
));
5173 if (!*host_rt_dev_ptr
) {
5174 unlock_user(argptr
, arg
, 0);
5175 return -TARGET_EFAULT
;
5178 *host_rt_dev_ptr
= 0;
5183 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5184 argptr
+ src_offsets
[i
],
5185 field_types
, THUNK_HOST
);
5187 unlock_user(argptr
, arg
, 0);
5189 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5191 assert(host_rt_dev_ptr
!= NULL
);
5192 assert(target_rt_dev_ptr
!= NULL
);
5193 if (*host_rt_dev_ptr
!= 0) {
5194 unlock_user((void *)*host_rt_dev_ptr
,
5195 *target_rt_dev_ptr
, 0);
5200 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5201 int fd
, int cmd
, abi_long arg
)
5203 int sig
= target_to_host_signal(arg
);
5204 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5207 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5208 int fd
, int cmd
, abi_long arg
)
5213 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5214 if (is_error(ret
)) {
5218 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5219 if (copy_to_user_timeval(arg
, &tv
)) {
5220 return -TARGET_EFAULT
;
5223 if (copy_to_user_timeval64(arg
, &tv
)) {
5224 return -TARGET_EFAULT
;
5231 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5232 int fd
, int cmd
, abi_long arg
)
5237 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5238 if (is_error(ret
)) {
5242 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5243 if (host_to_target_timespec(arg
, &ts
)) {
5244 return -TARGET_EFAULT
;
5247 if (host_to_target_timespec64(arg
, &ts
)) {
5248 return -TARGET_EFAULT
;
5256 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5257 int fd
, int cmd
, abi_long arg
)
5259 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5260 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5266 static void unlock_drm_version(struct drm_version
*host_ver
,
5267 struct target_drm_version
*target_ver
,
5270 unlock_user(host_ver
->name
, target_ver
->name
,
5271 copy
? host_ver
->name_len
: 0);
5272 unlock_user(host_ver
->date
, target_ver
->date
,
5273 copy
? host_ver
->date_len
: 0);
5274 unlock_user(host_ver
->desc
, target_ver
->desc
,
5275 copy
? host_ver
->desc_len
: 0);
5278 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5279 struct target_drm_version
*target_ver
)
5281 memset(host_ver
, 0, sizeof(*host_ver
));
5283 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5284 if (host_ver
->name_len
) {
5285 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5286 target_ver
->name_len
, 0);
5287 if (!host_ver
->name
) {
5292 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5293 if (host_ver
->date_len
) {
5294 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5295 target_ver
->date_len
, 0);
5296 if (!host_ver
->date
) {
5301 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5302 if (host_ver
->desc_len
) {
5303 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5304 target_ver
->desc_len
, 0);
5305 if (!host_ver
->desc
) {
5312 unlock_drm_version(host_ver
, target_ver
, false);
5316 static inline void host_to_target_drmversion(
5317 struct target_drm_version
*target_ver
,
5318 struct drm_version
*host_ver
)
5320 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5321 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5322 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5323 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5324 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5325 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5326 unlock_drm_version(host_ver
, target_ver
, true);
5329 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5330 int fd
, int cmd
, abi_long arg
)
5332 struct drm_version
*ver
;
5333 struct target_drm_version
*target_ver
;
5336 switch (ie
->host_cmd
) {
5337 case DRM_IOCTL_VERSION
:
5338 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5339 return -TARGET_EFAULT
;
5341 ver
= (struct drm_version
*)buf_temp
;
5342 ret
= target_to_host_drmversion(ver
, target_ver
);
5343 if (!is_error(ret
)) {
5344 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5345 if (is_error(ret
)) {
5346 unlock_drm_version(ver
, target_ver
, false);
5348 host_to_target_drmversion(target_ver
, ver
);
5351 unlock_user_struct(target_ver
, arg
, 0);
5354 return -TARGET_ENOSYS
;
5359 IOCTLEntry ioctl_entries
[] = {
5360 #define IOCTL(cmd, access, ...) \
5361 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5362 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5363 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5364 #define IOCTL_IGNORE(cmd) \
5365 { TARGET_ ## cmd, 0, #cmd },
5370 /* ??? Implement proper locking for ioctls. */
5371 /* do_ioctl() Must return target values and target errnos. */
5372 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5374 const IOCTLEntry
*ie
;
5375 const argtype
*arg_type
;
5377 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5383 if (ie
->target_cmd
== 0) {
5385 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5386 return -TARGET_ENOSYS
;
5388 if (ie
->target_cmd
== cmd
)
5392 arg_type
= ie
->arg_type
;
5394 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5395 } else if (!ie
->host_cmd
) {
5396 /* Some architectures define BSD ioctls in their headers
5397 that are not implemented in Linux. */
5398 return -TARGET_ENOSYS
;
5401 switch(arg_type
[0]) {
5404 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5410 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5414 target_size
= thunk_type_size(arg_type
, 0);
5415 switch(ie
->access
) {
5417 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5418 if (!is_error(ret
)) {
5419 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5421 return -TARGET_EFAULT
;
5422 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5423 unlock_user(argptr
, arg
, target_size
);
5427 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5429 return -TARGET_EFAULT
;
5430 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5431 unlock_user(argptr
, arg
, 0);
5432 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5436 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5438 return -TARGET_EFAULT
;
5439 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5440 unlock_user(argptr
, arg
, 0);
5441 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5442 if (!is_error(ret
)) {
5443 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5445 return -TARGET_EFAULT
;
5446 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5447 unlock_user(argptr
, arg
, target_size
);
5453 qemu_log_mask(LOG_UNIMP
,
5454 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5455 (long)cmd
, arg_type
[0]);
5456 ret
= -TARGET_ENOSYS
;
5462 static const bitmask_transtbl iflag_tbl
[] = {
5463 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5464 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5465 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5466 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5467 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5468 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5469 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5470 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5471 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5472 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5473 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5474 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5475 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5476 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5480 static const bitmask_transtbl oflag_tbl
[] = {
5481 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5482 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5483 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5484 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5485 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5486 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5487 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5488 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5489 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5490 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5491 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5492 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5493 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5494 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5495 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5496 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5497 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5498 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5499 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5500 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5501 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5502 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5503 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5504 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5508 static const bitmask_transtbl cflag_tbl
[] = {
5509 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5510 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5511 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5512 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5513 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5514 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5515 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5516 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5517 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5518 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5519 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5520 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5521 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5522 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5523 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5524 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5525 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5526 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5527 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5528 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5529 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5530 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5531 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5532 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5533 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5534 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5535 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5536 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5537 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5538 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5539 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5543 static const bitmask_transtbl lflag_tbl
[] = {
5544 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5545 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5546 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5547 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5548 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5549 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5550 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5551 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5552 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5553 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5554 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5555 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5556 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5557 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5558 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5562 static void target_to_host_termios (void *dst
, const void *src
)
5564 struct host_termios
*host
= dst
;
5565 const struct target_termios
*target
= src
;
5568 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5570 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5572 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5574 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5575 host
->c_line
= target
->c_line
;
5577 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5578 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5579 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5580 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5581 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5582 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5583 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5584 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5585 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5586 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5587 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5588 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5589 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5590 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5591 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5592 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5593 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5594 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5597 static void host_to_target_termios (void *dst
, const void *src
)
5599 struct target_termios
*target
= dst
;
5600 const struct host_termios
*host
= src
;
5603 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5605 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5607 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5609 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5610 target
->c_line
= host
->c_line
;
5612 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5613 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5614 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5615 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5616 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5617 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5618 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5619 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5620 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5621 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5622 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5623 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5624 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5625 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5626 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5627 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5628 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5629 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5632 static const StructEntry struct_termios_def
= {
5633 .convert
= { host_to_target_termios
, target_to_host_termios
},
5634 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5635 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5638 static bitmask_transtbl mmap_flags_tbl
[] = {
5639 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5640 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5641 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5642 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5643 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5644 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5645 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5646 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5647 MAP_DENYWRITE
, MAP_DENYWRITE
},
5648 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5649 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5650 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5651 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5652 MAP_NORESERVE
, MAP_NORESERVE
},
5653 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5654 /* MAP_STACK had been ignored by the kernel for quite some time.
5655 Recognize it for the target insofar as we do not want to pass
5656 it through to the host. */
5657 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5662 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5663 * TARGET_I386 is defined if TARGET_X86_64 is defined
5665 #if defined(TARGET_I386)
5667 /* NOTE: there is really one LDT for all the threads */
/* Host-side copy of the guest LDT, allocated lazily by write_ldt().
 * NOTE: there is really one LDT shared by all the threads. */
static uint8_t *ldt_table;
5670 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5677 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5678 if (size
> bytecount
)
5680 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5682 return -TARGET_EFAULT
;
5683 /* ??? Should this by byteswapped? */
5684 memcpy(p
, ldt_table
, size
);
5685 unlock_user(p
, ptr
, size
);
5689 /* XXX: add locking support */
5690 static abi_long
write_ldt(CPUX86State
*env
,
5691 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5693 struct target_modify_ldt_ldt_s ldt_info
;
5694 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5695 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5696 int seg_not_present
, useable
, lm
;
5697 uint32_t *lp
, entry_1
, entry_2
;
5699 if (bytecount
!= sizeof(ldt_info
))
5700 return -TARGET_EINVAL
;
5701 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5702 return -TARGET_EFAULT
;
5703 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5704 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5705 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5706 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5707 unlock_user_struct(target_ldt_info
, ptr
, 0);
5709 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5710 return -TARGET_EINVAL
;
5711 seg_32bit
= ldt_info
.flags
& 1;
5712 contents
= (ldt_info
.flags
>> 1) & 3;
5713 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5714 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5715 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5716 useable
= (ldt_info
.flags
>> 6) & 1;
5720 lm
= (ldt_info
.flags
>> 7) & 1;
5722 if (contents
== 3) {
5724 return -TARGET_EINVAL
;
5725 if (seg_not_present
== 0)
5726 return -TARGET_EINVAL
;
5728 /* allocate the LDT */
5730 env
->ldt
.base
= target_mmap(0,
5731 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5732 PROT_READ
|PROT_WRITE
,
5733 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5734 if (env
->ldt
.base
== -1)
5735 return -TARGET_ENOMEM
;
5736 memset(g2h(env
->ldt
.base
), 0,
5737 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5738 env
->ldt
.limit
= 0xffff;
5739 ldt_table
= g2h(env
->ldt
.base
);
5742 /* NOTE: same code as Linux kernel */
5743 /* Allow LDTs to be cleared by the user. */
5744 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5747 read_exec_only
== 1 &&
5749 limit_in_pages
== 0 &&
5750 seg_not_present
== 1 &&
5758 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5759 (ldt_info
.limit
& 0x0ffff);
5760 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5761 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5762 (ldt_info
.limit
& 0xf0000) |
5763 ((read_exec_only
^ 1) << 9) |
5765 ((seg_not_present
^ 1) << 15) |
5767 (limit_in_pages
<< 23) |
5771 entry_2
|= (useable
<< 20);
5773 /* Install the new entry ... */
5775 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5776 lp
[0] = tswap32(entry_1
);
5777 lp
[1] = tswap32(entry_2
);
5781 /* specific and weird i386 syscalls */
5782 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5783 unsigned long bytecount
)
5789 ret
= read_ldt(ptr
, bytecount
);
5792 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5795 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5798 ret
= -TARGET_ENOSYS
;
5804 #if defined(TARGET_ABI32)
5805 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5807 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5808 struct target_modify_ldt_ldt_s ldt_info
;
5809 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5810 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5811 int seg_not_present
, useable
, lm
;
5812 uint32_t *lp
, entry_1
, entry_2
;
5815 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5816 if (!target_ldt_info
)
5817 return -TARGET_EFAULT
;
5818 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5819 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5820 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5821 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5822 if (ldt_info
.entry_number
== -1) {
5823 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5824 if (gdt_table
[i
] == 0) {
5825 ldt_info
.entry_number
= i
;
5826 target_ldt_info
->entry_number
= tswap32(i
);
5831 unlock_user_struct(target_ldt_info
, ptr
, 1);
5833 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5834 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5835 return -TARGET_EINVAL
;
5836 seg_32bit
= ldt_info
.flags
& 1;
5837 contents
= (ldt_info
.flags
>> 1) & 3;
5838 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5839 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5840 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5841 useable
= (ldt_info
.flags
>> 6) & 1;
5845 lm
= (ldt_info
.flags
>> 7) & 1;
5848 if (contents
== 3) {
5849 if (seg_not_present
== 0)
5850 return -TARGET_EINVAL
;
5853 /* NOTE: same code as Linux kernel */
5854 /* Allow LDTs to be cleared by the user. */
5855 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5856 if ((contents
== 0 &&
5857 read_exec_only
== 1 &&
5859 limit_in_pages
== 0 &&
5860 seg_not_present
== 1 &&
5868 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5869 (ldt_info
.limit
& 0x0ffff);
5870 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5871 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5872 (ldt_info
.limit
& 0xf0000) |
5873 ((read_exec_only
^ 1) << 9) |
5875 ((seg_not_present
^ 1) << 15) |
5877 (limit_in_pages
<< 23) |
5882 /* Install the new entry ... */
5884 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5885 lp
[0] = tswap32(entry_1
);
5886 lp
[1] = tswap32(entry_2
);
5890 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5892 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5893 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5894 uint32_t base_addr
, limit
, flags
;
5895 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5896 int seg_not_present
, useable
, lm
;
5897 uint32_t *lp
, entry_1
, entry_2
;
5899 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5900 if (!target_ldt_info
)
5901 return -TARGET_EFAULT
;
5902 idx
= tswap32(target_ldt_info
->entry_number
);
5903 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5904 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5905 unlock_user_struct(target_ldt_info
, ptr
, 1);
5906 return -TARGET_EINVAL
;
5908 lp
= (uint32_t *)(gdt_table
+ idx
);
5909 entry_1
= tswap32(lp
[0]);
5910 entry_2
= tswap32(lp
[1]);
5912 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5913 contents
= (entry_2
>> 10) & 3;
5914 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5915 seg_32bit
= (entry_2
>> 22) & 1;
5916 limit_in_pages
= (entry_2
>> 23) & 1;
5917 useable
= (entry_2
>> 20) & 1;
5921 lm
= (entry_2
>> 21) & 1;
5923 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5924 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5925 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5926 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5927 base_addr
= (entry_1
>> 16) |
5928 (entry_2
& 0xff000000) |
5929 ((entry_2
& 0xff) << 16);
5930 target_ldt_info
->base_addr
= tswapal(base_addr
);
5931 target_ldt_info
->limit
= tswap32(limit
);
5932 target_ldt_info
->flags
= tswap32(flags
);
5933 unlock_user_struct(target_ldt_info
, ptr
, 1);
5937 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5939 return -TARGET_ENOSYS
;
5942 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5949 case TARGET_ARCH_SET_GS
:
5950 case TARGET_ARCH_SET_FS
:
5951 if (code
== TARGET_ARCH_SET_GS
)
5955 cpu_x86_load_seg(env
, idx
, 0);
5956 env
->segs
[idx
].base
= addr
;
5958 case TARGET_ARCH_GET_GS
:
5959 case TARGET_ARCH_GET_FS
:
5960 if (code
== TARGET_ARCH_GET_GS
)
5964 val
= env
->segs
[idx
].base
;
5965 if (put_user(val
, addr
, abi_ulong
))
5966 ret
= -TARGET_EFAULT
;
5969 ret
= -TARGET_EINVAL
;
5974 #endif /* defined(TARGET_ABI32 */
5976 #endif /* defined(TARGET_I386) */
5978 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation in do_fork() so that setup of a new guest
 * thread (CPU copy, TLS init) appears atomic to the child. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5984 pthread_mutex_t mutex
;
5985 pthread_cond_t cond
;
5988 abi_ulong child_tidptr
;
5989 abi_ulong parent_tidptr
;
5993 static void *clone_func(void *arg
)
5995 new_thread_info
*info
= arg
;
6000 rcu_register_thread();
6001 tcg_register_thread();
6005 ts
= (TaskState
*)cpu
->opaque
;
6006 info
->tid
= sys_gettid();
6008 if (info
->child_tidptr
)
6009 put_user_u32(info
->tid
, info
->child_tidptr
);
6010 if (info
->parent_tidptr
)
6011 put_user_u32(info
->tid
, info
->parent_tidptr
);
6012 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6013 /* Enable signals. */
6014 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6015 /* Signal to the parent that we're ready. */
6016 pthread_mutex_lock(&info
->mutex
);
6017 pthread_cond_broadcast(&info
->cond
);
6018 pthread_mutex_unlock(&info
->mutex
);
6019 /* Wait until the parent has finished initializing the tls state. */
6020 pthread_mutex_lock(&clone_lock
);
6021 pthread_mutex_unlock(&clone_lock
);
6027 /* do_fork() Must return host values and target errnos (unlike most
6028 do_*() functions). */
6029 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6030 abi_ulong parent_tidptr
, target_ulong newtls
,
6031 abi_ulong child_tidptr
)
6033 CPUState
*cpu
= env_cpu(env
);
6037 CPUArchState
*new_env
;
6040 flags
&= ~CLONE_IGNORED_FLAGS
;
6042 /* Emulate vfork() with fork() */
6043 if (flags
& CLONE_VFORK
)
6044 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6046 if (flags
& CLONE_VM
) {
6047 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6048 new_thread_info info
;
6049 pthread_attr_t attr
;
6051 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6052 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6053 return -TARGET_EINVAL
;
6056 ts
= g_new0(TaskState
, 1);
6057 init_task_state(ts
);
6059 /* Grab a mutex so that thread setup appears atomic. */
6060 pthread_mutex_lock(&clone_lock
);
6062 /* we create a new CPU instance. */
6063 new_env
= cpu_copy(env
);
6064 /* Init regs that differ from the parent. */
6065 cpu_clone_regs_child(new_env
, newsp
, flags
);
6066 cpu_clone_regs_parent(env
, flags
);
6067 new_cpu
= env_cpu(new_env
);
6068 new_cpu
->opaque
= ts
;
6069 ts
->bprm
= parent_ts
->bprm
;
6070 ts
->info
= parent_ts
->info
;
6071 ts
->signal_mask
= parent_ts
->signal_mask
;
6073 if (flags
& CLONE_CHILD_CLEARTID
) {
6074 ts
->child_tidptr
= child_tidptr
;
6077 if (flags
& CLONE_SETTLS
) {
6078 cpu_set_tls (new_env
, newtls
);
6081 memset(&info
, 0, sizeof(info
));
6082 pthread_mutex_init(&info
.mutex
, NULL
);
6083 pthread_mutex_lock(&info
.mutex
);
6084 pthread_cond_init(&info
.cond
, NULL
);
6086 if (flags
& CLONE_CHILD_SETTID
) {
6087 info
.child_tidptr
= child_tidptr
;
6089 if (flags
& CLONE_PARENT_SETTID
) {
6090 info
.parent_tidptr
= parent_tidptr
;
6093 ret
= pthread_attr_init(&attr
);
6094 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6095 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6096 /* It is not safe to deliver signals until the child has finished
6097 initializing, so temporarily block all signals. */
6098 sigfillset(&sigmask
);
6099 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6100 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6102 /* If this is our first additional thread, we need to ensure we
6103 * generate code for parallel execution and flush old translations.
6105 if (!parallel_cpus
) {
6106 parallel_cpus
= true;
6110 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6111 /* TODO: Free new CPU state if thread creation failed. */
6113 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6114 pthread_attr_destroy(&attr
);
6116 /* Wait for the child to initialize. */
6117 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6122 pthread_mutex_unlock(&info
.mutex
);
6123 pthread_cond_destroy(&info
.cond
);
6124 pthread_mutex_destroy(&info
.mutex
);
6125 pthread_mutex_unlock(&clone_lock
);
6127 /* if no CLONE_VM, we consider it is a fork */
6128 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6129 return -TARGET_EINVAL
;
6132 /* We can't support custom termination signals */
6133 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6134 return -TARGET_EINVAL
;
6137 if (block_signals()) {
6138 return -TARGET_ERESTARTSYS
;
6144 /* Child Process. */
6145 cpu_clone_regs_child(env
, newsp
, flags
);
6147 /* There is a race condition here. The parent process could
6148 theoretically read the TID in the child process before the child
6149 tid is set. This would require using either ptrace
6150 (not implemented) or having *_tidptr to point at a shared memory
6151 mapping. We can't repeat the spinlock hack used above because
6152 the child process gets its own copy of the lock. */
6153 if (flags
& CLONE_CHILD_SETTID
)
6154 put_user_u32(sys_gettid(), child_tidptr
);
6155 if (flags
& CLONE_PARENT_SETTID
)
6156 put_user_u32(sys_gettid(), parent_tidptr
);
6157 ts
= (TaskState
*)cpu
->opaque
;
6158 if (flags
& CLONE_SETTLS
)
6159 cpu_set_tls (env
, newtls
);
6160 if (flags
& CLONE_CHILD_CLEARTID
)
6161 ts
->child_tidptr
= child_tidptr
;
6163 cpu_clone_regs_parent(env
, flags
);
6170 /* warning : doesn't handle linux specific flags... */
6171 static int target_to_host_fcntl_cmd(int cmd
)
6176 case TARGET_F_DUPFD
:
6177 case TARGET_F_GETFD
:
6178 case TARGET_F_SETFD
:
6179 case TARGET_F_GETFL
:
6180 case TARGET_F_SETFL
:
6181 case TARGET_F_OFD_GETLK
:
6182 case TARGET_F_OFD_SETLK
:
6183 case TARGET_F_OFD_SETLKW
:
6186 case TARGET_F_GETLK
:
6189 case TARGET_F_SETLK
:
6192 case TARGET_F_SETLKW
:
6195 case TARGET_F_GETOWN
:
6198 case TARGET_F_SETOWN
:
6201 case TARGET_F_GETSIG
:
6204 case TARGET_F_SETSIG
:
6207 #if TARGET_ABI_BITS == 32
6208 case TARGET_F_GETLK64
:
6211 case TARGET_F_SETLK64
:
6214 case TARGET_F_SETLKW64
:
6218 case TARGET_F_SETLEASE
:
6221 case TARGET_F_GETLEASE
:
6224 #ifdef F_DUPFD_CLOEXEC
6225 case TARGET_F_DUPFD_CLOEXEC
:
6226 ret
= F_DUPFD_CLOEXEC
;
6229 case TARGET_F_NOTIFY
:
6233 case TARGET_F_GETOWN_EX
:
6238 case TARGET_F_SETOWN_EX
:
6243 case TARGET_F_SETPIPE_SZ
:
6246 case TARGET_F_GETPIPE_SZ
:
6251 ret
= -TARGET_EINVAL
;
6255 #if defined(__powerpc64__)
6256 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6257 * is not supported by kernel. The glibc fcntl call actually adjusts
6258 * them to 5, 6 and 7 before making the syscall(). Since we make the
6259 * syscall directly, adjust to what is supported by the kernel.
6261 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6262 ret
-= F_GETLK64
- 5;
6269 #define FLOCK_TRANSTBL \
6271 TRANSTBL_CONVERT(F_RDLCK); \
6272 TRANSTBL_CONVERT(F_WRLCK); \
6273 TRANSTBL_CONVERT(F_UNLCK); \
6274 TRANSTBL_CONVERT(F_EXLCK); \
6275 TRANSTBL_CONVERT(F_SHLCK); \
6278 static int target_to_host_flock(int type
)
6280 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6282 #undef TRANSTBL_CONVERT
6283 return -TARGET_EINVAL
;
6286 static int host_to_target_flock(int type
)
6288 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6290 #undef TRANSTBL_CONVERT
6291 /* if we don't know how to convert the value coming
6292 * from the host we copy to the target field as-is
6297 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6298 abi_ulong target_flock_addr
)
6300 struct target_flock
*target_fl
;
6303 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6304 return -TARGET_EFAULT
;
6307 __get_user(l_type
, &target_fl
->l_type
);
6308 l_type
= target_to_host_flock(l_type
);
6312 fl
->l_type
= l_type
;
6313 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6314 __get_user(fl
->l_start
, &target_fl
->l_start
);
6315 __get_user(fl
->l_len
, &target_fl
->l_len
);
6316 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6317 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6321 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6322 const struct flock64
*fl
)
6324 struct target_flock
*target_fl
;
6327 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6328 return -TARGET_EFAULT
;
6331 l_type
= host_to_target_flock(fl
->l_type
);
6332 __put_user(l_type
, &target_fl
->l_type
);
6333 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6334 __put_user(fl
->l_start
, &target_fl
->l_start
);
6335 __put_user(fl
->l_len
, &target_fl
->l_len
);
6336 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6337 unlock_user_struct(target_fl
, target_flock_addr
, 1);
/* Function-pointer types for the flock64 copy-in/copy-out helper pair, so
 * callers (e.g. the ARM OABI path) can select the right converters at
 * run time. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM OABI uses a differently-packed flock64 layout; these two helpers
 * mirror copy_from/to_user_flock64 for that layout. */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        /* Fix: release the lock before bailing out, otherwise the bounce
         * buffer leaks when DEBUG_REMAP-style remapping is in use. */
        unlock_user_struct(target_fl, target_flock_addr, 0);
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
6390 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6391 abi_ulong target_flock_addr
)
6393 struct target_flock64
*target_fl
;
6396 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6397 return -TARGET_EFAULT
;
6400 __get_user(l_type
, &target_fl
->l_type
);
6401 l_type
= target_to_host_flock(l_type
);
6405 fl
->l_type
= l_type
;
6406 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6407 __get_user(fl
->l_start
, &target_fl
->l_start
);
6408 __get_user(fl
->l_len
, &target_fl
->l_len
);
6409 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6410 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6414 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6415 const struct flock64
*fl
)
6417 struct target_flock64
*target_fl
;
6420 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6421 return -TARGET_EFAULT
;
6424 l_type
= host_to_target_flock(fl
->l_type
);
6425 __put_user(l_type
, &target_fl
->l_type
);
6426 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6427 __put_user(fl
->l_start
, &target_fl
->l_start
);
6428 __put_user(fl
->l_len
, &target_fl
->l_len
);
6429 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6430 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6434 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6436 struct flock64 fl64
;
6438 struct f_owner_ex fox
;
6439 struct target_f_owner_ex
*target_fox
;
6442 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6444 if (host_cmd
== -TARGET_EINVAL
)
6448 case TARGET_F_GETLK
:
6449 ret
= copy_from_user_flock(&fl64
, arg
);
6453 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6455 ret
= copy_to_user_flock(arg
, &fl64
);
6459 case TARGET_F_SETLK
:
6460 case TARGET_F_SETLKW
:
6461 ret
= copy_from_user_flock(&fl64
, arg
);
6465 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6468 case TARGET_F_GETLK64
:
6469 case TARGET_F_OFD_GETLK
:
6470 ret
= copy_from_user_flock64(&fl64
, arg
);
6474 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6476 ret
= copy_to_user_flock64(arg
, &fl64
);
6479 case TARGET_F_SETLK64
:
6480 case TARGET_F_SETLKW64
:
6481 case TARGET_F_OFD_SETLK
:
6482 case TARGET_F_OFD_SETLKW
:
6483 ret
= copy_from_user_flock64(&fl64
, arg
);
6487 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6490 case TARGET_F_GETFL
:
6491 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6493 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6497 case TARGET_F_SETFL
:
6498 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6499 target_to_host_bitmask(arg
,
6504 case TARGET_F_GETOWN_EX
:
6505 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6507 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6508 return -TARGET_EFAULT
;
6509 target_fox
->type
= tswap32(fox
.type
);
6510 target_fox
->pid
= tswap32(fox
.pid
);
6511 unlock_user_struct(target_fox
, arg
, 1);
6517 case TARGET_F_SETOWN_EX
:
6518 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6519 return -TARGET_EFAULT
;
6520 fox
.type
= tswap32(target_fox
->type
);
6521 fox
.pid
= tswap32(target_fox
->pid
);
6522 unlock_user_struct(target_fox
, arg
, 0);
6523 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6527 case TARGET_F_SETOWN
:
6528 case TARGET_F_GETOWN
:
6529 case TARGET_F_SETSIG
:
6530 case TARGET_F_GETSIG
:
6531 case TARGET_F_SETLEASE
:
6532 case TARGET_F_GETLEASE
:
6533 case TARGET_F_SETPIPE_SZ
:
6534 case TARGET_F_GETPIPE_SZ
:
6535 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6539 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
/* UID/GID width helpers.  With USE_UID16 the guest ABI carries 16-bit
 * IDs, so values must be clamped/sign-extended across the boundary;
 * otherwise these are identity functions.
 * NOTE(review): the opening #ifdef USE_UID16 was lost in extraction and
 * is restored here to match the visible #endif below. */
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    /* Clamp 32-bit host uids that do not fit in 16 bits to the
     * traditional "overflow" id 65534. */
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    /* 16-bit -1 means "unchanged"; preserve that sentinel. */
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread wrappers generated by the _syscall* macros; they bypass
 * glibc's setxid broadcast deliberately (see comment above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6645 void syscall_init(void)
6648 const argtype
*arg_type
;
6652 thunk_init(STRUCT_MAX
);
6654 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6655 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6656 #include "syscall_types.h"
6658 #undef STRUCT_SPECIAL
6660 /* Build target_to_host_errno_table[] table from
6661 * host_to_target_errno_table[]. */
6662 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6663 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6666 /* we patch the ioctl size if necessary. We rely on the fact that
6667 no ioctl has all the bits at '1' in the size field */
6669 while (ie
->target_cmd
!= 0) {
6670 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6671 TARGET_IOC_SIZEMASK
) {
6672 arg_type
= ie
->arg_type
;
6673 if (arg_type
[0] != TYPE_PTR
) {
6674 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6679 size
= thunk_type_size(arg_type
, 0);
6680 ie
->target_cmd
= (ie
->target_cmd
&
6681 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6682 (size
<< TARGET_IOC_SIZESHIFT
);
6685 /* automatic consistency check if same arch */
6686 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6687 (defined(__x86_64__) && defined(TARGET_X86_64))
6688 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6689 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6690 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length arrives split across two registers; on
 * ABIs that align register pairs the pair starts one slot later. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64 above,
 * but the first argument is the file descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Copy a guest struct itimerspec into a host one, byte-swapping each
 * field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
        tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
        tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    /* Fix: this is a read-only lock, so unlock with copy-back 0 (it was 1,
     * which pointlessly wrote the untouched buffer back to the guest). */
    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/* Copy a host struct itimerspec out to the guest, byte-swapping each
 * field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    /* Fix: unlock with copy-back 1; the original passed 0, which discards
     * the converted values whenever lock_user_struct used a bounce buffer
     * (e.g. DEBUG_REMAP builds), so the guest never saw the result. */
    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}
#endif
6771 #if defined(TARGET_NR_adjtimex) || \
6772 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6773 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6774 abi_long target_addr
)
6776 struct target_timex
*target_tx
;
6778 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6779 return -TARGET_EFAULT
;
6782 __get_user(host_tx
->modes
, &target_tx
->modes
);
6783 __get_user(host_tx
->offset
, &target_tx
->offset
);
6784 __get_user(host_tx
->freq
, &target_tx
->freq
);
6785 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6786 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6787 __get_user(host_tx
->status
, &target_tx
->status
);
6788 __get_user(host_tx
->constant
, &target_tx
->constant
);
6789 __get_user(host_tx
->precision
, &target_tx
->precision
);
6790 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6791 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6792 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6793 __get_user(host_tx
->tick
, &target_tx
->tick
);
6794 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6795 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6796 __get_user(host_tx
->shift
, &target_tx
->shift
);
6797 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6798 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6799 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6800 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6801 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6802 __get_user(host_tx
->tai
, &target_tx
->tai
);
6804 unlock_user_struct(target_tx
, target_addr
, 0);
6808 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6809 struct timex
*host_tx
)
6811 struct target_timex
*target_tx
;
6813 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6814 return -TARGET_EFAULT
;
6817 __put_user(host_tx
->modes
, &target_tx
->modes
);
6818 __put_user(host_tx
->offset
, &target_tx
->offset
);
6819 __put_user(host_tx
->freq
, &target_tx
->freq
);
6820 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6821 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6822 __put_user(host_tx
->status
, &target_tx
->status
);
6823 __put_user(host_tx
->constant
, &target_tx
->constant
);
6824 __put_user(host_tx
->precision
, &target_tx
->precision
);
6825 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6826 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6827 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6828 __put_user(host_tx
->tick
, &target_tx
->tick
);
6829 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6830 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6831 __put_user(host_tx
->shift
, &target_tx
->shift
);
6832 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6833 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6834 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6835 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6836 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6837 __put_user(host_tx
->tai
, &target_tx
->tai
);
6839 unlock_user_struct(target_tx
, target_addr
, 1);
6844 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6845 abi_ulong target_addr
)
6847 struct target_sigevent
*target_sevp
;
6849 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6850 return -TARGET_EFAULT
;
6853 /* This union is awkward on 64 bit systems because it has a 32 bit
6854 * integer and a pointer in it; we follow the conversion approach
6855 * used for handling sigval types in signal.c so the guest should get
6856 * the correct value back even if we did a 64 bit byteswap and it's
6857 * using the 32 bit integer.
6859 host_sevp
->sigev_value
.sival_ptr
=
6860 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6861 host_sevp
->sigev_signo
=
6862 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6863 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6864 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6866 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits to the host's MCL_* encoding. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
/* Copy a host struct stat out to the guest's stat64 layout, handling the
 * ARM EABI-vs-OABI special case.  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): lines lost in extraction restored from upstream QEMU. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/* Copy a (host-endian) struct target_statx out to the guest, swapping
 * every field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
/* Dispatch a raw futex syscall, picking the time64 variant when the host
 * is 32-bit but has a 64-bit time_t futex syscall available. */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7028 static int do_safe_futex(int *uaddr
, int op
, int val
,
7029 const struct timespec
*timeout
, int *uaddr2
,
7032 #if HOST_LONG_BITS == 64
7033 #if defined(__NR_futex)
7034 /* always a 64-bit time_t, it doesn't define _time64 version */
7035 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7037 #else /* HOST_LONG_BITS == 64 */
7038 #if defined(__NR_futex_time64)
7039 if (sizeof(timeout
->tv_sec
) == 8) {
7040 /* _time64 function on 32bit arch */
7041 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7045 #if defined(__NR_futex)
7046 /* old function on 32bit arch */
7047 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7049 #endif /* HOST_LONG_BITS == 64 */
7050 return -TARGET_ENOSYS
;
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
/* Same as do_futex() above, but the guest timeout uses the 64-bit
 * timespec layout (futex_time64 syscall on 32-bit guests). */
static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec64(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): size the guest handle buffer from its
 * handle_bytes field, call the host, then copy the opaque handle and the
 * mount id back out.  Returns host result or negative target errno. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    int mid = 0;
    abi_long ret;
    char *name;
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest handle into host
 * memory (fixing endianness of the header fields) and open it. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7240 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Emulate signalfd4(2): create a host signalfd from a guest sigset.
 *
 * @fd:    existing signalfd to modify, or -1 to create a new one
 * @mask:  guest address of a target_sigset_t
 * @flags: guest TARGET_O_NONBLOCK / TARGET_O_CLOEXEC bits
 *
 * Returns the signalfd fd or a -TARGET_* errno.  On success the fd is
 * registered with target_signalfd_trans so reads through it get their
 * signalfd_siginfo byte-swapped for the guest.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only the two flag bits signalfd4 defines are accepted. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Hook the fd so guest reads see target-endian siginfo. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    /* Terminated by signal: low 7 bits hold the signal number. */
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    /* Stopped: the stop signal lives in bits 8-15. */
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* Normal exit (or anything else): pass through unchanged. */
    return status;
}
/*
 * Fill @fd with the contents of a faked /proc/self/cmdline: the guest's
 * original argv strings, each followed by its NUL terminator, taken from
 * the binprm saved at exec time.
 *
 * Returns 0 on success, -1 if any write falls short (errno from write).
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 so the terminating NUL separates the arguments, as the real
         * /proc/<pid>/cmdline does. */
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Fill @fd with a faked /proc/self/maps for the guest: walk the host's
 * own mappings (read_self_maps), keep only the ranges that correspond to
 * guest address space (h2g_valid), and print them with guest addresses in
 * the kernel's maps format.
 *
 * Returns 0.  Writes use dprintf directly on @fd.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end of the range to the last guest-mappable
             * address when the host mapping extends beyond it. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;

            /* Skip host mappings the guest page tables don't know about. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            /* Label the guest stack like the kernel would. */
            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            e->is_read ? 'r' : '-',
                            e->is_write ? 'w' : '-',
                            e->is_exec ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad to column 73 so pathnames line up as in the kernel
                 * output. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE,
                    TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
/*
 * Fill @fd with a faked /proc/self/stat: 44 space-separated fields, of
 * which only pid (field 0), comm (field 1) and startstack (field 27) carry
 * real values; every other field is written as a literal 0.
 *
 * Returns 0 on success, -1 on a short write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name: basename of the guest's argv[0], truncated to 15
             * chars and parenthesized like the kernel's comm field */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Fill @fd with a faked /proc/self/auxv: copy the auxiliary vector that
 * was saved on the guest stack at exec time into the file, then rewind
 * the fd so the first guest read starts at offset 0.
 *
 * Returns 0 (write failures silently truncate the output).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                /* stop on error or EOF-like short write; output is
                 * simply truncated */
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so unlock_user
         * sees the remaining (possibly zero) length, not the original —
         * looks intentional since nothing was modified; confirm. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Decide whether @filename refers to this process's own proc entry
 * @entry, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 *
 * Returns 1 on a match, 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    /* Anything outside /proc/ can never be "myself". */
    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, self_prefix, strlen(self_prefix)) == 0) {
        /* "/proc/self/..." always means us. */
        filename += strlen(self_prefix);
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric component only counts if it is exactly our pid. */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    /* The remainder must match the requested entry exactly. */
    return strcmp(filename, entry) == 0;
}
7450 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7451 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/*
 * Comparator for faked proc entries registered by absolute path:
 * an exact string match, nothing more.
 *
 * Returns 1 when @filename equals @entry, 0 otherwise.
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7458 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Fill @fd with a faked /proc/net/route for guests whose endianness
 * differs from the host: copy the host file line by line, byte-swapping
 * the destination, gateway and netmask fields (the kernel prints those
 * three in host byte order as raw hex).
 *
 * Returns 0 on success, -1 if the host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        /* Skip malformed lines rather than emitting garbage. */
        if (fields != 11) {
            continue;
        }
        /* Only the three address fields are endian-sensitive. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
7503 #if defined(TARGET_SPARC)
/*
 * Fake /proc/cpuinfo for SPARC guests: a single line naming a generic
 * sun4u machine.  @cpu_env is unused; returns 0 per the fake_open
 * fill-callback contract.
 */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char contents[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", contents);
    return 0;
}
7511 #if defined(TARGET_HPPA)
/*
 * Fake /proc/cpuinfo for HPPA guests: advertise a fixed PA7300LC
 * (B160L "Merlin") machine.  @cpu_env is unused; returns 0 per the
 * fake_open fill-callback contract.
 */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char *const lines[] = {
        "cpu family\t: PA-RISC 1.1e\n",
        "cpu\t\t: PA7300LC (PCX-L2)\n",
        "capabilities\t: os32\n",
        "model\t\t: 9000/778/B160L\n",
        "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n",
    };
    size_t i;

    for (i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
        dprintf(fd, "%s", lines[i]);
    }
    return 0;
}
7523 #if defined(TARGET_M68K)
/*
 * Fake /proc/hardware for m68k guests: report the emulated machine
 * model.  @cpu_env is unused; returns 0 per the fake_open fill-callback
 * contract.
 */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "%s", "Model:\t\tqemu-m68k\n");
    return 0;
}
/*
 * Backend for the guest's open/openat: intercept paths the guest must
 * not see the host's version of (/proc/self/{maps,stat,auxv,cmdline},
 * /proc/self/exe, and a few target-specific /proc files) and fake their
 * contents; everything else is forwarded to the host via safe_openat.
 *
 * @cpu_env:  CPU env, passed through to the fake-content fill callbacks
 * @dirfd:    directory fd for relative paths
 * @pathname: already-locked host copy of the guest path string
 * @flags:    host open flags (caller has already translated them)
 * @mode:     creation mode
 *
 * Returns an fd (faked entries return an unlinked temp file prefilled
 * with the fake contents and rewound to offset 0) or a negative errno
 * from the host call / fill callback.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname,
                     int flags, mode_t mode)
{
    const char *filename;
    /* One faked file: its name, a callback that writes the contents into
     * an fd, and the comparator deciding whether a path refers to it. */
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: reuse the fd the guest binary was loaded from if
     * we still have it, otherwise open the recorded executable path. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir) {
            tmpdir = "/tmp";
        }
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, nothing else
         * should see it in the filesystem. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
/* POSIX timer IDs handed to the guest are the internal table index OR'd
 * with this magic tag, so stale/garbage IDs can be rejected cheaply. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs that do not carry our magic in the high half. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* Index must fall inside the static timer table. */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
/*
 * Copy a guest CPU-affinity bitmask into a host cpu_set_t-style array.
 *
 * @host_mask:   destination array of host unsigned longs (zeroed first)
 * @host_size:   size of @host_mask in bytes; must be >= @target_size
 * @target_addr: guest address of the source mask
 * @target_size: size of the guest mask in bytes
 *
 * The two sides may differ in word size and endianness, so each set bit
 * is transplanted individually rather than memcpy'd.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer is bad.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        /* __get_user byte-swaps the guest word as needed. */
        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/*
 * Copy a host CPU-affinity bitmask out to a guest buffer — the inverse
 * of target_to_host_cpu_mask.
 *
 * @host_mask:   source array of host unsigned longs
 * @host_size:   size of @host_mask in bytes; must be >= @target_size
 * @target_addr: guest address of the destination mask
 * @target_size: size of the guest mask in bytes (bits beyond it are
 *               silently dropped)
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer is bad.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        /* Gather one guest word's worth of bits from the host mask. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        /* __put_user byte-swaps the word for the guest as needed. */
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7685 /* This is an internal helper for do_syscall so that it is easier
7686 * to have a single return point, so that actions, such as logging
7687 * of syscall results, can be performed.
7688 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7690 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7691 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7692 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7695 CPUState
*cpu
= env_cpu(cpu_env
);
7697 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7698 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7699 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7700 || defined(TARGET_NR_statx)
7703 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7704 || defined(TARGET_NR_fstatfs)
7710 case TARGET_NR_exit
:
7711 /* In old applications this may be used to implement _exit(2).
7712 However in threaded applictions it is used for thread termination,
7713 and _exit_group is used for application termination.
7714 Do thread termination if we have more then one thread. */
7716 if (block_signals()) {
7717 return -TARGET_ERESTARTSYS
;
7720 pthread_mutex_lock(&clone_lock
);
7722 if (CPU_NEXT(first_cpu
)) {
7723 TaskState
*ts
= cpu
->opaque
;
7725 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
7726 object_unref(OBJECT(cpu
));
7728 * At this point the CPU should be unrealized and removed
7729 * from cpu lists. We can clean-up the rest of the thread
7730 * data without the lock held.
7733 pthread_mutex_unlock(&clone_lock
);
7735 if (ts
->child_tidptr
) {
7736 put_user_u32(0, ts
->child_tidptr
);
7737 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7742 rcu_unregister_thread();
7746 pthread_mutex_unlock(&clone_lock
);
7747 preexit_cleanup(cpu_env
, arg1
);
7749 return 0; /* avoid warning */
7750 case TARGET_NR_read
:
7751 if (arg2
== 0 && arg3
== 0) {
7752 return get_errno(safe_read(arg1
, 0, 0));
7754 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7755 return -TARGET_EFAULT
;
7756 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7758 fd_trans_host_to_target_data(arg1
)) {
7759 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7761 unlock_user(p
, arg2
, ret
);
7764 case TARGET_NR_write
:
7765 if (arg2
== 0 && arg3
== 0) {
7766 return get_errno(safe_write(arg1
, 0, 0));
7768 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7769 return -TARGET_EFAULT
;
7770 if (fd_trans_target_to_host_data(arg1
)) {
7771 void *copy
= g_malloc(arg3
);
7772 memcpy(copy
, p
, arg3
);
7773 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7775 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7779 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7781 unlock_user(p
, arg2
, 0);
7784 #ifdef TARGET_NR_open
7785 case TARGET_NR_open
:
7786 if (!(p
= lock_user_string(arg1
)))
7787 return -TARGET_EFAULT
;
7788 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7789 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7791 fd_trans_unregister(ret
);
7792 unlock_user(p
, arg1
, 0);
7795 case TARGET_NR_openat
:
7796 if (!(p
= lock_user_string(arg2
)))
7797 return -TARGET_EFAULT
;
7798 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7799 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7801 fd_trans_unregister(ret
);
7802 unlock_user(p
, arg2
, 0);
7804 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7805 case TARGET_NR_name_to_handle_at
:
7806 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7809 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7810 case TARGET_NR_open_by_handle_at
:
7811 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7812 fd_trans_unregister(ret
);
7815 case TARGET_NR_close
:
7816 fd_trans_unregister(arg1
);
7817 return get_errno(close(arg1
));
7820 return do_brk(arg1
);
7821 #ifdef TARGET_NR_fork
7822 case TARGET_NR_fork
:
7823 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7825 #ifdef TARGET_NR_waitpid
7826 case TARGET_NR_waitpid
:
7829 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7830 if (!is_error(ret
) && arg2
&& ret
7831 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7832 return -TARGET_EFAULT
;
7836 #ifdef TARGET_NR_waitid
7837 case TARGET_NR_waitid
:
7841 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7842 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7843 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7844 return -TARGET_EFAULT
;
7845 host_to_target_siginfo(p
, &info
);
7846 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7851 #ifdef TARGET_NR_creat /* not on alpha */
7852 case TARGET_NR_creat
:
7853 if (!(p
= lock_user_string(arg1
)))
7854 return -TARGET_EFAULT
;
7855 ret
= get_errno(creat(p
, arg2
));
7856 fd_trans_unregister(ret
);
7857 unlock_user(p
, arg1
, 0);
7860 #ifdef TARGET_NR_link
7861 case TARGET_NR_link
:
7864 p
= lock_user_string(arg1
);
7865 p2
= lock_user_string(arg2
);
7867 ret
= -TARGET_EFAULT
;
7869 ret
= get_errno(link(p
, p2
));
7870 unlock_user(p2
, arg2
, 0);
7871 unlock_user(p
, arg1
, 0);
7875 #if defined(TARGET_NR_linkat)
7876 case TARGET_NR_linkat
:
7880 return -TARGET_EFAULT
;
7881 p
= lock_user_string(arg2
);
7882 p2
= lock_user_string(arg4
);
7884 ret
= -TARGET_EFAULT
;
7886 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7887 unlock_user(p
, arg2
, 0);
7888 unlock_user(p2
, arg4
, 0);
7892 #ifdef TARGET_NR_unlink
7893 case TARGET_NR_unlink
:
7894 if (!(p
= lock_user_string(arg1
)))
7895 return -TARGET_EFAULT
;
7896 ret
= get_errno(unlink(p
));
7897 unlock_user(p
, arg1
, 0);
7900 #if defined(TARGET_NR_unlinkat)
7901 case TARGET_NR_unlinkat
:
7902 if (!(p
= lock_user_string(arg2
)))
7903 return -TARGET_EFAULT
;
7904 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7905 unlock_user(p
, arg2
, 0);
7908 case TARGET_NR_execve
:
7910 char **argp
, **envp
;
7913 abi_ulong guest_argp
;
7914 abi_ulong guest_envp
;
7921 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7922 if (get_user_ual(addr
, gp
))
7923 return -TARGET_EFAULT
;
7930 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7931 if (get_user_ual(addr
, gp
))
7932 return -TARGET_EFAULT
;
7938 argp
= g_new0(char *, argc
+ 1);
7939 envp
= g_new0(char *, envc
+ 1);
7941 for (gp
= guest_argp
, q
= argp
; gp
;
7942 gp
+= sizeof(abi_ulong
), q
++) {
7943 if (get_user_ual(addr
, gp
))
7947 if (!(*q
= lock_user_string(addr
)))
7949 total_size
+= strlen(*q
) + 1;
7953 for (gp
= guest_envp
, q
= envp
; gp
;
7954 gp
+= sizeof(abi_ulong
), q
++) {
7955 if (get_user_ual(addr
, gp
))
7959 if (!(*q
= lock_user_string(addr
)))
7961 total_size
+= strlen(*q
) + 1;
7965 if (!(p
= lock_user_string(arg1
)))
7967 /* Although execve() is not an interruptible syscall it is
7968 * a special case where we must use the safe_syscall wrapper:
7969 * if we allow a signal to happen before we make the host
7970 * syscall then we will 'lose' it, because at the point of
7971 * execve the process leaves QEMU's control. So we use the
7972 * safe syscall wrapper to ensure that we either take the
7973 * signal as a guest signal, or else it does not happen
7974 * before the execve completes and makes it the other
7975 * program's problem.
7977 ret
= get_errno(safe_execve(p
, argp
, envp
));
7978 unlock_user(p
, arg1
, 0);
7983 ret
= -TARGET_EFAULT
;
7986 for (gp
= guest_argp
, q
= argp
; *q
;
7987 gp
+= sizeof(abi_ulong
), q
++) {
7988 if (get_user_ual(addr
, gp
)
7991 unlock_user(*q
, addr
, 0);
7993 for (gp
= guest_envp
, q
= envp
; *q
;
7994 gp
+= sizeof(abi_ulong
), q
++) {
7995 if (get_user_ual(addr
, gp
)
7998 unlock_user(*q
, addr
, 0);
8005 case TARGET_NR_chdir
:
8006 if (!(p
= lock_user_string(arg1
)))
8007 return -TARGET_EFAULT
;
8008 ret
= get_errno(chdir(p
));
8009 unlock_user(p
, arg1
, 0);
8011 #ifdef TARGET_NR_time
8012 case TARGET_NR_time
:
8015 ret
= get_errno(time(&host_time
));
8018 && put_user_sal(host_time
, arg1
))
8019 return -TARGET_EFAULT
;
8023 #ifdef TARGET_NR_mknod
8024 case TARGET_NR_mknod
:
8025 if (!(p
= lock_user_string(arg1
)))
8026 return -TARGET_EFAULT
;
8027 ret
= get_errno(mknod(p
, arg2
, arg3
));
8028 unlock_user(p
, arg1
, 0);
8031 #if defined(TARGET_NR_mknodat)
8032 case TARGET_NR_mknodat
:
8033 if (!(p
= lock_user_string(arg2
)))
8034 return -TARGET_EFAULT
;
8035 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8036 unlock_user(p
, arg2
, 0);
8039 #ifdef TARGET_NR_chmod
8040 case TARGET_NR_chmod
:
8041 if (!(p
= lock_user_string(arg1
)))
8042 return -TARGET_EFAULT
;
8043 ret
= get_errno(chmod(p
, arg2
));
8044 unlock_user(p
, arg1
, 0);
8047 #ifdef TARGET_NR_lseek
8048 case TARGET_NR_lseek
:
8049 return get_errno(lseek(arg1
, arg2
, arg3
));
8051 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8052 /* Alpha specific */
8053 case TARGET_NR_getxpid
:
8054 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8055 return get_errno(getpid());
8057 #ifdef TARGET_NR_getpid
8058 case TARGET_NR_getpid
:
8059 return get_errno(getpid());
8061 case TARGET_NR_mount
:
8063 /* need to look at the data field */
8067 p
= lock_user_string(arg1
);
8069 return -TARGET_EFAULT
;
8075 p2
= lock_user_string(arg2
);
8078 unlock_user(p
, arg1
, 0);
8080 return -TARGET_EFAULT
;
8084 p3
= lock_user_string(arg3
);
8087 unlock_user(p
, arg1
, 0);
8089 unlock_user(p2
, arg2
, 0);
8090 return -TARGET_EFAULT
;
8096 /* FIXME - arg5 should be locked, but it isn't clear how to
8097 * do that since it's not guaranteed to be a NULL-terminated
8101 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8103 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8105 ret
= get_errno(ret
);
8108 unlock_user(p
, arg1
, 0);
8110 unlock_user(p2
, arg2
, 0);
8112 unlock_user(p3
, arg3
, 0);
8116 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8117 #if defined(TARGET_NR_umount)
8118 case TARGET_NR_umount
:
8120 #if defined(TARGET_NR_oldumount)
8121 case TARGET_NR_oldumount
:
8123 if (!(p
= lock_user_string(arg1
)))
8124 return -TARGET_EFAULT
;
8125 ret
= get_errno(umount(p
));
8126 unlock_user(p
, arg1
, 0);
8129 #ifdef TARGET_NR_stime /* not on alpha */
8130 case TARGET_NR_stime
:
8134 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8135 return -TARGET_EFAULT
;
8137 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8140 #ifdef TARGET_NR_alarm /* not on alpha */
8141 case TARGET_NR_alarm
:
8144 #ifdef TARGET_NR_pause /* not on alpha */
8145 case TARGET_NR_pause
:
8146 if (!block_signals()) {
8147 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8149 return -TARGET_EINTR
;
8151 #ifdef TARGET_NR_utime
8152 case TARGET_NR_utime
:
8154 struct utimbuf tbuf
, *host_tbuf
;
8155 struct target_utimbuf
*target_tbuf
;
8157 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8158 return -TARGET_EFAULT
;
8159 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8160 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8161 unlock_user_struct(target_tbuf
, arg2
, 0);
8166 if (!(p
= lock_user_string(arg1
)))
8167 return -TARGET_EFAULT
;
8168 ret
= get_errno(utime(p
, host_tbuf
));
8169 unlock_user(p
, arg1
, 0);
8173 #ifdef TARGET_NR_utimes
8174 case TARGET_NR_utimes
:
8176 struct timeval
*tvp
, tv
[2];
8178 if (copy_from_user_timeval(&tv
[0], arg2
)
8179 || copy_from_user_timeval(&tv
[1],
8180 arg2
+ sizeof(struct target_timeval
)))
8181 return -TARGET_EFAULT
;
8186 if (!(p
= lock_user_string(arg1
)))
8187 return -TARGET_EFAULT
;
8188 ret
= get_errno(utimes(p
, tvp
));
8189 unlock_user(p
, arg1
, 0);
8193 #if defined(TARGET_NR_futimesat)
8194 case TARGET_NR_futimesat
:
8196 struct timeval
*tvp
, tv
[2];
8198 if (copy_from_user_timeval(&tv
[0], arg3
)
8199 || copy_from_user_timeval(&tv
[1],
8200 arg3
+ sizeof(struct target_timeval
)))
8201 return -TARGET_EFAULT
;
8206 if (!(p
= lock_user_string(arg2
))) {
8207 return -TARGET_EFAULT
;
8209 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8210 unlock_user(p
, arg2
, 0);
8214 #ifdef TARGET_NR_access
8215 case TARGET_NR_access
:
8216 if (!(p
= lock_user_string(arg1
))) {
8217 return -TARGET_EFAULT
;
8219 ret
= get_errno(access(path(p
), arg2
));
8220 unlock_user(p
, arg1
, 0);
8223 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8224 case TARGET_NR_faccessat
:
8225 if (!(p
= lock_user_string(arg2
))) {
8226 return -TARGET_EFAULT
;
8228 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8229 unlock_user(p
, arg2
, 0);
8232 #ifdef TARGET_NR_nice /* not on alpha */
8233 case TARGET_NR_nice
:
8234 return get_errno(nice(arg1
));
8236 case TARGET_NR_sync
:
8239 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8240 case TARGET_NR_syncfs
:
8241 return get_errno(syncfs(arg1
));
8243 case TARGET_NR_kill
:
8244 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8245 #ifdef TARGET_NR_rename
8246 case TARGET_NR_rename
:
8249 p
= lock_user_string(arg1
);
8250 p2
= lock_user_string(arg2
);
8252 ret
= -TARGET_EFAULT
;
8254 ret
= get_errno(rename(p
, p2
));
8255 unlock_user(p2
, arg2
, 0);
8256 unlock_user(p
, arg1
, 0);
8260 #if defined(TARGET_NR_renameat)
8261 case TARGET_NR_renameat
:
8264 p
= lock_user_string(arg2
);
8265 p2
= lock_user_string(arg4
);
8267 ret
= -TARGET_EFAULT
;
8269 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8270 unlock_user(p2
, arg4
, 0);
8271 unlock_user(p
, arg2
, 0);
8275 #if defined(TARGET_NR_renameat2)
8276 case TARGET_NR_renameat2
:
8279 p
= lock_user_string(arg2
);
8280 p2
= lock_user_string(arg4
);
8282 ret
= -TARGET_EFAULT
;
8284 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8286 unlock_user(p2
, arg4
, 0);
8287 unlock_user(p
, arg2
, 0);
8291 #ifdef TARGET_NR_mkdir
8292 case TARGET_NR_mkdir
:
8293 if (!(p
= lock_user_string(arg1
)))
8294 return -TARGET_EFAULT
;
8295 ret
= get_errno(mkdir(p
, arg2
));
8296 unlock_user(p
, arg1
, 0);
8299 #if defined(TARGET_NR_mkdirat)
8300 case TARGET_NR_mkdirat
:
8301 if (!(p
= lock_user_string(arg2
)))
8302 return -TARGET_EFAULT
;
8303 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8304 unlock_user(p
, arg2
, 0);
8307 #ifdef TARGET_NR_rmdir
8308 case TARGET_NR_rmdir
:
8309 if (!(p
= lock_user_string(arg1
)))
8310 return -TARGET_EFAULT
;
8311 ret
= get_errno(rmdir(p
));
8312 unlock_user(p
, arg1
, 0);
8316 ret
= get_errno(dup(arg1
));
8318 fd_trans_dup(arg1
, ret
);
8321 #ifdef TARGET_NR_pipe
8322 case TARGET_NR_pipe
:
8323 return do_pipe(cpu_env
, arg1
, 0, 0);
8325 #ifdef TARGET_NR_pipe2
8326 case TARGET_NR_pipe2
:
8327 return do_pipe(cpu_env
, arg1
,
8328 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8330 case TARGET_NR_times
:
8332 struct target_tms
*tmsp
;
8334 ret
= get_errno(times(&tms
));
8336 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8338 return -TARGET_EFAULT
;
8339 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8340 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8341 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8342 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8345 ret
= host_to_target_clock_t(ret
);
8348 case TARGET_NR_acct
:
8350 ret
= get_errno(acct(NULL
));
8352 if (!(p
= lock_user_string(arg1
))) {
8353 return -TARGET_EFAULT
;
8355 ret
= get_errno(acct(path(p
)));
8356 unlock_user(p
, arg1
, 0);
8359 #ifdef TARGET_NR_umount2
8360 case TARGET_NR_umount2
:
8361 if (!(p
= lock_user_string(arg1
)))
8362 return -TARGET_EFAULT
;
8363 ret
= get_errno(umount2(p
, arg2
));
8364 unlock_user(p
, arg1
, 0);
8367 case TARGET_NR_ioctl
:
8368 return do_ioctl(arg1
, arg2
, arg3
);
8369 #ifdef TARGET_NR_fcntl
8370 case TARGET_NR_fcntl
:
8371 return do_fcntl(arg1
, arg2
, arg3
);
8373 case TARGET_NR_setpgid
:
8374 return get_errno(setpgid(arg1
, arg2
));
8375 case TARGET_NR_umask
:
8376 return get_errno(umask(arg1
));
8377 case TARGET_NR_chroot
:
8378 if (!(p
= lock_user_string(arg1
)))
8379 return -TARGET_EFAULT
;
8380 ret
= get_errno(chroot(p
));
8381 unlock_user(p
, arg1
, 0);
8383 #ifdef TARGET_NR_dup2
8384 case TARGET_NR_dup2
:
8385 ret
= get_errno(dup2(arg1
, arg2
));
8387 fd_trans_dup(arg1
, arg2
);
8391 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8392 case TARGET_NR_dup3
:
8396 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8399 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8400 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8402 fd_trans_dup(arg1
, arg2
);
8407 #ifdef TARGET_NR_getppid /* not on alpha */
8408 case TARGET_NR_getppid
:
8409 return get_errno(getppid());
8411 #ifdef TARGET_NR_getpgrp
8412 case TARGET_NR_getpgrp
:
8413 return get_errno(getpgrp());
8415 case TARGET_NR_setsid
:
8416 return get_errno(setsid());
8417 #ifdef TARGET_NR_sigaction
8418 case TARGET_NR_sigaction
:
8420 #if defined(TARGET_ALPHA)
8421 struct target_sigaction act
, oact
, *pact
= 0;
8422 struct target_old_sigaction
*old_act
;
8424 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8425 return -TARGET_EFAULT
;
8426 act
._sa_handler
= old_act
->_sa_handler
;
8427 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8428 act
.sa_flags
= old_act
->sa_flags
;
8429 act
.sa_restorer
= 0;
8430 unlock_user_struct(old_act
, arg2
, 0);
8433 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8434 if (!is_error(ret
) && arg3
) {
8435 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8436 return -TARGET_EFAULT
;
8437 old_act
->_sa_handler
= oact
._sa_handler
;
8438 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8439 old_act
->sa_flags
= oact
.sa_flags
;
8440 unlock_user_struct(old_act
, arg3
, 1);
8442 #elif defined(TARGET_MIPS)
8443 struct target_sigaction act
, oact
, *pact
, *old_act
;
8446 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8447 return -TARGET_EFAULT
;
8448 act
._sa_handler
= old_act
->_sa_handler
;
8449 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8450 act
.sa_flags
= old_act
->sa_flags
;
8451 unlock_user_struct(old_act
, arg2
, 0);
8457 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8459 if (!is_error(ret
) && arg3
) {
8460 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8461 return -TARGET_EFAULT
;
8462 old_act
->_sa_handler
= oact
._sa_handler
;
8463 old_act
->sa_flags
= oact
.sa_flags
;
8464 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8465 old_act
->sa_mask
.sig
[1] = 0;
8466 old_act
->sa_mask
.sig
[2] = 0;
8467 old_act
->sa_mask
.sig
[3] = 0;
8468 unlock_user_struct(old_act
, arg3
, 1);
8471 struct target_old_sigaction
*old_act
;
8472 struct target_sigaction act
, oact
, *pact
;
8474 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8475 return -TARGET_EFAULT
;
8476 act
._sa_handler
= old_act
->_sa_handler
;
8477 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8478 act
.sa_flags
= old_act
->sa_flags
;
8479 act
.sa_restorer
= old_act
->sa_restorer
;
8480 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8481 act
.ka_restorer
= 0;
8483 unlock_user_struct(old_act
, arg2
, 0);
8488 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8489 if (!is_error(ret
) && arg3
) {
8490 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8491 return -TARGET_EFAULT
;
8492 old_act
->_sa_handler
= oact
._sa_handler
;
8493 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8494 old_act
->sa_flags
= oact
.sa_flags
;
8495 old_act
->sa_restorer
= oact
.sa_restorer
;
8496 unlock_user_struct(old_act
, arg3
, 1);
8502 case TARGET_NR_rt_sigaction
:
8504 #if defined(TARGET_ALPHA)
8505 /* For Alpha and SPARC this is a 5 argument syscall, with
8506 * a 'restorer' parameter which must be copied into the
8507 * sa_restorer field of the sigaction struct.
8508 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8509 * and arg5 is the sigsetsize.
8510 * Alpha also has a separate rt_sigaction struct that it uses
8511 * here; SPARC uses the usual sigaction struct.
8513 struct target_rt_sigaction
*rt_act
;
8514 struct target_sigaction act
, oact
, *pact
= 0;
8516 if (arg4
!= sizeof(target_sigset_t
)) {
8517 return -TARGET_EINVAL
;
8520 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8521 return -TARGET_EFAULT
;
8522 act
._sa_handler
= rt_act
->_sa_handler
;
8523 act
.sa_mask
= rt_act
->sa_mask
;
8524 act
.sa_flags
= rt_act
->sa_flags
;
8525 act
.sa_restorer
= arg5
;
8526 unlock_user_struct(rt_act
, arg2
, 0);
8529 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8530 if (!is_error(ret
) && arg3
) {
8531 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8532 return -TARGET_EFAULT
;
8533 rt_act
->_sa_handler
= oact
._sa_handler
;
8534 rt_act
->sa_mask
= oact
.sa_mask
;
8535 rt_act
->sa_flags
= oact
.sa_flags
;
8536 unlock_user_struct(rt_act
, arg3
, 1);
8540 target_ulong restorer
= arg4
;
8541 target_ulong sigsetsize
= arg5
;
8543 target_ulong sigsetsize
= arg4
;
8545 struct target_sigaction
*act
;
8546 struct target_sigaction
*oact
;
8548 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8549 return -TARGET_EINVAL
;
8552 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8553 return -TARGET_EFAULT
;
8555 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8556 act
->ka_restorer
= restorer
;
8562 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8563 ret
= -TARGET_EFAULT
;
8564 goto rt_sigaction_fail
;
8568 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8571 unlock_user_struct(act
, arg2
, 0);
8573 unlock_user_struct(oact
, arg3
, 1);
8577 #ifdef TARGET_NR_sgetmask /* not on alpha */
8578 case TARGET_NR_sgetmask
:
8581 abi_ulong target_set
;
8582 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8584 host_to_target_old_sigset(&target_set
, &cur_set
);
8590 #ifdef TARGET_NR_ssetmask /* not on alpha */
8591 case TARGET_NR_ssetmask
:
8594 abi_ulong target_set
= arg1
;
8595 target_to_host_old_sigset(&set
, &target_set
);
8596 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8598 host_to_target_old_sigset(&target_set
, &oset
);
8604 #ifdef TARGET_NR_sigprocmask
8605 case TARGET_NR_sigprocmask
:
8607 #if defined(TARGET_ALPHA)
8608 sigset_t set
, oldset
;
8613 case TARGET_SIG_BLOCK
:
8616 case TARGET_SIG_UNBLOCK
:
8619 case TARGET_SIG_SETMASK
:
8623 return -TARGET_EINVAL
;
8626 target_to_host_old_sigset(&set
, &mask
);
8628 ret
= do_sigprocmask(how
, &set
, &oldset
);
8629 if (!is_error(ret
)) {
8630 host_to_target_old_sigset(&mask
, &oldset
);
8632 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8635 sigset_t set
, oldset
, *set_ptr
;
8640 case TARGET_SIG_BLOCK
:
8643 case TARGET_SIG_UNBLOCK
:
8646 case TARGET_SIG_SETMASK
:
8650 return -TARGET_EINVAL
;
8652 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8653 return -TARGET_EFAULT
;
8654 target_to_host_old_sigset(&set
, p
);
8655 unlock_user(p
, arg2
, 0);
8661 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8662 if (!is_error(ret
) && arg3
) {
8663 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8664 return -TARGET_EFAULT
;
8665 host_to_target_old_sigset(p
, &oldset
);
8666 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8672 case TARGET_NR_rt_sigprocmask
:
8675 sigset_t set
, oldset
, *set_ptr
;
8677 if (arg4
!= sizeof(target_sigset_t
)) {
8678 return -TARGET_EINVAL
;
8683 case TARGET_SIG_BLOCK
:
8686 case TARGET_SIG_UNBLOCK
:
8689 case TARGET_SIG_SETMASK
:
8693 return -TARGET_EINVAL
;
8695 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8696 return -TARGET_EFAULT
;
8697 target_to_host_sigset(&set
, p
);
8698 unlock_user(p
, arg2
, 0);
8704 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8705 if (!is_error(ret
) && arg3
) {
8706 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8707 return -TARGET_EFAULT
;
8708 host_to_target_sigset(p
, &oldset
);
8709 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8713 #ifdef TARGET_NR_sigpending
8714 case TARGET_NR_sigpending
:
8717 ret
= get_errno(sigpending(&set
));
8718 if (!is_error(ret
)) {
8719 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8720 return -TARGET_EFAULT
;
8721 host_to_target_old_sigset(p
, &set
);
8722 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8727 case TARGET_NR_rt_sigpending
:
8731 /* Yes, this check is >, not != like most. We follow the kernel's
8732 * logic and it does it like this because it implements
8733 * NR_sigpending through the same code path, and in that case
8734 * the old_sigset_t is smaller in size.
8736 if (arg2
> sizeof(target_sigset_t
)) {
8737 return -TARGET_EINVAL
;
8740 ret
= get_errno(sigpending(&set
));
8741 if (!is_error(ret
)) {
8742 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8743 return -TARGET_EFAULT
;
8744 host_to_target_sigset(p
, &set
);
8745 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8749 #ifdef TARGET_NR_sigsuspend
8750 case TARGET_NR_sigsuspend
:
8752 TaskState
*ts
= cpu
->opaque
;
8753 #if defined(TARGET_ALPHA)
8754 abi_ulong mask
= arg1
;
8755 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8757 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8758 return -TARGET_EFAULT
;
8759 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8760 unlock_user(p
, arg1
, 0);
8762 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8764 if (ret
!= -TARGET_ERESTARTSYS
) {
8765 ts
->in_sigsuspend
= 1;
8770 case TARGET_NR_rt_sigsuspend
:
8772 TaskState
*ts
= cpu
->opaque
;
8774 if (arg2
!= sizeof(target_sigset_t
)) {
8775 return -TARGET_EINVAL
;
8777 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8778 return -TARGET_EFAULT
;
8779 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8780 unlock_user(p
, arg1
, 0);
8781 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8783 if (ret
!= -TARGET_ERESTARTSYS
) {
8784 ts
->in_sigsuspend
= 1;
8788 #ifdef TARGET_NR_rt_sigtimedwait
8789 case TARGET_NR_rt_sigtimedwait
:
8792 struct timespec uts
, *puts
;
8795 if (arg4
!= sizeof(target_sigset_t
)) {
8796 return -TARGET_EINVAL
;
8799 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8800 return -TARGET_EFAULT
;
8801 target_to_host_sigset(&set
, p
);
8802 unlock_user(p
, arg1
, 0);
8805 target_to_host_timespec(puts
, arg3
);
8809 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8811 if (!is_error(ret
)) {
8813 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8816 return -TARGET_EFAULT
;
8818 host_to_target_siginfo(p
, &uinfo
);
8819 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8821 ret
= host_to_target_signal(ret
);
8826 case TARGET_NR_rt_sigqueueinfo
:
8830 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8832 return -TARGET_EFAULT
;
8834 target_to_host_siginfo(&uinfo
, p
);
8835 unlock_user(p
, arg3
, 0);
8836 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8839 case TARGET_NR_rt_tgsigqueueinfo
:
8843 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8845 return -TARGET_EFAULT
;
8847 target_to_host_siginfo(&uinfo
, p
);
8848 unlock_user(p
, arg4
, 0);
8849 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8852 #ifdef TARGET_NR_sigreturn
8853 case TARGET_NR_sigreturn
:
8854 if (block_signals()) {
8855 return -TARGET_ERESTARTSYS
;
8857 return do_sigreturn(cpu_env
);
8859 case TARGET_NR_rt_sigreturn
:
8860 if (block_signals()) {
8861 return -TARGET_ERESTARTSYS
;
8863 return do_rt_sigreturn(cpu_env
);
8864 case TARGET_NR_sethostname
:
8865 if (!(p
= lock_user_string(arg1
)))
8866 return -TARGET_EFAULT
;
8867 ret
= get_errno(sethostname(p
, arg2
));
8868 unlock_user(p
, arg1
, 0);
8870 #ifdef TARGET_NR_setrlimit
8871 case TARGET_NR_setrlimit
:
8873 int resource
= target_to_host_resource(arg1
);
8874 struct target_rlimit
*target_rlim
;
8876 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8877 return -TARGET_EFAULT
;
8878 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8879 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8880 unlock_user_struct(target_rlim
, arg2
, 0);
8882 * If we just passed through resource limit settings for memory then
8883 * they would also apply to QEMU's own allocations, and QEMU will
8884 * crash or hang or die if its allocations fail. Ideally we would
8885 * track the guest allocations in QEMU and apply the limits ourselves.
8886 * For now, just tell the guest the call succeeded but don't actually
8889 if (resource
!= RLIMIT_AS
&&
8890 resource
!= RLIMIT_DATA
&&
8891 resource
!= RLIMIT_STACK
) {
8892 return get_errno(setrlimit(resource
, &rlim
));
8898 #ifdef TARGET_NR_getrlimit
8899 case TARGET_NR_getrlimit
:
8901 int resource
= target_to_host_resource(arg1
);
8902 struct target_rlimit
*target_rlim
;
8905 ret
= get_errno(getrlimit(resource
, &rlim
));
8906 if (!is_error(ret
)) {
8907 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8908 return -TARGET_EFAULT
;
8909 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8910 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8911 unlock_user_struct(target_rlim
, arg2
, 1);
8916 case TARGET_NR_getrusage
:
8918 struct rusage rusage
;
8919 ret
= get_errno(getrusage(arg1
, &rusage
));
8920 if (!is_error(ret
)) {
8921 ret
= host_to_target_rusage(arg2
, &rusage
);
8925 #if defined(TARGET_NR_gettimeofday)
8926 case TARGET_NR_gettimeofday
:
8931 ret
= get_errno(gettimeofday(&tv
, &tz
));
8932 if (!is_error(ret
)) {
8933 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
8934 return -TARGET_EFAULT
;
8936 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
8937 return -TARGET_EFAULT
;
8943 #if defined(TARGET_NR_settimeofday)
8944 case TARGET_NR_settimeofday
:
8946 struct timeval tv
, *ptv
= NULL
;
8947 struct timezone tz
, *ptz
= NULL
;
8950 if (copy_from_user_timeval(&tv
, arg1
)) {
8951 return -TARGET_EFAULT
;
8957 if (copy_from_user_timezone(&tz
, arg2
)) {
8958 return -TARGET_EFAULT
;
8963 return get_errno(settimeofday(ptv
, ptz
));
8966 #if defined(TARGET_NR_select)
8967 case TARGET_NR_select
:
8968 #if defined(TARGET_WANT_NI_OLD_SELECT)
8969 /* some architectures used to have old_select here
8970 * but now ENOSYS it.
8972 ret
= -TARGET_ENOSYS
;
8973 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8974 ret
= do_old_select(arg1
);
8976 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8980 #ifdef TARGET_NR_pselect6
8981 case TARGET_NR_pselect6
:
8983 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8984 fd_set rfds
, wfds
, efds
;
8985 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8986 struct timespec ts
, *ts_ptr
;
8989 * The 6th arg is actually two args smashed together,
8990 * so we cannot use the C library.
8998 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8999 target_sigset_t
*target_sigset
;
9007 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9011 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9015 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9021 * This takes a timespec, and not a timeval, so we cannot
9022 * use the do_select() helper ...
9025 if (target_to_host_timespec(&ts
, ts_addr
)) {
9026 return -TARGET_EFAULT
;
9033 /* Extract the two packed args for the sigset */
9036 sig
.size
= SIGSET_T_SIZE
;
9038 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9040 return -TARGET_EFAULT
;
9042 arg_sigset
= tswapal(arg7
[0]);
9043 arg_sigsize
= tswapal(arg7
[1]);
9044 unlock_user(arg7
, arg6
, 0);
9048 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9049 /* Like the kernel, we enforce correct size sigsets */
9050 return -TARGET_EINVAL
;
9052 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9053 sizeof(*target_sigset
), 1);
9054 if (!target_sigset
) {
9055 return -TARGET_EFAULT
;
9057 target_to_host_sigset(&set
, target_sigset
);
9058 unlock_user(target_sigset
, arg_sigset
, 0);
9066 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9069 if (!is_error(ret
)) {
9070 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9071 return -TARGET_EFAULT
;
9072 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9073 return -TARGET_EFAULT
;
9074 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9075 return -TARGET_EFAULT
;
9077 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9078 return -TARGET_EFAULT
;
9083 #ifdef TARGET_NR_symlink
9084 case TARGET_NR_symlink
:
9087 p
= lock_user_string(arg1
);
9088 p2
= lock_user_string(arg2
);
9090 ret
= -TARGET_EFAULT
;
9092 ret
= get_errno(symlink(p
, p2
));
9093 unlock_user(p2
, arg2
, 0);
9094 unlock_user(p
, arg1
, 0);
9098 #if defined(TARGET_NR_symlinkat)
9099 case TARGET_NR_symlinkat
:
9102 p
= lock_user_string(arg1
);
9103 p2
= lock_user_string(arg3
);
9105 ret
= -TARGET_EFAULT
;
9107 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9108 unlock_user(p2
, arg3
, 0);
9109 unlock_user(p
, arg1
, 0);
9113 #ifdef TARGET_NR_readlink
9114 case TARGET_NR_readlink
:
9117 p
= lock_user_string(arg1
);
9118 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9120 ret
= -TARGET_EFAULT
;
9122 /* Short circuit this for the magic exe check. */
9123 ret
= -TARGET_EINVAL
;
9124 } else if (is_proc_myself((const char *)p
, "exe")) {
9125 char real
[PATH_MAX
], *temp
;
9126 temp
= realpath(exec_path
, real
);
9127 /* Return value is # of bytes that we wrote to the buffer. */
9129 ret
= get_errno(-1);
9131 /* Don't worry about sign mismatch as earlier mapping
9132 * logic would have thrown a bad address error. */
9133 ret
= MIN(strlen(real
), arg3
);
9134 /* We cannot NUL terminate the string. */
9135 memcpy(p2
, real
, ret
);
9138 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9140 unlock_user(p2
, arg2
, ret
);
9141 unlock_user(p
, arg1
, 0);
9145 #if defined(TARGET_NR_readlinkat)
9146 case TARGET_NR_readlinkat
:
9149 p
= lock_user_string(arg2
);
9150 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9152 ret
= -TARGET_EFAULT
;
9153 } else if (is_proc_myself((const char *)p
, "exe")) {
9154 char real
[PATH_MAX
], *temp
;
9155 temp
= realpath(exec_path
, real
);
9156 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9157 snprintf((char *)p2
, arg4
, "%s", real
);
9159 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9161 unlock_user(p2
, arg3
, ret
);
9162 unlock_user(p
, arg2
, 0);
9166 #ifdef TARGET_NR_swapon
9167 case TARGET_NR_swapon
:
9168 if (!(p
= lock_user_string(arg1
)))
9169 return -TARGET_EFAULT
;
9170 ret
= get_errno(swapon(p
, arg2
));
9171 unlock_user(p
, arg1
, 0);
9174 case TARGET_NR_reboot
:
9175 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9176 /* arg4 must be ignored in all other cases */
9177 p
= lock_user_string(arg4
);
9179 return -TARGET_EFAULT
;
9181 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9182 unlock_user(p
, arg4
, 0);
9184 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9187 #ifdef TARGET_NR_mmap
9188 case TARGET_NR_mmap
:
9189 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9190 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9191 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9192 || defined(TARGET_S390X)
9195 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9196 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9197 return -TARGET_EFAULT
;
9204 unlock_user(v
, arg1
, 0);
9205 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9206 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9210 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9211 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9217 #ifdef TARGET_NR_mmap2
9218 case TARGET_NR_mmap2
:
9220 #define MMAP_SHIFT 12
9222 ret
= target_mmap(arg1
, arg2
, arg3
,
9223 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9224 arg5
, arg6
<< MMAP_SHIFT
);
9225 return get_errno(ret
);
9227 case TARGET_NR_munmap
:
9228 return get_errno(target_munmap(arg1
, arg2
));
9229 case TARGET_NR_mprotect
:
9231 TaskState
*ts
= cpu
->opaque
;
9232 /* Special hack to detect libc making the stack executable. */
9233 if ((arg3
& PROT_GROWSDOWN
)
9234 && arg1
>= ts
->info
->stack_limit
9235 && arg1
<= ts
->info
->start_stack
) {
9236 arg3
&= ~PROT_GROWSDOWN
;
9237 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9238 arg1
= ts
->info
->stack_limit
;
9241 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9242 #ifdef TARGET_NR_mremap
9243 case TARGET_NR_mremap
:
9244 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9246 /* ??? msync/mlock/munlock are broken for softmmu. */
9247 #ifdef TARGET_NR_msync
9248 case TARGET_NR_msync
:
9249 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9251 #ifdef TARGET_NR_mlock
9252 case TARGET_NR_mlock
:
9253 return get_errno(mlock(g2h(arg1
), arg2
));
9255 #ifdef TARGET_NR_munlock
9256 case TARGET_NR_munlock
:
9257 return get_errno(munlock(g2h(arg1
), arg2
));
9259 #ifdef TARGET_NR_mlockall
9260 case TARGET_NR_mlockall
:
9261 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9263 #ifdef TARGET_NR_munlockall
9264 case TARGET_NR_munlockall
:
9265 return get_errno(munlockall());
9267 #ifdef TARGET_NR_truncate
9268 case TARGET_NR_truncate
:
9269 if (!(p
= lock_user_string(arg1
)))
9270 return -TARGET_EFAULT
;
9271 ret
= get_errno(truncate(p
, arg2
));
9272 unlock_user(p
, arg1
, 0);
9275 #ifdef TARGET_NR_ftruncate
9276 case TARGET_NR_ftruncate
:
9277 return get_errno(ftruncate(arg1
, arg2
));
9279 case TARGET_NR_fchmod
:
9280 return get_errno(fchmod(arg1
, arg2
));
9281 #if defined(TARGET_NR_fchmodat)
9282 case TARGET_NR_fchmodat
:
9283 if (!(p
= lock_user_string(arg2
)))
9284 return -TARGET_EFAULT
;
9285 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9286 unlock_user(p
, arg2
, 0);
9289 case TARGET_NR_getpriority
:
9290 /* Note that negative values are valid for getpriority, so we must
9291 differentiate based on errno settings. */
9293 ret
= getpriority(arg1
, arg2
);
9294 if (ret
== -1 && errno
!= 0) {
9295 return -host_to_target_errno(errno
);
9298 /* Return value is the unbiased priority. Signal no error. */
9299 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9301 /* Return value is a biased priority to avoid negative numbers. */
9305 case TARGET_NR_setpriority
:
9306 return get_errno(setpriority(arg1
, arg2
, arg3
));
9307 #ifdef TARGET_NR_statfs
9308 case TARGET_NR_statfs
:
9309 if (!(p
= lock_user_string(arg1
))) {
9310 return -TARGET_EFAULT
;
9312 ret
= get_errno(statfs(path(p
), &stfs
));
9313 unlock_user(p
, arg1
, 0);
9315 if (!is_error(ret
)) {
9316 struct target_statfs
*target_stfs
;
9318 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9319 return -TARGET_EFAULT
;
9320 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9321 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9322 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9323 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9324 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9325 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9326 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9327 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9328 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9329 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9330 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9331 #ifdef _STATFS_F_FLAGS
9332 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9334 __put_user(0, &target_stfs
->f_flags
);
9336 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9337 unlock_user_struct(target_stfs
, arg2
, 1);
9341 #ifdef TARGET_NR_fstatfs
9342 case TARGET_NR_fstatfs
:
9343 ret
= get_errno(fstatfs(arg1
, &stfs
));
9344 goto convert_statfs
;
9346 #ifdef TARGET_NR_statfs64
9347 case TARGET_NR_statfs64
:
9348 if (!(p
= lock_user_string(arg1
))) {
9349 return -TARGET_EFAULT
;
9351 ret
= get_errno(statfs(path(p
), &stfs
));
9352 unlock_user(p
, arg1
, 0);
9354 if (!is_error(ret
)) {
9355 struct target_statfs64
*target_stfs
;
9357 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9358 return -TARGET_EFAULT
;
9359 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9360 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9361 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9362 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9363 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9364 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9365 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9366 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9367 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9368 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9369 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9370 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9371 unlock_user_struct(target_stfs
, arg3
, 1);
9374 case TARGET_NR_fstatfs64
:
9375 ret
= get_errno(fstatfs(arg1
, &stfs
));
9376 goto convert_statfs64
;
9378 #ifdef TARGET_NR_socketcall
9379 case TARGET_NR_socketcall
:
9380 return do_socketcall(arg1
, arg2
);
9382 #ifdef TARGET_NR_accept
9383 case TARGET_NR_accept
:
9384 return do_accept4(arg1
, arg2
, arg3
, 0);
9386 #ifdef TARGET_NR_accept4
9387 case TARGET_NR_accept4
:
9388 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9390 #ifdef TARGET_NR_bind
9391 case TARGET_NR_bind
:
9392 return do_bind(arg1
, arg2
, arg3
);
9394 #ifdef TARGET_NR_connect
9395 case TARGET_NR_connect
:
9396 return do_connect(arg1
, arg2
, arg3
);
9398 #ifdef TARGET_NR_getpeername
9399 case TARGET_NR_getpeername
:
9400 return do_getpeername(arg1
, arg2
, arg3
);
9402 #ifdef TARGET_NR_getsockname
9403 case TARGET_NR_getsockname
:
9404 return do_getsockname(arg1
, arg2
, arg3
);
9406 #ifdef TARGET_NR_getsockopt
9407 case TARGET_NR_getsockopt
:
9408 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9410 #ifdef TARGET_NR_listen
9411 case TARGET_NR_listen
:
9412 return get_errno(listen(arg1
, arg2
));
9414 #ifdef TARGET_NR_recv
9415 case TARGET_NR_recv
:
9416 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9418 #ifdef TARGET_NR_recvfrom
9419 case TARGET_NR_recvfrom
:
9420 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9422 #ifdef TARGET_NR_recvmsg
9423 case TARGET_NR_recvmsg
:
9424 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9426 #ifdef TARGET_NR_send
9427 case TARGET_NR_send
:
9428 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9430 #ifdef TARGET_NR_sendmsg
9431 case TARGET_NR_sendmsg
:
9432 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9434 #ifdef TARGET_NR_sendmmsg
9435 case TARGET_NR_sendmmsg
:
9436 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9438 #ifdef TARGET_NR_recvmmsg
9439 case TARGET_NR_recvmmsg
:
9440 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9442 #ifdef TARGET_NR_sendto
9443 case TARGET_NR_sendto
:
9444 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9446 #ifdef TARGET_NR_shutdown
9447 case TARGET_NR_shutdown
:
9448 return get_errno(shutdown(arg1
, arg2
));
9450 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9451 case TARGET_NR_getrandom
:
9452 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9454 return -TARGET_EFAULT
;
9456 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9457 unlock_user(p
, arg1
, ret
);
9460 #ifdef TARGET_NR_socket
9461 case TARGET_NR_socket
:
9462 return do_socket(arg1
, arg2
, arg3
);
9464 #ifdef TARGET_NR_socketpair
9465 case TARGET_NR_socketpair
:
9466 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9468 #ifdef TARGET_NR_setsockopt
9469 case TARGET_NR_setsockopt
:
9470 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9472 #if defined(TARGET_NR_syslog)
9473 case TARGET_NR_syslog
:
9478 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9479 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9480 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9481 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9482 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9483 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9484 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9485 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9486 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9487 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9488 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9489 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9492 return -TARGET_EINVAL
;
9497 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9499 return -TARGET_EFAULT
;
9501 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9502 unlock_user(p
, arg2
, arg3
);
9506 return -TARGET_EINVAL
;
9511 case TARGET_NR_setitimer
:
9513 struct itimerval value
, ovalue
, *pvalue
;
9517 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9518 || copy_from_user_timeval(&pvalue
->it_value
,
9519 arg2
+ sizeof(struct target_timeval
)))
9520 return -TARGET_EFAULT
;
9524 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9525 if (!is_error(ret
) && arg3
) {
9526 if (copy_to_user_timeval(arg3
,
9527 &ovalue
.it_interval
)
9528 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9530 return -TARGET_EFAULT
;
9534 case TARGET_NR_getitimer
:
9536 struct itimerval value
;
9538 ret
= get_errno(getitimer(arg1
, &value
));
9539 if (!is_error(ret
) && arg2
) {
9540 if (copy_to_user_timeval(arg2
,
9542 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9544 return -TARGET_EFAULT
;
9548 #ifdef TARGET_NR_stat
9549 case TARGET_NR_stat
:
9550 if (!(p
= lock_user_string(arg1
))) {
9551 return -TARGET_EFAULT
;
9553 ret
= get_errno(stat(path(p
), &st
));
9554 unlock_user(p
, arg1
, 0);
9557 #ifdef TARGET_NR_lstat
9558 case TARGET_NR_lstat
:
9559 if (!(p
= lock_user_string(arg1
))) {
9560 return -TARGET_EFAULT
;
9562 ret
= get_errno(lstat(path(p
), &st
));
9563 unlock_user(p
, arg1
, 0);
9566 #ifdef TARGET_NR_fstat
9567 case TARGET_NR_fstat
:
9569 ret
= get_errno(fstat(arg1
, &st
));
9570 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9573 if (!is_error(ret
)) {
9574 struct target_stat
*target_st
;
9576 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9577 return -TARGET_EFAULT
;
9578 memset(target_st
, 0, sizeof(*target_st
));
9579 __put_user(st
.st_dev
, &target_st
->st_dev
);
9580 __put_user(st
.st_ino
, &target_st
->st_ino
);
9581 __put_user(st
.st_mode
, &target_st
->st_mode
);
9582 __put_user(st
.st_uid
, &target_st
->st_uid
);
9583 __put_user(st
.st_gid
, &target_st
->st_gid
);
9584 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9585 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9586 __put_user(st
.st_size
, &target_st
->st_size
);
9587 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9588 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9589 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9590 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9591 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9592 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9593 defined(TARGET_STAT_HAVE_NSEC)
9594 __put_user(st
.st_atim
.tv_nsec
,
9595 &target_st
->target_st_atime_nsec
);
9596 __put_user(st
.st_mtim
.tv_nsec
,
9597 &target_st
->target_st_mtime_nsec
);
9598 __put_user(st
.st_ctim
.tv_nsec
,
9599 &target_st
->target_st_ctime_nsec
);
9601 unlock_user_struct(target_st
, arg2
, 1);
9606 case TARGET_NR_vhangup
:
9607 return get_errno(vhangup());
9608 #ifdef TARGET_NR_syscall
9609 case TARGET_NR_syscall
:
9610 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9611 arg6
, arg7
, arg8
, 0);
9613 #if defined(TARGET_NR_wait4)
9614 case TARGET_NR_wait4
:
9617 abi_long status_ptr
= arg2
;
9618 struct rusage rusage
, *rusage_ptr
;
9619 abi_ulong target_rusage
= arg4
;
9620 abi_long rusage_err
;
9622 rusage_ptr
= &rusage
;
9625 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9626 if (!is_error(ret
)) {
9627 if (status_ptr
&& ret
) {
9628 status
= host_to_target_waitstatus(status
);
9629 if (put_user_s32(status
, status_ptr
))
9630 return -TARGET_EFAULT
;
9632 if (target_rusage
) {
9633 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9642 #ifdef TARGET_NR_swapoff
9643 case TARGET_NR_swapoff
:
9644 if (!(p
= lock_user_string(arg1
)))
9645 return -TARGET_EFAULT
;
9646 ret
= get_errno(swapoff(p
));
9647 unlock_user(p
, arg1
, 0);
9650 case TARGET_NR_sysinfo
:
9652 struct target_sysinfo
*target_value
;
9653 struct sysinfo value
;
9654 ret
= get_errno(sysinfo(&value
));
9655 if (!is_error(ret
) && arg1
)
9657 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9658 return -TARGET_EFAULT
;
9659 __put_user(value
.uptime
, &target_value
->uptime
);
9660 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9661 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9662 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9663 __put_user(value
.totalram
, &target_value
->totalram
);
9664 __put_user(value
.freeram
, &target_value
->freeram
);
9665 __put_user(value
.sharedram
, &target_value
->sharedram
);
9666 __put_user(value
.bufferram
, &target_value
->bufferram
);
9667 __put_user(value
.totalswap
, &target_value
->totalswap
);
9668 __put_user(value
.freeswap
, &target_value
->freeswap
);
9669 __put_user(value
.procs
, &target_value
->procs
);
9670 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9671 __put_user(value
.freehigh
, &target_value
->freehigh
);
9672 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9673 unlock_user_struct(target_value
, arg1
, 1);
9677 #ifdef TARGET_NR_ipc
9679 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9681 #ifdef TARGET_NR_semget
9682 case TARGET_NR_semget
:
9683 return get_errno(semget(arg1
, arg2
, arg3
));
9685 #ifdef TARGET_NR_semop
9686 case TARGET_NR_semop
:
9687 return do_semop(arg1
, arg2
, arg3
);
9689 #ifdef TARGET_NR_semctl
9690 case TARGET_NR_semctl
:
9691 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9693 #ifdef TARGET_NR_msgctl
9694 case TARGET_NR_msgctl
:
9695 return do_msgctl(arg1
, arg2
, arg3
);
9697 #ifdef TARGET_NR_msgget
9698 case TARGET_NR_msgget
:
9699 return get_errno(msgget(arg1
, arg2
));
9701 #ifdef TARGET_NR_msgrcv
9702 case TARGET_NR_msgrcv
:
9703 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9705 #ifdef TARGET_NR_msgsnd
9706 case TARGET_NR_msgsnd
:
9707 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9709 #ifdef TARGET_NR_shmget
9710 case TARGET_NR_shmget
:
9711 return get_errno(shmget(arg1
, arg2
, arg3
));
9713 #ifdef TARGET_NR_shmctl
9714 case TARGET_NR_shmctl
:
9715 return do_shmctl(arg1
, arg2
, arg3
);
9717 #ifdef TARGET_NR_shmat
9718 case TARGET_NR_shmat
:
9719 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9721 #ifdef TARGET_NR_shmdt
9722 case TARGET_NR_shmdt
:
9723 return do_shmdt(arg1
);
9725 case TARGET_NR_fsync
:
9726 return get_errno(fsync(arg1
));
9727 case TARGET_NR_clone
:
9728 /* Linux manages to have three different orderings for its
9729 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9730 * match the kernel's CONFIG_CLONE_* settings.
9731 * Microblaze is further special in that it uses a sixth
9732 * implicit argument to clone for the TLS pointer.
9734 #if defined(TARGET_MICROBLAZE)
9735 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9736 #elif defined(TARGET_CLONE_BACKWARDS)
9737 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9738 #elif defined(TARGET_CLONE_BACKWARDS2)
9739 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9741 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9744 #ifdef __NR_exit_group
9745 /* new thread calls */
9746 case TARGET_NR_exit_group
:
9747 preexit_cleanup(cpu_env
, arg1
);
9748 return get_errno(exit_group(arg1
));
9750 case TARGET_NR_setdomainname
:
9751 if (!(p
= lock_user_string(arg1
)))
9752 return -TARGET_EFAULT
;
9753 ret
= get_errno(setdomainname(p
, arg2
));
9754 unlock_user(p
, arg1
, 0);
9756 case TARGET_NR_uname
:
9757 /* no need to transcode because we use the linux syscall */
9759 struct new_utsname
* buf
;
9761 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9762 return -TARGET_EFAULT
;
9763 ret
= get_errno(sys_uname(buf
));
9764 if (!is_error(ret
)) {
9765 /* Overwrite the native machine name with whatever is being
9767 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9768 sizeof(buf
->machine
));
9769 /* Allow the user to override the reported release. */
9770 if (qemu_uname_release
&& *qemu_uname_release
) {
9771 g_strlcpy(buf
->release
, qemu_uname_release
,
9772 sizeof(buf
->release
));
9775 unlock_user_struct(buf
, arg1
, 1);
9779 case TARGET_NR_modify_ldt
:
9780 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9781 #if !defined(TARGET_X86_64)
9782 case TARGET_NR_vm86
:
9783 return do_vm86(cpu_env
, arg1
, arg2
);
9786 #if defined(TARGET_NR_adjtimex)
9787 case TARGET_NR_adjtimex
:
9789 struct timex host_buf
;
9791 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9792 return -TARGET_EFAULT
;
9794 ret
= get_errno(adjtimex(&host_buf
));
9795 if (!is_error(ret
)) {
9796 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9797 return -TARGET_EFAULT
;
9803 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9804 case TARGET_NR_clock_adjtime
:
9806 struct timex htx
, *phtx
= &htx
;
9808 if (target_to_host_timex(phtx
, arg2
) != 0) {
9809 return -TARGET_EFAULT
;
9811 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9812 if (!is_error(ret
) && phtx
) {
9813 if (host_to_target_timex(arg2
, phtx
) != 0) {
9814 return -TARGET_EFAULT
;
9820 case TARGET_NR_getpgid
:
9821 return get_errno(getpgid(arg1
));
9822 case TARGET_NR_fchdir
:
9823 return get_errno(fchdir(arg1
));
9824 case TARGET_NR_personality
:
9825 return get_errno(personality(arg1
));
9826 #ifdef TARGET_NR__llseek /* Not on alpha */
9827 case TARGET_NR__llseek
:
9830 #if !defined(__NR_llseek)
9831 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9833 ret
= get_errno(res
);
9838 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9840 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9841 return -TARGET_EFAULT
;
9846 #ifdef TARGET_NR_getdents
9847 case TARGET_NR_getdents
:
9848 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9849 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9851 struct target_dirent
*target_dirp
;
9852 struct linux_dirent
*dirp
;
9853 abi_long count
= arg3
;
9855 dirp
= g_try_malloc(count
);
9857 return -TARGET_ENOMEM
;
9860 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9861 if (!is_error(ret
)) {
9862 struct linux_dirent
*de
;
9863 struct target_dirent
*tde
;
9865 int reclen
, treclen
;
9866 int count1
, tnamelen
;
9870 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9871 return -TARGET_EFAULT
;
9874 reclen
= de
->d_reclen
;
9875 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9876 assert(tnamelen
>= 0);
9877 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9878 assert(count1
+ treclen
<= count
);
9879 tde
->d_reclen
= tswap16(treclen
);
9880 tde
->d_ino
= tswapal(de
->d_ino
);
9881 tde
->d_off
= tswapal(de
->d_off
);
9882 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9883 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9885 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9889 unlock_user(target_dirp
, arg2
, ret
);
9895 struct linux_dirent
*dirp
;
9896 abi_long count
= arg3
;
9898 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9899 return -TARGET_EFAULT
;
9900 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9901 if (!is_error(ret
)) {
9902 struct linux_dirent
*de
;
9907 reclen
= de
->d_reclen
;
9910 de
->d_reclen
= tswap16(reclen
);
9911 tswapls(&de
->d_ino
);
9912 tswapls(&de
->d_off
);
9913 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9917 unlock_user(dirp
, arg2
, ret
);
9921 /* Implement getdents in terms of getdents64 */
9923 struct linux_dirent64
*dirp
;
9924 abi_long count
= arg3
;
9926 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9928 return -TARGET_EFAULT
;
9930 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9931 if (!is_error(ret
)) {
9932 /* Convert the dirent64 structs to target dirent. We do this
9933 * in-place, since we can guarantee that a target_dirent is no
9934 * larger than a dirent64; however this means we have to be
9935 * careful to read everything before writing in the new format.
9937 struct linux_dirent64
*de
;
9938 struct target_dirent
*tde
;
9943 tde
= (struct target_dirent
*)dirp
;
9945 int namelen
, treclen
;
9946 int reclen
= de
->d_reclen
;
9947 uint64_t ino
= de
->d_ino
;
9948 int64_t off
= de
->d_off
;
9949 uint8_t type
= de
->d_type
;
9951 namelen
= strlen(de
->d_name
);
9952 treclen
= offsetof(struct target_dirent
, d_name
)
9954 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9956 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9957 tde
->d_ino
= tswapal(ino
);
9958 tde
->d_off
= tswapal(off
);
9959 tde
->d_reclen
= tswap16(treclen
);
9960 /* The target_dirent type is in what was formerly a padding
9961 * byte at the end of the structure:
9963 *(((char *)tde
) + treclen
- 1) = type
;
9965 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9966 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9972 unlock_user(dirp
, arg2
, ret
);
9976 #endif /* TARGET_NR_getdents */
9977 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9978 case TARGET_NR_getdents64
:
9980 struct linux_dirent64
*dirp
;
9981 abi_long count
= arg3
;
9982 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9983 return -TARGET_EFAULT
;
9984 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9985 if (!is_error(ret
)) {
9986 struct linux_dirent64
*de
;
9991 reclen
= de
->d_reclen
;
9994 de
->d_reclen
= tswap16(reclen
);
9995 tswap64s((uint64_t *)&de
->d_ino
);
9996 tswap64s((uint64_t *)&de
->d_off
);
9997 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10001 unlock_user(dirp
, arg2
, ret
);
10004 #endif /* TARGET_NR_getdents64 */
10005 #if defined(TARGET_NR__newselect)
10006 case TARGET_NR__newselect
:
10007 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10009 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10010 # ifdef TARGET_NR_poll
10011 case TARGET_NR_poll
:
10013 # ifdef TARGET_NR_ppoll
10014 case TARGET_NR_ppoll
:
10017 struct target_pollfd
*target_pfd
;
10018 unsigned int nfds
= arg2
;
10019 struct pollfd
*pfd
;
10025 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10026 return -TARGET_EINVAL
;
10029 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10030 sizeof(struct target_pollfd
) * nfds
, 1);
10032 return -TARGET_EFAULT
;
10035 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10036 for (i
= 0; i
< nfds
; i
++) {
10037 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10038 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10043 # ifdef TARGET_NR_ppoll
10044 case TARGET_NR_ppoll
:
10046 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10047 target_sigset_t
*target_set
;
10048 sigset_t _set
, *set
= &_set
;
10051 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10052 unlock_user(target_pfd
, arg1
, 0);
10053 return -TARGET_EFAULT
;
10060 if (arg5
!= sizeof(target_sigset_t
)) {
10061 unlock_user(target_pfd
, arg1
, 0);
10062 return -TARGET_EINVAL
;
10065 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10067 unlock_user(target_pfd
, arg1
, 0);
10068 return -TARGET_EFAULT
;
10070 target_to_host_sigset(set
, target_set
);
10075 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10076 set
, SIGSET_T_SIZE
));
10078 if (!is_error(ret
) && arg3
) {
10079 host_to_target_timespec(arg3
, timeout_ts
);
10082 unlock_user(target_set
, arg4
, 0);
10087 # ifdef TARGET_NR_poll
10088 case TARGET_NR_poll
:
10090 struct timespec ts
, *pts
;
10093 /* Convert ms to secs, ns */
10094 ts
.tv_sec
= arg3
/ 1000;
10095 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10098 /* -ve poll() timeout means "infinite" */
10101 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10106 g_assert_not_reached();
10109 if (!is_error(ret
)) {
10110 for(i
= 0; i
< nfds
; i
++) {
10111 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10114 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10118 case TARGET_NR_flock
:
10119 /* NOTE: the flock constant seems to be the same for every
10121 return get_errno(safe_flock(arg1
, arg2
));
10122 case TARGET_NR_readv
:
10124 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10126 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10127 unlock_iovec(vec
, arg2
, arg3
, 1);
10129 ret
= -host_to_target_errno(errno
);
10133 case TARGET_NR_writev
:
10135 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10137 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10138 unlock_iovec(vec
, arg2
, arg3
, 0);
10140 ret
= -host_to_target_errno(errno
);
10144 #if defined(TARGET_NR_preadv)
10145 case TARGET_NR_preadv
:
10147 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10149 unsigned long low
, high
;
10151 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10152 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10153 unlock_iovec(vec
, arg2
, arg3
, 1);
10155 ret
= -host_to_target_errno(errno
);
10160 #if defined(TARGET_NR_pwritev)
10161 case TARGET_NR_pwritev
:
10163 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10165 unsigned long low
, high
;
10167 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10168 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10169 unlock_iovec(vec
, arg2
, arg3
, 0);
10171 ret
= -host_to_target_errno(errno
);
10176 case TARGET_NR_getsid
:
10177 return get_errno(getsid(arg1
));
10178 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10179 case TARGET_NR_fdatasync
:
10180 return get_errno(fdatasync(arg1
));
10182 #ifdef TARGET_NR__sysctl
10183 case TARGET_NR__sysctl
:
10184 /* We don't implement this, but ENOTDIR is always a safe
10186 return -TARGET_ENOTDIR
;
10188 case TARGET_NR_sched_getaffinity
:
10190 unsigned int mask_size
;
10191 unsigned long *mask
;
10194 * sched_getaffinity needs multiples of ulong, so need to take
10195 * care of mismatches between target ulong and host ulong sizes.
10197 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10198 return -TARGET_EINVAL
;
10200 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10202 mask
= alloca(mask_size
);
10203 memset(mask
, 0, mask_size
);
10204 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10206 if (!is_error(ret
)) {
10208 /* More data returned than the caller's buffer will fit.
10209 * This only happens if sizeof(abi_long) < sizeof(long)
10210 * and the caller passed us a buffer holding an odd number
10211 * of abi_longs. If the host kernel is actually using the
10212 * extra 4 bytes then fail EINVAL; otherwise we can just
10213 * ignore them and only copy the interesting part.
10215 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10216 if (numcpus
> arg2
* 8) {
10217 return -TARGET_EINVAL
;
10222 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10223 return -TARGET_EFAULT
;
10228 case TARGET_NR_sched_setaffinity
:
10230 unsigned int mask_size
;
10231 unsigned long *mask
;
10234 * sched_setaffinity needs multiples of ulong, so need to take
10235 * care of mismatches between target ulong and host ulong sizes.
10237 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10238 return -TARGET_EINVAL
;
10240 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10241 mask
= alloca(mask_size
);
10243 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10248 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10250 case TARGET_NR_getcpu
:
10252 unsigned cpu
, node
;
10253 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10254 arg2
? &node
: NULL
,
10256 if (is_error(ret
)) {
10259 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10260 return -TARGET_EFAULT
;
10262 if (arg2
&& put_user_u32(node
, arg2
)) {
10263 return -TARGET_EFAULT
;
10267 case TARGET_NR_sched_setparam
:
10269 struct sched_param
*target_schp
;
10270 struct sched_param schp
;
10273 return -TARGET_EINVAL
;
10275 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10276 return -TARGET_EFAULT
;
10277 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10278 unlock_user_struct(target_schp
, arg2
, 0);
10279 return get_errno(sched_setparam(arg1
, &schp
));
10281 case TARGET_NR_sched_getparam
:
10283 struct sched_param
*target_schp
;
10284 struct sched_param schp
;
10287 return -TARGET_EINVAL
;
10289 ret
= get_errno(sched_getparam(arg1
, &schp
));
10290 if (!is_error(ret
)) {
10291 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10292 return -TARGET_EFAULT
;
10293 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10294 unlock_user_struct(target_schp
, arg2
, 1);
10298 case TARGET_NR_sched_setscheduler
:
10300 struct sched_param
*target_schp
;
10301 struct sched_param schp
;
10303 return -TARGET_EINVAL
;
10305 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10306 return -TARGET_EFAULT
;
10307 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10308 unlock_user_struct(target_schp
, arg3
, 0);
10309 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10311 case TARGET_NR_sched_getscheduler
:
10312 return get_errno(sched_getscheduler(arg1
));
10313 case TARGET_NR_sched_yield
:
10314 return get_errno(sched_yield());
10315 case TARGET_NR_sched_get_priority_max
:
10316 return get_errno(sched_get_priority_max(arg1
));
10317 case TARGET_NR_sched_get_priority_min
:
10318 return get_errno(sched_get_priority_min(arg1
));
10319 #ifdef TARGET_NR_sched_rr_get_interval
10320 case TARGET_NR_sched_rr_get_interval
:
10322 struct timespec ts
;
10323 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10324 if (!is_error(ret
)) {
10325 ret
= host_to_target_timespec(arg2
, &ts
);
10330 #if defined(TARGET_NR_nanosleep)
10331 case TARGET_NR_nanosleep
:
10333 struct timespec req
, rem
;
10334 target_to_host_timespec(&req
, arg1
);
10335 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10336 if (is_error(ret
) && arg2
) {
10337 host_to_target_timespec(arg2
, &rem
);
10342 case TARGET_NR_prctl
:
10344 case PR_GET_PDEATHSIG
:
10347 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10348 if (!is_error(ret
) && arg2
10349 && put_user_ual(deathsig
, arg2
)) {
10350 return -TARGET_EFAULT
;
10357 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10359 return -TARGET_EFAULT
;
10361 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10362 arg3
, arg4
, arg5
));
10363 unlock_user(name
, arg2
, 16);
10368 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10370 return -TARGET_EFAULT
;
10372 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10373 arg3
, arg4
, arg5
));
10374 unlock_user(name
, arg2
, 0);
10379 case TARGET_PR_GET_FP_MODE
:
10381 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10383 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10384 ret
|= TARGET_PR_FP_MODE_FR
;
10386 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10387 ret
|= TARGET_PR_FP_MODE_FRE
;
10391 case TARGET_PR_SET_FP_MODE
:
10393 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10394 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10395 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10396 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10397 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10399 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10400 TARGET_PR_FP_MODE_FRE
;
10402 /* If nothing to change, return right away, successfully. */
10403 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10406 /* Check the value is valid */
10407 if (arg2
& ~known_bits
) {
10408 return -TARGET_EOPNOTSUPP
;
10410 /* Setting FRE without FR is not supported. */
10411 if (new_fre
&& !new_fr
) {
10412 return -TARGET_EOPNOTSUPP
;
10414 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10415 /* FR1 is not supported */
10416 return -TARGET_EOPNOTSUPP
;
10418 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10419 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10420 /* cannot set FR=0 */
10421 return -TARGET_EOPNOTSUPP
;
10423 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10424 /* Cannot set FRE=1 */
10425 return -TARGET_EOPNOTSUPP
;
10429 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10430 for (i
= 0; i
< 32 ; i
+= 2) {
10431 if (!old_fr
&& new_fr
) {
10432 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10433 } else if (old_fr
&& !new_fr
) {
10434 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10439 env
->CP0_Status
|= (1 << CP0St_FR
);
10440 env
->hflags
|= MIPS_HFLAG_F64
;
10442 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10443 env
->hflags
&= ~MIPS_HFLAG_F64
;
10446 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10447 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10448 env
->hflags
|= MIPS_HFLAG_FRE
;
10451 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10452 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10458 #ifdef TARGET_AARCH64
10459 case TARGET_PR_SVE_SET_VL
:
10461 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10462 * PR_SVE_VL_INHERIT. Note the kernel definition
10463 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10464 * even though the current architectural maximum is VQ=16.
10466 ret
= -TARGET_EINVAL
;
10467 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10468 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10469 CPUARMState
*env
= cpu_env
;
10470 ARMCPU
*cpu
= env_archcpu(env
);
10471 uint32_t vq
, old_vq
;
10473 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10474 vq
= MAX(arg2
/ 16, 1);
10475 vq
= MIN(vq
, cpu
->sve_max_vq
);
10478 aarch64_sve_narrow_vq(env
, vq
);
10480 env
->vfp
.zcr_el
[1] = vq
- 1;
10481 arm_rebuild_hflags(env
);
10485 case TARGET_PR_SVE_GET_VL
:
10486 ret
= -TARGET_EINVAL
;
10488 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10489 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10490 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10494 case TARGET_PR_PAC_RESET_KEYS
:
10496 CPUARMState
*env
= cpu_env
;
10497 ARMCPU
*cpu
= env_archcpu(env
);
10499 if (arg3
|| arg4
|| arg5
) {
10500 return -TARGET_EINVAL
;
10502 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10503 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10504 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10505 TARGET_PR_PAC_APGAKEY
);
10511 } else if (arg2
& ~all
) {
10512 return -TARGET_EINVAL
;
10514 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10515 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10516 sizeof(ARMPACKey
), &err
);
10518 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10519 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10520 sizeof(ARMPACKey
), &err
);
10522 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10523 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10524 sizeof(ARMPACKey
), &err
);
10526 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10527 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10528 sizeof(ARMPACKey
), &err
);
10530 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10531 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10532 sizeof(ARMPACKey
), &err
);
10536 * Some unknown failure in the crypto. The best
10537 * we can do is log it and fail the syscall.
10538 * The real syscall cannot fail this way.
10540 qemu_log_mask(LOG_UNIMP
,
10541 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10542 error_get_pretty(err
));
10544 return -TARGET_EIO
;
10549 return -TARGET_EINVAL
;
10550 #endif /* AARCH64 */
10551 case PR_GET_SECCOMP
:
10552 case PR_SET_SECCOMP
:
10553 /* Disable seccomp to prevent the target disabling syscalls we
10555 return -TARGET_EINVAL
;
10557 /* Most prctl options have no pointer arguments */
10558 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10561 #ifdef TARGET_NR_arch_prctl
10562 case TARGET_NR_arch_prctl
:
10563 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10565 #ifdef TARGET_NR_pread64
10566 case TARGET_NR_pread64
:
10567 if (regpairs_aligned(cpu_env
, num
)) {
10571 if (arg2
== 0 && arg3
== 0) {
10572 /* Special-case NULL buffer and zero length, which should succeed */
10575 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10577 return -TARGET_EFAULT
;
10580 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10581 unlock_user(p
, arg2
, ret
);
10583 case TARGET_NR_pwrite64
:
10584 if (regpairs_aligned(cpu_env
, num
)) {
10588 if (arg2
== 0 && arg3
== 0) {
10589 /* Special-case NULL buffer and zero length, which should succeed */
10592 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10594 return -TARGET_EFAULT
;
10597 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10598 unlock_user(p
, arg2
, 0);
10601 case TARGET_NR_getcwd
:
10602 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10603 return -TARGET_EFAULT
;
10604 ret
= get_errno(sys_getcwd1(p
, arg2
));
10605 unlock_user(p
, arg1
, ret
);
10607 case TARGET_NR_capget
:
10608 case TARGET_NR_capset
:
10610 struct target_user_cap_header
*target_header
;
10611 struct target_user_cap_data
*target_data
= NULL
;
10612 struct __user_cap_header_struct header
;
10613 struct __user_cap_data_struct data
[2];
10614 struct __user_cap_data_struct
*dataptr
= NULL
;
10615 int i
, target_datalen
;
10616 int data_items
= 1;
10618 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10619 return -TARGET_EFAULT
;
10621 header
.version
= tswap32(target_header
->version
);
10622 header
.pid
= tswap32(target_header
->pid
);
10624 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10625 /* Version 2 and up takes pointer to two user_data structs */
10629 target_datalen
= sizeof(*target_data
) * data_items
;
10632 if (num
== TARGET_NR_capget
) {
10633 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10635 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10637 if (!target_data
) {
10638 unlock_user_struct(target_header
, arg1
, 0);
10639 return -TARGET_EFAULT
;
10642 if (num
== TARGET_NR_capset
) {
10643 for (i
= 0; i
< data_items
; i
++) {
10644 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10645 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10646 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10653 if (num
== TARGET_NR_capget
) {
10654 ret
= get_errno(capget(&header
, dataptr
));
10656 ret
= get_errno(capset(&header
, dataptr
));
10659 /* The kernel always updates version for both capget and capset */
10660 target_header
->version
= tswap32(header
.version
);
10661 unlock_user_struct(target_header
, arg1
, 1);
10664 if (num
== TARGET_NR_capget
) {
10665 for (i
= 0; i
< data_items
; i
++) {
10666 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10667 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10668 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10670 unlock_user(target_data
, arg2
, target_datalen
);
10672 unlock_user(target_data
, arg2
, 0);
10677 case TARGET_NR_sigaltstack
:
10678 return do_sigaltstack(arg1
, arg2
,
10679 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10681 #ifdef CONFIG_SENDFILE
10682 #ifdef TARGET_NR_sendfile
10683 case TARGET_NR_sendfile
:
10685 off_t
*offp
= NULL
;
10688 ret
= get_user_sal(off
, arg3
);
10689 if (is_error(ret
)) {
10694 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10695 if (!is_error(ret
) && arg3
) {
10696 abi_long ret2
= put_user_sal(off
, arg3
);
10697 if (is_error(ret2
)) {
10704 #ifdef TARGET_NR_sendfile64
10705 case TARGET_NR_sendfile64
:
10707 off_t
*offp
= NULL
;
10710 ret
= get_user_s64(off
, arg3
);
10711 if (is_error(ret
)) {
10716 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10717 if (!is_error(ret
) && arg3
) {
10718 abi_long ret2
= put_user_s64(off
, arg3
);
10719 if (is_error(ret2
)) {
10727 #ifdef TARGET_NR_vfork
10728 case TARGET_NR_vfork
:
10729 return get_errno(do_fork(cpu_env
,
10730 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10733 #ifdef TARGET_NR_ugetrlimit
10734 case TARGET_NR_ugetrlimit
:
10736 struct rlimit rlim
;
10737 int resource
= target_to_host_resource(arg1
);
10738 ret
= get_errno(getrlimit(resource
, &rlim
));
10739 if (!is_error(ret
)) {
10740 struct target_rlimit
*target_rlim
;
10741 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10742 return -TARGET_EFAULT
;
10743 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10744 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10745 unlock_user_struct(target_rlim
, arg2
, 1);
10750 #ifdef TARGET_NR_truncate64
10751 case TARGET_NR_truncate64
:
10752 if (!(p
= lock_user_string(arg1
)))
10753 return -TARGET_EFAULT
;
10754 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10755 unlock_user(p
, arg1
, 0);
10758 #ifdef TARGET_NR_ftruncate64
10759 case TARGET_NR_ftruncate64
:
10760 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10762 #ifdef TARGET_NR_stat64
10763 case TARGET_NR_stat64
:
10764 if (!(p
= lock_user_string(arg1
))) {
10765 return -TARGET_EFAULT
;
10767 ret
= get_errno(stat(path(p
), &st
));
10768 unlock_user(p
, arg1
, 0);
10769 if (!is_error(ret
))
10770 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10773 #ifdef TARGET_NR_lstat64
10774 case TARGET_NR_lstat64
:
10775 if (!(p
= lock_user_string(arg1
))) {
10776 return -TARGET_EFAULT
;
10778 ret
= get_errno(lstat(path(p
), &st
));
10779 unlock_user(p
, arg1
, 0);
10780 if (!is_error(ret
))
10781 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10784 #ifdef TARGET_NR_fstat64
10785 case TARGET_NR_fstat64
:
10786 ret
= get_errno(fstat(arg1
, &st
));
10787 if (!is_error(ret
))
10788 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10791 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10792 #ifdef TARGET_NR_fstatat64
10793 case TARGET_NR_fstatat64
:
10795 #ifdef TARGET_NR_newfstatat
10796 case TARGET_NR_newfstatat
:
10798 if (!(p
= lock_user_string(arg2
))) {
10799 return -TARGET_EFAULT
;
10801 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10802 unlock_user(p
, arg2
, 0);
10803 if (!is_error(ret
))
10804 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10807 #if defined(TARGET_NR_statx)
10808 case TARGET_NR_statx
:
10810 struct target_statx
*target_stx
;
10814 p
= lock_user_string(arg2
);
10816 return -TARGET_EFAULT
;
10818 #if defined(__NR_statx)
10821 * It is assumed that struct statx is architecture independent.
10823 struct target_statx host_stx
;
10826 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10827 if (!is_error(ret
)) {
10828 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10829 unlock_user(p
, arg2
, 0);
10830 return -TARGET_EFAULT
;
10834 if (ret
!= -TARGET_ENOSYS
) {
10835 unlock_user(p
, arg2
, 0);
10840 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10841 unlock_user(p
, arg2
, 0);
10843 if (!is_error(ret
)) {
10844 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10845 return -TARGET_EFAULT
;
10847 memset(target_stx
, 0, sizeof(*target_stx
));
10848 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10849 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10850 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10851 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10852 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10853 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10854 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10855 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10856 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10857 __put_user(st
.st_size
, &target_stx
->stx_size
);
10858 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
10859 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
10860 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
10861 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
10862 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
10863 unlock_user_struct(target_stx
, arg5
, 1);
10868 #ifdef TARGET_NR_lchown
10869 case TARGET_NR_lchown
:
10870 if (!(p
= lock_user_string(arg1
)))
10871 return -TARGET_EFAULT
;
10872 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10873 unlock_user(p
, arg1
, 0);
10876 #ifdef TARGET_NR_getuid
10877 case TARGET_NR_getuid
:
10878 return get_errno(high2lowuid(getuid()));
10880 #ifdef TARGET_NR_getgid
10881 case TARGET_NR_getgid
:
10882 return get_errno(high2lowgid(getgid()));
10884 #ifdef TARGET_NR_geteuid
10885 case TARGET_NR_geteuid
:
10886 return get_errno(high2lowuid(geteuid()));
10888 #ifdef TARGET_NR_getegid
10889 case TARGET_NR_getegid
:
10890 return get_errno(high2lowgid(getegid()));
10892 case TARGET_NR_setreuid
:
10893 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10894 case TARGET_NR_setregid
:
10895 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10896 case TARGET_NR_getgroups
:
10898 int gidsetsize
= arg1
;
10899 target_id
*target_grouplist
;
10903 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10904 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10905 if (gidsetsize
== 0)
10907 if (!is_error(ret
)) {
10908 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10909 if (!target_grouplist
)
10910 return -TARGET_EFAULT
;
10911 for(i
= 0;i
< ret
; i
++)
10912 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10913 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10917 case TARGET_NR_setgroups
:
10919 int gidsetsize
= arg1
;
10920 target_id
*target_grouplist
;
10921 gid_t
*grouplist
= NULL
;
10924 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10925 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10926 if (!target_grouplist
) {
10927 return -TARGET_EFAULT
;
10929 for (i
= 0; i
< gidsetsize
; i
++) {
10930 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10932 unlock_user(target_grouplist
, arg2
, 0);
10934 return get_errno(setgroups(gidsetsize
, grouplist
));
10936 case TARGET_NR_fchown
:
10937 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10938 #if defined(TARGET_NR_fchownat)
10939 case TARGET_NR_fchownat
:
10940 if (!(p
= lock_user_string(arg2
)))
10941 return -TARGET_EFAULT
;
10942 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10943 low2highgid(arg4
), arg5
));
10944 unlock_user(p
, arg2
, 0);
10947 #ifdef TARGET_NR_setresuid
10948 case TARGET_NR_setresuid
:
10949 return get_errno(sys_setresuid(low2highuid(arg1
),
10951 low2highuid(arg3
)));
10953 #ifdef TARGET_NR_getresuid
10954 case TARGET_NR_getresuid
:
10956 uid_t ruid
, euid
, suid
;
10957 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10958 if (!is_error(ret
)) {
10959 if (put_user_id(high2lowuid(ruid
), arg1
)
10960 || put_user_id(high2lowuid(euid
), arg2
)
10961 || put_user_id(high2lowuid(suid
), arg3
))
10962 return -TARGET_EFAULT
;
10967 #ifdef TARGET_NR_getresgid
10968 case TARGET_NR_setresgid
:
10969 return get_errno(sys_setresgid(low2highgid(arg1
),
10971 low2highgid(arg3
)));
10973 #ifdef TARGET_NR_getresgid
10974 case TARGET_NR_getresgid
:
10976 gid_t rgid
, egid
, sgid
;
10977 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10978 if (!is_error(ret
)) {
10979 if (put_user_id(high2lowgid(rgid
), arg1
)
10980 || put_user_id(high2lowgid(egid
), arg2
)
10981 || put_user_id(high2lowgid(sgid
), arg3
))
10982 return -TARGET_EFAULT
;
10987 #ifdef TARGET_NR_chown
10988 case TARGET_NR_chown
:
10989 if (!(p
= lock_user_string(arg1
)))
10990 return -TARGET_EFAULT
;
10991 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10992 unlock_user(p
, arg1
, 0);
10995 case TARGET_NR_setuid
:
10996 return get_errno(sys_setuid(low2highuid(arg1
)));
10997 case TARGET_NR_setgid
:
10998 return get_errno(sys_setgid(low2highgid(arg1
)));
10999 case TARGET_NR_setfsuid
:
11000 return get_errno(setfsuid(arg1
));
11001 case TARGET_NR_setfsgid
:
11002 return get_errno(setfsgid(arg1
));
11004 #ifdef TARGET_NR_lchown32
11005 case TARGET_NR_lchown32
:
11006 if (!(p
= lock_user_string(arg1
)))
11007 return -TARGET_EFAULT
;
11008 ret
= get_errno(lchown(p
, arg2
, arg3
));
11009 unlock_user(p
, arg1
, 0);
11012 #ifdef TARGET_NR_getuid32
11013 case TARGET_NR_getuid32
:
11014 return get_errno(getuid());
11017 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11018 /* Alpha specific */
11019 case TARGET_NR_getxuid
:
11023 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11025 return get_errno(getuid());
11027 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11028 /* Alpha specific */
11029 case TARGET_NR_getxgid
:
11033 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11035 return get_errno(getgid());
11037 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11038 /* Alpha specific */
11039 case TARGET_NR_osf_getsysinfo
:
11040 ret
= -TARGET_EOPNOTSUPP
;
11042 case TARGET_GSI_IEEE_FP_CONTROL
:
11044 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11045 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11047 swcr
&= ~SWCR_STATUS_MASK
;
11048 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11050 if (put_user_u64 (swcr
, arg2
))
11051 return -TARGET_EFAULT
;
11056 /* case GSI_IEEE_STATE_AT_SIGNAL:
11057 -- Not implemented in linux kernel.
11059 -- Retrieves current unaligned access state; not much used.
11060 case GSI_PROC_TYPE:
11061 -- Retrieves implver information; surely not used.
11062 case GSI_GET_HWRPB:
11063 -- Grabs a copy of the HWRPB; surely not used.
11068 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11069 /* Alpha specific */
11070 case TARGET_NR_osf_setsysinfo
:
11071 ret
= -TARGET_EOPNOTSUPP
;
11073 case TARGET_SSI_IEEE_FP_CONTROL
:
11075 uint64_t swcr
, fpcr
;
11077 if (get_user_u64 (swcr
, arg2
)) {
11078 return -TARGET_EFAULT
;
11082 * The kernel calls swcr_update_status to update the
11083 * status bits from the fpcr at every point that it
11084 * could be queried. Therefore, we store the status
11085 * bits only in FPCR.
11087 ((CPUAlphaState
*)cpu_env
)->swcr
11088 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11090 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11091 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11092 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11093 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11098 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11100 uint64_t exc
, fpcr
, fex
;
11102 if (get_user_u64(exc
, arg2
)) {
11103 return -TARGET_EFAULT
;
11105 exc
&= SWCR_STATUS_MASK
;
11106 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11108 /* Old exceptions are not signaled. */
11109 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11111 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11112 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11114 /* Update the hardware fpcr. */
11115 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11116 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11119 int si_code
= TARGET_FPE_FLTUNK
;
11120 target_siginfo_t info
;
11122 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11123 si_code
= TARGET_FPE_FLTUND
;
11125 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11126 si_code
= TARGET_FPE_FLTRES
;
11128 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11129 si_code
= TARGET_FPE_FLTUND
;
11131 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11132 si_code
= TARGET_FPE_FLTOVF
;
11134 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11135 si_code
= TARGET_FPE_FLTDIV
;
11137 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11138 si_code
= TARGET_FPE_FLTINV
;
11141 info
.si_signo
= SIGFPE
;
11143 info
.si_code
= si_code
;
11144 info
._sifields
._sigfault
._addr
11145 = ((CPUArchState
*)cpu_env
)->pc
;
11146 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11147 QEMU_SI_FAULT
, &info
);
11153 /* case SSI_NVPAIRS:
11154 -- Used with SSIN_UACPROC to enable unaligned accesses.
11155 case SSI_IEEE_STATE_AT_SIGNAL:
11156 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11157 -- Not implemented in linux kernel
11162 #ifdef TARGET_NR_osf_sigprocmask
11163 /* Alpha specific. */
11164 case TARGET_NR_osf_sigprocmask
:
11168 sigset_t set
, oldset
;
11171 case TARGET_SIG_BLOCK
:
11174 case TARGET_SIG_UNBLOCK
:
11177 case TARGET_SIG_SETMASK
:
11181 return -TARGET_EINVAL
;
11184 target_to_host_old_sigset(&set
, &mask
);
11185 ret
= do_sigprocmask(how
, &set
, &oldset
);
11187 host_to_target_old_sigset(&mask
, &oldset
);
11194 #ifdef TARGET_NR_getgid32
11195 case TARGET_NR_getgid32
:
11196 return get_errno(getgid());
11198 #ifdef TARGET_NR_geteuid32
11199 case TARGET_NR_geteuid32
:
11200 return get_errno(geteuid());
11202 #ifdef TARGET_NR_getegid32
11203 case TARGET_NR_getegid32
:
11204 return get_errno(getegid());
11206 #ifdef TARGET_NR_setreuid32
11207 case TARGET_NR_setreuid32
:
11208 return get_errno(setreuid(arg1
, arg2
));
11210 #ifdef TARGET_NR_setregid32
11211 case TARGET_NR_setregid32
:
11212 return get_errno(setregid(arg1
, arg2
));
11214 #ifdef TARGET_NR_getgroups32
11215 case TARGET_NR_getgroups32
:
11217 int gidsetsize
= arg1
;
11218 uint32_t *target_grouplist
;
11222 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11223 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11224 if (gidsetsize
== 0)
11226 if (!is_error(ret
)) {
11227 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11228 if (!target_grouplist
) {
11229 return -TARGET_EFAULT
;
11231 for(i
= 0;i
< ret
; i
++)
11232 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11233 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11238 #ifdef TARGET_NR_setgroups32
11239 case TARGET_NR_setgroups32
:
11241 int gidsetsize
= arg1
;
11242 uint32_t *target_grouplist
;
11246 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11247 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11248 if (!target_grouplist
) {
11249 return -TARGET_EFAULT
;
11251 for(i
= 0;i
< gidsetsize
; i
++)
11252 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11253 unlock_user(target_grouplist
, arg2
, 0);
11254 return get_errno(setgroups(gidsetsize
, grouplist
));
11257 #ifdef TARGET_NR_fchown32
11258 case TARGET_NR_fchown32
:
11259 return get_errno(fchown(arg1
, arg2
, arg3
));
11261 #ifdef TARGET_NR_setresuid32
11262 case TARGET_NR_setresuid32
:
11263 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11265 #ifdef TARGET_NR_getresuid32
11266 case TARGET_NR_getresuid32
:
11268 uid_t ruid
, euid
, suid
;
11269 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11270 if (!is_error(ret
)) {
11271 if (put_user_u32(ruid
, arg1
)
11272 || put_user_u32(euid
, arg2
)
11273 || put_user_u32(suid
, arg3
))
11274 return -TARGET_EFAULT
;
11279 #ifdef TARGET_NR_setresgid32
11280 case TARGET_NR_setresgid32
:
11281 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11283 #ifdef TARGET_NR_getresgid32
11284 case TARGET_NR_getresgid32
:
11286 gid_t rgid
, egid
, sgid
;
11287 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11288 if (!is_error(ret
)) {
11289 if (put_user_u32(rgid
, arg1
)
11290 || put_user_u32(egid
, arg2
)
11291 || put_user_u32(sgid
, arg3
))
11292 return -TARGET_EFAULT
;
11297 #ifdef TARGET_NR_chown32
11298 case TARGET_NR_chown32
:
11299 if (!(p
= lock_user_string(arg1
)))
11300 return -TARGET_EFAULT
;
11301 ret
= get_errno(chown(p
, arg2
, arg3
));
11302 unlock_user(p
, arg1
, 0);
11305 #ifdef TARGET_NR_setuid32
11306 case TARGET_NR_setuid32
:
11307 return get_errno(sys_setuid(arg1
));
11309 #ifdef TARGET_NR_setgid32
11310 case TARGET_NR_setgid32
:
11311 return get_errno(sys_setgid(arg1
));
11313 #ifdef TARGET_NR_setfsuid32
11314 case TARGET_NR_setfsuid32
:
11315 return get_errno(setfsuid(arg1
));
11317 #ifdef TARGET_NR_setfsgid32
11318 case TARGET_NR_setfsgid32
:
11319 return get_errno(setfsgid(arg1
));
11321 #ifdef TARGET_NR_mincore
11322 case TARGET_NR_mincore
:
11324 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11326 return -TARGET_ENOMEM
;
11328 p
= lock_user_string(arg3
);
11330 ret
= -TARGET_EFAULT
;
11332 ret
= get_errno(mincore(a
, arg2
, p
));
11333 unlock_user(p
, arg3
, ret
);
11335 unlock_user(a
, arg1
, 0);
11339 #ifdef TARGET_NR_arm_fadvise64_64
11340 case TARGET_NR_arm_fadvise64_64
:
11341 /* arm_fadvise64_64 looks like fadvise64_64 but
11342 * with different argument order: fd, advice, offset, len
11343 * rather than the usual fd, offset, len, advice.
11344 * Note that offset and len are both 64-bit so appear as
11345 * pairs of 32-bit registers.
11347 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11348 target_offset64(arg5
, arg6
), arg2
);
11349 return -host_to_target_errno(ret
);
11352 #if TARGET_ABI_BITS == 32
11354 #ifdef TARGET_NR_fadvise64_64
11355 case TARGET_NR_fadvise64_64
:
11356 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11357 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11365 /* 6 args: fd, offset (high, low), len (high, low), advice */
11366 if (regpairs_aligned(cpu_env
, num
)) {
11367 /* offset is in (3,4), len in (5,6) and advice in 7 */
11375 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11376 target_offset64(arg4
, arg5
), arg6
);
11377 return -host_to_target_errno(ret
);
11380 #ifdef TARGET_NR_fadvise64
11381 case TARGET_NR_fadvise64
:
11382 /* 5 args: fd, offset (high, low), len, advice */
11383 if (regpairs_aligned(cpu_env
, num
)) {
11384 /* offset is in (3,4), len in 5 and advice in 6 */
11390 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11391 return -host_to_target_errno(ret
);
11394 #else /* not a 32-bit ABI */
11395 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11396 #ifdef TARGET_NR_fadvise64_64
11397 case TARGET_NR_fadvise64_64
:
11399 #ifdef TARGET_NR_fadvise64
11400 case TARGET_NR_fadvise64
:
11402 #ifdef TARGET_S390X
11404 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11405 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11406 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11407 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11411 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11413 #endif /* end of 64-bit ABI fadvise handling */
11415 #ifdef TARGET_NR_madvise
11416 case TARGET_NR_madvise
:
11417 /* A straight passthrough may not be safe because qemu sometimes
11418 turns private file-backed mappings into anonymous mappings.
11419 This will break MADV_DONTNEED.
11420 This is a hint, so ignoring and returning success is ok. */
11423 #ifdef TARGET_NR_fcntl64
11424 case TARGET_NR_fcntl64
:
11428 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11429 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11432 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11433 copyfrom
= copy_from_user_oabi_flock64
;
11434 copyto
= copy_to_user_oabi_flock64
;
11438 cmd
= target_to_host_fcntl_cmd(arg2
);
11439 if (cmd
== -TARGET_EINVAL
) {
11444 case TARGET_F_GETLK64
:
11445 ret
= copyfrom(&fl
, arg3
);
11449 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11451 ret
= copyto(arg3
, &fl
);
11455 case TARGET_F_SETLK64
:
11456 case TARGET_F_SETLKW64
:
11457 ret
= copyfrom(&fl
, arg3
);
11461 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11464 ret
= do_fcntl(arg1
, arg2
, arg3
);
11470 #ifdef TARGET_NR_cacheflush
11471 case TARGET_NR_cacheflush
:
11472 /* self-modifying code is handled automatically, so nothing needed */
11475 #ifdef TARGET_NR_getpagesize
11476 case TARGET_NR_getpagesize
:
11477 return TARGET_PAGE_SIZE
;
11479 case TARGET_NR_gettid
:
11480 return get_errno(sys_gettid());
11481 #ifdef TARGET_NR_readahead
11482 case TARGET_NR_readahead
:
11483 #if TARGET_ABI_BITS == 32
11484 if (regpairs_aligned(cpu_env
, num
)) {
11489 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11491 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11496 #ifdef TARGET_NR_setxattr
11497 case TARGET_NR_listxattr
:
11498 case TARGET_NR_llistxattr
:
11502 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11504 return -TARGET_EFAULT
;
11507 p
= lock_user_string(arg1
);
11509 if (num
== TARGET_NR_listxattr
) {
11510 ret
= get_errno(listxattr(p
, b
, arg3
));
11512 ret
= get_errno(llistxattr(p
, b
, arg3
));
11515 ret
= -TARGET_EFAULT
;
11517 unlock_user(p
, arg1
, 0);
11518 unlock_user(b
, arg2
, arg3
);
11521 case TARGET_NR_flistxattr
:
11525 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11527 return -TARGET_EFAULT
;
11530 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11531 unlock_user(b
, arg2
, arg3
);
11534 case TARGET_NR_setxattr
:
11535 case TARGET_NR_lsetxattr
:
11537 void *p
, *n
, *v
= 0;
11539 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11541 return -TARGET_EFAULT
;
11544 p
= lock_user_string(arg1
);
11545 n
= lock_user_string(arg2
);
11547 if (num
== TARGET_NR_setxattr
) {
11548 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11550 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11553 ret
= -TARGET_EFAULT
;
11555 unlock_user(p
, arg1
, 0);
11556 unlock_user(n
, arg2
, 0);
11557 unlock_user(v
, arg3
, 0);
11560 case TARGET_NR_fsetxattr
:
11564 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11566 return -TARGET_EFAULT
;
11569 n
= lock_user_string(arg2
);
11571 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11573 ret
= -TARGET_EFAULT
;
11575 unlock_user(n
, arg2
, 0);
11576 unlock_user(v
, arg3
, 0);
11579 case TARGET_NR_getxattr
:
11580 case TARGET_NR_lgetxattr
:
11582 void *p
, *n
, *v
= 0;
11584 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11586 return -TARGET_EFAULT
;
11589 p
= lock_user_string(arg1
);
11590 n
= lock_user_string(arg2
);
11592 if (num
== TARGET_NR_getxattr
) {
11593 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11595 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11598 ret
= -TARGET_EFAULT
;
11600 unlock_user(p
, arg1
, 0);
11601 unlock_user(n
, arg2
, 0);
11602 unlock_user(v
, arg3
, arg4
);
11605 case TARGET_NR_fgetxattr
:
11609 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11611 return -TARGET_EFAULT
;
11614 n
= lock_user_string(arg2
);
11616 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11618 ret
= -TARGET_EFAULT
;
11620 unlock_user(n
, arg2
, 0);
11621 unlock_user(v
, arg3
, arg4
);
11624 case TARGET_NR_removexattr
:
11625 case TARGET_NR_lremovexattr
:
11628 p
= lock_user_string(arg1
);
11629 n
= lock_user_string(arg2
);
11631 if (num
== TARGET_NR_removexattr
) {
11632 ret
= get_errno(removexattr(p
, n
));
11634 ret
= get_errno(lremovexattr(p
, n
));
11637 ret
= -TARGET_EFAULT
;
11639 unlock_user(p
, arg1
, 0);
11640 unlock_user(n
, arg2
, 0);
11643 case TARGET_NR_fremovexattr
:
11646 n
= lock_user_string(arg2
);
11648 ret
= get_errno(fremovexattr(arg1
, n
));
11650 ret
= -TARGET_EFAULT
;
11652 unlock_user(n
, arg2
, 0);
11656 #endif /* CONFIG_ATTR */
11657 #ifdef TARGET_NR_set_thread_area
11658 case TARGET_NR_set_thread_area
:
11659 #if defined(TARGET_MIPS)
11660 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11662 #elif defined(TARGET_CRIS)
11664 ret
= -TARGET_EINVAL
;
11666 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11670 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11671 return do_set_thread_area(cpu_env
, arg1
);
11672 #elif defined(TARGET_M68K)
11674 TaskState
*ts
= cpu
->opaque
;
11675 ts
->tp_value
= arg1
;
11679 return -TARGET_ENOSYS
;
11682 #ifdef TARGET_NR_get_thread_area
11683 case TARGET_NR_get_thread_area
:
11684 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11685 return do_get_thread_area(cpu_env
, arg1
);
11686 #elif defined(TARGET_M68K)
11688 TaskState
*ts
= cpu
->opaque
;
11689 return ts
->tp_value
;
11692 return -TARGET_ENOSYS
;
11695 #ifdef TARGET_NR_getdomainname
11696 case TARGET_NR_getdomainname
:
11697 return -TARGET_ENOSYS
;
11700 #ifdef TARGET_NR_clock_settime
11701 case TARGET_NR_clock_settime
:
11703 struct timespec ts
;
11705 ret
= target_to_host_timespec(&ts
, arg2
);
11706 if (!is_error(ret
)) {
11707 ret
= get_errno(clock_settime(arg1
, &ts
));
11712 #ifdef TARGET_NR_clock_settime64
11713 case TARGET_NR_clock_settime64
:
11715 struct timespec ts
;
11717 ret
= target_to_host_timespec64(&ts
, arg2
);
11718 if (!is_error(ret
)) {
11719 ret
= get_errno(clock_settime(arg1
, &ts
));
11724 #ifdef TARGET_NR_clock_gettime
11725 case TARGET_NR_clock_gettime
:
11727 struct timespec ts
;
11728 ret
= get_errno(clock_gettime(arg1
, &ts
));
11729 if (!is_error(ret
)) {
11730 ret
= host_to_target_timespec(arg2
, &ts
);
11735 #ifdef TARGET_NR_clock_gettime64
11736 case TARGET_NR_clock_gettime64
:
11738 struct timespec ts
;
11739 ret
= get_errno(clock_gettime(arg1
, &ts
));
11740 if (!is_error(ret
)) {
11741 ret
= host_to_target_timespec64(arg2
, &ts
);
11746 #ifdef TARGET_NR_clock_getres
11747 case TARGET_NR_clock_getres
:
11749 struct timespec ts
;
11750 ret
= get_errno(clock_getres(arg1
, &ts
));
11751 if (!is_error(ret
)) {
11752 host_to_target_timespec(arg2
, &ts
);
11757 #ifdef TARGET_NR_clock_nanosleep
11758 case TARGET_NR_clock_nanosleep
:
11760 struct timespec ts
;
11761 target_to_host_timespec(&ts
, arg3
);
11762 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11763 &ts
, arg4
? &ts
: NULL
));
11765 host_to_target_timespec(arg4
, &ts
);
11767 #if defined(TARGET_PPC)
11768 /* clock_nanosleep is odd in that it returns positive errno values.
11769 * On PPC, CR0 bit 3 should be set in such a situation. */
11770 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11771 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11778 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11779 case TARGET_NR_set_tid_address
:
11780 return get_errno(set_tid_address((int *)g2h(arg1
)));
11783 case TARGET_NR_tkill
:
11784 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11786 case TARGET_NR_tgkill
:
11787 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11788 target_to_host_signal(arg3
)));
11790 #ifdef TARGET_NR_set_robust_list
11791 case TARGET_NR_set_robust_list
:
11792 case TARGET_NR_get_robust_list
:
11793 /* The ABI for supporting robust futexes has userspace pass
11794 * the kernel a pointer to a linked list which is updated by
11795 * userspace after the syscall; the list is walked by the kernel
11796 * when the thread exits. Since the linked list in QEMU guest
11797 * memory isn't a valid linked list for the host and we have
11798 * no way to reliably intercept the thread-death event, we can't
11799 * support these. Silently return ENOSYS so that guest userspace
11800 * falls back to a non-robust futex implementation (which should
11801 * be OK except in the corner case of the guest crashing while
11802 * holding a mutex that is shared with another process via
11805 return -TARGET_ENOSYS
;
11808 #if defined(TARGET_NR_utimensat)
11809 case TARGET_NR_utimensat
:
11811 struct timespec
*tsp
, ts
[2];
11815 target_to_host_timespec(ts
, arg3
);
11816 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11820 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11822 if (!(p
= lock_user_string(arg2
))) {
11823 return -TARGET_EFAULT
;
11825 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11826 unlock_user(p
, arg2
, 0);
11831 #ifdef TARGET_NR_futex
11832 case TARGET_NR_futex
:
11833 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11835 #ifdef TARGET_NR_futex_time64
11836 case TARGET_NR_futex_time64
:
11837 return do_futex_time64(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11839 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11840 case TARGET_NR_inotify_init
:
11841 ret
= get_errno(sys_inotify_init());
11843 fd_trans_register(ret
, &target_inotify_trans
);
11847 #ifdef CONFIG_INOTIFY1
11848 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11849 case TARGET_NR_inotify_init1
:
11850 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11851 fcntl_flags_tbl
)));
11853 fd_trans_register(ret
, &target_inotify_trans
);
11858 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11859 case TARGET_NR_inotify_add_watch
:
11860 p
= lock_user_string(arg2
);
11861 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11862 unlock_user(p
, arg2
, 0);
11865 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11866 case TARGET_NR_inotify_rm_watch
:
11867 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11870 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11871 case TARGET_NR_mq_open
:
11873 struct mq_attr posix_mq_attr
;
11874 struct mq_attr
*pposix_mq_attr
;
11877 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11878 pposix_mq_attr
= NULL
;
11880 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11881 return -TARGET_EFAULT
;
11883 pposix_mq_attr
= &posix_mq_attr
;
11885 p
= lock_user_string(arg1
- 1);
11887 return -TARGET_EFAULT
;
11889 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11890 unlock_user (p
, arg1
, 0);
11894 case TARGET_NR_mq_unlink
:
11895 p
= lock_user_string(arg1
- 1);
11897 return -TARGET_EFAULT
;
11899 ret
= get_errno(mq_unlink(p
));
11900 unlock_user (p
, arg1
, 0);
11903 #ifdef TARGET_NR_mq_timedsend
11904 case TARGET_NR_mq_timedsend
:
11906 struct timespec ts
;
11908 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11910 target_to_host_timespec(&ts
, arg5
);
11911 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11912 host_to_target_timespec(arg5
, &ts
);
11914 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11916 unlock_user (p
, arg2
, arg3
);
11921 #ifdef TARGET_NR_mq_timedreceive
11922 case TARGET_NR_mq_timedreceive
:
11924 struct timespec ts
;
11927 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11929 target_to_host_timespec(&ts
, arg5
);
11930 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11932 host_to_target_timespec(arg5
, &ts
);
11934 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11937 unlock_user (p
, arg2
, arg3
);
11939 put_user_u32(prio
, arg4
);
11944 /* Not implemented for now... */
11945 /* case TARGET_NR_mq_notify: */
11948 case TARGET_NR_mq_getsetattr
:
11950 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11953 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11954 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
11955 &posix_mq_attr_out
));
11956 } else if (arg3
!= 0) {
11957 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
11959 if (ret
== 0 && arg3
!= 0) {
11960 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11966 #ifdef CONFIG_SPLICE
11967 #ifdef TARGET_NR_tee
11968 case TARGET_NR_tee
:
11970 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11974 #ifdef TARGET_NR_splice
11975 case TARGET_NR_splice
:
11977 loff_t loff_in
, loff_out
;
11978 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11980 if (get_user_u64(loff_in
, arg2
)) {
11981 return -TARGET_EFAULT
;
11983 ploff_in
= &loff_in
;
11986 if (get_user_u64(loff_out
, arg4
)) {
11987 return -TARGET_EFAULT
;
11989 ploff_out
= &loff_out
;
11991 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11993 if (put_user_u64(loff_in
, arg2
)) {
11994 return -TARGET_EFAULT
;
11998 if (put_user_u64(loff_out
, arg4
)) {
11999 return -TARGET_EFAULT
;
12005 #ifdef TARGET_NR_vmsplice
12006 case TARGET_NR_vmsplice
:
12008 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12010 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12011 unlock_iovec(vec
, arg2
, arg3
, 0);
12013 ret
= -host_to_target_errno(errno
);
12018 #endif /* CONFIG_SPLICE */
12019 #ifdef CONFIG_EVENTFD
12020 #if defined(TARGET_NR_eventfd)
12021 case TARGET_NR_eventfd
:
12022 ret
= get_errno(eventfd(arg1
, 0));
12024 fd_trans_register(ret
, &target_eventfd_trans
);
12028 #if defined(TARGET_NR_eventfd2)
12029 case TARGET_NR_eventfd2
:
12031 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12032 if (arg2
& TARGET_O_NONBLOCK
) {
12033 host_flags
|= O_NONBLOCK
;
12035 if (arg2
& TARGET_O_CLOEXEC
) {
12036 host_flags
|= O_CLOEXEC
;
12038 ret
= get_errno(eventfd(arg1
, host_flags
));
12040 fd_trans_register(ret
, &target_eventfd_trans
);
12045 #endif /* CONFIG_EVENTFD */
12046 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12047 case TARGET_NR_fallocate
:
12048 #if TARGET_ABI_BITS == 32
12049 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12050 target_offset64(arg5
, arg6
)));
12052 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12056 #if defined(CONFIG_SYNC_FILE_RANGE)
12057 #if defined(TARGET_NR_sync_file_range)
12058 case TARGET_NR_sync_file_range
:
12059 #if TARGET_ABI_BITS == 32
12060 #if defined(TARGET_MIPS)
12061 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12062 target_offset64(arg5
, arg6
), arg7
));
12064 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12065 target_offset64(arg4
, arg5
), arg6
));
12066 #endif /* !TARGET_MIPS */
12068 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12072 #if defined(TARGET_NR_sync_file_range2) || \
12073 defined(TARGET_NR_arm_sync_file_range)
12074 #if defined(TARGET_NR_sync_file_range2)
12075 case TARGET_NR_sync_file_range2
:
12077 #if defined(TARGET_NR_arm_sync_file_range)
12078 case TARGET_NR_arm_sync_file_range
:
12080 /* This is like sync_file_range but the arguments are reordered */
12081 #if TARGET_ABI_BITS == 32
12082 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12083 target_offset64(arg5
, arg6
), arg2
));
12085 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12090 #if defined(TARGET_NR_signalfd4)
12091 case TARGET_NR_signalfd4
:
12092 return do_signalfd4(arg1
, arg2
, arg4
);
12094 #if defined(TARGET_NR_signalfd)
12095 case TARGET_NR_signalfd
:
12096 return do_signalfd4(arg1
, arg2
, 0);
12098 #if defined(CONFIG_EPOLL)
12099 #if defined(TARGET_NR_epoll_create)
12100 case TARGET_NR_epoll_create
:
12101 return get_errno(epoll_create(arg1
));
12103 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12104 case TARGET_NR_epoll_create1
:
12105 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12107 #if defined(TARGET_NR_epoll_ctl)
12108 case TARGET_NR_epoll_ctl
:
12110 struct epoll_event ep
;
12111 struct epoll_event
*epp
= 0;
12113 struct target_epoll_event
*target_ep
;
12114 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12115 return -TARGET_EFAULT
;
12117 ep
.events
= tswap32(target_ep
->events
);
12118 /* The epoll_data_t union is just opaque data to the kernel,
12119 * so we transfer all 64 bits across and need not worry what
12120 * actual data type it is.
12122 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12123 unlock_user_struct(target_ep
, arg4
, 0);
12126 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12130 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12131 #if defined(TARGET_NR_epoll_wait)
12132 case TARGET_NR_epoll_wait
:
12134 #if defined(TARGET_NR_epoll_pwait)
12135 case TARGET_NR_epoll_pwait
:
12138 struct target_epoll_event
*target_ep
;
12139 struct epoll_event
*ep
;
12141 int maxevents
= arg3
;
12142 int timeout
= arg4
;
12144 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12145 return -TARGET_EINVAL
;
12148 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12149 maxevents
* sizeof(struct target_epoll_event
), 1);
12151 return -TARGET_EFAULT
;
12154 ep
= g_try_new(struct epoll_event
, maxevents
);
12156 unlock_user(target_ep
, arg2
, 0);
12157 return -TARGET_ENOMEM
;
12161 #if defined(TARGET_NR_epoll_pwait)
12162 case TARGET_NR_epoll_pwait
:
12164 target_sigset_t
*target_set
;
12165 sigset_t _set
, *set
= &_set
;
12168 if (arg6
!= sizeof(target_sigset_t
)) {
12169 ret
= -TARGET_EINVAL
;
12173 target_set
= lock_user(VERIFY_READ
, arg5
,
12174 sizeof(target_sigset_t
), 1);
12176 ret
= -TARGET_EFAULT
;
12179 target_to_host_sigset(set
, target_set
);
12180 unlock_user(target_set
, arg5
, 0);
12185 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12186 set
, SIGSET_T_SIZE
));
12190 #if defined(TARGET_NR_epoll_wait)
12191 case TARGET_NR_epoll_wait
:
12192 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12197 ret
= -TARGET_ENOSYS
;
12199 if (!is_error(ret
)) {
12201 for (i
= 0; i
< ret
; i
++) {
12202 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12203 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12205 unlock_user(target_ep
, arg2
,
12206 ret
* sizeof(struct target_epoll_event
));
12208 unlock_user(target_ep
, arg2
, 0);
12215 #ifdef TARGET_NR_prlimit64
12216 case TARGET_NR_prlimit64
:
12218 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12219 struct target_rlimit64
*target_rnew
, *target_rold
;
12220 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12221 int resource
= target_to_host_resource(arg2
);
12223 if (arg3
&& (resource
!= RLIMIT_AS
&&
12224 resource
!= RLIMIT_DATA
&&
12225 resource
!= RLIMIT_STACK
)) {
12226 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12227 return -TARGET_EFAULT
;
12229 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12230 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12231 unlock_user_struct(target_rnew
, arg3
, 0);
12235 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12236 if (!is_error(ret
) && arg4
) {
12237 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12238 return -TARGET_EFAULT
;
12240 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12241 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12242 unlock_user_struct(target_rold
, arg4
, 1);
12247 #ifdef TARGET_NR_gethostname
12248 case TARGET_NR_gethostname
:
12250 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12252 ret
= get_errno(gethostname(name
, arg2
));
12253 unlock_user(name
, arg1
, arg2
);
12255 ret
= -TARGET_EFAULT
;
12260 #ifdef TARGET_NR_atomic_cmpxchg_32
12261 case TARGET_NR_atomic_cmpxchg_32
:
12263 /* should use start_exclusive from main.c */
12264 abi_ulong mem_value
;
12265 if (get_user_u32(mem_value
, arg6
)) {
12266 target_siginfo_t info
;
12267 info
.si_signo
= SIGSEGV
;
12269 info
.si_code
= TARGET_SEGV_MAPERR
;
12270 info
._sifields
._sigfault
._addr
= arg6
;
12271 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12272 QEMU_SI_FAULT
, &info
);
12276 if (mem_value
== arg2
)
12277 put_user_u32(arg1
, arg6
);
12281 #ifdef TARGET_NR_atomic_barrier
12282 case TARGET_NR_atomic_barrier
:
12283 /* Like the kernel implementation and the
12284 qemu arm barrier, no-op this? */
12288 #ifdef TARGET_NR_timer_create
12289 case TARGET_NR_timer_create
:
12291 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12293 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12296 int timer_index
= next_free_host_timer();
12298 if (timer_index
< 0) {
12299 ret
= -TARGET_EAGAIN
;
12301 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12304 phost_sevp
= &host_sevp
;
12305 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12311 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12315 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12316 return -TARGET_EFAULT
;
12324 #ifdef TARGET_NR_timer_settime
12325 case TARGET_NR_timer_settime
:
12327 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12328 * struct itimerspec * old_value */
12329 target_timer_t timerid
= get_timer_id(arg1
);
12333 } else if (arg3
== 0) {
12334 ret
= -TARGET_EINVAL
;
12336 timer_t htimer
= g_posix_timers
[timerid
];
12337 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12339 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12340 return -TARGET_EFAULT
;
12343 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12344 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12345 return -TARGET_EFAULT
;
12352 #ifdef TARGET_NR_timer_gettime
12353 case TARGET_NR_timer_gettime
:
12355 /* args: timer_t timerid, struct itimerspec *curr_value */
12356 target_timer_t timerid
= get_timer_id(arg1
);
12360 } else if (!arg2
) {
12361 ret
= -TARGET_EFAULT
;
12363 timer_t htimer
= g_posix_timers
[timerid
];
12364 struct itimerspec hspec
;
12365 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12367 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12368 ret
= -TARGET_EFAULT
;
12375 #ifdef TARGET_NR_timer_getoverrun
12376 case TARGET_NR_timer_getoverrun
:
12378 /* args: timer_t timerid */
12379 target_timer_t timerid
= get_timer_id(arg1
);
12384 timer_t htimer
= g_posix_timers
[timerid
];
12385 ret
= get_errno(timer_getoverrun(htimer
));
12391 #ifdef TARGET_NR_timer_delete
12392 case TARGET_NR_timer_delete
:
12394 /* args: timer_t timerid */
12395 target_timer_t timerid
= get_timer_id(arg1
);
12400 timer_t htimer
= g_posix_timers
[timerid
];
12401 ret
= get_errno(timer_delete(htimer
));
12402 g_posix_timers
[timerid
] = 0;
12408 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12409 case TARGET_NR_timerfd_create
:
12410 return get_errno(timerfd_create(arg1
,
12411 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12414 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12415 case TARGET_NR_timerfd_gettime
:
12417 struct itimerspec its_curr
;
12419 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12421 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12422 return -TARGET_EFAULT
;
12428 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12429 case TARGET_NR_timerfd_settime
:
12431 struct itimerspec its_new
, its_old
, *p_new
;
12434 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12435 return -TARGET_EFAULT
;
12442 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12444 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12445 return -TARGET_EFAULT
;
12451 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12452 case TARGET_NR_ioprio_get
:
12453 return get_errno(ioprio_get(arg1
, arg2
));
12456 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12457 case TARGET_NR_ioprio_set
:
12458 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12461 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12462 case TARGET_NR_setns
:
12463 return get_errno(setns(arg1
, arg2
));
12465 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12466 case TARGET_NR_unshare
:
12467 return get_errno(unshare(arg1
));
12469 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12470 case TARGET_NR_kcmp
:
12471 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12473 #ifdef TARGET_NR_swapcontext
12474 case TARGET_NR_swapcontext
:
12475 /* PowerPC specific. */
12476 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12478 #ifdef TARGET_NR_memfd_create
12479 case TARGET_NR_memfd_create
:
12480 p
= lock_user_string(arg1
);
12482 return -TARGET_EFAULT
;
12484 ret
= get_errno(memfd_create(p
, arg2
));
12485 fd_trans_unregister(ret
);
12486 unlock_user(p
, arg1
, 0);
12489 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12490 case TARGET_NR_membarrier
:
12491 return get_errno(membarrier(arg1
, arg2
));
12495 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12496 return -TARGET_ENOSYS
;
12501 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
12502 abi_long arg2
, abi_long arg3
, abi_long arg4
,
12503 abi_long arg5
, abi_long arg6
, abi_long arg7
,
12506 CPUState
*cpu
= env_cpu(cpu_env
);
12509 #ifdef DEBUG_ERESTARTSYS
12510 /* Debug-only code for exercising the syscall-restart code paths
12511 * in the per-architecture cpu main loops: restart every syscall
12512 * the guest makes once before letting it through.
12518 return -TARGET_ERESTARTSYS
;
12523 record_syscall_start(cpu
, num
, arg1
,
12524 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
12526 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12527 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12530 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12531 arg5
, arg6
, arg7
, arg8
);
12533 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12534 print_syscall_ret(num
, ret
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12537 record_syscall_return(cpu
, num
, ret
);