/*
 * linux-user/syscall.c — QEMU user-mode Linux syscall emulation
 * (imported from the mirror_qemu.git tree).
 */
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #endif
118 #include "linux_loop.h"
119 #include "uname.h"
120
121 #include "qemu.h"
122 #include "qemu/guest-random.h"
123 #include "qemu/selfmap.h"
124 #include "user/syscall-trace.h"
125 #include "qapi/error.h"
126 #include "fd-trans.h"
127 #include "tcg/tcg.h"
128
/* Some hosts' headers predate CLONE_IO; provide the kernel value locally. */
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any flag bit not covered by the masks above makes the clone request fail. */
#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

/* Local copies of the VFAT readdir ioctl numbers, taken from
 * linux/msdos_fs.h rather than pulling in the whole header.
 */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
188
/* Wrapper generators: _syscallN(type, name, ...) defines a static function
 * 'name' that invokes the raw host syscall __NR_<name> with N arguments via
 * syscall(2), bypassing any libc wrapper.  Undefine any versions a libc
 * header may already have provided.
 */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
242
243
/* Raw host syscall wrappers used by the emulation below.  The sys_ prefix
 * avoids clashing with any libc function of the same name; the __NR_sys_*
 * aliases let _syscallN() paste the right syscall number.
 */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

/* These 64-bit hosts only provide lseek; use it to back _llseek. */
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
354
/* Translation table between guest and host open(2)/fcntl(2) flag bits.
 * Each row is { target_mask, target_bits, host_mask, host_bits }; bits the
 * host lacks are compiled out, so guests simply don't see those flags.
 */
static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};
390
/*
 * getcwd(2)-style wrapper with kernel return semantics: on success the
 * result is the length of the path string including its NUL terminator;
 * on failure the result is -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
399
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
/* Host kernel provides utimensat: call it directly. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* No host support: fail with ENOSYS so the guest sees the usual error. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
414
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
/* Host kernel provides renameat2: call it directly. */
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Fallback: plain renameat() covers the flags==0 case only; any
 * RENAME_* flag fails with ENOSYS as an old kernel would.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
432
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin passthroughs to the libc inotify wrappers; each is only compiled
 * when both the guest syscall number and the host syscall exist.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
469
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1      /* syscall(-1) fails with ENOSYS at runtime */
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
484
485
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first unused slot in g_posix_timers and return its index,
 * or -1 if all slots are busy.  The slot is marked with a dummy
 * non-zero value; the real timer_t is stored once timer_create()
 * succeeds.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
503
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers.
 * regpairs_aligned() returns non-zero when, for syscall 'num', a 64-bit
 * argument occupies an aligned (even/odd) register pair on this target.
 */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    /* Only the EABI calling convention aligns pairs; OABI does not. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
535
536 #define ERRNO_TABLE_SIZE 1200
537
538 /* target_to_host_errno_table[] is initialized from
539 * host_to_target_errno_table[] in syscall_init(). */
540 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
541 };
542
543 /*
544 * This list is the union of errno values overridden in asm-<arch>/errno.h
545 * minus the errnos that are not actually generic to all archs.
546 */
547 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
548 [EAGAIN] = TARGET_EAGAIN,
549 [EIDRM] = TARGET_EIDRM,
550 [ECHRNG] = TARGET_ECHRNG,
551 [EL2NSYNC] = TARGET_EL2NSYNC,
552 [EL3HLT] = TARGET_EL3HLT,
553 [EL3RST] = TARGET_EL3RST,
554 [ELNRNG] = TARGET_ELNRNG,
555 [EUNATCH] = TARGET_EUNATCH,
556 [ENOCSI] = TARGET_ENOCSI,
557 [EL2HLT] = TARGET_EL2HLT,
558 [EDEADLK] = TARGET_EDEADLK,
559 [ENOLCK] = TARGET_ENOLCK,
560 [EBADE] = TARGET_EBADE,
561 [EBADR] = TARGET_EBADR,
562 [EXFULL] = TARGET_EXFULL,
563 [ENOANO] = TARGET_ENOANO,
564 [EBADRQC] = TARGET_EBADRQC,
565 [EBADSLT] = TARGET_EBADSLT,
566 [EBFONT] = TARGET_EBFONT,
567 [ENOSTR] = TARGET_ENOSTR,
568 [ENODATA] = TARGET_ENODATA,
569 [ETIME] = TARGET_ETIME,
570 [ENOSR] = TARGET_ENOSR,
571 [ENONET] = TARGET_ENONET,
572 [ENOPKG] = TARGET_ENOPKG,
573 [EREMOTE] = TARGET_EREMOTE,
574 [ENOLINK] = TARGET_ENOLINK,
575 [EADV] = TARGET_EADV,
576 [ESRMNT] = TARGET_ESRMNT,
577 [ECOMM] = TARGET_ECOMM,
578 [EPROTO] = TARGET_EPROTO,
579 [EDOTDOT] = TARGET_EDOTDOT,
580 [EMULTIHOP] = TARGET_EMULTIHOP,
581 [EBADMSG] = TARGET_EBADMSG,
582 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
583 [EOVERFLOW] = TARGET_EOVERFLOW,
584 [ENOTUNIQ] = TARGET_ENOTUNIQ,
585 [EBADFD] = TARGET_EBADFD,
586 [EREMCHG] = TARGET_EREMCHG,
587 [ELIBACC] = TARGET_ELIBACC,
588 [ELIBBAD] = TARGET_ELIBBAD,
589 [ELIBSCN] = TARGET_ELIBSCN,
590 [ELIBMAX] = TARGET_ELIBMAX,
591 [ELIBEXEC] = TARGET_ELIBEXEC,
592 [EILSEQ] = TARGET_EILSEQ,
593 [ENOSYS] = TARGET_ENOSYS,
594 [ELOOP] = TARGET_ELOOP,
595 [ERESTART] = TARGET_ERESTART,
596 [ESTRPIPE] = TARGET_ESTRPIPE,
597 [ENOTEMPTY] = TARGET_ENOTEMPTY,
598 [EUSERS] = TARGET_EUSERS,
599 [ENOTSOCK] = TARGET_ENOTSOCK,
600 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
601 [EMSGSIZE] = TARGET_EMSGSIZE,
602 [EPROTOTYPE] = TARGET_EPROTOTYPE,
603 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
604 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
605 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
606 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
607 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
608 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
609 [EADDRINUSE] = TARGET_EADDRINUSE,
610 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
611 [ENETDOWN] = TARGET_ENETDOWN,
612 [ENETUNREACH] = TARGET_ENETUNREACH,
613 [ENETRESET] = TARGET_ENETRESET,
614 [ECONNABORTED] = TARGET_ECONNABORTED,
615 [ECONNRESET] = TARGET_ECONNRESET,
616 [ENOBUFS] = TARGET_ENOBUFS,
617 [EISCONN] = TARGET_EISCONN,
618 [ENOTCONN] = TARGET_ENOTCONN,
619 [EUCLEAN] = TARGET_EUCLEAN,
620 [ENOTNAM] = TARGET_ENOTNAM,
621 [ENAVAIL] = TARGET_ENAVAIL,
622 [EISNAM] = TARGET_EISNAM,
623 [EREMOTEIO] = TARGET_EREMOTEIO,
624 [EDQUOT] = TARGET_EDQUOT,
625 [ESHUTDOWN] = TARGET_ESHUTDOWN,
626 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
627 [ETIMEDOUT] = TARGET_ETIMEDOUT,
628 [ECONNREFUSED] = TARGET_ECONNREFUSED,
629 [EHOSTDOWN] = TARGET_EHOSTDOWN,
630 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
631 [EALREADY] = TARGET_EALREADY,
632 [EINPROGRESS] = TARGET_EINPROGRESS,
633 [ESTALE] = TARGET_ESTALE,
634 [ECANCELED] = TARGET_ECANCELED,
635 [ENOMEDIUM] = TARGET_ENOMEDIUM,
636 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
637 #ifdef ENOKEY
638 [ENOKEY] = TARGET_ENOKEY,
639 #endif
640 #ifdef EKEYEXPIRED
641 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
642 #endif
643 #ifdef EKEYREVOKED
644 [EKEYREVOKED] = TARGET_EKEYREVOKED,
645 #endif
646 #ifdef EKEYREJECTED
647 [EKEYREJECTED] = TARGET_EKEYREJECTED,
648 #endif
649 #ifdef EOWNERDEAD
650 [EOWNERDEAD] = TARGET_EOWNERDEAD,
651 #endif
652 #ifdef ENOTRECOVERABLE
653 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
654 #endif
655 #ifdef ENOMSG
656 [ENOMSG] = TARGET_ENOMSG,
657 #endif
658 #ifdef ERKFILL
659 [ERFKILL] = TARGET_ERFKILL,
660 #endif
661 #ifdef EHWPOISON
662 [EHWPOISON] = TARGET_EHWPOISON,
663 #endif
664 };
665
666 static inline int host_to_target_errno(int err)
667 {
668 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
669 host_to_target_errno_table[err]) {
670 return host_to_target_errno_table[err];
671 }
672 return err;
673 }
674
675 static inline int target_to_host_errno(int err)
676 {
677 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
678 target_to_host_errno_table[err]) {
679 return target_to_host_errno_table[err];
680 }
681 return err;
682 }
683
684 static inline abi_long get_errno(abi_long ret)
685 {
686 if (ret == -1)
687 return -host_to_target_errno(errno);
688 else
689 return ret;
690 }
691
692 const char *target_strerror(int err)
693 {
694 if (err == TARGET_ERESTARTSYS) {
695 return "To be restarted";
696 }
697 if (err == TARGET_QEMU_ESIGRETURN) {
698 return "Successful exit from sigreturn";
699 }
700
701 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
702 return NULL;
703 }
704 return strerror(target_to_host_errno(err));
705 }
706
/* safe_syscallN(type, name, ...) defines safe_<name>(), which routes the
 * call through safe_syscall() (defined elsewhere) instead of the raw
 * syscall() used by _syscallN above.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
753
/* Instantiations of the safe_syscall wrappers used by the emulation code
 * below; each is compiled only when the corresponding guest syscall or
 * host syscall number exists.
 */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#ifdef TARGET_NR_rt_sigtimedwait
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedsend
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedreceive
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
858
859 static inline int host_to_target_sock_type(int host_type)
860 {
861 int target_type;
862
863 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
864 case SOCK_DGRAM:
865 target_type = TARGET_SOCK_DGRAM;
866 break;
867 case SOCK_STREAM:
868 target_type = TARGET_SOCK_STREAM;
869 break;
870 default:
871 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
872 break;
873 }
874
875 #if defined(SOCK_CLOEXEC)
876 if (host_type & SOCK_CLOEXEC) {
877 target_type |= TARGET_SOCK_CLOEXEC;
878 }
879 #endif
880
881 #if defined(SOCK_NONBLOCK)
882 if (host_type & SOCK_NONBLOCK) {
883 target_type |= TARGET_SOCK_NONBLOCK;
884 }
885 #endif
886
887 return target_type;
888 }
889
/* Guest heap state: the initial program break, the current break, and the
 * host-page-aligned top of the region reserved for the heap so far.
 */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    /* Record the initial break, rounded up to a host page boundary. */
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
902
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* The break may never move below its initial value; refuse silently
     * by returning the unchanged break, as Linux does. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
980
981 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
982 defined(TARGET_NR_pselect6)
983 static inline abi_long copy_from_user_fdset(fd_set *fds,
984 abi_ulong target_fds_addr,
985 int n)
986 {
987 int i, nw, j, k;
988 abi_ulong b, *target_fds;
989
990 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
991 if (!(target_fds = lock_user(VERIFY_READ,
992 target_fds_addr,
993 sizeof(abi_ulong) * nw,
994 1)))
995 return -TARGET_EFAULT;
996
997 FD_ZERO(fds);
998 k = 0;
999 for (i = 0; i < nw; i++) {
1000 /* grab the abi_ulong */
1001 __get_user(b, &target_fds[i]);
1002 for (j = 0; j < TARGET_ABI_BITS; j++) {
1003 /* check the bit inside the abi_ulong */
1004 if ((b >> j) & 1)
1005 FD_SET(k, fds);
1006 k++;
1007 }
1008 }
1009
1010 unlock_user(target_fds, target_fds_addr, 0);
1011
1012 return 0;
1013 }
1014
1015 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1016 abi_ulong target_fds_addr,
1017 int n)
1018 {
1019 if (target_fds_addr) {
1020 if (copy_from_user_fdset(fds, target_fds_addr, n))
1021 return -TARGET_EFAULT;
1022 *fds_ptr = fds;
1023 } else {
1024 *fds_ptr = NULL;
1025 }
1026 return 0;
1027 }
1028
1029 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1030 const fd_set *fds,
1031 int n)
1032 {
1033 int i, nw, j, k;
1034 abi_long v;
1035 abi_ulong *target_fds;
1036
1037 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1038 if (!(target_fds = lock_user(VERIFY_WRITE,
1039 target_fds_addr,
1040 sizeof(abi_ulong) * nw,
1041 0)))
1042 return -TARGET_EFAULT;
1043
1044 k = 0;
1045 for (i = 0; i < nw; i++) {
1046 v = 0;
1047 for (j = 0; j < TARGET_ABI_BITS; j++) {
1048 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1049 k++;
1050 }
1051 __put_user(v, &target_fds[i]);
1052 }
1053
1054 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1055
1056 return 0;
1057 }
1058 #endif
1059
/* Clock tick rate of the host kernel, for clock_t conversions below. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
1065
/* Rescale a clock_t tick count from the host's HZ to the target's. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bit so the intermediate multiply cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1074
/*
 * Copy a host struct rusage to guest memory at target_addr, byte-swapping
 * each field to the target's endianness.  Returns 0 on success or
 * -TARGET_EFAULT if the guest address is not writable.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1104
#ifdef TARGET_NR_setrlimit
/*
 * Convert a target rlimit value (target byte order) to the host's rlim_t.
 * The target's infinity marker, and any value that does not survive the
 * round trip through rlim_t, map to the host's RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped = tswapal(target_rlim);
    rlim_t result = swapped;

    if (swapped == TARGET_RLIM_INFINITY || swapped != (rlim_t)result) {
        return RLIM_INFINITY;
    }
    return result;
}
#endif
1122
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to the target representation (target byte order).
 * The host's infinity, and any value too wide for the target's abi_long,
 * become the target's RLIM_INFINITY marker.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong swapped;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        swapped = TARGET_RLIM_INFINITY;
    } else {
        swapped = rlim;
    }
    return tswapal(swapped);
}
#endif
1138
/* Map a target RLIMIT_* resource code to the host's numbering. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        /* Unknown code: pass it through and let the host syscall reject it. */
        return code;
    }
}
1176
1177 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1178 abi_ulong target_tv_addr)
1179 {
1180 struct target_timeval *target_tv;
1181
1182 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1183 return -TARGET_EFAULT;
1184 }
1185
1186 __get_user(tv->tv_sec, &target_tv->tv_sec);
1187 __get_user(tv->tv_usec, &target_tv->tv_usec);
1188
1189 unlock_user_struct(target_tv, target_tv_addr, 0);
1190
1191 return 0;
1192 }
1193
1194 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1195 const struct timeval *tv)
1196 {
1197 struct target_timeval *target_tv;
1198
1199 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1200 return -TARGET_EFAULT;
1201 }
1202
1203 __put_user(tv->tv_sec, &target_tv->tv_sec);
1204 __put_user(tv->tv_usec, &target_tv->tv_usec);
1205
1206 unlock_user_struct(target_tv, target_tv_addr, 1);
1207
1208 return 0;
1209 }
1210
1211 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1212 const struct timeval *tv)
1213 {
1214 struct target__kernel_sock_timeval *target_tv;
1215
1216 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1217 return -TARGET_EFAULT;
1218 }
1219
1220 __put_user(tv->tv_sec, &target_tv->tv_sec);
1221 __put_user(tv->tv_usec, &target_tv->tv_usec);
1222
1223 unlock_user_struct(target_tv, target_tv_addr, 1);
1224
1225 return 0;
1226 }
1227
/*
 * Guard fix: the original condition listed defined(TARGET_NR_pselect6)
 * twice; the duplicate term is redundant (A || A == A) and is dropped.
 */
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive)
/*
 * Fetch a struct timespec from guest memory at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if the address is unreadable.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1248
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
/* Fetch a 64-bit (__kernel_timespec) timespec from guest memory. */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        __get_user(host_ts->tv_sec, &target_ts->tv_sec);
        __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
        unlock_user_struct(target_ts, target_addr, 0);
        return 0;
    }
    return -TARGET_EFAULT;
}
#endif
1264
1265 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1266 struct timespec *host_ts)
1267 {
1268 struct target_timespec *target_ts;
1269
1270 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1271 return -TARGET_EFAULT;
1272 }
1273 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1274 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1275 unlock_user_struct(target_ts, target_addr, 1);
1276 return 0;
1277 }
1278
1279 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1280 struct timespec *host_ts)
1281 {
1282 struct target__kernel_timespec *target_ts;
1283
1284 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1285 return -TARGET_EFAULT;
1286 }
1287 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1288 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1289 unlock_user_struct(target_ts, target_addr, 1);
1290 return 0;
1291 }
1292
#if defined(TARGET_NR_gettimeofday)
/*
 * Store a struct timezone into guest memory at target_tz_addr.
 * Both fields of the target struct are fully overwritten, so the lock
 * does not need to copy the guest's old contents in first: pass copy
 * flag 0 (the previous code passed 1, performing a pointless
 * guest->host copy and diverging from every other copy_to_user_*
 * helper in this file).
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
1311
#if defined(TARGET_NR_settimeofday)
/* Fetch a struct timezone from guest memory at target_tz_addr. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
        __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
        unlock_user_struct(target_tz, target_tz_addr, 0);
        return 0;
    }
    return -TARGET_EFAULT;
}
#endif
1330
1331 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1332 #include <mqueue.h>
1333
1334 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1335 abi_ulong target_mq_attr_addr)
1336 {
1337 struct target_mq_attr *target_mq_attr;
1338
1339 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1340 target_mq_attr_addr, 1))
1341 return -TARGET_EFAULT;
1342
1343 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1344 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1345 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1346 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1347
1348 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1349
1350 return 0;
1351 }
1352
1353 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1354 const struct mq_attr *attr)
1355 {
1356 struct target_mq_attr *target_mq_attr;
1357
1358 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1359 target_mq_attr_addr, 0))
1360 return -TARGET_EFAULT;
1361
1362 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1363 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1364 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1365 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1366
1367 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1368
1369 return 0;
1370 }
1371 #endif
1372
1373 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL fd_set pointer for the host call. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select()'s timeval is converted to a timespec for pselect6. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the possibly-modified fd sets back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux select() writes back the remaining timeout. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1430
1431 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1432 static abi_long do_old_select(abi_ulong arg1)
1433 {
1434 struct target_sel_arg_struct *sel;
1435 abi_ulong inp, outp, exp, tvp;
1436 long nsel;
1437
1438 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1439 return -TARGET_EFAULT;
1440 }
1441
1442 nsel = tswapal(sel->n);
1443 inp = tswapal(sel->inp);
1444 outp = tswapal(sel->outp);
1445 exp = tswapal(sel->exp);
1446 tvp = tswapal(sel->tvp);
1447
1448 unlock_user_struct(sel, arg1, 0);
1449
1450 return do_select(nsel, inp, outp, exp, tvp);
1451 }
1452 #endif
1453 #endif
1454
/* Host-side pipe2() wrapper; returns -ENOSYS when the host lacks pipe2. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1463
/*
 * Implement pipe()/pipe2() for the guest.  pipedes is the guest address
 * of the two-element fd array; is_pipe2 distinguishes the two syscalls
 * because several targets return the second fd in a CPU register for
 * the original pipe syscall instead of writing it to memory.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd goes in a register; first is the return value. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic path: write both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1497
1498 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1499 abi_ulong target_addr,
1500 socklen_t len)
1501 {
1502 struct target_ip_mreqn *target_smreqn;
1503
1504 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1505 if (!target_smreqn)
1506 return -TARGET_EFAULT;
1507 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1508 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1509 if (len == sizeof(struct target_ip_mreqn))
1510 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1511 unlock_user(target_smreqn, target_addr, 0);
1512
1513 return 0;
1514 }
1515
/*
 * Convert a guest sockaddr at target_addr (len bytes) into the host
 * struct at *addr: byte-swap the family and any family-specific fields,
 * and repair common AF_UNIX length mistakes.  An fd-specific translator
 * registered for this fd takes precedence over the generic conversion.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Grow len by one if the terminator was omitted but fits. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    /* AF_NETLINK and AF_PACKET carry multi-byte fields needing a swap. */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1572
/*
 * Copy a host sockaddr (len bytes) out to guest memory at target_addr,
 * byte-swapping the family plus any family-specific fields.  len may be
 * shorter than the full structure; only fields that fit are swapped.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the copy actually covered it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1612
/*
 * Convert the control-message (ancillary data) chain attached to a guest
 * msghdr into the host cmsg chain already allocated in *msgh.  Handles
 * SCM_RIGHTS (fd passing) and SCM_CREDENTIALS explicitly; other payloads
 * are copied verbatim with a LOG_UNIMP warning.  On return,
 * msgh->msg_controllen holds the host space actually used.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* No room for even one header: treat as no control data at all. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the guest and host cmsg chains in lock step. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* SOL_SOCKET may differ numerically between target and host. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* fd passing: swap each 32-bit fd individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload type: pass the bytes through unswapped. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1698
/*
 * Convert the host control-message (ancillary data) chain in *msgh into
 * the guest's cmsg chain attached to target_msgh.  Payloads the guest's
 * layout differs for (SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS,
 * IP/IPV6 TTL and RECVERR data) are converted field by field; anything
 * else is byte-copied with a LOG_UNIMP warning.  If the guest buffer is
 * too small, data is truncated and MSG_CTRUNC is set in msg_flags, as
 * the kernel's put_cmsg() does.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Guest buffer cannot hold even one header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the host and guest cmsg chains in lock step. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* SOL_SOCKET may differ numerically between host and target. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Clamp to the space the guest actually has left. */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* fd passing: swap each 32-bit fd individually. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error report: sock_extended_err plus the
                 * offending peer address. */
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Fallback: raw copy, zero-padding if the guest wants more. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1930
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulate the guest's setsockopt(2): read the option payload from guest
 * memory at optval_addr, convert it (byte order / structure layout) to
 * the host representation where needed, then call the host setsockopt().
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* Int-valued options; the guest may supply either a full
             * 32-bit value or a single byte (optlen of 0 leaves val at 0). */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept either the short ip_mreq or the full ip_mreqn layout. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* Passed through without field conversion — presumably because
             * the addresses are already in network byte order on both sides.
             * NOTE(review): lock_user() result is not checked for NULL here;
             * a bad guest pointer would pass NULL to setsockopt(). */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            /* Int-valued IPv6 options; unlike SOL_IP above these require
             * a full 32-bit payload. */
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index is host-endian; the address field
             * is left untouched (network byte order). */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* As with IPV6_PKTINFO, only the interface index needs a swap. */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            /* Accept oversized input but only use the real filter size. */
            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is an array of eight 32-bit words; byteswap each. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            /* NOTE(review): g_malloc() aborts on OOM rather than returning
             * NULL, but g_malloc(0) does return NULL — so a zero-length key
             * is reported as ENOMEM instead of being passed through.
             * Verify whether a 0-length ALG_SET_KEY should be allowed. */
            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* The auth size is carried in optlen itself; no payload. */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

                /* Shared tail for SO_RCVTIMEO/SO_SNDTIMEO: convert the
                 * guest timeval and issue the host call directly. */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* Deep-copy the BPF program, byteswapping each instruction
                 * field that is wider than a byte. */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* Clamp to the interface-name limit; the copy below adds
                 * the terminating NUL explicitly. */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
        case TARGET_SO_LINGER:
        {
                struct linger lg;
                struct target_linger *tlg;

                if (optlen != sizeof(struct target_linger)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                    return -TARGET_EFAULT;
                }
                __get_user(lg.l_onoff, &tlg->l_onoff);
                __get_user(lg.l_linger, &tlg->l_linger);
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                &lg, sizeof(lg)));
                unlock_user_struct(tlg, optval_addr, 0);
                return ret;
        }
        /* Options with 'int' argument.  The cases below only map the
         * target option name to the host one; the shared code after the
         * switch fetches the int value and makes the host call. */
        case TARGET_SO_DEBUG:
		optname = SO_DEBUG;
		break;
        case TARGET_SO_REUSEADDR:
		optname = SO_REUSEADDR;
		break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
                optname = SO_REUSEPORT;
                break;
#endif
        case TARGET_SO_TYPE:
		optname = SO_TYPE;
		break;
        case TARGET_SO_ERROR:
		optname = SO_ERROR;
		break;
        case TARGET_SO_DONTROUTE:
		optname = SO_DONTROUTE;
		break;
        case TARGET_SO_BROADCAST:
		optname = SO_BROADCAST;
		break;
        case TARGET_SO_SNDBUF:
		optname = SO_SNDBUF;
		break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
		optname = SO_RCVBUF;
		break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
		optname = SO_KEEPALIVE;
		break;
        case TARGET_SO_OOBINLINE:
		optname = SO_OOBINLINE;
		break;
        case TARGET_SO_NO_CHECK:
		optname = SO_NO_CHECK;
		break;
        case TARGET_SO_PRIORITY:
		optname = SO_PRIORITY;
		break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
		optname = SO_BSDCOMPAT;
		break;
#endif
        case TARGET_SO_PASSCRED:
		optname = SO_PASSCRED;
		break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
		optname = SO_TIMESTAMP;
		break;
        case TARGET_SO_RCVLOWAT:
		optname = SO_RCVLOWAT;
		break;
        default:
            goto unimplemented;
        }
	if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

	if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        /* All supported SOL_NETLINK options take an int. */
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2388
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulate the guest's getsockopt(2): issue the host call, convert the
 * result to target byte order / layout, and write both the value and the
 * updated length back to guest memory.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

            /* Shared tail for SO_RCVTIMEO/SO_SNDTIMEO. */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            /* Never write more than the guest asked for or than a
             * target_timeval holds. */
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* Copy the credentials field-by-field so each pid/uid/gid
             * gets the target byte order. */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            /* Security label is an opaque string; pass it through. */
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument: map the target name to the host
         * name and share the int-fetching tail at int_case below. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Unknown SOL_SOCKET options are tried as int-valued with the
             * target optname passed through unchanged. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) is used as the host buffer length where
         * sizeof(val) reads more naturally; equivalent as long as
         * socklen_t and int have the same size — confirm on all hosts. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back either a full u32 or, for shorter requests, a byte. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Short request and byte-representable value: return one byte. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Same short-read handling as the SOL_IP case above. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            /* NOTE(review): this error path returns without calling
             * unlock_user(results, ...), leaving the guest buffer locked. */
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2798
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* Each shift is done in two half-width steps: a single shift by
     * TARGET_LONG_BITS (or HOST_LONG_BITS) would equal the operand width
     * on 64-bit targets/hosts, which is undefined behavior in C. */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;                   /* truncates to the low host word */
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
2815
/* Translate a guest iovec array at target_addr into a host struct iovec
 * array with every referenced guest buffer locked into host memory.
 *
 * Returns the host vector on success.  On failure returns NULL with errno
 * set (the caller converts it to a target errno); NULL with errno == 0
 * means count was 0.  The result must be released with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Cap the cumulative length at max_len, clamping the entry
             * that crosses the limit and zeroing the rest. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Undo the locks taken so far; entry i itself was never locked. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2903
2904 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2905 abi_ulong count, int copy)
2906 {
2907 struct target_iovec *target_vec;
2908 int i;
2909
2910 target_vec = lock_user(VERIFY_READ, target_addr,
2911 count * sizeof(struct target_iovec), 1);
2912 if (target_vec) {
2913 for (i = 0; i < count; i++) {
2914 abi_ulong base = tswapal(target_vec[i].iov_base);
2915 abi_long len = tswapal(target_vec[i].iov_len);
2916 if (len < 0) {
2917 break;
2918 }
2919 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2920 }
2921 unlock_user(target_vec, target_addr, 0);
2922 }
2923
2924 g_free(vec);
2925 }
2926
2927 static inline int target_to_host_sock_type(int *type)
2928 {
2929 int host_type = 0;
2930 int target_type = *type;
2931
2932 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2933 case TARGET_SOCK_DGRAM:
2934 host_type = SOCK_DGRAM;
2935 break;
2936 case TARGET_SOCK_STREAM:
2937 host_type = SOCK_STREAM;
2938 break;
2939 default:
2940 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2941 break;
2942 }
2943 if (target_type & TARGET_SOCK_CLOEXEC) {
2944 #if defined(SOCK_CLOEXEC)
2945 host_type |= SOCK_CLOEXEC;
2946 #else
2947 return -TARGET_EINVAL;
2948 #endif
2949 }
2950 if (target_type & TARGET_SOCK_NONBLOCK) {
2951 #if defined(SOCK_NONBLOCK)
2952 host_type |= SOCK_NONBLOCK;
2953 #elif !defined(O_NONBLOCK)
2954 return -TARGET_EINVAL;
2955 #endif
2956 }
2957 *type = host_type;
2958 return 0;
2959 }
2960
2961 /* Try to emulate socket type flags after socket creation. */
2962 static int sock_flags_fixup(int fd, int target_type)
2963 {
2964 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2965 if (target_type & TARGET_SOCK_NONBLOCK) {
2966 int flags = fcntl(fd, F_GETFL);
2967 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2968 close(fd);
2969 return -TARGET_EINVAL;
2970 }
2971 }
2972 #endif
2973 return fd;
2974 }
2975
/* do_socket() Must return target values and target errnos.
 *
 * Emulate the guest's socket(2): translate the type flags, restrict
 * netlink protocols to the ones this emulation supports, create the host
 * socket, and register an fd translator where the wire format needs
 * per-message conversion.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols we know how to translate are allowed. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPFNOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* Packet sockets carry the protocol in 16-bit form; swap it to
         * match host expectations — presumably because the guest passes
         * it in its own byte order. TODO confirm against callers. */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* Apply NONBLOCK via fcntl() if socket() couldn't (may close fd). */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: protocol was validated above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3029
3030 /* do_bind() Must return target values and target errnos. */
3031 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3032 socklen_t addrlen)
3033 {
3034 void *addr;
3035 abi_long ret;
3036
3037 if ((int)addrlen < 0) {
3038 return -TARGET_EINVAL;
3039 }
3040
3041 addr = alloca(addrlen+1);
3042
3043 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3044 if (ret)
3045 return ret;
3046
3047 return get_errno(bind(sockfd, addr, addrlen));
3048 }
3049
3050 /* do_connect() Must return target values and target errnos. */
3051 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3052 socklen_t addrlen)
3053 {
3054 void *addr;
3055 abi_long ret;
3056
3057 if ((int)addrlen < 0) {
3058 return -TARGET_EINVAL;
3059 }
3060
3061 addr = alloca(addrlen+1);
3062
3063 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3064 if (ret)
3065 return ret;
3066
3067 return get_errno(safe_connect(sockfd, addr, addrlen));
3068 }
3069
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: msgp is an already-locked guest
 * msghdr.  Converts name, iovec, and control data between guest and host
 * representations, performs the host call, and (for receive) writes the
 * converted results back into *msgp.  'send' selects sendmsg vs recvmsg.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host control messages may need more room than the guest's —
     * presumably why the buffer is doubled here; TODO confirm sizing. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* The fd's translator rewrites the payload; note that only the
             * first iovec entry is translated here. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 marks the bad-name sentinel set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* Success: report the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3176
3177 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3178 int flags, int send)
3179 {
3180 abi_long ret;
3181 struct target_msghdr *msgp;
3182
3183 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3184 msgp,
3185 target_msg,
3186 send ? 1 : 0)) {
3187 return -TARGET_EFAULT;
3188 }
3189 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3190 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3191 return ret;
3192 }
3193
3194 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3195 * so it might not have this *mmsg-specific flag either.
3196 */
3197 #ifndef MSG_WAITFORONE
3198 #define MSG_WAITFORONE 0x10000
3199 #endif
3200
/* Emulate sendmmsg(2)/recvmmsg(2) by looping over do_sendrecvmsg_locked().
 * Returns the number of datagrams processed if any succeeded, otherwise
 * the (target) error from the first failing message.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* The kernel silently clamps the vector length to UIO_MAXIOV too. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Per-message byte count goes back to the guest in msg_len. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the i entries we actually filled in need copying back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3240
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* SOCK_CLOEXEC/SOCK_NONBLOCK may differ between target and host. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL addr means the caller doesn't want the peer address. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy out at most what the guest buffer can hold; report the
         * full length via *addrlen as the kernel does. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
3279
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    /* Negative lengths (seen as huge unsigned values) are rejected. */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        /* Truncate to the guest's buffer but report the true length. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
3310
3311 /* do_getsockname() Must return target values and target errnos. */
3312 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3313 abi_ulong target_addrlen_addr)
3314 {
3315 socklen_t addrlen, ret_addrlen;
3316 void *addr;
3317 abi_long ret;
3318
3319 if (get_user_u32(addrlen, target_addrlen_addr))
3320 return -TARGET_EFAULT;
3321
3322 if ((int)addrlen < 0) {
3323 return -TARGET_EINVAL;
3324 }
3325
3326 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3327 return -TARGET_EFAULT;
3328
3329 addr = alloca(addrlen);
3330
3331 ret_addrlen = addrlen;
3332 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3333 if (!is_error(ret)) {
3334 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3335 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3336 ret = -TARGET_EFAULT;
3337 }
3338 }
3339 return ret;
3340 }
3341
3342 /* do_socketpair() Must return target values and target errnos. */
3343 static abi_long do_socketpair(int domain, int type, int protocol,
3344 abi_ulong target_tab_addr)
3345 {
3346 int tab[2];
3347 abi_long ret;
3348
3349 target_to_host_sock_type(&type);
3350
3351 ret = get_errno(socketpair(domain, type, protocol, tab));
3352 if (!is_error(ret)) {
3353 if (put_user_s32(tab[0], target_tab_addr)
3354 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3355 ret = -TARGET_EFAULT;
3356 }
3357 return ret;
3358 }
3359
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;   /* original locked buffer, if we substituted it */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /* Some fd types (e.g. translated protocols) need the payload
     * rewritten; work on a private copy so the guest buffer is untouched. */
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 so an AF_UNIX path can be NUL-terminated by the converter
         * — presumably; confirm against target_to_host_sockaddr(). */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* If we substituted a translated copy, free it and restore the
     * original pointer so unlock_user() releases the right buffer. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3403
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        /* Give fd-specific translators a chance to rewrite received data
         * (only the bytes actually received, capped at the buffer size). */
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received bytes back to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
        /* Note: the fail label lives inside this else; error paths above
         * jump here so nothing is copied back to the guest. */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3459
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3552
#define N_SHM_REGIONS 32

/* Fixed-size table of guest shared-memory attachments.
 * NOTE(review): presumably maintained by the shmat/shmdt emulation so
 * detach can recover the mapped size — the users are outside this chunk;
 * confirm before relying on these semantics.
 */
static struct shm_region {
    abi_ulong start;   /* guest virtual address of the attachment */
    abi_ulong size;    /* length of the mapped segment in bytes */
    bool in_use;       /* slot allocated? */
} shm_regions[N_SHM_REGIONS];
3560
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/* Guest-layout semid64_ds; the 32-bit ABI pads each time_t field with an
 * extra word to match the kernel's asm-generic layout.
 */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
3579
/* Convert a guest ipc_perm (embedded at the head of a guest semid64_ds at
 * target_addr) into host byte order.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode/__seq are 32-bit on some ABIs, 16-bit on the rest. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3607
/* Inverse of target_to_host_ipc_perm(): write a host ipc_perm into the
 * guest semid64_ds at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode/__seq width depends on the target ABI (see the to-host twin). */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3635
3636 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3637 abi_ulong target_addr)
3638 {
3639 struct target_semid64_ds *target_sd;
3640
3641 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3642 return -TARGET_EFAULT;
3643 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3644 return -TARGET_EFAULT;
3645 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3646 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3647 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3648 unlock_user_struct(target_sd, target_addr, 0);
3649 return 0;
3650 }
3651
3652 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3653 struct semid_ds *host_sd)
3654 {
3655 struct target_semid64_ds *target_sd;
3656
3657 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3658 return -TARGET_EFAULT;
3659 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3660 return -TARGET_EFAULT;
3661 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3662 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3663 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3664 unlock_user_struct(target_sd, target_addr, 1);
3665 return 0;
3666 }
3667
/* Guest-layout mirror of the host's struct seminfo (semctl IPC_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3680
3681 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3682 struct seminfo *host_seminfo)
3683 {
3684 struct target_seminfo *target_seminfo;
3685 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3686 return -TARGET_EFAULT;
3687 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3688 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3689 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3690 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3691 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3692 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3693 __put_user(host_seminfo->semume, &target_seminfo->semume);
3694 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3695 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3696 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3697 unlock_user_struct(target_seminfo, target_addr, 1);
3698 return 0;
3699 }
3700
/* Host-side semctl() argument union (glibc does not define it). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view: all pointer members are guest addresses (abi_ulong). */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3714
3715 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3716 abi_ulong target_addr)
3717 {
3718 int nsems;
3719 unsigned short *array;
3720 union semun semun;
3721 struct semid_ds semid_ds;
3722 int i, ret;
3723
3724 semun.buf = &semid_ds;
3725
3726 ret = semctl(semid, 0, IPC_STAT, semun);
3727 if (ret == -1)
3728 return get_errno(ret);
3729
3730 nsems = semid_ds.sem_nsems;
3731
3732 *host_array = g_try_new(unsigned short, nsems);
3733 if (!*host_array) {
3734 return -TARGET_ENOMEM;
3735 }
3736 array = lock_user(VERIFY_READ, target_addr,
3737 nsems*sizeof(unsigned short), 1);
3738 if (!array) {
3739 g_free(*host_array);
3740 return -TARGET_EFAULT;
3741 }
3742
3743 for(i=0; i<nsems; i++) {
3744 __get_user((*host_array)[i], &array[i]);
3745 }
3746 unlock_user(array, target_addr, 0);
3747
3748 return 0;
3749 }
3750
3751 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3752 unsigned short **host_array)
3753 {
3754 int nsems;
3755 unsigned short *array;
3756 union semun semun;
3757 struct semid_ds semid_ds;
3758 int i, ret;
3759
3760 semun.buf = &semid_ds;
3761
3762 ret = semctl(semid, 0, IPC_STAT, semun);
3763 if (ret == -1)
3764 return get_errno(ret);
3765
3766 nsems = semid_ds.sem_nsems;
3767
3768 array = lock_user(VERIFY_WRITE, target_addr,
3769 nsems*sizeof(unsigned short), 0);
3770 if (!array)
3771 return -TARGET_EFAULT;
3772
3773 for(i=0; i<nsems; i++) {
3774 __put_user((*host_array)[i], &array[i]);
3775 }
3776 g_free(*host_array);
3777 unlock_user(array, target_addr, 1);
3778
3779 return 0;
3780 }
3781
/* Emulate semctl(2).  Must return target values and target errnos. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and other flag bits; we always use the 64-bit layout. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Round-trip the whole value array through a host-side copy. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Round-trip the semid_ds through a host-side copy. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
3851
/* Guest-layout mirror of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
3857
/* Copy an array of nsops guest sembufs at target_addr into host_sembuf,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
3880
3881 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3882 {
3883 struct sembuf sops[nsops];
3884 abi_long ret;
3885
3886 if (target_to_host_sembuf(sops, ptr, nsops))
3887 return -TARGET_EFAULT;
3888
3889 ret = -TARGET_ENOSYS;
3890 #ifdef __NR_semtimedop
3891 ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3892 #endif
3893 #ifdef __NR_ipc
3894 if (ret == -TARGET_ENOSYS) {
3895 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3896 }
3897 #endif
3898 return ret;
3899 }
3900
/* Guest-layout msqid64_ds; 32-bit ABIs pad each time_t field with an
 * extra word to match the kernel's asm-generic layout.
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3924
3925 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3926 abi_ulong target_addr)
3927 {
3928 struct target_msqid_ds *target_md;
3929
3930 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3931 return -TARGET_EFAULT;
3932 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3933 return -TARGET_EFAULT;
3934 host_md->msg_stime = tswapal(target_md->msg_stime);
3935 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3936 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3937 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3938 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3939 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3940 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3941 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3942 unlock_user_struct(target_md, target_addr, 0);
3943 return 0;
3944 }
3945
3946 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3947 struct msqid_ds *host_md)
3948 {
3949 struct target_msqid_ds *target_md;
3950
3951 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3952 return -TARGET_EFAULT;
3953 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3954 return -TARGET_EFAULT;
3955 target_md->msg_stime = tswapal(host_md->msg_stime);
3956 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3957 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3958 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3959 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3960 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3961 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3962 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3963 unlock_user_struct(target_md, target_addr, 1);
3964 return 0;
3965 }
3966
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3977
3978 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3979 struct msginfo *host_msginfo)
3980 {
3981 struct target_msginfo *target_msginfo;
3982 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3983 return -TARGET_EFAULT;
3984 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3985 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3986 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3987 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3988 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3989 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3990 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3991 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3992 unlock_user_struct(target_msginfo, target_addr, 1);
3993 return 0;
3994 }
3995
/* Emulate msgctl(2).  Must return target values and target errnos. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and other flag bits; we always use the 64-bit layout. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip the msqid_ds through a host-side copy. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel API takes a msqid_ds* here even for msginfo. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4027
/* Guest-layout msgbuf header: type word followed by the message text
 * (mtext[1] is a pre-C99 flexible-array idiom). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
4032
/* Emulate msgsnd(2).  Must return target values and target errnos. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    /* NOTE(review): only the struct header is locked here, yet msgsz bytes
     * of mtext are read below — presumably safe because lock_user_struct
     * maps guest memory directly on this build; confirm. */
    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    /* Some hosts only expose SysV IPC through the multiplexed ipc(2). */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4068
/* Emulate msgrcv(2).  Must return target values and target errnos;
 * on success returns the number of mtext bytes received.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* Some hosts only expose SysV IPC through the multiplexed ipc(2);
     * IPCOP_CALL(1, ...) selects the "new" msgrcv argument layout. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, host_mb, msgtyp));
    }
#endif

    if (ret > 0) {
        /* Copy the received text just past the guest's mtype word. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (EFAULT returned earlier). */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4120
4121 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4122 abi_ulong target_addr)
4123 {
4124 struct target_shmid_ds *target_sd;
4125
4126 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4127 return -TARGET_EFAULT;
4128 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4129 return -TARGET_EFAULT;
4130 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4131 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4132 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4133 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4134 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4135 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4136 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4137 unlock_user_struct(target_sd, target_addr, 0);
4138 return 0;
4139 }
4140
4141 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4142 struct shmid_ds *host_sd)
4143 {
4144 struct target_shmid_ds *target_sd;
4145
4146 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4147 return -TARGET_EFAULT;
4148 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4149 return -TARGET_EFAULT;
4150 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4151 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4152 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4153 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4154 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4155 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4156 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4157 unlock_user_struct(target_sd, target_addr, 1);
4158 return 0;
4159 }
4160
/* Guest-layout mirror of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4168
4169 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4170 struct shminfo *host_shminfo)
4171 {
4172 struct target_shminfo *target_shminfo;
4173 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4174 return -TARGET_EFAULT;
4175 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4176 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4177 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4178 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4179 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4180 unlock_user_struct(target_shminfo, target_addr, 1);
4181 return 0;
4182 }
4183
/* Guest-layout mirror of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4192
4193 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4194 struct shm_info *host_shm_info)
4195 {
4196 struct target_shm_info *target_shm_info;
4197 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4198 return -TARGET_EFAULT;
4199 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4200 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4201 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4202 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4203 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4204 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4205 unlock_user_struct(target_shm_info, target_addr, 1);
4206 return 0;
4207 }
4208
/* Emulate shmctl(2).  "buf" is a guest address holding the command's
 * argument structure (if any); returns the host result or a target errno. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Mask off modifier bits (presumably IPC_64 and friends) so the
     * switch below sees the bare command - TODO confirm the intent. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* These share one path: copy the guest shmid_ds in, run the host
         * shmctl, then copy the (possibly updated) shmid_ds back out. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        /* NOTE(review): the write-back happens even when shmctl failed or
         * cmd was IPC_SET - looks intentional but confirm. */
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No argument structure for these commands. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4247
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default shared-memory attach alignment: one target page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4267
/* Emulate shmat(2): attach SysV shared memory segment "shmid" at guest
 * address "shmaddr" (or pick an address if 0).  Returns the guest attach
 * address on success or a target errno.  Must be called without the mmap
 * lock held; it takes and releases it internally. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce target SHMLBA alignment: round down with SHM_RND,
     * otherwise reject misaligned addresses like the kernel does. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    /* The segment must fit inside the guest address space. */
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the reserved VMA is replaced by the segment. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the attached range valid/readable (and writable unless the
     * guest asked for a read-only attach). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the attach so do_shmdt() can find its size later.
     * If the table is full the attach still succeeds, untracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4337
/* Emulate shmdt(2): detach the segment attached at guest address
 * "shmaddr", clearing its page flags and freeing its bookkeeping slot. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    /* Find the region recorded by do_shmat() so we know its size;
     * page flags are cleared before the host shmdt is attempted. */
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
4358
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the legacy ipc(2) syscall: "call" carries the
 * operation in its low 16 bits and an ABI version in the high 16 bits;
 * the remaining arguments are interpreted per operation. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Version 0 passes msgp and msgtyp indirectly through a
                 * small struct in guest memory. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned indirectly via "third". */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* Version 1 of shmat is not supported here. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4466
/* kernel structure types definitions */

/* X-macro expansion of syscall_types.h: the first pass builds an enum of
 * STRUCT_<name> tags, the second pass emits one argtype descriptor array
 * (struct_<name>_def) per structure for the thunk conversion machinery.
 * STRUCT_SPECIAL entries get a tag but no generated descriptor. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Upper bound for the fixed conversion buffer used by ioctl emulation. */
#define MAX_STRUCT_SIZE 4096
4485
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Guest-controlled extent count; bound it so the size computation
     * below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4574
4575 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4576 int fd, int cmd, abi_long arg)
4577 {
4578 const argtype *arg_type = ie->arg_type;
4579 int target_size;
4580 void *argptr;
4581 int ret;
4582 struct ifconf *host_ifconf;
4583 uint32_t outbufsz;
4584 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4585 int target_ifreq_size;
4586 int nb_ifreq;
4587 int free_buf = 0;
4588 int i;
4589 int target_ifc_len;
4590 abi_long target_ifc_buf;
4591 int host_ifc_len;
4592 char *host_ifc_buf;
4593
4594 assert(arg_type[0] == TYPE_PTR);
4595 assert(ie->access == IOC_RW);
4596
4597 arg_type++;
4598 target_size = thunk_type_size(arg_type, 0);
4599
4600 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4601 if (!argptr)
4602 return -TARGET_EFAULT;
4603 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4604 unlock_user(argptr, arg, 0);
4605
4606 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4607 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4608 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4609
4610 if (target_ifc_buf != 0) {
4611 target_ifc_len = host_ifconf->ifc_len;
4612 nb_ifreq = target_ifc_len / target_ifreq_size;
4613 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4614
4615 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4616 if (outbufsz > MAX_STRUCT_SIZE) {
4617 /*
4618 * We can't fit all the extents into the fixed size buffer.
4619 * Allocate one that is large enough and use it instead.
4620 */
4621 host_ifconf = malloc(outbufsz);
4622 if (!host_ifconf) {
4623 return -TARGET_ENOMEM;
4624 }
4625 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4626 free_buf = 1;
4627 }
4628 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4629
4630 host_ifconf->ifc_len = host_ifc_len;
4631 } else {
4632 host_ifc_buf = NULL;
4633 }
4634 host_ifconf->ifc_buf = host_ifc_buf;
4635
4636 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4637 if (!is_error(ret)) {
4638 /* convert host ifc_len to target ifc_len */
4639
4640 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4641 target_ifc_len = nb_ifreq * target_ifreq_size;
4642 host_ifconf->ifc_len = target_ifc_len;
4643
4644 /* restore target ifc_buf */
4645
4646 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4647
4648 /* copy struct ifconf to target user */
4649
4650 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4651 if (!argptr)
4652 return -TARGET_EFAULT;
4653 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4654 unlock_user(argptr, arg, target_size);
4655
4656 if (target_ifc_buf != 0) {
4657 /* copy ifreq[] to target user */
4658 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4659 for (i = 0; i < nb_ifreq ; i++) {
4660 thunk_convert(argptr + i * target_ifreq_size,
4661 host_ifc_buf + i * sizeof(struct ifreq),
4662 ifreq_arg_type, THUNK_TARGET);
4663 }
4664 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4665 }
4666 }
4667
4668 if (free_buf) {
4669 free(host_ifconf);
4670 }
4671
4672 return ret;
4673 }
4674
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/* Bookkeeping wrapper around a host usbdevfs_urb for an in-flight URB.
 * target_urb_adr doubles as the 64-bit hashtable key (the table uses
 * g_int64_hash, so this field must stay first). */
struct live_urb {
    uint64_t target_urb_adr;    /* guest address of the URB / hash key */
    uint64_t target_buf_adr;    /* guest address of the data buffer */
    char *target_buf_ptr;       /* locked host view of the guest buffer */
    struct usbdevfs_urb host_urb;
};
4685
4686 static GHashTable *usbdevfs_urb_hashtable(void)
4687 {
4688 static GHashTable *urb_hashtable;
4689
4690 if (!urb_hashtable) {
4691 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4692 }
4693 return urb_hashtable;
4694 }
4695
/* Register an in-flight URB so it can be found again by guest address. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}
4701
/* Find the live_urb for a guest URB address, or NULL if not in flight. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}
4707
/* Drop a completed/cancelled URB from the in-flight table (does not free). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
4713
/* Handle USBDEVFS_REAPURB(NDELAY): the host returns a pointer to the
 * usbdevfs_urb we submitted; recover the enclosing live_urb, copy the
 * results back to the guest URB and buffer, and write the guest URB
 * address into the guest's result pointer. */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The host writes a single host pointer into buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* Recover the live_urb from the returned host_urb pointer; the
     * host_urb is embedded in the live_urb at a fixed offset. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Flush the (possibly written) data buffer back to guest memory. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
4773
4774 static abi_long
4775 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4776 uint8_t *buf_temp __attribute__((unused)),
4777 int fd, int cmd, abi_long arg)
4778 {
4779 struct live_urb *lurb;
4780
4781 /* map target address back to host URB with metadata. */
4782 lurb = urb_hashtable_lookup(arg);
4783 if (!lurb) {
4784 return -TARGET_EFAULT;
4785 }
4786 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4787 }
4788
/* Handle USBDEVFS_SUBMITURB: build a host copy of the guest URB (with
 * the data buffer locked into host memory for the duration of the
 * transfer) and hand it to the kernel.  On success the live_urb is kept
 * in the hashtable until reap/discard. */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest addresses: the URB itself (hash key) and the
     * data buffer (still a guest pointer after thunk_convert). */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: release the guest buffer unmodified. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
4849 #endif /* CONFIG_USBFS */
4850
4851 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4852 int cmd, abi_long arg)
4853 {
4854 void *argptr;
4855 struct dm_ioctl *host_dm;
4856 abi_long guest_data;
4857 uint32_t guest_data_size;
4858 int target_size;
4859 const argtype *arg_type = ie->arg_type;
4860 abi_long ret;
4861 void *big_buf = NULL;
4862 char *host_data;
4863
4864 arg_type++;
4865 target_size = thunk_type_size(arg_type, 0);
4866 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4867 if (!argptr) {
4868 ret = -TARGET_EFAULT;
4869 goto out;
4870 }
4871 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4872 unlock_user(argptr, arg, 0);
4873
4874 /* buf_temp is too small, so fetch things into a bigger buffer */
4875 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4876 memcpy(big_buf, buf_temp, target_size);
4877 buf_temp = big_buf;
4878 host_dm = big_buf;
4879
4880 guest_data = arg + host_dm->data_start;
4881 if ((guest_data - arg) < 0) {
4882 ret = -TARGET_EINVAL;
4883 goto out;
4884 }
4885 guest_data_size = host_dm->data_size - host_dm->data_start;
4886 host_data = (char*)host_dm + host_dm->data_start;
4887
4888 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4889 if (!argptr) {
4890 ret = -TARGET_EFAULT;
4891 goto out;
4892 }
4893
4894 switch (ie->host_cmd) {
4895 case DM_REMOVE_ALL:
4896 case DM_LIST_DEVICES:
4897 case DM_DEV_CREATE:
4898 case DM_DEV_REMOVE:
4899 case DM_DEV_SUSPEND:
4900 case DM_DEV_STATUS:
4901 case DM_DEV_WAIT:
4902 case DM_TABLE_STATUS:
4903 case DM_TABLE_CLEAR:
4904 case DM_TABLE_DEPS:
4905 case DM_LIST_VERSIONS:
4906 /* no input data */
4907 break;
4908 case DM_DEV_RENAME:
4909 case DM_DEV_SET_GEOMETRY:
4910 /* data contains only strings */
4911 memcpy(host_data, argptr, guest_data_size);
4912 break;
4913 case DM_TARGET_MSG:
4914 memcpy(host_data, argptr, guest_data_size);
4915 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4916 break;
4917 case DM_TABLE_LOAD:
4918 {
4919 void *gspec = argptr;
4920 void *cur_data = host_data;
4921 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4922 int spec_size = thunk_type_size(arg_type, 0);
4923 int i;
4924
4925 for (i = 0; i < host_dm->target_count; i++) {
4926 struct dm_target_spec *spec = cur_data;
4927 uint32_t next;
4928 int slen;
4929
4930 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4931 slen = strlen((char*)gspec + spec_size) + 1;
4932 next = spec->next;
4933 spec->next = sizeof(*spec) + slen;
4934 strcpy((char*)&spec[1], gspec + spec_size);
4935 gspec += next;
4936 cur_data += spec->next;
4937 }
4938 break;
4939 }
4940 default:
4941 ret = -TARGET_EINVAL;
4942 unlock_user(argptr, guest_data, 0);
4943 goto out;
4944 }
4945 unlock_user(argptr, guest_data, 0);
4946
4947 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4948 if (!is_error(ret)) {
4949 guest_data = arg + host_dm->data_start;
4950 guest_data_size = host_dm->data_size - host_dm->data_start;
4951 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4952 switch (ie->host_cmd) {
4953 case DM_REMOVE_ALL:
4954 case DM_DEV_CREATE:
4955 case DM_DEV_REMOVE:
4956 case DM_DEV_RENAME:
4957 case DM_DEV_SUSPEND:
4958 case DM_DEV_STATUS:
4959 case DM_TABLE_LOAD:
4960 case DM_TABLE_CLEAR:
4961 case DM_TARGET_MSG:
4962 case DM_DEV_SET_GEOMETRY:
4963 /* no return data */
4964 break;
4965 case DM_LIST_DEVICES:
4966 {
4967 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4968 uint32_t remaining_data = guest_data_size;
4969 void *cur_data = argptr;
4970 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4971 int nl_size = 12; /* can't use thunk_size due to alignment */
4972
4973 while (1) {
4974 uint32_t next = nl->next;
4975 if (next) {
4976 nl->next = nl_size + (strlen(nl->name) + 1);
4977 }
4978 if (remaining_data < nl->next) {
4979 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4980 break;
4981 }
4982 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4983 strcpy(cur_data + nl_size, nl->name);
4984 cur_data += nl->next;
4985 remaining_data -= nl->next;
4986 if (!next) {
4987 break;
4988 }
4989 nl = (void*)nl + next;
4990 }
4991 break;
4992 }
4993 case DM_DEV_WAIT:
4994 case DM_TABLE_STATUS:
4995 {
4996 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4997 void *cur_data = argptr;
4998 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4999 int spec_size = thunk_type_size(arg_type, 0);
5000 int i;
5001
5002 for (i = 0; i < host_dm->target_count; i++) {
5003 uint32_t next = spec->next;
5004 int slen = strlen((char*)&spec[1]) + 1;
5005 spec->next = (cur_data - argptr) + spec_size + slen;
5006 if (guest_data_size < spec->next) {
5007 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5008 break;
5009 }
5010 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5011 strcpy(cur_data + spec_size, (char*)&spec[1]);
5012 cur_data = argptr + spec->next;
5013 spec = (void*)host_dm + host_dm->data_start + next;
5014 }
5015 break;
5016 }
5017 case DM_TABLE_DEPS:
5018 {
5019 void *hdata = (void*)host_dm + host_dm->data_start;
5020 int count = *(uint32_t*)hdata;
5021 uint64_t *hdev = hdata + 8;
5022 uint64_t *gdev = argptr + 8;
5023 int i;
5024
5025 *(uint32_t*)argptr = tswap32(count);
5026 for (i = 0; i < count; i++) {
5027 *gdev = tswap64(*hdev);
5028 gdev++;
5029 hdev++;
5030 }
5031 break;
5032 }
5033 case DM_LIST_VERSIONS:
5034 {
5035 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5036 uint32_t remaining_data = guest_data_size;
5037 void *cur_data = argptr;
5038 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5039 int vers_size = thunk_type_size(arg_type, 0);
5040
5041 while (1) {
5042 uint32_t next = vers->next;
5043 if (next) {
5044 vers->next = vers_size + (strlen(vers->name) + 1);
5045 }
5046 if (remaining_data < vers->next) {
5047 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5048 break;
5049 }
5050 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5051 strcpy(cur_data + vers_size, vers->name);
5052 cur_data += vers->next;
5053 remaining_data -= vers->next;
5054 if (!next) {
5055 break;
5056 }
5057 vers = (void*)vers + next;
5058 }
5059 break;
5060 }
5061 default:
5062 unlock_user(argptr, guest_data, 0);
5063 ret = -TARGET_EINVAL;
5064 goto out;
5065 }
5066 unlock_user(argptr, guest_data, guest_data_size);
5067
5068 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5069 if (!argptr) {
5070 ret = -TARGET_EFAULT;
5071 goto out;
5072 }
5073 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5074 unlock_user(argptr, arg, target_size);
5075 }
5076 out:
5077 g_free(big_buf);
5078 return ret;
5079 }
5080
/* Handle BLKPG: a struct blkpg_ioctl_arg whose "data" member points at a
 * struct blkpg_partition.  Both levels are converted, and the host copy's
 * data pointer is redirected to a local struct before the real ioctl. */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (still a guest pointer after the
     * first conversion). */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5133
/* Handle routing-table ioctls (struct rtentry argument).  The struct is
 * converted field by field so the rt_dev member - a pointer to a device
 * name string in guest memory - can be replaced with a locked host
 * string for the duration of the ioctl. */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev is special: lock the guest string into host memory
             * instead of converting the raw pointer value. */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* struct rtentry always has an rt_dev field, so the loop above must
     * have set both pointers. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5199
5200 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5201 int fd, int cmd, abi_long arg)
5202 {
5203 int sig = target_to_host_signal(arg);
5204 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5205 }
5206
5207 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5208 int fd, int cmd, abi_long arg)
5209 {
5210 struct timeval tv;
5211 abi_long ret;
5212
5213 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5214 if (is_error(ret)) {
5215 return ret;
5216 }
5217
5218 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5219 if (copy_to_user_timeval(arg, &tv)) {
5220 return -TARGET_EFAULT;
5221 }
5222 } else {
5223 if (copy_to_user_timeval64(arg, &tv)) {
5224 return -TARGET_EFAULT;
5225 }
5226 }
5227
5228 return ret;
5229 }
5230
5231 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5232 int fd, int cmd, abi_long arg)
5233 {
5234 struct timespec ts;
5235 abi_long ret;
5236
5237 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5238 if (is_error(ret)) {
5239 return ret;
5240 }
5241
5242 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5243 if (host_to_target_timespec(arg, &ts)) {
5244 return -TARGET_EFAULT;
5245 }
5246 } else{
5247 if (host_to_target_timespec64(arg, &ts)) {
5248 return -TARGET_EFAULT;
5249 }
5250 }
5251
5252 return ret;
5253 }
5254
#ifdef TIOCGPTPEER
/* Handle TIOCGPTPEER: the argument is a set of open(2) flags, translated
 * from the target's flag encoding to the host's before the ioctl. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);

    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
5263
5264 #ifdef HAVE_DRM_H
5265
/* Release the three guest string buffers locked by
 * target_to_host_drmversion().  When "copy" is true the (ioctl-filled)
 * contents are written back to guest memory; otherwise they are dropped.
 *
 * NOTE(review): target_ver->name/date/desc are read directly rather than
 * via __get_user/tswapal - presumably wrong for cross-endian targets;
 * confirm against the lock_user() calls in target_to_host_drmversion(). */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}
5277
/* Prepare a host struct drm_version for DRM_IOCTL_VERSION: copy the
 * three length fields from the guest struct and lock the corresponding
 * guest string buffers into host memory so the kernel can fill them.
 * Returns 0 on success; on failure any already-locked buffers are
 * released.
 *
 * NOTE(review): the lengths passed to lock_user() are the raw
 * target_ver->*_len fields while the lengths kept in host_ver come from
 * __get_user() - these differ on cross-endian hosts; confirm.
 * NOTE(review): returns -EFAULT (host errno), not -TARGET_EFAULT. */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                           struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Drop whatever was locked so far without copying anything back. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5315
/*
 * Copy the results of a successful DRM_IOCTL_VERSION back into the guest
 * structure (version numbers and string lengths), then release the string
 * buffers with copy-out enabled so the name/date/desc text reaches the
 * guest as well.
 */
static inline void host_to_target_drmversion(
                          struct target_drm_version *target_ver,
                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5328
/*
 * Special handler for DRM ioctls.  Only DRM_IOCTL_VERSION is converted:
 * the guest-supplied string buffers are locked, the host ioctl is issued,
 * and on success the results are written back to the guest.  All other
 * DRM commands return -TARGET_ENOSYS.
 */
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        /* buf_temp serves as the host-side struct drm_version. */
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                /* Failure: drop the locked buffers without copying back. */
                unlock_drm_version(ver, target_ver, false);
            } else {
                /* Success: copy results out and release the buffers. */
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
5356
5357 #endif
5358
/*
 * Table of every ioctl known to the translator, expanded from ioctls.h:
 *   IOCTL         - plain entry, converted via its argument type list;
 *   IOCTL_SPECIAL - entry with a dedicated conversion callback (dofn);
 *   IOCTL_IGNORE  - target command with no host equivalent (host_cmd 0).
 * The list is terminated by an all-zero entry.
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5369
/* ??? Implement proper locking for ioctls. */
/*
 * do_ioctl() Must return target values and target errnos.
 *
 * Generic ioctl dispatcher: look @cmd up in ioctl_entries[], then either
 * delegate to the entry's special handler or marshal the argument between
 * guest and host layouts according to the entry's thunk type description.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    /* Host-side staging area for struct-typed ioctl arguments. */
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan; the table is terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* The entry provides its own conversion routine. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Scalar argument: passed through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer to a struct: convert via the thunk machinery, in the
           direction(s) dictated by the entry's access mode. */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Host fills the struct; copy it out to the guest. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Guest supplies the struct; copy it in, then call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read-write (also the fallback): copy in, call, copy back. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
5461
/*
 * Guest<->host translation of termios c_iflag (input mode) bits.
 * Row layout: { target_mask, target_bits, host_mask, host_bits };
 * here each flag is a single bit, so mask == bits in every row.
 */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
5479
/*
 * Guest<->host translation of termios c_oflag (output mode) bits.
 * Row layout: { target_mask, target_bits, host_mask, host_bits }.
 * The delay fields (NLDLY/CRDLY/TABDLY/BSDLY/VTDLY/FFDLY) are multi-bit:
 * the mask selects the field and the second/fourth entries give one
 * possible value within it, hence several rows per field.
 */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
5507
/*
 * Guest<->host translation of termios c_cflag (control mode) bits.
 * Row layout: { target_mask, target_bits, host_mask, host_bits }.
 * CBAUD (baud rate) and CSIZE (character size) are multi-bit fields,
 * enumerated one value per row; the remaining flags are single bits.
 */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
5542
/*
 * Guest<->host translation of termios c_lflag (local mode) bits.
 * Row layout: { target_mask, target_bits, host_mask, host_bits };
 * all entries here are single bits, so mask == bits in every row.
 */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
5561
5562 static void target_to_host_termios (void *dst, const void *src)
5563 {
5564 struct host_termios *host = dst;
5565 const struct target_termios *target = src;
5566
5567 host->c_iflag =
5568 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5569 host->c_oflag =
5570 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5571 host->c_cflag =
5572 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5573 host->c_lflag =
5574 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5575 host->c_line = target->c_line;
5576
5577 memset(host->c_cc, 0, sizeof(host->c_cc));
5578 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5579 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5580 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5581 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5582 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5583 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5584 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5585 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5586 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5587 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5588 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5589 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5590 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5591 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5592 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5593 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5594 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5595 }
5596
5597 static void host_to_target_termios (void *dst, const void *src)
5598 {
5599 struct target_termios *target = dst;
5600 const struct host_termios *host = src;
5601
5602 target->c_iflag =
5603 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5604 target->c_oflag =
5605 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5606 target->c_cflag =
5607 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5608 target->c_lflag =
5609 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5610 target->c_line = host->c_line;
5611
5612 memset(target->c_cc, 0, sizeof(target->c_cc));
5613 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5614 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5615 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5616 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5617 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5618 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5619 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5620 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5621 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5622 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5623 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5624 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5625 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5626 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5627 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5628 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5629 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5630 }
5631
/*
 * Thunk description for struct termios: .convert[0] copies host->target,
 * .convert[1] copies target->host, with the sizes/alignments of both
 * layouts recorded for the thunk machinery.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5637
5638 static bitmask_transtbl mmap_flags_tbl[] = {
5639 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5640 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5641 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5642 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5643 MAP_ANONYMOUS, MAP_ANONYMOUS },
5644 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5645 MAP_GROWSDOWN, MAP_GROWSDOWN },
5646 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5647 MAP_DENYWRITE, MAP_DENYWRITE },
5648 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5649 MAP_EXECUTABLE, MAP_EXECUTABLE },
5650 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5651 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5652 MAP_NORESERVE, MAP_NORESERVE },
5653 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5654 /* MAP_STACK had been ignored by the kernel for quite some time.
5655 Recognize it for the target insofar as we do not want to pass
5656 it through to the host. */
5657 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5658 { 0, 0, 0, 0 }
5659 };
5660
5661 /*
5662 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5663 * TARGET_I386 is defined if TARGET_X86_64 is defined
5664 */
5665 #if defined(TARGET_I386)
5666
5667 /* NOTE: there is really one LDT for all the threads */
5668 static uint8_t *ldt_table;
5669
5670 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5671 {
5672 int size;
5673 void *p;
5674
5675 if (!ldt_table)
5676 return 0;
5677 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5678 if (size > bytecount)
5679 size = bytecount;
5680 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5681 if (!p)
5682 return -TARGET_EFAULT;
5683 /* ??? Should this by byteswapped? */
5684 memcpy(p, ldt_table, size);
5685 unlock_user(p, ptr, size);
5686 return size;
5687 }
5688
/* XXX: add locking support */
/*
 * modify_ldt(2) write function: install one LDT entry described by the
 * guest struct at @ptr/@bytecount (the kernel's user_desc layout).
 * @oldmode selects the legacy semantics (func == 1): the "useable" bit
 * is not encoded and conforming code segments (contents == 3) are
 * rejected.  The LDT block itself is allocated lazily in guest memory
 * on first use and installed into env->ldt.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the descriptor fields from guest byte order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (user_desc bit layout). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        /* Conforming code segment: rejected in old mode, and must be
           marked not-present otherwise. */
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit words of an x86 segment
       descriptor; 0x7000 sets the descriptor-type (S) bit and DPL 3,
       and the present bit is seg_not_present inverted. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5780
5781 /* specific and weird i386 syscalls */
5782 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5783 unsigned long bytecount)
5784 {
5785 abi_long ret;
5786
5787 switch (func) {
5788 case 0:
5789 ret = read_ldt(ptr, bytecount);
5790 break;
5791 case 1:
5792 ret = write_ldt(env, ptr, bytecount, 1);
5793 break;
5794 case 0x11:
5795 ret = write_ldt(env, ptr, bytecount, 0);
5796 break;
5797 default:
5798 ret = -TARGET_ENOSYS;
5799 break;
5800 }
5801 return ret;
5802 }
5803
5804 #if defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * An entry_number of -1 asks us to pick the first free slot in the TLS
 * range and report it back to the guest.  The descriptor packing below
 * mirrors write_ldt() above.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Byte-swap the user_desc fields from guest byte order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and tell the guest which one we used. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (user_desc bit layout). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        /* Conforming code segments must be marked not-present. */
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack into the two 32-bit words of an x86 descriptor; 0x7000 sets
       the descriptor-type (S) bit and DPL 3. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5889
/*
 * get_thread_area(2): read a TLS descriptor back from the emulated GDT
 * and re-encode it as a user_desc struct in guest memory (inverse of
 * do_set_thread_area()).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Split the 8-byte descriptor into its two 32-bit words. */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits (inverse of the packing above). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the user_desc flags word, limit and base address. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5936
/*
 * arch_prctl(2) stub for the 32-bit target ABI; the FS/GS base
 * implementation in the #else branch below is 64-bit only.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
5941 #else
5942 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5943 {
5944 abi_long ret = 0;
5945 abi_ulong val;
5946 int idx;
5947
5948 switch(code) {
5949 case TARGET_ARCH_SET_GS:
5950 case TARGET_ARCH_SET_FS:
5951 if (code == TARGET_ARCH_SET_GS)
5952 idx = R_GS;
5953 else
5954 idx = R_FS;
5955 cpu_x86_load_seg(env, idx, 0);
5956 env->segs[idx].base = addr;
5957 break;
5958 case TARGET_ARCH_GET_GS:
5959 case TARGET_ARCH_GET_FS:
5960 if (code == TARGET_ARCH_GET_GS)
5961 idx = R_GS;
5962 else
5963 idx = R_FS;
5964 val = env->segs[idx].base;
5965 if (put_user(val, addr, abi_ulong))
5966 ret = -TARGET_EFAULT;
5967 break;
5968 default:
5969 ret = -TARGET_EINVAL;
5970 break;
5971 }
5972 return ret;
5973 }
5974 #endif /* defined(TARGET_ABI32 */
5975
5976 #endif /* defined(TARGET_I386) */
5977
/* Host stack size for guest threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation so guest thread setup appears atomic, and
 * makes the new child wait until the parent has finished initializing
 * the TLS state (see clone_func()/do_fork()). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent/child handshake data passed from do_fork() to clone_func(). */
typedef struct {
    CPUArchState *env;          /* CPU state for the new thread */
    pthread_mutex_t mutex;      /* protects cond below */
    pthread_cond_t cond;        /* signalled once the child is ready */
    pthread_t thread;
    uint32_t tid;               /* child's host tid, filled in by the child */
    abi_ulong child_tidptr;     /* guest address for CLONE_CHILD_SETTID */
    abi_ulong parent_tidptr;    /* guest address for CLONE_PARENT_SETTID */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
5992
/*
 * Start routine for guest threads created by do_fork(CLONE_VM): register
 * with RCU/TCG, publish the new tid where requested, signal the creating
 * thread that we are ready, wait for it to finish setup, then enter the
 * CPU emulation loop (which never returns).
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Publish our tid wherever the clone flags asked for it. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6026
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone()/fork()/vfork() for the guest.  With CLONE_VM a new
 * host thread running a copy of the CPU state is created; otherwise a
 * real host fork() is performed.  vfork() is emulated with fork().
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Threads must carry the full canonical thread flag set and
           none of the flags we cannot support. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): each pthread_attr_* result overwrites ret; only
           the final pthread_create() result is actually checked. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        /* Restore our own signal mask now that the handshake is set up. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6169
/* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl(2) command onto the host command to issue.
 * Record-lock commands are mapped to the 64-bit variants (F_*LK64) so
 * that 64-bit file offsets are always used; on 32-bit target ABIs the
 * guest's explicit F_*LK64 commands map to the same host values.
 * Returns -TARGET_EINVAL for commands we do not translate.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        /* Passed through unchanged (target and host values assumed
           equal for these commands). */
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6268
/*
 * X-macro list of the flock l_type constants handled by the two
 * conversion helpers below.  Each helper defines TRANSTBL_CONVERT(a)
 * to expand to a "case" mapping in its own direction before expanding
 * this table, then undefines it again.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
6277
6278 static int target_to_host_flock(int type)
6279 {
6280 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6281 FLOCK_TRANSTBL
6282 #undef TRANSTBL_CONVERT
6283 return -TARGET_EINVAL;
6284 }
6285
6286 static int host_to_target_flock(int type)
6287 {
6288 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6289 FLOCK_TRANSTBL
6290 #undef TRANSTBL_CONVERT
6291 /* if we don't know how to convert the value coming
6292 * from the host we copy to the target field as-is
6293 */
6294 return type;
6295 }
6296
6297 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6298 abi_ulong target_flock_addr)
6299 {
6300 struct target_flock *target_fl;
6301 int l_type;
6302
6303 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6304 return -TARGET_EFAULT;
6305 }
6306
6307 __get_user(l_type, &target_fl->l_type);
6308 l_type = target_to_host_flock(l_type);
6309 if (l_type < 0) {
6310 return l_type;
6311 }
6312 fl->l_type = l_type;
6313 __get_user(fl->l_whence, &target_fl->l_whence);
6314 __get_user(fl->l_start, &target_fl->l_start);
6315 __get_user(fl->l_len, &target_fl->l_len);
6316 __get_user(fl->l_pid, &target_fl->l_pid);
6317 unlock_user_struct(target_fl, target_flock_addr, 0);
6318 return 0;
6319 }
6320
/*
 * Copy the host flock64 @fl out to the guest struct flock at
 * @target_flock_addr, converting l_type back to the guest constant.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    /* final '1' = copy the written struct back to guest memory */
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6340
/* Function types matching the copy_{from,to}_user_*flock64 helpers, so a
 * caller can select a conversion routine (e.g. the arm-oabi variant)
 * through a function pointer. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6343
6344 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6345 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6346 abi_ulong target_flock_addr)
6347 {
6348 struct target_oabi_flock64 *target_fl;
6349 int l_type;
6350
6351 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6352 return -TARGET_EFAULT;
6353 }
6354
6355 __get_user(l_type, &target_fl->l_type);
6356 l_type = target_to_host_flock(l_type);
6357 if (l_type < 0) {
6358 return l_type;
6359 }
6360 fl->l_type = l_type;
6361 __get_user(fl->l_whence, &target_fl->l_whence);
6362 __get_user(fl->l_start, &target_fl->l_start);
6363 __get_user(fl->l_len, &target_fl->l_len);
6364 __get_user(fl->l_pid, &target_fl->l_pid);
6365 unlock_user_struct(target_fl, target_flock_addr, 0);
6366 return 0;
6367 }
6368
/*
 * Arm-OABI variant of copy_to_user_flock64 (different guest layout).
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    /* final '1' = copy the written struct back to guest memory */
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6388 #endif
6389
6390 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6391 abi_ulong target_flock_addr)
6392 {
6393 struct target_flock64 *target_fl;
6394 int l_type;
6395
6396 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6397 return -TARGET_EFAULT;
6398 }
6399
6400 __get_user(l_type, &target_fl->l_type);
6401 l_type = target_to_host_flock(l_type);
6402 if (l_type < 0) {
6403 return l_type;
6404 }
6405 fl->l_type = l_type;
6406 __get_user(fl->l_whence, &target_fl->l_whence);
6407 __get_user(fl->l_start, &target_fl->l_start);
6408 __get_user(fl->l_len, &target_fl->l_len);
6409 __get_user(fl->l_pid, &target_fl->l_pid);
6410 unlock_user_struct(target_fl, target_flock_addr, 0);
6411 return 0;
6412 }
6413
/*
 * Copy the host flock64 @fl out to the guest struct flock64 at
 * @target_flock_addr, converting l_type back to the guest constant.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    /* final '1' = copy the written struct back to guest memory */
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6433
/*
 * Implement the guest fcntl(2): translate the command number, any
 * pointed-to structures (flock/flock64, f_owner_ex) and flag bitmasks
 * between guest and host representations, then forward to the host
 * fcntl.  Returns the (translated) result or a -TARGET_* errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* GETLK writes the conflicting lock back to the caller */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* translate the returned open-flags bitmask back to the guest */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* commands whose argument is a plain integer need no translation */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* unknown command: pass the guest value straight through */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6544
6545 #ifdef USE_UID16
6546
/* Squeeze a 32-bit uid into the legacy 16-bit range; values that do
 * not fit become the overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
6554
/* Squeeze a 32-bit gid into the legacy 16-bit range; values that do
 * not fit become the overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
6562
/* Widen a 16-bit uid: the 16-bit "no uid" sentinel 0xffff maps to the
 * 32-bit sentinel -1; everything else passes through unchanged. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
6570
/* Widen a 16-bit gid: the 16-bit "no gid" sentinel 0xffff maps to the
 * 32-bit sentinel -1; everything else passes through unchanged. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Guest uids/gids are 16-bit in the UID16 ABI, so byteswap as 16-bit. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* ...and store them to guest memory as 16-bit values. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6584
6585 #else /* !USE_UID16 */
/* Without USE_UID16 the guest ABI carries full 32-bit uids/gids, so all
 * of the narrowing/widening conversions are identity functions. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Guest IDs are 32-bit here, so byteswap as 32-bit. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* ...and store them to guest memory as 32-bit values. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6608
6609 #endif /* USE_UID16 */
6610
6611 /* We must do direct syscalls for setting UID/GID, because we want to
6612 * implement the Linux system call semantics of "change only for this thread",
6613 * not the libc/POSIX semantics of "change for all threads in process".
6614 * (See http://ewontfix.com/17/ for more details.)
6615 * We use the 32-bit version of the syscalls if present; if it is not
6616 * then either the host architecture supports 32-bit UIDs natively with
6617 * the standard syscall, or the 16-bit UID is the best we can do.
6618 */
6619 #ifdef __NR_setuid32
6620 #define __NR_sys_setuid __NR_setuid32
6621 #else
6622 #define __NR_sys_setuid __NR_setuid
6623 #endif
6624 #ifdef __NR_setgid32
6625 #define __NR_sys_setgid __NR_setgid32
6626 #else
6627 #define __NR_sys_setgid __NR_setgid
6628 #endif
6629 #ifdef __NR_setresuid32
6630 #define __NR_sys_setresuid __NR_setresuid32
6631 #else
6632 #define __NR_sys_setresuid __NR_setresuid
6633 #endif
6634 #ifdef __NR_setresgid32
6635 #define __NR_sys_setresgid __NR_setresgid32
6636 #else
6637 #define __NR_sys_setresgid __NR_setresgid
6638 #endif
6639
/* Direct syscall wrappers for the set*id calls (see the comment above:
 * the raw syscalls affect only the calling thread, unlike libc). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6644
/*
 * One-time initialisation for the syscall layer: register the thunk
 * struct descriptions, build the reverse errno table, and patch the
 * size field of ioctl numbers whose argument size depends on the
 * target ABI.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        /* an all-ones size field marks "compute the size from the
         * thunk type description at runtime" */
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
6696
6697 #ifdef TARGET_NR_truncate64
/*
 * Handle guest truncate64: on ABIs that pass 64-bit values in aligned
 * register pairs, the offset halves arrive one argument slot later, so
 * realign them before combining into a host 64-bit offset.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
6709 #endif
6710
6711 #ifdef TARGET_NR_ftruncate64
/*
 * Handle guest ftruncate64; same register-pair realignment as
 * target_truncate64 above, but operating on a file descriptor.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
6723 #endif
6724
6725 #if defined(TARGET_NR_timer_settime) || \
6726 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6727 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6728 abi_ulong target_addr)
6729 {
6730 struct target_itimerspec *target_itspec;
6731
6732 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6733 return -TARGET_EFAULT;
6734 }
6735
6736 host_itspec->it_interval.tv_sec =
6737 tswapal(target_itspec->it_interval.tv_sec);
6738 host_itspec->it_interval.tv_nsec =
6739 tswapal(target_itspec->it_interval.tv_nsec);
6740 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6741 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6742
6743 unlock_user_struct(target_itspec, target_addr, 1);
6744 return 0;
6745 }
6746 #endif
6747
6748 #if ((defined(TARGET_NR_timerfd_gettime) || \
6749 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6750 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6751 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6752 struct itimerspec *host_its)
6753 {
6754 struct target_itimerspec *target_itspec;
6755
6756 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6757 return -TARGET_EFAULT;
6758 }
6759
6760 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6761 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6762
6763 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6764 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6765
6766 unlock_user_struct(target_itspec, target_addr, 0);
6767 return 0;
6768 }
6769 #endif
6770
6771 #if defined(TARGET_NR_adjtimex) || \
6772 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest struct timex at @target_addr into *host_tx field by
 * field (used by adjtimex/clock_adjtime).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
6807
/*
 * Copy the host struct timex *host_tx out to guest memory at
 * @target_addr field by field (adjtimex is read/write: the kernel
 * updates the struct).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
6842 #endif
6843
6844 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6845 abi_ulong target_addr)
6846 {
6847 struct target_sigevent *target_sevp;
6848
6849 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6850 return -TARGET_EFAULT;
6851 }
6852
6853 /* This union is awkward on 64 bit systems because it has a 32 bit
6854 * integer and a pointer in it; we follow the conversion approach
6855 * used for handling sigval types in signal.c so the guest should get
6856 * the correct value back even if we did a 64 bit byteswap and it's
6857 * using the 32 bit integer.
6858 */
6859 host_sevp->sigev_value.sival_ptr =
6860 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6861 host_sevp->sigev_signo =
6862 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6863 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6864 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6865
6866 unlock_user_struct(target_sevp, target_addr, 1);
6867 return 0;
6868 }
6869
6870 #if defined(TARGET_NR_mlockall)
6871 static inline int target_to_host_mlockall_arg(int arg)
6872 {
6873 int result = 0;
6874
6875 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6876 result |= MCL_CURRENT;
6877 }
6878 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6879 result |= MCL_FUTURE;
6880 }
6881 return result;
6882 }
6883 #endif
6884
6885 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6886 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6887 defined(TARGET_NR_newfstatat))
/*
 * Write the host struct stat *host_st out to guest memory at
 * @target_addr in the guest's 64-bit stat layout.  Arm EABI guests use
 * their own padded layout; targets without a separate stat64 fall back
 * to the plain target_stat struct.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* some targets duplicate the inode in a second __st_ino field */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        /* nanosecond fields only exist when the host exposes st_*tim */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6960 #endif
6961
6962 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a statx result *host_stx out to guest memory at @target_addr,
 * byteswapping every field (host and guest share the target_statx
 * layout).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    /* zero first so reserved/spare fields are well-defined */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7001 #endif
7002
/*
 * Invoke the raw host futex syscall, selecting between __NR_futex and
 * the 32-bit-host __NR_futex_time64 variant based on the width of the
 * host timespec.  (Non-interruptible path; see do_safe_futex for the
 * signal-safe variant.)
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* reached only if no futex syscall variant exists on this host */
    g_assert_not_reached();
}
7027
/*
 * Like do_sys_futex, but via the safe_* wrappers (restartable around
 * guest signal delivery) and with the result converted by get_errno.
 * Returns -TARGET_ENOSYS if the host has no usable futex syscall.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7052
7053 /* ??? Using host futex calls even when target atomic operations
7054 are not really atomic probably breaks things. However implementing
7055 futexes locally would make futexes shared between multiple processes
7056 tricky. However they're probably useless because guest atomic
7057 operations won't work either. */
7058 #if defined(TARGET_NR_futex)
7059 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7060 target_ulong uaddr2, int val3)
7061 {
7062 struct timespec ts, *pts;
7063 int base_op;
7064
7065 /* ??? We assume FUTEX_* constants are the same on both host
7066 and target. */
7067 #ifdef FUTEX_CMD_MASK
7068 base_op = op & FUTEX_CMD_MASK;
7069 #else
7070 base_op = op;
7071 #endif
7072 switch (base_op) {
7073 case FUTEX_WAIT:
7074 case FUTEX_WAIT_BITSET:
7075 if (timeout) {
7076 pts = &ts;
7077 target_to_host_timespec(pts, timeout);
7078 } else {
7079 pts = NULL;
7080 }
7081 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7082 case FUTEX_WAKE:
7083 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7084 case FUTEX_FD:
7085 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7086 case FUTEX_REQUEUE:
7087 case FUTEX_CMP_REQUEUE:
7088 case FUTEX_WAKE_OP:
7089 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7090 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7091 But the prototype takes a `struct timespec *'; insert casts
7092 to satisfy the compiler. We do not need to tswap TIMEOUT
7093 since it's not compared to guest memory. */
7094 pts = (struct timespec *)(uintptr_t) timeout;
7095 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7096 (base_op == FUTEX_CMP_REQUEUE
7097 ? tswap32(val3)
7098 : val3));
7099 default:
7100 return -TARGET_ENOSYS;
7101 }
7102 }
7103 #endif
7104
7105 #if defined(TARGET_NR_futex_time64)
7106 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7107 target_ulong uaddr2, int val3)
7108 {
7109 struct timespec ts, *pts;
7110 int base_op;
7111
7112 /* ??? We assume FUTEX_* constants are the same on both host
7113 and target. */
7114 #ifdef FUTEX_CMD_MASK
7115 base_op = op & FUTEX_CMD_MASK;
7116 #else
7117 base_op = op;
7118 #endif
7119 switch (base_op) {
7120 case FUTEX_WAIT:
7121 case FUTEX_WAIT_BITSET:
7122 if (timeout) {
7123 pts = &ts;
7124 target_to_host_timespec64(pts, timeout);
7125 } else {
7126 pts = NULL;
7127 }
7128 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7129 case FUTEX_WAKE:
7130 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7131 case FUTEX_FD:
7132 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7133 case FUTEX_REQUEUE:
7134 case FUTEX_CMP_REQUEUE:
7135 case FUTEX_WAKE_OP:
7136 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7137 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7138 But the prototype takes a `struct timespec *'; insert casts
7139 to satisfy the compiler. We do not need to tswap TIMEOUT
7140 since it's not compared to guest memory. */
7141 pts = (struct timespec *)(uintptr_t) timeout;
7142 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7143 (base_op == FUTEX_CMP_REQUEUE
7144 ? tswap32(val3)
7145 : val3));
7146 default:
7147 return -TARGET_ENOSYS;
7148 }
7149 }
7150 #endif
7151
7152 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement guest name_to_handle_at(2): read the guest's handle_bytes,
 * call the host syscall into a host-side buffer, then copy the opaque
 * handle and the mount id back to guest memory.
 * Returns the host result or -TARGET_EFAULT on a bad guest pointer.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* first field of struct file_handle is the u32 handle_bytes */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* copy raw bytes, then byteswap the two defined header fields */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7204 #endif
7205
7206 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement guest open_by_handle_at(2): copy the guest file_handle
 * into a host buffer (byteswapping the header fields), translate the
 * open flags, and call the host syscall.
 * Returns the new fd or a -TARGET_* errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* first field of struct file_handle is the u32 handle_bytes */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7238 #endif
7239
7240 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7241
/*
 * Emulate signalfd4(2) (and signalfd(2), called with flags == 0).
 *
 * @mask is a guest pointer to a target_sigset_t; @flags uses target
 * O_NONBLOCK/O_CLOEXEC encoding.  On success the new fd is registered
 * with the signalfd fd translator so reads get target-format siginfo.
 * Returns the fd or a target errno.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only these two flags are defined for signalfd4; reject the rest. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd must translate host signalfd_siginfo. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7269 #endif
7270
/*
 * Translate a host wait status (wait/waitpid/wait4 family) into the
 * target's encoding.  Only the embedded signal number needs remapping;
 * all other status bits are assumed to share the host layout.
 */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; keep the 0x7f marker byte. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    /* Exited (or continued): no signal number embedded. */
    return status;
}
7284
/*
 * Fake /proc/self/cmdline: write the guest's original argv strings into
 * fd, each followed by its NUL terminator (the /proc separator format).
 * Returns 0 on success, -1 if a write fails or is short.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 so each argument's trailing NUL is written as the separator */
        size_t len = strlen(bprm->argv[i]) + 1;

        /* NOTE(review): a short write is treated as failure rather than
         * retried — acceptable for the regular temp file this targets. */
        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
7301
/*
 * Fake /proc/self/maps: walk the host's own mappings and emit, in the
 * kernel's maps format, only those ranges that are visible to the guest
 * (i.e. that round-trip through h2g), with guest virtual addresses and
 * guest page permissions.
 * Returns 0 (write errors from dprintf are not propagated).
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last guest-visible address + 1 when the
             * host mapping extends beyond the guest address space. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;

            /* Skip ranges whose guest page flags don't cover the span. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            /* Label the guest stack like the kernel does. */
            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            e->is_read ? 'r' : '-',
                            e->is_write ? 'w' : '-',
                            e->is_exec ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the pathname column starts at offset 73,
                 * matching the kernel's formatting. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
7363
/*
 * Fake /proc/self/stat: emit the 44 space-separated fields the kernel
 * produces.  Only pid (field 1), comm (field 2) and startstack
 * (field 28, loop index 27) carry real values; every other field is
 * reported as 0.
 * Returns 0 on success, -1 if a write fails or is short.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name: basename of argv[0], truncated to 15 chars
             * like the kernel's comm field */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* all remaining fields are faked as zero; the last one
             * terminates the line instead of adding a space */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
7395
/*
 * Fake /proc/self/auxv: copy the guest's saved auxiliary vector (placed
 * on the target stack at image-load time) into fd, then rewind the file
 * so the caller reads it from the beginning.
 * Always returns 0; a failed write simply truncates the content.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr and len were advanced/decremented by the loop
         * above, so unlock_user sees adjusted values rather than the ones
         * passed to lock_user — harmless for a VERIFY_READ mapping (no
         * copy-back), but worth confirming this is intentional.
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
7425
/*
 * Return 1 iff filename names @entry inside this process's own /proc
 * directory, i.e. it is exactly "/proc/self/<entry>" or
 * "/proc/<own-pid>/<entry>".  Returns 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    const char *rest = filename;
    char pid_prefix[80];

    if (strncmp(rest, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    rest += strlen(proc_prefix);

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric form: must match our own pid exactly, slash included. */
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    /* Whatever follows the pid/self component must equal entry verbatim. */
    return strcmp(rest, entry) == 0;
}
7449
7450 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7451 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator for fully-qualified /proc paths. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7456 #endif
7457
7458 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Fake /proc/net/route for guests whose endianness differs from the
 * host: copy the host file line by line, byte-swapping the destination,
 * gateway and netmask columns so the guest sees them in its own order.
 * Returns 0 on success, -1 if the host file cannot be opened or read.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    if (read == -1) {
        /* Empty or unreadable file: don't hand a NULL/stale buffer
         * to dprintf below. */
        free(line);
        fclose(fp);
        return -1;
    }
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        /* Skip lines that don't parse as a full route entry. */
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
7501 #endif
7502
7503 #if defined(TARGET_SPARC)
/* Synthesize a minimal SPARC /proc/cpuinfo for the guest. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char contents[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", contents);
    return 0;
}
7509 #endif
7510
7511 #if defined(TARGET_HPPA)
/* Synthesize a fixed HPPA /proc/cpuinfo describing the emulated board. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char *const rows[] = {
        "cpu family\t: PA-RISC 1.1e\n",
        "cpu\t\t: PA7300LC (PCX-L2)\n",
        "capabilities\t: os32\n",
        "model\t\t: 9000/778/B160L\n",
        "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n",
    };
    size_t i;

    for (i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
        dprintf(fd, "%s", rows[i]);
    }
    return 0;
}
7521 #endif
7522
7523 #if defined(TARGET_M68K)
/* Fake /proc/hardware for m68k guests: report the emulated model name. */
static int open_hardware(void *cpu_env, int fd)
{
    static const char model_line[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", model_line);
    return 0;
}
7529 #endif
7530
/*
 * open/openat emulation entry point.
 *
 * Intercepts the /proc entries the guest must not see verbatim:
 * /proc/self/exe is redirected to the guest binary itself, and the
 * entries in the fakes[] table are materialized into anonymous temp
 * files filled by their fill() callback.  Everything else is passed
 * through to the host via safe_openat().
 * Returns a host fd or a negative host error (callers wrap in get_errno).
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;            /* entry name or full path */
        int (*fill)(void *cpu_env, int fd);  /* writes fake contents to fd */
        int (*cmp)(const char *s1, const char *s2);  /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: hand back the fd the guest binary was loaded from,
     * or reopen the recorded executable path. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the fd keeps the file alive, the name
         * never becomes visible to the guest */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;  /* preserve fill()'s errno across close() */
            close(fd);
            errno = e;
            return r;
        }
        /* rewind so the guest reads the fake contents from the start */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7596
7597 #define TIMER_MAGIC 0x0caf0000
7598 #define TIMER_MAGIC_MASK 0xffff0000
7599
7600 /* Convert QEMU provided timer ID back to internal 16bit index format */
7601 static target_timer_t get_timer_id(abi_long arg)
7602 {
7603 target_timer_t timerid = arg;
7604
7605 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7606 return -TARGET_EINVAL;
7607 }
7608
7609 timerid &= 0xffff;
7610
7611 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7612 return -TARGET_EINVAL;
7613 }
7614
7615 return timerid;
7616 }
7617
/*
 * Copy a guest CPU affinity mask into a host cpu_set_t-style bitmap.
 *
 * The guest mask at target_addr is target_size bytes of abi_ulong words
 * in guest byte order; bits are transferred one at a time so differing
 * word sizes and endianness between guest and host are handled.
 * host_size must be >= target_size; host bits beyond the guest mask are
 * cleared.  Returns 0 or -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* bit = absolute bit number of the current guest word's bit 0 */
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                /* set the same absolute bit in the host layout */
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
7651
/*
 * Copy a host cpu_set_t-style bitmap out to a guest CPU affinity mask.
 *
 * Inverse of target_to_host_cpu_mask(): bits are transferred one at a
 * time into target_size bytes of abi_ulong words at target_addr, written
 * in guest byte order via __put_user.  host_size must be >= target_size;
 * host bits beyond the guest mask are silently dropped.
 * Returns 0 or -TARGET_EFAULT.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* bit = absolute bit number of the current guest word's bit 0 */
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7684
7685 /* This is an internal helper for do_syscall so that it is easier
7686 * to have a single return point, so that actions, such as logging
7687 * of syscall results, can be performed.
7688 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7689 */
7690 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7691 abi_long arg2, abi_long arg3, abi_long arg4,
7692 abi_long arg5, abi_long arg6, abi_long arg7,
7693 abi_long arg8)
7694 {
7695 CPUState *cpu = env_cpu(cpu_env);
7696 abi_long ret;
7697 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7698 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7699 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7700 || defined(TARGET_NR_statx)
7701 struct stat st;
7702 #endif
7703 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7704 || defined(TARGET_NR_fstatfs)
7705 struct statfs stfs;
7706 #endif
7707 void *p;
7708
7709 switch(num) {
7710 case TARGET_NR_exit:
7711 /* In old applications this may be used to implement _exit(2).
7712 However in threaded applictions it is used for thread termination,
7713 and _exit_group is used for application termination.
7714 Do thread termination if we have more then one thread. */
7715
7716 if (block_signals()) {
7717 return -TARGET_ERESTARTSYS;
7718 }
7719
7720 pthread_mutex_lock(&clone_lock);
7721
7722 if (CPU_NEXT(first_cpu)) {
7723 TaskState *ts = cpu->opaque;
7724
7725 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7726 object_unref(OBJECT(cpu));
7727 /*
7728 * At this point the CPU should be unrealized and removed
7729 * from cpu lists. We can clean-up the rest of the thread
7730 * data without the lock held.
7731 */
7732
7733 pthread_mutex_unlock(&clone_lock);
7734
7735 if (ts->child_tidptr) {
7736 put_user_u32(0, ts->child_tidptr);
7737 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7738 NULL, NULL, 0);
7739 }
7740 thread_cpu = NULL;
7741 g_free(ts);
7742 rcu_unregister_thread();
7743 pthread_exit(NULL);
7744 }
7745
7746 pthread_mutex_unlock(&clone_lock);
7747 preexit_cleanup(cpu_env, arg1);
7748 _exit(arg1);
7749 return 0; /* avoid warning */
7750 case TARGET_NR_read:
7751 if (arg2 == 0 && arg3 == 0) {
7752 return get_errno(safe_read(arg1, 0, 0));
7753 } else {
7754 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7755 return -TARGET_EFAULT;
7756 ret = get_errno(safe_read(arg1, p, arg3));
7757 if (ret >= 0 &&
7758 fd_trans_host_to_target_data(arg1)) {
7759 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7760 }
7761 unlock_user(p, arg2, ret);
7762 }
7763 return ret;
7764 case TARGET_NR_write:
7765 if (arg2 == 0 && arg3 == 0) {
7766 return get_errno(safe_write(arg1, 0, 0));
7767 }
7768 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7769 return -TARGET_EFAULT;
7770 if (fd_trans_target_to_host_data(arg1)) {
7771 void *copy = g_malloc(arg3);
7772 memcpy(copy, p, arg3);
7773 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7774 if (ret >= 0) {
7775 ret = get_errno(safe_write(arg1, copy, ret));
7776 }
7777 g_free(copy);
7778 } else {
7779 ret = get_errno(safe_write(arg1, p, arg3));
7780 }
7781 unlock_user(p, arg2, 0);
7782 return ret;
7783
7784 #ifdef TARGET_NR_open
7785 case TARGET_NR_open:
7786 if (!(p = lock_user_string(arg1)))
7787 return -TARGET_EFAULT;
7788 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7789 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7790 arg3));
7791 fd_trans_unregister(ret);
7792 unlock_user(p, arg1, 0);
7793 return ret;
7794 #endif
7795 case TARGET_NR_openat:
7796 if (!(p = lock_user_string(arg2)))
7797 return -TARGET_EFAULT;
7798 ret = get_errno(do_openat(cpu_env, arg1, p,
7799 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7800 arg4));
7801 fd_trans_unregister(ret);
7802 unlock_user(p, arg2, 0);
7803 return ret;
7804 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7805 case TARGET_NR_name_to_handle_at:
7806 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7807 return ret;
7808 #endif
7809 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7810 case TARGET_NR_open_by_handle_at:
7811 ret = do_open_by_handle_at(arg1, arg2, arg3);
7812 fd_trans_unregister(ret);
7813 return ret;
7814 #endif
7815 case TARGET_NR_close:
7816 fd_trans_unregister(arg1);
7817 return get_errno(close(arg1));
7818
7819 case TARGET_NR_brk:
7820 return do_brk(arg1);
7821 #ifdef TARGET_NR_fork
7822 case TARGET_NR_fork:
7823 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7824 #endif
7825 #ifdef TARGET_NR_waitpid
7826 case TARGET_NR_waitpid:
7827 {
7828 int status;
7829 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7830 if (!is_error(ret) && arg2 && ret
7831 && put_user_s32(host_to_target_waitstatus(status), arg2))
7832 return -TARGET_EFAULT;
7833 }
7834 return ret;
7835 #endif
7836 #ifdef TARGET_NR_waitid
7837 case TARGET_NR_waitid:
7838 {
7839 siginfo_t info;
7840 info.si_pid = 0;
7841 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7842 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7843 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7844 return -TARGET_EFAULT;
7845 host_to_target_siginfo(p, &info);
7846 unlock_user(p, arg3, sizeof(target_siginfo_t));
7847 }
7848 }
7849 return ret;
7850 #endif
7851 #ifdef TARGET_NR_creat /* not on alpha */
7852 case TARGET_NR_creat:
7853 if (!(p = lock_user_string(arg1)))
7854 return -TARGET_EFAULT;
7855 ret = get_errno(creat(p, arg2));
7856 fd_trans_unregister(ret);
7857 unlock_user(p, arg1, 0);
7858 return ret;
7859 #endif
7860 #ifdef TARGET_NR_link
7861 case TARGET_NR_link:
7862 {
7863 void * p2;
7864 p = lock_user_string(arg1);
7865 p2 = lock_user_string(arg2);
7866 if (!p || !p2)
7867 ret = -TARGET_EFAULT;
7868 else
7869 ret = get_errno(link(p, p2));
7870 unlock_user(p2, arg2, 0);
7871 unlock_user(p, arg1, 0);
7872 }
7873 return ret;
7874 #endif
7875 #if defined(TARGET_NR_linkat)
7876 case TARGET_NR_linkat:
7877 {
7878 void * p2 = NULL;
7879 if (!arg2 || !arg4)
7880 return -TARGET_EFAULT;
7881 p = lock_user_string(arg2);
7882 p2 = lock_user_string(arg4);
7883 if (!p || !p2)
7884 ret = -TARGET_EFAULT;
7885 else
7886 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7887 unlock_user(p, arg2, 0);
7888 unlock_user(p2, arg4, 0);
7889 }
7890 return ret;
7891 #endif
7892 #ifdef TARGET_NR_unlink
7893 case TARGET_NR_unlink:
7894 if (!(p = lock_user_string(arg1)))
7895 return -TARGET_EFAULT;
7896 ret = get_errno(unlink(p));
7897 unlock_user(p, arg1, 0);
7898 return ret;
7899 #endif
7900 #if defined(TARGET_NR_unlinkat)
7901 case TARGET_NR_unlinkat:
7902 if (!(p = lock_user_string(arg2)))
7903 return -TARGET_EFAULT;
7904 ret = get_errno(unlinkat(arg1, p, arg3));
7905 unlock_user(p, arg2, 0);
7906 return ret;
7907 #endif
7908 case TARGET_NR_execve:
7909 {
7910 char **argp, **envp;
7911 int argc, envc;
7912 abi_ulong gp;
7913 abi_ulong guest_argp;
7914 abi_ulong guest_envp;
7915 abi_ulong addr;
7916 char **q;
7917 int total_size = 0;
7918
7919 argc = 0;
7920 guest_argp = arg2;
7921 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7922 if (get_user_ual(addr, gp))
7923 return -TARGET_EFAULT;
7924 if (!addr)
7925 break;
7926 argc++;
7927 }
7928 envc = 0;
7929 guest_envp = arg3;
7930 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7931 if (get_user_ual(addr, gp))
7932 return -TARGET_EFAULT;
7933 if (!addr)
7934 break;
7935 envc++;
7936 }
7937
7938 argp = g_new0(char *, argc + 1);
7939 envp = g_new0(char *, envc + 1);
7940
7941 for (gp = guest_argp, q = argp; gp;
7942 gp += sizeof(abi_ulong), q++) {
7943 if (get_user_ual(addr, gp))
7944 goto execve_efault;
7945 if (!addr)
7946 break;
7947 if (!(*q = lock_user_string(addr)))
7948 goto execve_efault;
7949 total_size += strlen(*q) + 1;
7950 }
7951 *q = NULL;
7952
7953 for (gp = guest_envp, q = envp; gp;
7954 gp += sizeof(abi_ulong), q++) {
7955 if (get_user_ual(addr, gp))
7956 goto execve_efault;
7957 if (!addr)
7958 break;
7959 if (!(*q = lock_user_string(addr)))
7960 goto execve_efault;
7961 total_size += strlen(*q) + 1;
7962 }
7963 *q = NULL;
7964
7965 if (!(p = lock_user_string(arg1)))
7966 goto execve_efault;
7967 /* Although execve() is not an interruptible syscall it is
7968 * a special case where we must use the safe_syscall wrapper:
7969 * if we allow a signal to happen before we make the host
7970 * syscall then we will 'lose' it, because at the point of
7971 * execve the process leaves QEMU's control. So we use the
7972 * safe syscall wrapper to ensure that we either take the
7973 * signal as a guest signal, or else it does not happen
7974 * before the execve completes and makes it the other
7975 * program's problem.
7976 */
7977 ret = get_errno(safe_execve(p, argp, envp));
7978 unlock_user(p, arg1, 0);
7979
7980 goto execve_end;
7981
7982 execve_efault:
7983 ret = -TARGET_EFAULT;
7984
7985 execve_end:
7986 for (gp = guest_argp, q = argp; *q;
7987 gp += sizeof(abi_ulong), q++) {
7988 if (get_user_ual(addr, gp)
7989 || !addr)
7990 break;
7991 unlock_user(*q, addr, 0);
7992 }
7993 for (gp = guest_envp, q = envp; *q;
7994 gp += sizeof(abi_ulong), q++) {
7995 if (get_user_ual(addr, gp)
7996 || !addr)
7997 break;
7998 unlock_user(*q, addr, 0);
7999 }
8000
8001 g_free(argp);
8002 g_free(envp);
8003 }
8004 return ret;
8005 case TARGET_NR_chdir:
8006 if (!(p = lock_user_string(arg1)))
8007 return -TARGET_EFAULT;
8008 ret = get_errno(chdir(p));
8009 unlock_user(p, arg1, 0);
8010 return ret;
8011 #ifdef TARGET_NR_time
8012 case TARGET_NR_time:
8013 {
8014 time_t host_time;
8015 ret = get_errno(time(&host_time));
8016 if (!is_error(ret)
8017 && arg1
8018 && put_user_sal(host_time, arg1))
8019 return -TARGET_EFAULT;
8020 }
8021 return ret;
8022 #endif
8023 #ifdef TARGET_NR_mknod
8024 case TARGET_NR_mknod:
8025 if (!(p = lock_user_string(arg1)))
8026 return -TARGET_EFAULT;
8027 ret = get_errno(mknod(p, arg2, arg3));
8028 unlock_user(p, arg1, 0);
8029 return ret;
8030 #endif
8031 #if defined(TARGET_NR_mknodat)
8032 case TARGET_NR_mknodat:
8033 if (!(p = lock_user_string(arg2)))
8034 return -TARGET_EFAULT;
8035 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8036 unlock_user(p, arg2, 0);
8037 return ret;
8038 #endif
8039 #ifdef TARGET_NR_chmod
8040 case TARGET_NR_chmod:
8041 if (!(p = lock_user_string(arg1)))
8042 return -TARGET_EFAULT;
8043 ret = get_errno(chmod(p, arg2));
8044 unlock_user(p, arg1, 0);
8045 return ret;
8046 #endif
8047 #ifdef TARGET_NR_lseek
8048 case TARGET_NR_lseek:
8049 return get_errno(lseek(arg1, arg2, arg3));
8050 #endif
8051 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8052 /* Alpha specific */
8053 case TARGET_NR_getxpid:
8054 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8055 return get_errno(getpid());
8056 #endif
8057 #ifdef TARGET_NR_getpid
8058 case TARGET_NR_getpid:
8059 return get_errno(getpid());
8060 #endif
8061 case TARGET_NR_mount:
8062 {
8063 /* need to look at the data field */
8064 void *p2, *p3;
8065
8066 if (arg1) {
8067 p = lock_user_string(arg1);
8068 if (!p) {
8069 return -TARGET_EFAULT;
8070 }
8071 } else {
8072 p = NULL;
8073 }
8074
8075 p2 = lock_user_string(arg2);
8076 if (!p2) {
8077 if (arg1) {
8078 unlock_user(p, arg1, 0);
8079 }
8080 return -TARGET_EFAULT;
8081 }
8082
8083 if (arg3) {
8084 p3 = lock_user_string(arg3);
8085 if (!p3) {
8086 if (arg1) {
8087 unlock_user(p, arg1, 0);
8088 }
8089 unlock_user(p2, arg2, 0);
8090 return -TARGET_EFAULT;
8091 }
8092 } else {
8093 p3 = NULL;
8094 }
8095
8096 /* FIXME - arg5 should be locked, but it isn't clear how to
8097 * do that since it's not guaranteed to be a NULL-terminated
8098 * string.
8099 */
8100 if (!arg5) {
8101 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8102 } else {
8103 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8104 }
8105 ret = get_errno(ret);
8106
8107 if (arg1) {
8108 unlock_user(p, arg1, 0);
8109 }
8110 unlock_user(p2, arg2, 0);
8111 if (arg3) {
8112 unlock_user(p3, arg3, 0);
8113 }
8114 }
8115 return ret;
8116 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8117 #if defined(TARGET_NR_umount)
8118 case TARGET_NR_umount:
8119 #endif
8120 #if defined(TARGET_NR_oldumount)
8121 case TARGET_NR_oldumount:
8122 #endif
8123 if (!(p = lock_user_string(arg1)))
8124 return -TARGET_EFAULT;
8125 ret = get_errno(umount(p));
8126 unlock_user(p, arg1, 0);
8127 return ret;
8128 #endif
8129 #ifdef TARGET_NR_stime /* not on alpha */
8130 case TARGET_NR_stime:
8131 {
8132 struct timespec ts;
8133 ts.tv_nsec = 0;
8134 if (get_user_sal(ts.tv_sec, arg1)) {
8135 return -TARGET_EFAULT;
8136 }
8137 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8138 }
8139 #endif
8140 #ifdef TARGET_NR_alarm /* not on alpha */
8141 case TARGET_NR_alarm:
8142 return alarm(arg1);
8143 #endif
8144 #ifdef TARGET_NR_pause /* not on alpha */
8145 case TARGET_NR_pause:
8146 if (!block_signals()) {
8147 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8148 }
8149 return -TARGET_EINTR;
8150 #endif
8151 #ifdef TARGET_NR_utime
8152 case TARGET_NR_utime:
8153 {
8154 struct utimbuf tbuf, *host_tbuf;
8155 struct target_utimbuf *target_tbuf;
8156 if (arg2) {
8157 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8158 return -TARGET_EFAULT;
8159 tbuf.actime = tswapal(target_tbuf->actime);
8160 tbuf.modtime = tswapal(target_tbuf->modtime);
8161 unlock_user_struct(target_tbuf, arg2, 0);
8162 host_tbuf = &tbuf;
8163 } else {
8164 host_tbuf = NULL;
8165 }
8166 if (!(p = lock_user_string(arg1)))
8167 return -TARGET_EFAULT;
8168 ret = get_errno(utime(p, host_tbuf));
8169 unlock_user(p, arg1, 0);
8170 }
8171 return ret;
8172 #endif
8173 #ifdef TARGET_NR_utimes
8174 case TARGET_NR_utimes:
8175 {
8176 struct timeval *tvp, tv[2];
8177 if (arg2) {
8178 if (copy_from_user_timeval(&tv[0], arg2)
8179 || copy_from_user_timeval(&tv[1],
8180 arg2 + sizeof(struct target_timeval)))
8181 return -TARGET_EFAULT;
8182 tvp = tv;
8183 } else {
8184 tvp = NULL;
8185 }
8186 if (!(p = lock_user_string(arg1)))
8187 return -TARGET_EFAULT;
8188 ret = get_errno(utimes(p, tvp));
8189 unlock_user(p, arg1, 0);
8190 }
8191 return ret;
8192 #endif
8193 #if defined(TARGET_NR_futimesat)
8194 case TARGET_NR_futimesat:
8195 {
8196 struct timeval *tvp, tv[2];
8197 if (arg3) {
8198 if (copy_from_user_timeval(&tv[0], arg3)
8199 || copy_from_user_timeval(&tv[1],
8200 arg3 + sizeof(struct target_timeval)))
8201 return -TARGET_EFAULT;
8202 tvp = tv;
8203 } else {
8204 tvp = NULL;
8205 }
8206 if (!(p = lock_user_string(arg2))) {
8207 return -TARGET_EFAULT;
8208 }
8209 ret = get_errno(futimesat(arg1, path(p), tvp));
8210 unlock_user(p, arg2, 0);
8211 }
8212 return ret;
8213 #endif
8214 #ifdef TARGET_NR_access
8215 case TARGET_NR_access:
8216 if (!(p = lock_user_string(arg1))) {
8217 return -TARGET_EFAULT;
8218 }
8219 ret = get_errno(access(path(p), arg2));
8220 unlock_user(p, arg1, 0);
8221 return ret;
8222 #endif
8223 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8224 case TARGET_NR_faccessat:
8225 if (!(p = lock_user_string(arg2))) {
8226 return -TARGET_EFAULT;
8227 }
8228 ret = get_errno(faccessat(arg1, p, arg3, 0));
8229 unlock_user(p, arg2, 0);
8230 return ret;
8231 #endif
8232 #ifdef TARGET_NR_nice /* not on alpha */
8233 case TARGET_NR_nice:
8234 return get_errno(nice(arg1));
8235 #endif
8236 case TARGET_NR_sync:
8237 sync();
8238 return 0;
8239 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8240 case TARGET_NR_syncfs:
8241 return get_errno(syncfs(arg1));
8242 #endif
8243 case TARGET_NR_kill:
8244 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8245 #ifdef TARGET_NR_rename
8246 case TARGET_NR_rename:
8247 {
8248 void *p2;
8249 p = lock_user_string(arg1);
8250 p2 = lock_user_string(arg2);
8251 if (!p || !p2)
8252 ret = -TARGET_EFAULT;
8253 else
8254 ret = get_errno(rename(p, p2));
8255 unlock_user(p2, arg2, 0);
8256 unlock_user(p, arg1, 0);
8257 }
8258 return ret;
8259 #endif
8260 #if defined(TARGET_NR_renameat)
8261 case TARGET_NR_renameat:
8262 {
8263 void *p2;
8264 p = lock_user_string(arg2);
8265 p2 = lock_user_string(arg4);
8266 if (!p || !p2)
8267 ret = -TARGET_EFAULT;
8268 else
8269 ret = get_errno(renameat(arg1, p, arg3, p2));
8270 unlock_user(p2, arg4, 0);
8271 unlock_user(p, arg2, 0);
8272 }
8273 return ret;
8274 #endif
8275 #if defined(TARGET_NR_renameat2)
8276 case TARGET_NR_renameat2:
8277 {
8278 void *p2;
8279 p = lock_user_string(arg2);
8280 p2 = lock_user_string(arg4);
8281 if (!p || !p2) {
8282 ret = -TARGET_EFAULT;
8283 } else {
8284 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8285 }
8286 unlock_user(p2, arg4, 0);
8287 unlock_user(p, arg2, 0);
8288 }
8289 return ret;
8290 #endif
8291 #ifdef TARGET_NR_mkdir
8292 case TARGET_NR_mkdir:
8293 if (!(p = lock_user_string(arg1)))
8294 return -TARGET_EFAULT;
8295 ret = get_errno(mkdir(p, arg2));
8296 unlock_user(p, arg1, 0);
8297 return ret;
8298 #endif
8299 #if defined(TARGET_NR_mkdirat)
8300 case TARGET_NR_mkdirat:
8301 if (!(p = lock_user_string(arg2)))
8302 return -TARGET_EFAULT;
8303 ret = get_errno(mkdirat(arg1, p, arg3));
8304 unlock_user(p, arg2, 0);
8305 return ret;
8306 #endif
8307 #ifdef TARGET_NR_rmdir
8308 case TARGET_NR_rmdir:
8309 if (!(p = lock_user_string(arg1)))
8310 return -TARGET_EFAULT;
8311 ret = get_errno(rmdir(p));
8312 unlock_user(p, arg1, 0);
8313 return ret;
8314 #endif
8315 case TARGET_NR_dup:
8316 ret = get_errno(dup(arg1));
8317 if (ret >= 0) {
8318 fd_trans_dup(arg1, ret);
8319 }
8320 return ret;
8321 #ifdef TARGET_NR_pipe
8322 case TARGET_NR_pipe:
8323 return do_pipe(cpu_env, arg1, 0, 0);
8324 #endif
8325 #ifdef TARGET_NR_pipe2
8326 case TARGET_NR_pipe2:
8327 return do_pipe(cpu_env, arg1,
8328 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8329 #endif
8330 case TARGET_NR_times:
8331 {
8332 struct target_tms *tmsp;
8333 struct tms tms;
8334 ret = get_errno(times(&tms));
8335 if (arg1) {
8336 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8337 if (!tmsp)
8338 return -TARGET_EFAULT;
8339 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8340 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8341 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8342 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8343 }
8344 if (!is_error(ret))
8345 ret = host_to_target_clock_t(ret);
8346 }
8347 return ret;
8348 case TARGET_NR_acct:
8349 if (arg1 == 0) {
8350 ret = get_errno(acct(NULL));
8351 } else {
8352 if (!(p = lock_user_string(arg1))) {
8353 return -TARGET_EFAULT;
8354 }
8355 ret = get_errno(acct(path(p)));
8356 unlock_user(p, arg1, 0);
8357 }
8358 return ret;
8359 #ifdef TARGET_NR_umount2
8360 case TARGET_NR_umount2:
8361 if (!(p = lock_user_string(arg1)))
8362 return -TARGET_EFAULT;
8363 ret = get_errno(umount2(p, arg2));
8364 unlock_user(p, arg1, 0);
8365 return ret;
8366 #endif
8367 case TARGET_NR_ioctl:
8368 return do_ioctl(arg1, arg2, arg3);
8369 #ifdef TARGET_NR_fcntl
8370 case TARGET_NR_fcntl:
8371 return do_fcntl(arg1, arg2, arg3);
8372 #endif
8373 case TARGET_NR_setpgid:
8374 return get_errno(setpgid(arg1, arg2));
8375 case TARGET_NR_umask:
8376 return get_errno(umask(arg1));
8377 case TARGET_NR_chroot:
8378 if (!(p = lock_user_string(arg1)))
8379 return -TARGET_EFAULT;
8380 ret = get_errno(chroot(p));
8381 unlock_user(p, arg1, 0);
8382 return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* Propagate any fd translation state (e.g. signalfd/netlink
             * wrappers) from the old fd to the new one. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        {
            int host_flags;

            /* dup3(2) only accepts O_CLOEXEC; reject anything else,
             * like the kernel does. */
            if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
                return -EINVAL;
            }
            host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
            ret = get_errno(dup3(arg1, arg2, host_flags));
            if (ret >= 0) {
                fd_trans_dup(arg1, arg2);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
        /* The old sigaction(2) syscall uses per-target legacy struct
         * layouts, hence the three-way split below.  In each variant the
         * guest struct is unpacked into a full struct target_sigaction,
         * do_sigaction() does the work, and the old layout is repacked
         * for the (optional) oldact out-parameter. */
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                /* Alpha's old sigaction has no restorer field. */
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
            }
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                /* MIPS's old sigaction already carries a full sigset;
                 * only the first word is meaningful here. */
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                /* Zero the unused words of the 4-word MIPS sigset. */
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                /* The old ABI has no ka_restorer; leave it cleared. */
                act.ka_restorer = 0;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            /* Like the kernel, reject any sigset size other than ours. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                /* arg5 is the restorer trampoline address (see above). */
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    /* act is already locked; fall through the label so
                     * it gets unlocked before we return the fault. */
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else
                oact = NULL;
            ret = get_errno(do_sigaction(arg1, act, oact));
	rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            /* Query the current mask (how is ignored when set is NULL)
             * and return it encoded in the legacy one-word format. */
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            /* Install the new one-word mask and return the previous one,
             * per the legacy ssetmask(2) contract. */
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha's osf_sigprocmask passes the mask by value in arg2
             * and returns the old mask in the result register. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                /* ret is the old mask, which may look like an errno value;
                 * clear a3 so the guest does not mistake it for an error. */
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            /* 'how' is only validated when a new set is supplied, matching
             * the kernel: a NULL set means "just query the old mask". */
            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* rt_sigprocmask(2) requires the exact sigset size. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            /* As with sigprocmask above, 'how' is only checked when a
             * new set is actually being installed. */
            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            /* Legacy sigpending(2): old (one-word) sigset encoding. */
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
            /* The suspend mask is stashed in the TaskState so that the
             * signal-delivery path can restore the original mask. */
#if defined(TARGET_ALPHA)
            /* Alpha passes the old-style mask by value in arg1. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            /* On anything but a restart, flag that we are inside
             * sigsuspend so signal delivery restores the saved mask. */
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            /* Like the kernel, reject any sigset size other than ours. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                /* Bug fix: the conversion result was previously ignored,
                 * so an unreadable guest timespec left uts uninitialized
                 * and we waited on garbage.  Report EFAULT instead. */
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* Success value is a host signal number; map it back to
                 * the target's numbering. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            /* Convert the guest siginfo into host form and hand it to the
             * host rt_sigqueueinfo(pid, sig, info) syscall. */
            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            /* Thread-directed variant: (tgid, tid, sig, info). */
            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Signal frames must not be unwound while a signal could be
         * delivered concurrently; retry the syscall if we cannot block. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            /* Query the host limit and convert both fields back to the
             * target's rlim encoding (RLIM_INFINITY differs per target). */
            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                /* host_to_target_rusage() writes the converted struct to
                 * the guest address and reports any fault itself. */
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            /* Either output pointer may be NULL; only copy out the parts
             * the guest asked for. */
            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            /* Both arguments are optional; a NULL guest pointer maps to a
             * NULL host pointer so the kernel leaves that part untouched. */
            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Single-argument form: arg1 points at a block of 5 args. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            sigset_t set;
            struct {
                sigset_t *set;
                size_t size;
            } sig, *sig_ptr;

            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            n = arg1;
            rfd_addr = arg2;
            wfd_addr = arg3;
            efd_addr = arg4;
            ts_addr = arg5;

            /* A NULL guest fd_set address yields a NULL host pointer. */
            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
            if (ret) {
                return ret;
            }

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
            if (ts_addr) {
                if (target_to_host_timespec(&ts, ts_addr)) {
                    return -TARGET_EFAULT;
                }
                ts_ptr = &ts;
            } else {
                ts_ptr = NULL;
            }

            /* Extract the two packed args for the sigset */
            if (arg6) {
                sig_ptr = &sig;
                sig.size = SIGSET_T_SIZE;

                /* arg6 points at { sigset pointer, sigset size }. */
                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                if (!arg7) {
                    return -TARGET_EFAULT;
                }
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                if (arg_sigset) {
                    sig.set = &set;
                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        return -TARGET_EINVAL;
                    }
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);
                } else {
                    sig.set = NULL;
                }
            } else {
                sig_ptr = NULL;
            }

            ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                          ts_ptr, sig_ptr));

            /* On success copy the surviving fd sets and the (possibly
             * updated) remaining timeout back to the guest. */
            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                    return -TARGET_EFAULT;
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                    return -TARGET_EFAULT;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    return -TARGET_EFAULT;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            /* Lock both path strings before deciding; the unlock calls
             * below are safe on a NULL pointer from a failed lock. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            /* symlinkat(target, newdirfd, linkpath): arg2 is the dirfd. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Reading /proc/self/exe must report the emulated binary,
                 * not the QEMU executable, so resolve exec_path here. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Reading /proc/self/exe must report the emulated binary.
                 * Bug fix: match the TARGET_NR_readlink handling above --
                 * the previous snprintf() NUL-terminated the result
                 * (readlink(2) must not), and could report a length
                 * greater than the arg4 bytes we locked, so unlock_user()
                 * below would write past the guest buffer. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Truncate silently to the buffer size, as the
                     * kernel does; no NUL terminator is written. */
                    ret = MIN(strlen(real), arg4);
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* Old-style mmap: a single guest pointer to a block of six
             * arguments (addr, len, prot, flags, fd, offset). */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2's last argument is the offset in (usually 4K) pages,
         * so scale it to a byte offset for target_mmap(). */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Strip the flag and widen the range to cover the whole
                 * guest stack region instead. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* g2h() translates the guest address to the host mapping. */
        return get_errno(msync(g2h(arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        /* The flags argument is unimplemented in the kernel; pass 0. */
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* fstatfs below jumps here to share the struct conversion. */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* 64-bit variant: arg2 is the struct size, output is at arg3. */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket interface used by some 32-bit targets;
         * do_socketcall() unpacks the argument block and dispatches to
         * the same do_* helpers used by the direct syscalls below. */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with no flags. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send() is sendto() with a NULL destination address. */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        /* On success ret is the byte count actually filled, which is
         * exactly the length to copy back to the guest. */
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /* syslog(type, bufp, len): arg1 = type, arg2 = guest buffer,
             * arg3 = length.  Bug fix: the length guards below previously
             * tested arg2 (the buffer *address*), so e.g. a NULL buffer
             * with a positive length returned 0 instead of a fault, and a
             * high guest address could be misread as a negative length.
             * The sys_syslog() calls already used arg3 correctly. */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions ignore the buffer entirely. */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    /* Match the kernel's do_syslog() validation. */
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
9511 case TARGET_NR_setitimer:
9512 {
9513 struct itimerval value, ovalue, *pvalue;
9514
9515 if (arg2) {
9516 pvalue = &value;
9517 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9518 || copy_from_user_timeval(&pvalue->it_value,
9519 arg2 + sizeof(struct target_timeval)))
9520 return -TARGET_EFAULT;
9521 } else {
9522 pvalue = NULL;
9523 }
9524 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9525 if (!is_error(ret) && arg3) {
9526 if (copy_to_user_timeval(arg3,
9527 &ovalue.it_interval)
9528 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9529 &ovalue.it_value))
9530 return -TARGET_EFAULT;
9531 }
9532 }
9533 return ret;
9534 case TARGET_NR_getitimer:
9535 {
9536 struct itimerval value;
9537
9538 ret = get_errno(getitimer(arg1, &value));
9539 if (!is_error(ret) && arg2) {
9540 if (copy_to_user_timeval(arg2,
9541 &value.it_interval)
9542 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9543 &value.it_value))
9544 return -TARGET_EFAULT;
9545 }
9546 }
9547 return ret;
9548 #ifdef TARGET_NR_stat
9549 case TARGET_NR_stat:
9550 if (!(p = lock_user_string(arg1))) {
9551 return -TARGET_EFAULT;
9552 }
9553 ret = get_errno(stat(path(p), &st));
9554 unlock_user(p, arg1, 0);
9555 goto do_stat;
9556 #endif
9557 #ifdef TARGET_NR_lstat
9558 case TARGET_NR_lstat:
9559 if (!(p = lock_user_string(arg1))) {
9560 return -TARGET_EFAULT;
9561 }
9562 ret = get_errno(lstat(path(p), &st));
9563 unlock_user(p, arg1, 0);
9564 goto do_stat;
9565 #endif
9566 #ifdef TARGET_NR_fstat
9567 case TARGET_NR_fstat:
9568 {
9569 ret = get_errno(fstat(arg1, &st));
9570 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9571 do_stat:
9572 #endif
9573 if (!is_error(ret)) {
9574 struct target_stat *target_st;
9575
9576 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9577 return -TARGET_EFAULT;
9578 memset(target_st, 0, sizeof(*target_st));
9579 __put_user(st.st_dev, &target_st->st_dev);
9580 __put_user(st.st_ino, &target_st->st_ino);
9581 __put_user(st.st_mode, &target_st->st_mode);
9582 __put_user(st.st_uid, &target_st->st_uid);
9583 __put_user(st.st_gid, &target_st->st_gid);
9584 __put_user(st.st_nlink, &target_st->st_nlink);
9585 __put_user(st.st_rdev, &target_st->st_rdev);
9586 __put_user(st.st_size, &target_st->st_size);
9587 __put_user(st.st_blksize, &target_st->st_blksize);
9588 __put_user(st.st_blocks, &target_st->st_blocks);
9589 __put_user(st.st_atime, &target_st->target_st_atime);
9590 __put_user(st.st_mtime, &target_st->target_st_mtime);
9591 __put_user(st.st_ctime, &target_st->target_st_ctime);
9592 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9593 defined(TARGET_STAT_HAVE_NSEC)
9594 __put_user(st.st_atim.tv_nsec,
9595 &target_st->target_st_atime_nsec);
9596 __put_user(st.st_mtim.tv_nsec,
9597 &target_st->target_st_mtime_nsec);
9598 __put_user(st.st_ctim.tv_nsec,
9599 &target_st->target_st_ctime_nsec);
9600 #endif
9601 unlock_user_struct(target_st, arg2, 1);
9602 }
9603 }
9604 return ret;
9605 #endif
9606 case TARGET_NR_vhangup:
9607 return get_errno(vhangup());
9608 #ifdef TARGET_NR_syscall
9609 case TARGET_NR_syscall:
9610 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9611 arg6, arg7, arg8, 0);
9612 #endif
9613 #if defined(TARGET_NR_wait4)
9614 case TARGET_NR_wait4:
9615 {
9616 int status;
9617 abi_long status_ptr = arg2;
9618 struct rusage rusage, *rusage_ptr;
9619 abi_ulong target_rusage = arg4;
9620 abi_long rusage_err;
9621 if (target_rusage)
9622 rusage_ptr = &rusage;
9623 else
9624 rusage_ptr = NULL;
9625 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9626 if (!is_error(ret)) {
9627 if (status_ptr && ret) {
9628 status = host_to_target_waitstatus(status);
9629 if (put_user_s32(status, status_ptr))
9630 return -TARGET_EFAULT;
9631 }
9632 if (target_rusage) {
9633 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9634 if (rusage_err) {
9635 ret = rusage_err;
9636 }
9637 }
9638 }
9639 }
9640 return ret;
9641 #endif
9642 #ifdef TARGET_NR_swapoff
9643 case TARGET_NR_swapoff:
9644 if (!(p = lock_user_string(arg1)))
9645 return -TARGET_EFAULT;
9646 ret = get_errno(swapoff(p));
9647 unlock_user(p, arg1, 0);
9648 return ret;
9649 #endif
9650 case TARGET_NR_sysinfo:
9651 {
9652 struct target_sysinfo *target_value;
9653 struct sysinfo value;
9654 ret = get_errno(sysinfo(&value));
9655 if (!is_error(ret) && arg1)
9656 {
9657 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9658 return -TARGET_EFAULT;
9659 __put_user(value.uptime, &target_value->uptime);
9660 __put_user(value.loads[0], &target_value->loads[0]);
9661 __put_user(value.loads[1], &target_value->loads[1]);
9662 __put_user(value.loads[2], &target_value->loads[2]);
9663 __put_user(value.totalram, &target_value->totalram);
9664 __put_user(value.freeram, &target_value->freeram);
9665 __put_user(value.sharedram, &target_value->sharedram);
9666 __put_user(value.bufferram, &target_value->bufferram);
9667 __put_user(value.totalswap, &target_value->totalswap);
9668 __put_user(value.freeswap, &target_value->freeswap);
9669 __put_user(value.procs, &target_value->procs);
9670 __put_user(value.totalhigh, &target_value->totalhigh);
9671 __put_user(value.freehigh, &target_value->freehigh);
9672 __put_user(value.mem_unit, &target_value->mem_unit);
9673 unlock_user_struct(target_value, arg1, 1);
9674 }
9675 }
9676 return ret;
9677 #ifdef TARGET_NR_ipc
9678 case TARGET_NR_ipc:
9679 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9680 #endif
9681 #ifdef TARGET_NR_semget
9682 case TARGET_NR_semget:
9683 return get_errno(semget(arg1, arg2, arg3));
9684 #endif
9685 #ifdef TARGET_NR_semop
9686 case TARGET_NR_semop:
9687 return do_semop(arg1, arg2, arg3);
9688 #endif
9689 #ifdef TARGET_NR_semctl
9690 case TARGET_NR_semctl:
9691 return do_semctl(arg1, arg2, arg3, arg4);
9692 #endif
9693 #ifdef TARGET_NR_msgctl
9694 case TARGET_NR_msgctl:
9695 return do_msgctl(arg1, arg2, arg3);
9696 #endif
9697 #ifdef TARGET_NR_msgget
9698 case TARGET_NR_msgget:
9699 return get_errno(msgget(arg1, arg2));
9700 #endif
9701 #ifdef TARGET_NR_msgrcv
9702 case TARGET_NR_msgrcv:
9703 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9704 #endif
9705 #ifdef TARGET_NR_msgsnd
9706 case TARGET_NR_msgsnd:
9707 return do_msgsnd(arg1, arg2, arg3, arg4);
9708 #endif
9709 #ifdef TARGET_NR_shmget
9710 case TARGET_NR_shmget:
9711 return get_errno(shmget(arg1, arg2, arg3));
9712 #endif
9713 #ifdef TARGET_NR_shmctl
9714 case TARGET_NR_shmctl:
9715 return do_shmctl(arg1, arg2, arg3);
9716 #endif
9717 #ifdef TARGET_NR_shmat
9718 case TARGET_NR_shmat:
9719 return do_shmat(cpu_env, arg1, arg2, arg3);
9720 #endif
9721 #ifdef TARGET_NR_shmdt
9722 case TARGET_NR_shmdt:
9723 return do_shmdt(arg1);
9724 #endif
9725 case TARGET_NR_fsync:
9726 return get_errno(fsync(arg1));
9727 case TARGET_NR_clone:
9728 /* Linux manages to have three different orderings for its
9729 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9730 * match the kernel's CONFIG_CLONE_* settings.
9731 * Microblaze is further special in that it uses a sixth
9732 * implicit argument to clone for the TLS pointer.
9733 */
9734 #if defined(TARGET_MICROBLAZE)
9735 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9736 #elif defined(TARGET_CLONE_BACKWARDS)
9737 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9738 #elif defined(TARGET_CLONE_BACKWARDS2)
9739 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9740 #else
9741 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9742 #endif
9743 return ret;
9744 #ifdef __NR_exit_group
9745 /* new thread calls */
9746 case TARGET_NR_exit_group:
9747 preexit_cleanup(cpu_env, arg1);
9748 return get_errno(exit_group(arg1));
9749 #endif
9750 case TARGET_NR_setdomainname:
9751 if (!(p = lock_user_string(arg1)))
9752 return -TARGET_EFAULT;
9753 ret = get_errno(setdomainname(p, arg2));
9754 unlock_user(p, arg1, 0);
9755 return ret;
9756 case TARGET_NR_uname:
9757 /* no need to transcode because we use the linux syscall */
9758 {
9759 struct new_utsname * buf;
9760
9761 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9762 return -TARGET_EFAULT;
9763 ret = get_errno(sys_uname(buf));
9764 if (!is_error(ret)) {
9765 /* Overwrite the native machine name with whatever is being
9766 emulated. */
9767 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9768 sizeof(buf->machine));
9769 /* Allow the user to override the reported release. */
9770 if (qemu_uname_release && *qemu_uname_release) {
9771 g_strlcpy(buf->release, qemu_uname_release,
9772 sizeof(buf->release));
9773 }
9774 }
9775 unlock_user_struct(buf, arg1, 1);
9776 }
9777 return ret;
9778 #ifdef TARGET_I386
9779 case TARGET_NR_modify_ldt:
9780 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9781 #if !defined(TARGET_X86_64)
9782 case TARGET_NR_vm86:
9783 return do_vm86(cpu_env, arg1, arg2);
9784 #endif
9785 #endif
9786 #if defined(TARGET_NR_adjtimex)
9787 case TARGET_NR_adjtimex:
9788 {
9789 struct timex host_buf;
9790
9791 if (target_to_host_timex(&host_buf, arg1) != 0) {
9792 return -TARGET_EFAULT;
9793 }
9794 ret = get_errno(adjtimex(&host_buf));
9795 if (!is_error(ret)) {
9796 if (host_to_target_timex(arg1, &host_buf) != 0) {
9797 return -TARGET_EFAULT;
9798 }
9799 }
9800 }
9801 return ret;
9802 #endif
9803 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9804 case TARGET_NR_clock_adjtime:
9805 {
9806 struct timex htx, *phtx = &htx;
9807
9808 if (target_to_host_timex(phtx, arg2) != 0) {
9809 return -TARGET_EFAULT;
9810 }
9811 ret = get_errno(clock_adjtime(arg1, phtx));
9812 if (!is_error(ret) && phtx) {
9813 if (host_to_target_timex(arg2, phtx) != 0) {
9814 return -TARGET_EFAULT;
9815 }
9816 }
9817 }
9818 return ret;
9819 #endif
9820 case TARGET_NR_getpgid:
9821 return get_errno(getpgid(arg1));
9822 case TARGET_NR_fchdir:
9823 return get_errno(fchdir(arg1));
9824 case TARGET_NR_personality:
9825 return get_errno(personality(arg1));
9826 #ifdef TARGET_NR__llseek /* Not on alpha */
9827 case TARGET_NR__llseek:
9828 {
9829 int64_t res;
9830 #if !defined(__NR_llseek)
9831 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9832 if (res == -1) {
9833 ret = get_errno(res);
9834 } else {
9835 ret = 0;
9836 }
9837 #else
9838 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9839 #endif
9840 if ((ret == 0) && put_user_s64(res, arg4)) {
9841 return -TARGET_EFAULT;
9842 }
9843 }
9844 return ret;
9845 #endif
9846 #ifdef TARGET_NR_getdents
9847 case TARGET_NR_getdents:
9848 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9849 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9850 {
9851 struct target_dirent *target_dirp;
9852 struct linux_dirent *dirp;
9853 abi_long count = arg3;
9854
9855 dirp = g_try_malloc(count);
9856 if (!dirp) {
9857 return -TARGET_ENOMEM;
9858 }
9859
9860 ret = get_errno(sys_getdents(arg1, dirp, count));
9861 if (!is_error(ret)) {
9862 struct linux_dirent *de;
9863 struct target_dirent *tde;
9864 int len = ret;
9865 int reclen, treclen;
9866 int count1, tnamelen;
9867
9868 count1 = 0;
9869 de = dirp;
9870 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9871 return -TARGET_EFAULT;
9872 tde = target_dirp;
9873 while (len > 0) {
9874 reclen = de->d_reclen;
9875 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9876 assert(tnamelen >= 0);
9877 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9878 assert(count1 + treclen <= count);
9879 tde->d_reclen = tswap16(treclen);
9880 tde->d_ino = tswapal(de->d_ino);
9881 tde->d_off = tswapal(de->d_off);
9882 memcpy(tde->d_name, de->d_name, tnamelen);
9883 de = (struct linux_dirent *)((char *)de + reclen);
9884 len -= reclen;
9885 tde = (struct target_dirent *)((char *)tde + treclen);
9886 count1 += treclen;
9887 }
9888 ret = count1;
9889 unlock_user(target_dirp, arg2, ret);
9890 }
9891 g_free(dirp);
9892 }
9893 #else
9894 {
9895 struct linux_dirent *dirp;
9896 abi_long count = arg3;
9897
9898 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9899 return -TARGET_EFAULT;
9900 ret = get_errno(sys_getdents(arg1, dirp, count));
9901 if (!is_error(ret)) {
9902 struct linux_dirent *de;
9903 int len = ret;
9904 int reclen;
9905 de = dirp;
9906 while (len > 0) {
9907 reclen = de->d_reclen;
9908 if (reclen > len)
9909 break;
9910 de->d_reclen = tswap16(reclen);
9911 tswapls(&de->d_ino);
9912 tswapls(&de->d_off);
9913 de = (struct linux_dirent *)((char *)de + reclen);
9914 len -= reclen;
9915 }
9916 }
9917 unlock_user(dirp, arg2, ret);
9918 }
9919 #endif
9920 #else
9921 /* Implement getdents in terms of getdents64 */
9922 {
9923 struct linux_dirent64 *dirp;
9924 abi_long count = arg3;
9925
9926 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9927 if (!dirp) {
9928 return -TARGET_EFAULT;
9929 }
9930 ret = get_errno(sys_getdents64(arg1, dirp, count));
9931 if (!is_error(ret)) {
9932 /* Convert the dirent64 structs to target dirent. We do this
9933 * in-place, since we can guarantee that a target_dirent is no
9934 * larger than a dirent64; however this means we have to be
9935 * careful to read everything before writing in the new format.
9936 */
9937 struct linux_dirent64 *de;
9938 struct target_dirent *tde;
9939 int len = ret;
9940 int tlen = 0;
9941
9942 de = dirp;
9943 tde = (struct target_dirent *)dirp;
9944 while (len > 0) {
9945 int namelen, treclen;
9946 int reclen = de->d_reclen;
9947 uint64_t ino = de->d_ino;
9948 int64_t off = de->d_off;
9949 uint8_t type = de->d_type;
9950
9951 namelen = strlen(de->d_name);
9952 treclen = offsetof(struct target_dirent, d_name)
9953 + namelen + 2;
9954 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9955
9956 memmove(tde->d_name, de->d_name, namelen + 1);
9957 tde->d_ino = tswapal(ino);
9958 tde->d_off = tswapal(off);
9959 tde->d_reclen = tswap16(treclen);
9960 /* The target_dirent type is in what was formerly a padding
9961 * byte at the end of the structure:
9962 */
9963 *(((char *)tde) + treclen - 1) = type;
9964
9965 de = (struct linux_dirent64 *)((char *)de + reclen);
9966 tde = (struct target_dirent *)((char *)tde + treclen);
9967 len -= reclen;
9968 tlen += treclen;
9969 }
9970 ret = tlen;
9971 }
9972 unlock_user(dirp, arg2, ret);
9973 }
9974 #endif
9975 return ret;
9976 #endif /* TARGET_NR_getdents */
9977 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9978 case TARGET_NR_getdents64:
9979 {
9980 struct linux_dirent64 *dirp;
9981 abi_long count = arg3;
9982 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9983 return -TARGET_EFAULT;
9984 ret = get_errno(sys_getdents64(arg1, dirp, count));
9985 if (!is_error(ret)) {
9986 struct linux_dirent64 *de;
9987 int len = ret;
9988 int reclen;
9989 de = dirp;
9990 while (len > 0) {
9991 reclen = de->d_reclen;
9992 if (reclen > len)
9993 break;
9994 de->d_reclen = tswap16(reclen);
9995 tswap64s((uint64_t *)&de->d_ino);
9996 tswap64s((uint64_t *)&de->d_off);
9997 de = (struct linux_dirent64 *)((char *)de + reclen);
9998 len -= reclen;
9999 }
10000 }
10001 unlock_user(dirp, arg2, ret);
10002 }
10003 return ret;
10004 #endif /* TARGET_NR_getdents64 */
10005 #if defined(TARGET_NR__newselect)
10006 case TARGET_NR__newselect:
10007 return do_select(arg1, arg2, arg3, arg4, arg5);
10008 #endif
10009 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10010 # ifdef TARGET_NR_poll
10011 case TARGET_NR_poll:
10012 # endif
10013 # ifdef TARGET_NR_ppoll
10014 case TARGET_NR_ppoll:
10015 # endif
10016 {
10017 struct target_pollfd *target_pfd;
10018 unsigned int nfds = arg2;
10019 struct pollfd *pfd;
10020 unsigned int i;
10021
10022 pfd = NULL;
10023 target_pfd = NULL;
10024 if (nfds) {
10025 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10026 return -TARGET_EINVAL;
10027 }
10028
10029 target_pfd = lock_user(VERIFY_WRITE, arg1,
10030 sizeof(struct target_pollfd) * nfds, 1);
10031 if (!target_pfd) {
10032 return -TARGET_EFAULT;
10033 }
10034
10035 pfd = alloca(sizeof(struct pollfd) * nfds);
10036 for (i = 0; i < nfds; i++) {
10037 pfd[i].fd = tswap32(target_pfd[i].fd);
10038 pfd[i].events = tswap16(target_pfd[i].events);
10039 }
10040 }
10041
10042 switch (num) {
10043 # ifdef TARGET_NR_ppoll
10044 case TARGET_NR_ppoll:
10045 {
10046 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10047 target_sigset_t *target_set;
10048 sigset_t _set, *set = &_set;
10049
10050 if (arg3) {
10051 if (target_to_host_timespec(timeout_ts, arg3)) {
10052 unlock_user(target_pfd, arg1, 0);
10053 return -TARGET_EFAULT;
10054 }
10055 } else {
10056 timeout_ts = NULL;
10057 }
10058
10059 if (arg4) {
10060 if (arg5 != sizeof(target_sigset_t)) {
10061 unlock_user(target_pfd, arg1, 0);
10062 return -TARGET_EINVAL;
10063 }
10064
10065 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10066 if (!target_set) {
10067 unlock_user(target_pfd, arg1, 0);
10068 return -TARGET_EFAULT;
10069 }
10070 target_to_host_sigset(set, target_set);
10071 } else {
10072 set = NULL;
10073 }
10074
10075 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10076 set, SIGSET_T_SIZE));
10077
10078 if (!is_error(ret) && arg3) {
10079 host_to_target_timespec(arg3, timeout_ts);
10080 }
10081 if (arg4) {
10082 unlock_user(target_set, arg4, 0);
10083 }
10084 break;
10085 }
10086 # endif
10087 # ifdef TARGET_NR_poll
10088 case TARGET_NR_poll:
10089 {
10090 struct timespec ts, *pts;
10091
10092 if (arg3 >= 0) {
10093 /* Convert ms to secs, ns */
10094 ts.tv_sec = arg3 / 1000;
10095 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10096 pts = &ts;
10097 } else {
10098 /* -ve poll() timeout means "infinite" */
10099 pts = NULL;
10100 }
10101 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10102 break;
10103 }
10104 # endif
10105 default:
10106 g_assert_not_reached();
10107 }
10108
10109 if (!is_error(ret)) {
10110 for(i = 0; i < nfds; i++) {
10111 target_pfd[i].revents = tswap16(pfd[i].revents);
10112 }
10113 }
10114 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10115 }
10116 return ret;
10117 #endif
10118 case TARGET_NR_flock:
10119 /* NOTE: the flock constant seems to be the same for every
10120 Linux platform */
10121 return get_errno(safe_flock(arg1, arg2));
10122 case TARGET_NR_readv:
10123 {
10124 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10125 if (vec != NULL) {
10126 ret = get_errno(safe_readv(arg1, vec, arg3));
10127 unlock_iovec(vec, arg2, arg3, 1);
10128 } else {
10129 ret = -host_to_target_errno(errno);
10130 }
10131 }
10132 return ret;
10133 case TARGET_NR_writev:
10134 {
10135 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10136 if (vec != NULL) {
10137 ret = get_errno(safe_writev(arg1, vec, arg3));
10138 unlock_iovec(vec, arg2, arg3, 0);
10139 } else {
10140 ret = -host_to_target_errno(errno);
10141 }
10142 }
10143 return ret;
10144 #if defined(TARGET_NR_preadv)
10145 case TARGET_NR_preadv:
10146 {
10147 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10148 if (vec != NULL) {
10149 unsigned long low, high;
10150
10151 target_to_host_low_high(arg4, arg5, &low, &high);
10152 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10153 unlock_iovec(vec, arg2, arg3, 1);
10154 } else {
10155 ret = -host_to_target_errno(errno);
10156 }
10157 }
10158 return ret;
10159 #endif
10160 #if defined(TARGET_NR_pwritev)
10161 case TARGET_NR_pwritev:
10162 {
10163 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10164 if (vec != NULL) {
10165 unsigned long low, high;
10166
10167 target_to_host_low_high(arg4, arg5, &low, &high);
10168 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10169 unlock_iovec(vec, arg2, arg3, 0);
10170 } else {
10171 ret = -host_to_target_errno(errno);
10172 }
10173 }
10174 return ret;
10175 #endif
10176 case TARGET_NR_getsid:
10177 return get_errno(getsid(arg1));
10178 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10179 case TARGET_NR_fdatasync:
10180 return get_errno(fdatasync(arg1));
10181 #endif
10182 #ifdef TARGET_NR__sysctl
10183 case TARGET_NR__sysctl:
10184 /* We don't implement this, but ENOTDIR is always a safe
10185 return value. */
10186 return -TARGET_ENOTDIR;
10187 #endif
10188 case TARGET_NR_sched_getaffinity:
10189 {
10190 unsigned int mask_size;
10191 unsigned long *mask;
10192
10193 /*
10194 * sched_getaffinity needs multiples of ulong, so need to take
10195 * care of mismatches between target ulong and host ulong sizes.
10196 */
10197 if (arg2 & (sizeof(abi_ulong) - 1)) {
10198 return -TARGET_EINVAL;
10199 }
10200 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10201
10202 mask = alloca(mask_size);
10203 memset(mask, 0, mask_size);
10204 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10205
10206 if (!is_error(ret)) {
10207 if (ret > arg2) {
10208 /* More data returned than the caller's buffer will fit.
10209 * This only happens if sizeof(abi_long) < sizeof(long)
10210 * and the caller passed us a buffer holding an odd number
10211 * of abi_longs. If the host kernel is actually using the
10212 * extra 4 bytes then fail EINVAL; otherwise we can just
10213 * ignore them and only copy the interesting part.
10214 */
10215 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10216 if (numcpus > arg2 * 8) {
10217 return -TARGET_EINVAL;
10218 }
10219 ret = arg2;
10220 }
10221
10222 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10223 return -TARGET_EFAULT;
10224 }
10225 }
10226 }
10227 return ret;
10228 case TARGET_NR_sched_setaffinity:
10229 {
10230 unsigned int mask_size;
10231 unsigned long *mask;
10232
10233 /*
10234 * sched_setaffinity needs multiples of ulong, so need to take
10235 * care of mismatches between target ulong and host ulong sizes.
10236 */
10237 if (arg2 & (sizeof(abi_ulong) - 1)) {
10238 return -TARGET_EINVAL;
10239 }
10240 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10241 mask = alloca(mask_size);
10242
10243 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10244 if (ret) {
10245 return ret;
10246 }
10247
10248 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10249 }
10250 case TARGET_NR_getcpu:
10251 {
10252 unsigned cpu, node;
10253 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10254 arg2 ? &node : NULL,
10255 NULL));
10256 if (is_error(ret)) {
10257 return ret;
10258 }
10259 if (arg1 && put_user_u32(cpu, arg1)) {
10260 return -TARGET_EFAULT;
10261 }
10262 if (arg2 && put_user_u32(node, arg2)) {
10263 return -TARGET_EFAULT;
10264 }
10265 }
10266 return ret;
10267 case TARGET_NR_sched_setparam:
10268 {
10269 struct sched_param *target_schp;
10270 struct sched_param schp;
10271
10272 if (arg2 == 0) {
10273 return -TARGET_EINVAL;
10274 }
10275 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10276 return -TARGET_EFAULT;
10277 schp.sched_priority = tswap32(target_schp->sched_priority);
10278 unlock_user_struct(target_schp, arg2, 0);
10279 return get_errno(sched_setparam(arg1, &schp));
10280 }
10281 case TARGET_NR_sched_getparam:
10282 {
10283 struct sched_param *target_schp;
10284 struct sched_param schp;
10285
10286 if (arg2 == 0) {
10287 return -TARGET_EINVAL;
10288 }
10289 ret = get_errno(sched_getparam(arg1, &schp));
10290 if (!is_error(ret)) {
10291 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10292 return -TARGET_EFAULT;
10293 target_schp->sched_priority = tswap32(schp.sched_priority);
10294 unlock_user_struct(target_schp, arg2, 1);
10295 }
10296 }
10297 return ret;
10298 case TARGET_NR_sched_setscheduler:
10299 {
10300 struct sched_param *target_schp;
10301 struct sched_param schp;
10302 if (arg3 == 0) {
10303 return -TARGET_EINVAL;
10304 }
10305 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10306 return -TARGET_EFAULT;
10307 schp.sched_priority = tswap32(target_schp->sched_priority);
10308 unlock_user_struct(target_schp, arg3, 0);
10309 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10310 }
10311 case TARGET_NR_sched_getscheduler:
10312 return get_errno(sched_getscheduler(arg1));
10313 case TARGET_NR_sched_yield:
10314 return get_errno(sched_yield());
10315 case TARGET_NR_sched_get_priority_max:
10316 return get_errno(sched_get_priority_max(arg1));
10317 case TARGET_NR_sched_get_priority_min:
10318 return get_errno(sched_get_priority_min(arg1));
10319 #ifdef TARGET_NR_sched_rr_get_interval
10320 case TARGET_NR_sched_rr_get_interval:
10321 {
10322 struct timespec ts;
10323 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10324 if (!is_error(ret)) {
10325 ret = host_to_target_timespec(arg2, &ts);
10326 }
10327 }
10328 return ret;
10329 #endif
10330 #if defined(TARGET_NR_nanosleep)
10331 case TARGET_NR_nanosleep:
10332 {
10333 struct timespec req, rem;
10334 target_to_host_timespec(&req, arg1);
10335 ret = get_errno(safe_nanosleep(&req, &rem));
10336 if (is_error(ret) && arg2) {
10337 host_to_target_timespec(arg2, &rem);
10338 }
10339 }
10340 return ret;
10341 #endif
10342 case TARGET_NR_prctl:
10343 switch (arg1) {
10344 case PR_GET_PDEATHSIG:
10345 {
10346 int deathsig;
10347 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10348 if (!is_error(ret) && arg2
10349 && put_user_ual(deathsig, arg2)) {
10350 return -TARGET_EFAULT;
10351 }
10352 return ret;
10353 }
10354 #ifdef PR_GET_NAME
10355 case PR_GET_NAME:
10356 {
10357 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10358 if (!name) {
10359 return -TARGET_EFAULT;
10360 }
10361 ret = get_errno(prctl(arg1, (unsigned long)name,
10362 arg3, arg4, arg5));
10363 unlock_user(name, arg2, 16);
10364 return ret;
10365 }
10366 case PR_SET_NAME:
10367 {
10368 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10369 if (!name) {
10370 return -TARGET_EFAULT;
10371 }
10372 ret = get_errno(prctl(arg1, (unsigned long)name,
10373 arg3, arg4, arg5));
10374 unlock_user(name, arg2, 0);
10375 return ret;
10376 }
10377 #endif
10378 #ifdef TARGET_MIPS
10379 case TARGET_PR_GET_FP_MODE:
10380 {
10381 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10382 ret = 0;
10383 if (env->CP0_Status & (1 << CP0St_FR)) {
10384 ret |= TARGET_PR_FP_MODE_FR;
10385 }
10386 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10387 ret |= TARGET_PR_FP_MODE_FRE;
10388 }
10389 return ret;
10390 }
10391 case TARGET_PR_SET_FP_MODE:
10392 {
10393 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10394 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10395 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10396 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10397 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10398
10399 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10400 TARGET_PR_FP_MODE_FRE;
10401
10402 /* If nothing to change, return right away, successfully. */
10403 if (old_fr == new_fr && old_fre == new_fre) {
10404 return 0;
10405 }
10406 /* Check the value is valid */
10407 if (arg2 & ~known_bits) {
10408 return -TARGET_EOPNOTSUPP;
10409 }
10410 /* Setting FRE without FR is not supported. */
10411 if (new_fre && !new_fr) {
10412 return -TARGET_EOPNOTSUPP;
10413 }
10414 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10415 /* FR1 is not supported */
10416 return -TARGET_EOPNOTSUPP;
10417 }
10418 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10419 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10420 /* cannot set FR=0 */
10421 return -TARGET_EOPNOTSUPP;
10422 }
10423 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10424 /* Cannot set FRE=1 */
10425 return -TARGET_EOPNOTSUPP;
10426 }
10427
10428 int i;
10429 fpr_t *fpr = env->active_fpu.fpr;
10430 for (i = 0; i < 32 ; i += 2) {
10431 if (!old_fr && new_fr) {
10432 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10433 } else if (old_fr && !new_fr) {
10434 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10435 }
10436 }
10437
10438 if (new_fr) {
10439 env->CP0_Status |= (1 << CP0St_FR);
10440 env->hflags |= MIPS_HFLAG_F64;
10441 } else {
10442 env->CP0_Status &= ~(1 << CP0St_FR);
10443 env->hflags &= ~MIPS_HFLAG_F64;
10444 }
10445 if (new_fre) {
10446 env->CP0_Config5 |= (1 << CP0C5_FRE);
10447 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10448 env->hflags |= MIPS_HFLAG_FRE;
10449 }
10450 } else {
10451 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10452 env->hflags &= ~MIPS_HFLAG_FRE;
10453 }
10454
10455 return 0;
10456 }
10457 #endif /* MIPS */
10458 #ifdef TARGET_AARCH64
10459 case TARGET_PR_SVE_SET_VL:
10460 /*
10461 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10462 * PR_SVE_VL_INHERIT. Note the kernel definition
10463 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10464 * even though the current architectural maximum is VQ=16.
10465 */
10466 ret = -TARGET_EINVAL;
10467 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10468 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10469 CPUARMState *env = cpu_env;
10470 ARMCPU *cpu = env_archcpu(env);
10471 uint32_t vq, old_vq;
10472
10473 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10474 vq = MAX(arg2 / 16, 1);
10475 vq = MIN(vq, cpu->sve_max_vq);
10476
10477 if (vq < old_vq) {
10478 aarch64_sve_narrow_vq(env, vq);
10479 }
10480 env->vfp.zcr_el[1] = vq - 1;
10481 arm_rebuild_hflags(env);
10482 ret = vq * 16;
10483 }
10484 return ret;
10485 case TARGET_PR_SVE_GET_VL:
10486 ret = -TARGET_EINVAL;
10487 {
10488 ARMCPU *cpu = env_archcpu(cpu_env);
10489 if (cpu_isar_feature(aa64_sve, cpu)) {
10490 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10491 }
10492 }
10493 return ret;
10494 case TARGET_PR_PAC_RESET_KEYS:
10495 {
10496 CPUARMState *env = cpu_env;
10497 ARMCPU *cpu = env_archcpu(env);
10498
10499 if (arg3 || arg4 || arg5) {
10500 return -TARGET_EINVAL;
10501 }
10502 if (cpu_isar_feature(aa64_pauth, cpu)) {
10503 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10504 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10505 TARGET_PR_PAC_APGAKEY);
10506 int ret = 0;
10507 Error *err = NULL;
10508
10509 if (arg2 == 0) {
10510 arg2 = all;
10511 } else if (arg2 & ~all) {
10512 return -TARGET_EINVAL;
10513 }
10514 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10515 ret |= qemu_guest_getrandom(&env->keys.apia,
10516 sizeof(ARMPACKey), &err);
10517 }
10518 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10519 ret |= qemu_guest_getrandom(&env->keys.apib,
10520 sizeof(ARMPACKey), &err);
10521 }
10522 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10523 ret |= qemu_guest_getrandom(&env->keys.apda,
10524 sizeof(ARMPACKey), &err);
10525 }
10526 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10527 ret |= qemu_guest_getrandom(&env->keys.apdb,
10528 sizeof(ARMPACKey), &err);
10529 }
10530 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10531 ret |= qemu_guest_getrandom(&env->keys.apga,
10532 sizeof(ARMPACKey), &err);
10533 }
10534 if (ret != 0) {
10535 /*
10536 * Some unknown failure in the crypto. The best
10537 * we can do is log it and fail the syscall.
10538 * The real syscall cannot fail this way.
10539 */
10540 qemu_log_mask(LOG_UNIMP,
10541 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10542 error_get_pretty(err));
10543 error_free(err);
10544 return -TARGET_EIO;
10545 }
10546 return 0;
10547 }
10548 }
10549 return -TARGET_EINVAL;
10550 #endif /* AARCH64 */
10551 case PR_GET_SECCOMP:
10552 case PR_SET_SECCOMP:
10553 /* Disable seccomp to prevent the target disabling syscalls we
10554 * need. */
10555 return -TARGET_EINVAL;
10556 default:
10557 /* Most prctl options have no pointer arguments */
10558 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10559 }
10560 break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        /* x86-specific prctl (FS/GS base handling etc.); delegated. */
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /*
         * On 32-bit ABIs the 64-bit file offset arrives as a pair of
         * registers; some targets require such pairs to start on an
         * even register, which inserts a pad and shifts the remaining
         * arguments up by one.  regpairs_aligned() detects that case.
         */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back only the 'ret' bytes actually read. */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Nothing to copy back to the guest after a write. */
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        /* On success 'ret' is the path length incl. NUL; copy it back. */
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
        {
            /*
             * Shared handler for capget/capset: convert the guest
             * cap_user_header/cap_user_data to host layout, call the
             * host syscall, and copy results (and the kernel-updated
             * version field) back out.
             */
            struct target_user_cap_header *target_header;
            struct target_user_cap_data *target_data = NULL;
            struct __user_cap_header_struct header;
            struct __user_cap_data_struct data[2];
            struct __user_cap_data_struct *dataptr = NULL;
            int i, target_datalen;
            int data_items = 1;

            if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
                return -TARGET_EFAULT;
            }
            header.version = tswap32(target_header->version);
            header.pid = tswap32(target_header->pid);

            if (header.version != _LINUX_CAPABILITY_VERSION) {
                /* Version 2 and up takes pointer to two user_data structs */
                data_items = 2;
            }

            target_datalen = sizeof(*target_data) * data_items;

            if (arg2) {
                /* capget writes the data array; capset reads it. */
                if (num == TARGET_NR_capget) {
                    target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
                } else {
                    target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
                }
                if (!target_data) {
                    unlock_user_struct(target_header, arg1, 0);
                    return -TARGET_EFAULT;
                }

                if (num == TARGET_NR_capset) {
                    for (i = 0; i < data_items; i++) {
                        data[i].effective = tswap32(target_data[i].effective);
                        data[i].permitted = tswap32(target_data[i].permitted);
                        data[i].inheritable = tswap32(target_data[i].inheritable);
                    }
                }

                dataptr = data;
            }

            if (num == TARGET_NR_capget) {
                ret = get_errno(capget(&header, dataptr));
            } else {
                ret = get_errno(capset(&header, dataptr));
            }

            /* The kernel always updates version for both capget and capset */
            target_header->version = tswap32(header.version);
            unlock_user_struct(target_header, arg1, 1);

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    for (i = 0; i < data_items; i++) {
                        target_data[i].effective = tswap32(data[i].effective);
                        target_data[i].permitted = tswap32(data[i].permitted);
                        target_data[i].inheritable = tswap32(data[i].inheritable);
                    }
                    unlock_user(target_data, arg2, target_datalen);
                } else {
                    unlock_user(target_data, arg2, 0);
                }
            }
            return ret;
        }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
        {
            /* 32-bit sendfile: the offset pointed to by arg3 is an
             * abi_long, hence the *_sal accessors. */
            off_t *offp = NULL;
            off_t off;
            if (arg3) {
                ret = get_user_sal(off, arg3);
                if (is_error(ret)) {
                    return ret;
                }
                offp = &off;
            }
            ret = get_errno(sendfile(arg1, arg2, offp, arg4));
            if (!is_error(ret) && arg3) {
                /* Write the updated offset back to the guest. */
                abi_long ret2 = put_user_sal(off, arg3);
                if (is_error(ret2)) {
                    ret = ret2;
                }
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
        {
            /* sendfile64: the guest offset is a full 64-bit value. */
            off_t *offp = NULL;
            off_t off;
            if (arg3) {
                ret = get_user_s64(off, arg3);
                if (is_error(ret)) {
                    return ret;
                }
                offp = &off;
            }
            ret = get_errno(sendfile(arg1, arg2, offp, arg4));
            if (!is_error(ret) && arg3) {
                abi_long ret2 = put_user_s64(off, arg3);
                if (is_error(ret2)) {
                    ret = ret2;
                }
            }
            return ret;
        }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Emulated as a vfork-flavoured clone via the common fork path. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        /* getrlimit with target resource-number and limit translation. */
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* 64-bit length may be split across registers; helper handles it. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        /* stat64 family: do the host stat into 'st', then translate the
         * result into the guest's 64-bit stat layout. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat64/newfstatat share one implementation: args are
         * (dirfd, pathname, statbuf, flags). */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            /*
             * Try the host statx syscall first; if the host kernel lacks
             * it (ENOSYS), fall back to fstatat and synthesise a statx
             * result from the ordinary stat data.
             */
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Only ENOSYS falls through to the fstatat emulation. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Zero first: fields stat can't provide stay 0/unset. */
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* 16-bit-uid syscalls: low2high*/high2low* translate between the
         * guest's legacy 16-bit ids and the host's full-width ids. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10896 case TARGET_NR_getgroups:
10897 {
10898 int gidsetsize = arg1;
10899 target_id *target_grouplist;
10900 gid_t *grouplist;
10901 int i;
10902
10903 grouplist = alloca(gidsetsize * sizeof(gid_t));
10904 ret = get_errno(getgroups(gidsetsize, grouplist));
10905 if (gidsetsize == 0)
10906 return ret;
10907 if (!is_error(ret)) {
10908 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10909 if (!target_grouplist)
10910 return -TARGET_EFAULT;
10911 for(i = 0;i < ret; i++)
10912 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10913 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10914 }
10915 }
10916 return ret;
10917 case TARGET_NR_setgroups:
10918 {
10919 int gidsetsize = arg1;
10920 target_id *target_grouplist;
10921 gid_t *grouplist = NULL;
10922 int i;
10923 if (gidsetsize) {
10924 grouplist = alloca(gidsetsize * sizeof(gid_t));
10925 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10926 if (!target_grouplist) {
10927 return -TARGET_EFAULT;
10928 }
10929 for (i = 0; i < gidsetsize; i++) {
10930 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10931 }
10932 unlock_user(target_grouplist, arg2, 0);
10933 }
10934 return get_errno(setgroups(gidsetsize, grouplist));
10935 }
10936 case TARGET_NR_fchown:
10937 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10938 #if defined(TARGET_NR_fchownat)
10939 case TARGET_NR_fchownat:
10940 if (!(p = lock_user_string(arg2)))
10941 return -TARGET_EFAULT;
10942 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10943 low2highgid(arg4), arg5));
10944 unlock_user(p, arg2, 0);
10945 return ret;
10946 #endif
10947 #ifdef TARGET_NR_setresuid
10948 case TARGET_NR_setresuid:
10949 return get_errno(sys_setresuid(low2highuid(arg1),
10950 low2highuid(arg2),
10951 low2highuid(arg3)));
10952 #endif
10953 #ifdef TARGET_NR_getresuid
10954 case TARGET_NR_getresuid:
10955 {
10956 uid_t ruid, euid, suid;
10957 ret = get_errno(getresuid(&ruid, &euid, &suid));
10958 if (!is_error(ret)) {
10959 if (put_user_id(high2lowuid(ruid), arg1)
10960 || put_user_id(high2lowuid(euid), arg2)
10961 || put_user_id(high2lowuid(suid), arg3))
10962 return -TARGET_EFAULT;
10963 }
10964 }
10965 return ret;
10966 #endif
10967 #ifdef TARGET_NR_getresgid
10968 case TARGET_NR_setresgid:
10969 return get_errno(sys_setresgid(low2highgid(arg1),
10970 low2highgid(arg2),
10971 low2highgid(arg3)));
10972 #endif
10973 #ifdef TARGET_NR_getresgid
10974 case TARGET_NR_getresgid:
10975 {
10976 gid_t rgid, egid, sgid;
10977 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10978 if (!is_error(ret)) {
10979 if (put_user_id(high2lowgid(rgid), arg1)
10980 || put_user_id(high2lowgid(egid), arg2)
10981 || put_user_id(high2lowgid(sgid), arg3))
10982 return -TARGET_EFAULT;
10983 }
10984 }
10985 return ret;
10986 #endif
10987 #ifdef TARGET_NR_chown
10988 case TARGET_NR_chown:
10989 if (!(p = lock_user_string(arg1)))
10990 return -TARGET_EFAULT;
10991 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10992 unlock_user(p, arg1, 0);
10993 return ret;
10994 #endif
10995 case TARGET_NR_setuid:
10996 return get_errno(sys_setuid(low2highuid(arg1)));
10997 case TARGET_NR_setgid:
10998 return get_errno(sys_setgid(low2highgid(arg1)));
10999 case TARGET_NR_setfsuid:
11000 return get_errno(setfsuid(arg1));
11001 case TARGET_NR_setfsgid:
11002 return get_errno(setfsgid(arg1));
11003
11004 #ifdef TARGET_NR_lchown32
11005 case TARGET_NR_lchown32:
11006 if (!(p = lock_user_string(arg1)))
11007 return -TARGET_EFAULT;
11008 ret = get_errno(lchown(p, arg2, arg3));
11009 unlock_user(p, arg1, 0);
11010 return ret;
11011 #endif
11012 #ifdef TARGET_NR_getuid32
11013 case TARGET_NR_getuid32:
11014 return get_errno(getuid());
11015 #endif
11016
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxuid returns ruid in v0 and euid in a4. */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxgid returns rgid in v0 and egid in a4. */
    case TARGET_NR_getxgid:
        {
            /* Fix: the effective group id was declared uid_t; use gid_t
             * (identical representation on Linux, so no behavior change). */
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                /* Report the software FP control word: trap-enable/map
                 * bits live in env->swcr, the sticky status bits are
                 * kept only in the hardware FPCR (bits 35..40). */
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel.
           case GSI_UACPROC:
           -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
           -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
           -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                /* Set the software FP control word from the guest. */
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                /* Raise IEEE exceptions on behalf of the guest: record
                 * them in the FPCR, and deliver SIGFPE for any newly
                 * raised exception whose trap is enabled in swcr. */
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Pick a si_code; later checks take precedence when
                     * several exceptions are raised at once. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
           -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            /* OSF/1-style sigprocmask: mask is passed/returned by value
             * rather than via pointers. */
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* Return the previous mask as the syscall result. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif

#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /*
             * 32-bit-id variant of getgroups.
             *
             * Fix: bound the guest-controlled gidsetsize before it feeds
             * alloca() — a negative or huge value previously overflowed
             * the stack.  The kernel returns EINVAL outside
             * [0, NGROUPS_MAX].
             */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                /* Size query: return the group count, copy nothing. */
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /*
             * 32-bit-id variant of setgroups.
             *
             * Fix: bound the guest-controlled gidsetsize before it feeds
             * alloca() — a negative or huge value previously overflowed
             * the stack.  The kernel returns EINVAL outside
             * [0, NGROUPS_MAX].
             */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist = NULL;
            int i;

            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* *32 id syscalls pass full-width ids straight through. */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /*
             * mincore(addr, length, vec): vec receives one byte per page
             * of the [addr, addr+length) range.
             *
             * Fixes:
             *  - The vector was previously locked with lock_user_string(),
             *    which treats it as a NUL-terminated *input* string: wrong
             *    access direction and a length unrelated to the real
             *    vector size.  Lock it VERIFY_WRITE for
             *    ceil(length / TARGET_PAGE_SIZE) bytes instead.
             *  - unlock_user(..., ret) copied back 0 bytes on success
             *    (mincore returns 0); copy back the whole vector.
             * (ENOMEM on a bad 'addr' range matches the kernel's mincore.)
             */
            void *vec;
            abi_ulong vec_len = DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE);
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            vec = lock_user(VERIFY_WRITE, arg3, vec_len, 0);
            if (!vec) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, vec));
                unlock_user(vec, arg3, vec_len);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error number directly, not via errno. */
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x kernel uses different advice constants; remap them. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /* 64-bit file-lock fcntl commands need struct flock64
         * translation; everything else goes through do_fcntl(). */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* ARM OABI lays out struct flock64 differently from EABI. */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* GETLK writes the conflicting lock back to the guest. */
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the emulated (target) page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* 64-bit offset arrives as a register pair; see pread64 above. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    /*
     * Extended-attribute family.  Common pattern: lock path/name strings
     * and the value buffer (READ for set*, WRITE for get*/list*), call the
     * host function, then unlock — copying back only output buffers.
     * A NULL buffer (arg2/arg3 == 0) is a size query and is passed through.
     */
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;   /* local 'p' intentionally shadows the outer one */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
        /* Per-target TLS-pointer handling. */
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        /* CRIS requires the TLS value to be 256-byte aligned. */
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif

#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        /* clock_settime(clk_id, *tp): convert the guest timespec then
         * set the host clock (per-process clocks pass straight through). */
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        /* 64-bit-time_t variant: only the guest-side struct layout
         * differs; the host call is the same. */
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        /* clock_gettime(clk_id, *tp): read host clock, copy out. The
         * copy-out's EFAULT (if any) becomes the syscall result. */
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        /* 64-bit-time_t variant of clock_gettime. */
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        /* clock_getres(clk_id, *res): report the clock's resolution.
         * The kernel permits a NULL res pointer, so only attempt the
         * copy-out when arg2 is non-zero — and, unlike before, report
         * EFAULT instead of silently ignoring a failed copy-out. */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        /* clock_nanosleep(clk, flags, *req, *rem): sleep on the given
         * clock. Previously the timespec conversions were unchecked, so
         * a bad guest pointer slept on garbage; now they raise EFAULT. */
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /* On interruption the remaining time is copied back via arg4. */
        if (arg4 && host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        return ret;
    }
#endif
11777
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* Register the clear-child-tid address; translated to a host
         * pointer so the host kernel clears it on thread exit. */
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        /* Signal a single thread; the signal number is remapped from
         * guest to host numbering. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        /* Thread-group-qualified tkill (the non-racy modern form). */
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
11807
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
    {
        /* utimensat(dirfd, path, times[2], flags). arg3 == 0 means
         * "set both timestamps to now". Previously the two timespec
         * conversions were unchecked; a faulting arg3 now yields
         * EFAULT instead of using uninitialized times. */
        struct timespec *tsp, ts[2];
        if (!arg3) {
            tsp = NULL;
        } else {
            if (target_to_host_timespec(ts, arg3)) {
                return -TARGET_EFAULT;
            }
            if (target_to_host_timespec(ts + 1,
                    arg3 + sizeof(struct target_timespec))) {
                return -TARGET_EFAULT;
            }
            tsp = ts;
        }
        if (!arg2) {
            /* NULL path: operate on the fd itself (dirfd semantics). */
            ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        } else {
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
            unlock_user(p, arg2, 0);
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* Full futex emulation lives in do_futex(). */
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        /* Same, for the 64-bit-time_t timeout variant. */
        return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        /* Create an inotify fd and register its event-struct translator
         * so reads get byte-swapped/laid out for the guest. */
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        /* Flags (O_NONBLOCK/O_CLOEXEC style) must be remapped between
         * guest and host encodings. */
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Add a watch on a path. Previously the lock_user_string()
         * result was passed to path() unchecked, so a bad guest
         * pointer dereferenced NULL instead of returning EFAULT. */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Both args are plain integers (fd, watch descriptor). */
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
11869
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
    {
        /* POSIX message queue open. arg4, if set, points to a guest
         * mq_attr used for creation. */
        struct mq_attr posix_mq_attr;
        struct mq_attr *pposix_mq_attr;
        int host_flags;

        host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
        pposix_mq_attr = NULL;
        if (arg4) {
            if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                return -TARGET_EFAULT;
            }
            pposix_mq_attr = &posix_mq_attr;
        }
        /* arg1 - 1: the guest's libc strips the leading '/' from the
         * queue name before issuing the syscall, while the host's
         * mq_open() wants it back — so back up one byte to re-include
         * it. NOTE(review): assumes the '/' is still present in guest
         * memory just before arg1; confirm against guest libc behavior. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
        unlock_user (p, arg1, 0);
    }
    return ret;

    case TARGET_NR_mq_unlink:
        /* Same leading-'/' recovery trick as mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
11902
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
    {
        /* mq_timedsend(mqdes, msg, len, prio, *abs_timeout). Fixes:
         * the message buffer lock and the timespec conversion were
         * unchecked, and the (unmodified) timeout was copied back to
         * the guest unconditionally. */
        struct timespec ts;

        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        if (arg5 != 0) {
            if (target_to_host_timespec(&ts, arg5)) {
                unlock_user(p, arg2, 0);
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
        } else {
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
        }
        unlock_user(p, arg2, arg3);
    }
    return ret;
#endif
11920
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
    {
        /* mq_timedreceive(mqdes, msg, len, *prio, *abs_timeout). Fixes:
         * the receive buffer is written by the host, so it must be
         * locked VERIFY_WRITE (was VERIFY_READ); the lock and the
         * timespec conversion are now checked for EFAULT. */
        struct timespec ts;
        unsigned int prio;

        p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        if (arg5 != 0) {
            if (target_to_host_timespec(&ts, arg5)) {
                unlock_user(p, arg2, 0);
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, &ts));
        } else {
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, NULL));
        }
        /* Copy the received message (arg3 bytes) back to the guest. */
        unlock_user(p, arg2, arg3);
        if (arg4 != 0) {
            put_user_u32(prio, arg4);
        }
    }
    return ret;
#endif
11943
11944 /* Not implemented for now... */
11945 /* case TARGET_NR_mq_notify: */
11946 /* break; */
11947
11948 case TARGET_NR_mq_getsetattr:
11949 {
11950 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11951 ret = 0;
11952 if (arg2 != 0) {
11953 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11954 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11955 &posix_mq_attr_out));
11956 } else if (arg3 != 0) {
11957 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11958 }
11959 if (ret == 0 && arg3 != 0) {
11960 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11961 }
11962 }
11963 return ret;
11964 #endif
11965
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
    {
        /* tee(fd_in, fd_out, len, flags): pipe-to-pipe duplication;
         * all arguments are plain integers. */
        ret = get_errno(tee(arg1,arg2,arg3,arg4));
    }
    return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
    {
        /* splice(fd_in, *off_in, fd_out, *off_out, len, flags): the two
         * optional 64-bit offsets are copied in, and copied back out
         * after the call since the kernel advances them. */
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
        if (arg2) {
            if (get_user_u64(loff_in, arg2)) {
                return -TARGET_EFAULT;
            }
            ploff_in = &loff_in;
        }
        if (arg4) {
            if (get_user_u64(loff_out, arg4)) {
                return -TARGET_EFAULT;
            }
            ploff_out = &loff_out;
        }
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        if (arg2) {
            if (put_user_u64(loff_in, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        if (arg4) {
            if (put_user_u64(loff_out, arg4)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            /* vmsplice(fd, iov, nr_segs, flags): the guest iovec array
             * is locked and translated as a unit. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() failed and left the cause in errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Legacy two-arg eventfd (no flags). Register the fd translator
         * so the 8-byte counter is byte-swapped for the guest. */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* eventfd2: translate only the two flag bits the guest may
         * legitimately pass; anything else goes through unchanged so
         * the kernel can reject it. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        /* On 32-bit ABIs the two 64-bit values (offset, len) arrive as
         * register pairs and must be reassembled. */
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts a pad register, shifting the 64-bit pairs
         * up by one argument slot. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* Sigset translation and fd registration happen in the helper. */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd == signalfd4 with flags = 0. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* The size argument is a historical hint; pass it through. */
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        /* Flags (EPOLL_CLOEXEC) need guest-to-host remapping. */
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        /* epoll_ctl(epfd, op, fd, *event): event is optional (NULL for
         * EPOLL_CTL_DEL on modern kernels). */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
12129
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation: wait in a host-side event array, then
         * translate the ready events back into the guest's buffer.
         * epoll_wait is implemented as epoll_pwait with no sigmask. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents before using it to size allocations. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Host-side scratch array; g_try_new so OOM maps to ENOMEM
         * rather than aborting. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            /* Optional temporary signal mask for the duration of the wait. */
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; copy just those back. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* AS/DATA/STACK limits are deliberately not forwarded to the
         * host: QEMU itself needs address space beyond what the guest
         * binary would, so honoring them could break the emulator. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* gethostname(buf, len): write the host name into guest memory. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Kernel-helper style compare-and-swap: if *(u32 *)arg6 == arg2,
         * store arg1 there; always return the old value. */
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Bad address: raise SIGSEGV on the guest and bail out.
             * Previously the code fell through here and read/returned
             * the uninitialized mem_value (undefined behavior). */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
12287
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Guest timer ids are indices into the fixed g_posix_timers
         * table; claim a free slot first. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an id tagged with TIMER_MAGIC so stray
                 * values can be detected by get_timer_id(). */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
12323
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        /* get_timer_id() validates the TIMER_MAGIC tag and returns a
         * negative target errno on a bogus id. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Copy back the previous setting only if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12351
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* A failed copy-out overrides the host result with EFAULT. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Release the slot so next_free_host_timer() can reuse it. */
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
12407
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* timerfd_create(clockid, flags): only the flags need remapping. */
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        /* timerfd_gettime(fd, *curr): read the current interval/value. */
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            return -TARGET_EFAULT;
        }
    }
    return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        /* timerfd_settime(fd, flags, *new, *old): new may legitimately
         * be NULL only per the conversion below; old is optional. */
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                return -TARGET_EFAULT;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            return -TARGET_EFAULT;
        }
    }
    return ret;
#endif
12450
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* which/who are plain integers; no translation needed. */
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* Join the namespace referred to by fd arg1. */
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        /* Compare kernel resources of two processes; all-integer args. */
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* memfd_create(name, flags): name is only used for /proc display. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* A memfd needs no read/write translation; drop any stale
         * translator registered for this fd number. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
12493
12494 default:
12495 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12496 return -TARGET_ENOSYS;
12497 }
12498 return ret;
12499 }
12500
12501 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12502 abi_long arg2, abi_long arg3, abi_long arg4,
12503 abi_long arg5, abi_long arg6, abi_long arg7,
12504 abi_long arg8)
12505 {
12506 CPUState *cpu = env_cpu(cpu_env);
12507 abi_long ret;
12508
12509 #ifdef DEBUG_ERESTARTSYS
12510 /* Debug-only code for exercising the syscall-restart code paths
12511 * in the per-architecture cpu main loops: restart every syscall
12512 * the guest makes once before letting it through.
12513 */
12514 {
12515 static bool flag;
12516 flag = !flag;
12517 if (flag) {
12518 return -TARGET_ERESTARTSYS;
12519 }
12520 }
12521 #endif
12522
12523 record_syscall_start(cpu, num, arg1,
12524 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12525
12526 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12527 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12528 }
12529
12530 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12531 arg5, arg6, arg7, arg8);
12532
12533 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12534 print_syscall_ret(num, ret, arg1, arg2, arg3, arg4, arg5, arg6);
12535 }
12536
12537 record_syscall_return(cpu, num, ret);
12538 return ret;
12539 }