/*
 * QEMU linux-user/syscall.c
 * (header lines from the git web viewer removed; see commit history at
 * the mirror for "linux-user/alpha: Fix sigsuspend for big-endian hosts")
 */
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143
144 #ifndef CLONE_IO
145 #define CLONE_IO 0x80000000 /* Clone io context */
146 #endif
147
148 /* We can't directly call the host clone syscall, because this will
149 * badly confuse libc (breaking mutexes, for example). So we must
150 * divide clone flags into:
151 * * flag combinations that look like pthread_create()
152 * * flag combinations that look like fork()
153 * * flags we can implement within QEMU itself
154 * * flags we can't support and will return an error for
155 */
156 /* For thread creation, all these flags must be present; for
157 * fork, none must be present.
158 */
159 #define CLONE_THREAD_FLAGS \
160 (CLONE_VM | CLONE_FS | CLONE_FILES | \
161 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162
163 /* These flags are ignored:
164 * CLONE_DETACHED is now ignored by the kernel;
165 * CLONE_IO is just an optimisation hint to the I/O scheduler
166 */
167 #define CLONE_IGNORED_FLAGS \
168 (CLONE_DETACHED | CLONE_IO)
169
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS \
172 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
173 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS \
177 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
178 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179
180 #define CLONE_INVALID_FORK_FLAGS \
181 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182
183 #define CLONE_INVALID_THREAD_FLAGS \
184 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
185 CLONE_IGNORED_FLAGS))
186
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188 * have almost all been allocated. We cannot support any of
189 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191 * The checks against the invalid thread masks above will catch these.
192 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193 */
194
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196 * once. This exercises the codepaths for restart.
197 */
198 //#define DEBUG_ERESTARTSYS
199
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213
/*
 * _syscallN(type, name, ...): generator macros that define a static
 * wrapper function "name" invoking the raw host syscall __NR_<name>
 * with N arguments via libc's syscall(2), bypassing any libc wrapper.
 * The wrappers report errors the syscall(2) way: return -1 with errno
 * set (converted to guest conventions later by get_errno()).
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
259
260
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_statx __NR_statx
276
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293
294 /* For the 64-bit guest on 32-bit host case we must emulate
295 * getdents using getdents64, because otherwise the host
296 * might hand us back more dirent records than we can fit
297 * into the guest buffer after structure format conversion.
298 * Otherwise we emulate getdents with getdents if the host has it.
299 */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
303
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
314 loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318 siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address,int *,tidptr)
325 #endif
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
328 const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
332 const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #endif
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
336 unsigned long *, user_mask_ptr);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
339 unsigned long *, user_mask_ptr);
340 /* sched_attr is not defined in glibc */
341 struct sched_attr {
342 uint32_t size;
343 uint32_t sched_policy;
344 uint64_t sched_flags;
345 int32_t sched_nice;
346 uint32_t sched_priority;
347 uint64_t sched_runtime;
348 uint64_t sched_deadline;
349 uint64_t sched_period;
350 uint32_t sched_util_min;
351 uint32_t sched_util_max;
352 };
353 #define __NR_sys_sched_getattr __NR_sched_getattr
354 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
355 unsigned int, size, unsigned int, flags);
356 #define __NR_sys_sched_setattr __NR_sched_setattr
357 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
358 unsigned int, flags);
359 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
360 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
361 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
362 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
363 const struct sched_param *, param);
364 #define __NR_sys_sched_getparam __NR_sched_getparam
365 _syscall2(int, sys_sched_getparam, pid_t, pid,
366 struct sched_param *, param);
367 #define __NR_sys_sched_setparam __NR_sched_setparam
368 _syscall2(int, sys_sched_setparam, pid_t, pid,
369 const struct sched_param *, param);
370 #define __NR_sys_getcpu __NR_getcpu
371 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
372 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
373 void *, arg);
374 _syscall2(int, capget, struct __user_cap_header_struct *, header,
375 struct __user_cap_data_struct *, data);
376 _syscall2(int, capset, struct __user_cap_header_struct *, header,
377 struct __user_cap_data_struct *, data);
378 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
379 _syscall2(int, ioprio_get, int, which, int, who)
380 #endif
381 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
382 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
383 #endif
384 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
385 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
386 #endif
387
388 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
389 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
390 unsigned long, idx1, unsigned long, idx2)
391 #endif
392
393 /*
394 * It is assumed that struct statx is architecture independent.
395 */
396 #if defined(TARGET_NR_statx) && defined(__NR_statx)
397 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
398 unsigned int, mask, struct target_statx *, statxbuf)
399 #endif
400 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
401 _syscall2(int, membarrier, int, cmd, int, flags)
402 #endif
403
404 static const bitmask_transtbl fcntl_flags_tbl[] = {
405 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
406 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
407 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
408 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
409 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
410 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
411 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
412 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
413 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
414 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
415 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
416 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
417 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
418 #if defined(O_DIRECT)
419 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
420 #endif
421 #if defined(O_NOATIME)
422 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
423 #endif
424 #if defined(O_CLOEXEC)
425 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
426 #endif
427 #if defined(O_PATH)
428 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
429 #endif
430 #if defined(O_TMPFILE)
431 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
432 #endif
433 /* Don't terminate the list prematurely on 64-bit host+guest. */
434 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
435 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
436 #endif
437 { 0, 0, 0, 0 }
438 };
439
440 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
441
442 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
443 #if defined(__NR_utimensat)
444 #define __NR_sys_utimensat __NR_utimensat
445 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
446 const struct timespec *,tsp,int,flags)
447 #else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    /*
     * Fallback stub used when the host has no utimensat(2): nothing
     * can be honoured without kernel support, so fail in the usual
     * syscall style with ENOSYS.
     */
    errno = ENOSYS;
    return -1;
}
454 #endif
455 #endif /* TARGET_NR_utimensat */
456
457 #ifdef TARGET_NR_renameat2
458 #if defined(__NR_renameat2)
459 #define __NR_sys_renameat2 __NR_renameat2
460 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
461 const char *, new, unsigned int, flags)
462 #else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    /*
     * Fallback when the host lacks renameat2(2).  With flags == 0 the
     * call is equivalent to plain renameat(2); any flag bits require
     * real kernel support, so those are reported as unimplemented.
     */
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
472 #endif
473 #endif /* TARGET_NR_renameat2 */
474
475 #ifdef CONFIG_INOTIFY
476 #include <sys/inotify.h>
477 #else
478 /* Userspace can usually survive runtime without inotify */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY */
484
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
488 #endif
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not be that used by the underlying syscall */
491 struct host_rlimit64 {
492 uint64_t rlim_cur;
493 uint64_t rlim_max;
494 };
495 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
496 const struct host_rlimit64 *, new_limit,
497 struct host_rlimit64 *, old_limit)
498 #endif
499
500
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/*
 * Find and reserve a free slot in g_posix_timers[].
 *
 * Returns the slot index, or -1 if all slots are in use.  A slot
 * holding 0 is free; claiming it stores the placeholder (timer_t)1
 * until the caller writes the real host timer id.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* NOTE(review): this check-then-set is not atomic (see the
             * FIXME above) — two threads could race to the same slot. */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
518
/*
 * Map a host errno value to the guest's numbering.  errnos.c.inc
 * expands E(X) into one "case X: return TARGET_X;" per errno the two
 * ABIs may number differently; any value not listed there is passed
 * through unchanged (assumed identical on host and target).
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X) case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
529
/*
 * Inverse of host_to_target_errno(): map a guest errno value back to
 * the host's numbering, using the same E(X) table from errnos.c.inc
 * with the case/return sides swapped.  Unlisted values pass through.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X) case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
540
541 static inline abi_long get_errno(abi_long ret)
542 {
543 if (ret == -1)
544 return -host_to_target_errno(errno);
545 else
546 return ret;
547 }
548
549 const char *target_strerror(int err)
550 {
551 if (err == QEMU_ERESTARTSYS) {
552 return "To be restarted";
553 }
554 if (err == QEMU_ESIGRETURN) {
555 return "Successful exit from sigreturn";
556 }
557
558 return strerror(target_to_host_errno(err));
559 }
560
561 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
562 {
563 int i;
564 uint8_t b;
565 if (usize <= ksize) {
566 return 1;
567 }
568 for (i = ksize; i < usize; i++) {
569 if (get_user_u8(b, addr + i)) {
570 return -TARGET_EFAULT;
571 }
572 if (b != 0) {
573 return 0;
574 }
575 }
576 return 1;
577 }
578
/*
 * safe_syscallN(type, name, ...): generator macros that define
 * safe_<name>, a static wrapper issuing the host syscall through
 * safe_syscall() rather than libc.  Used for potentially-blocking
 * syscalls; see user/safe-syscall.h (included above) for the
 * signal-handling rationale behind safe_syscall().
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
625
626 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
627 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
628 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
629 int, flags, mode_t, mode)
630 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
631 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
632 struct rusage *, rusage)
633 #endif
634 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
635 int, options, struct rusage *, rusage)
636 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
637 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
638 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
639 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
640 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
641 #endif
642 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
643 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
644 struct timespec *, tsp, const sigset_t *, sigmask,
645 size_t, sigsetsize)
646 #endif
647 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
648 int, maxevents, int, timeout, const sigset_t *, sigmask,
649 size_t, sigsetsize)
650 #if defined(__NR_futex)
651 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
652 const struct timespec *,timeout,int *,uaddr2,int,val3)
653 #endif
654 #if defined(__NR_futex_time64)
655 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
656 const struct timespec *,timeout,int *,uaddr2,int,val3)
657 #endif
658 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
659 safe_syscall2(int, kill, pid_t, pid, int, sig)
660 safe_syscall2(int, tkill, int, tid, int, sig)
661 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
662 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
663 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
664 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
665 unsigned long, pos_l, unsigned long, pos_h)
666 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
667 unsigned long, pos_l, unsigned long, pos_h)
668 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
669 socklen_t, addrlen)
670 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
671 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
672 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
673 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
674 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
675 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
676 safe_syscall2(int, flock, int, fd, int, operation)
677 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
678 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
679 const struct timespec *, uts, size_t, sigsetsize)
680 #endif
681 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
682 int, flags)
683 #if defined(TARGET_NR_nanosleep)
684 safe_syscall2(int, nanosleep, const struct timespec *, req,
685 struct timespec *, rem)
686 #endif
687 #if defined(TARGET_NR_clock_nanosleep) || \
688 defined(TARGET_NR_clock_nanosleep_time64)
689 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
690 const struct timespec *, req, struct timespec *, rem)
691 #endif
692 #ifdef __NR_ipc
693 #ifdef __s390x__
694 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
695 void *, ptr)
696 #else
697 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
698 void *, ptr, long, fifth)
699 #endif
700 #endif
701 #ifdef __NR_msgsnd
702 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
703 int, flags)
704 #endif
705 #ifdef __NR_msgrcv
706 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
707 long, msgtype, int, flags)
708 #endif
709 #ifdef __NR_semtimedop
710 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
711 unsigned, nsops, const struct timespec *, timeout)
712 #endif
713 #if defined(TARGET_NR_mq_timedsend) || \
714 defined(TARGET_NR_mq_timedsend_time64)
715 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
716 size_t, len, unsigned, prio, const struct timespec *, timeout)
717 #endif
718 #if defined(TARGET_NR_mq_timedreceive) || \
719 defined(TARGET_NR_mq_timedreceive_time64)
720 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
721 size_t, len, unsigned *, prio, const struct timespec *, timeout)
722 #endif
723 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
724 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
725 int, outfd, loff_t *, poutoff, size_t, length,
726 unsigned int, flags)
727 #endif
728
729 /* We do ioctl like this rather than via safe_syscall3 to preserve the
730 * "third argument might be integer or pointer or not present" behaviour of
731 * the libc function.
732 */
733 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
734 /* Similarly for fcntl. Note that callers must always:
735 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
736 * use the flock64 struct rather than unsuffixed flock
737 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
738 */
739 #ifdef __NR_fcntl64
740 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
741 #else
742 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
743 #endif
744
745 static inline int host_to_target_sock_type(int host_type)
746 {
747 int target_type;
748
749 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
750 case SOCK_DGRAM:
751 target_type = TARGET_SOCK_DGRAM;
752 break;
753 case SOCK_STREAM:
754 target_type = TARGET_SOCK_STREAM;
755 break;
756 default:
757 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
758 break;
759 }
760
761 #if defined(SOCK_CLOEXEC)
762 if (host_type & SOCK_CLOEXEC) {
763 target_type |= TARGET_SOCK_CLOEXEC;
764 }
765 #endif
766
767 #if defined(SOCK_NONBLOCK)
768 if (host_type & SOCK_NONBLOCK) {
769 target_type |= TARGET_SOCK_NONBLOCK;
770 }
771 #endif
772
773 return target_type;
774 }
775
776 static abi_ulong target_brk;
777 static abi_ulong target_original_brk;
778 static abi_ulong brk_page;
779
780 void target_set_brk(abi_ulong new_brk)
781 {
782 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
783 brk_page = HOST_PAGE_ALIGN(target_brk);
784 }
785
786 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
787 #define DEBUGF_BRK(message, args...)
788
/* do_brk() must return target values and target errnos.
 *
 * Emulate the guest brk(2): grow or shrink the heap to new_brk.
 * Returns the (possibly unchanged) current break on most targets; on
 * failure the previous break is returned, matching Linux brk()
 * semantics where the caller detects failure by comparing the result
 * against the requested value.
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Never shrink below the initial break; report the current value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
868
869 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
870 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
871 static inline abi_long copy_from_user_fdset(fd_set *fds,
872 abi_ulong target_fds_addr,
873 int n)
874 {
875 int i, nw, j, k;
876 abi_ulong b, *target_fds;
877
878 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
879 if (!(target_fds = lock_user(VERIFY_READ,
880 target_fds_addr,
881 sizeof(abi_ulong) * nw,
882 1)))
883 return -TARGET_EFAULT;
884
885 FD_ZERO(fds);
886 k = 0;
887 for (i = 0; i < nw; i++) {
888 /* grab the abi_ulong */
889 __get_user(b, &target_fds[i]);
890 for (j = 0; j < TARGET_ABI_BITS; j++) {
891 /* check the bit inside the abi_ulong */
892 if ((b >> j) & 1)
893 FD_SET(k, fds);
894 k++;
895 }
896 }
897
898 unlock_user(target_fds, target_fds_addr, 0);
899
900 return 0;
901 }
902
903 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
904 abi_ulong target_fds_addr,
905 int n)
906 {
907 if (target_fds_addr) {
908 if (copy_from_user_fdset(fds, target_fds_addr, n))
909 return -TARGET_EFAULT;
910 *fds_ptr = fds;
911 } else {
912 *fds_ptr = NULL;
913 }
914 return 0;
915 }
916
917 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
918 const fd_set *fds,
919 int n)
920 {
921 int i, nw, j, k;
922 abi_long v;
923 abi_ulong *target_fds;
924
925 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
926 if (!(target_fds = lock_user(VERIFY_WRITE,
927 target_fds_addr,
928 sizeof(abi_ulong) * nw,
929 0)))
930 return -TARGET_EFAULT;
931
932 k = 0;
933 for (i = 0; i < nw; i++) {
934 v = 0;
935 for (j = 0; j < TARGET_ABI_BITS; j++) {
936 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
937 k++;
938 }
939 __put_user(v, &target_fds[i]);
940 }
941
942 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
943
944 return 0;
945 }
946 #endif
947
/* Host clock tick rate (ticks per second) assumed for clock_t values:
 * Alpha hosts use 1024, everything else the traditional 100. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from the host's HZ to the target's.
 * When the two rates match this is the identity. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits so the intermediate multiply cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
962
/* Copy a host struct rusage out to guest memory at @target_addr,
 * byte-swapping each field for the target ABI.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
992
#ifdef TARGET_NR_setrlimit
/*
 * Convert a guest rlimit value to the host rlim_t.  The guest's
 * RLIM_INFINITY maps to the host's, as does any value that cannot be
 * represented in rlim_t without change.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped = tswapal(target_rlim);
    rlim_t host_rlim;

    if (swapped == TARGET_RLIM_INFINITY) {
        return RLIM_INFINITY;
    }

    host_rlim = swapped;
    if (swapped != (rlim_t)host_rlim) {
        /* Value does not survive the conversion: saturate. */
        return RLIM_INFINITY;
    }

    return host_rlim;
}
#endif
1010
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to the guest representation.  A host value
 * that is infinite, or too large for the (possibly narrower) guest
 * abi_long, is reported to the guest as TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong guest_rlim;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        guest_rlim = TARGET_RLIM_INFINITY;
    } else {
        guest_rlim = rlim;
    }

    return tswapal(guest_rlim);
}
#endif
1026
1027 static inline int target_to_host_resource(int code)
1028 {
1029 switch (code) {
1030 case TARGET_RLIMIT_AS:
1031 return RLIMIT_AS;
1032 case TARGET_RLIMIT_CORE:
1033 return RLIMIT_CORE;
1034 case TARGET_RLIMIT_CPU:
1035 return RLIMIT_CPU;
1036 case TARGET_RLIMIT_DATA:
1037 return RLIMIT_DATA;
1038 case TARGET_RLIMIT_FSIZE:
1039 return RLIMIT_FSIZE;
1040 case TARGET_RLIMIT_LOCKS:
1041 return RLIMIT_LOCKS;
1042 case TARGET_RLIMIT_MEMLOCK:
1043 return RLIMIT_MEMLOCK;
1044 case TARGET_RLIMIT_MSGQUEUE:
1045 return RLIMIT_MSGQUEUE;
1046 case TARGET_RLIMIT_NICE:
1047 return RLIMIT_NICE;
1048 case TARGET_RLIMIT_NOFILE:
1049 return RLIMIT_NOFILE;
1050 case TARGET_RLIMIT_NPROC:
1051 return RLIMIT_NPROC;
1052 case TARGET_RLIMIT_RSS:
1053 return RLIMIT_RSS;
1054 case TARGET_RLIMIT_RTPRIO:
1055 return RLIMIT_RTPRIO;
1056 case TARGET_RLIMIT_RTTIME:
1057 return RLIMIT_RTTIME;
1058 case TARGET_RLIMIT_SIGPENDING:
1059 return RLIMIT_SIGPENDING;
1060 case TARGET_RLIMIT_STACK:
1061 return RLIMIT_STACK;
1062 default:
1063 return code;
1064 }
1065 }
1066
1067 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1068 abi_ulong target_tv_addr)
1069 {
1070 struct target_timeval *target_tv;
1071
1072 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1073 return -TARGET_EFAULT;
1074 }
1075
1076 __get_user(tv->tv_sec, &target_tv->tv_sec);
1077 __get_user(tv->tv_usec, &target_tv->tv_usec);
1078
1079 unlock_user_struct(target_tv, target_tv_addr, 0);
1080
1081 return 0;
1082 }
1083
1084 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1085 const struct timeval *tv)
1086 {
1087 struct target_timeval *target_tv;
1088
1089 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1090 return -TARGET_EFAULT;
1091 }
1092
1093 __put_user(tv->tv_sec, &target_tv->tv_sec);
1094 __put_user(tv->tv_usec, &target_tv->tv_usec);
1095
1096 unlock_user_struct(target_tv, target_tv_addr, 1);
1097
1098 return 0;
1099 }
1100
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a 64-bit (kernel sock_timeval layout) timeval from guest memory
 * at @target_tv_addr into the host structure @tv.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *ttv;

    if (!lock_user_struct(VERIFY_READ, ttv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tv->tv_sec, &ttv->tv_sec);
    __get_user(tv->tv_usec, &ttv->tv_usec);
    unlock_user_struct(ttv, target_tv_addr, 0);
    return 0;
}
#endif
1119
1120 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1121 const struct timeval *tv)
1122 {
1123 struct target__kernel_sock_timeval *target_tv;
1124
1125 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1126 return -TARGET_EFAULT;
1127 }
1128
1129 __put_user(tv->tv_sec, &target_tv->tv_sec);
1130 __put_user(tv->tv_usec, &target_tv->tv_usec);
1131
1132 unlock_user_struct(target_tv, target_tv_addr, 1);
1133
1134 return 0;
1135 }
1136
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read an ABI-sized struct timespec from guest memory at @target_addr
 * into the host @host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 *
 * The guard above previously tested TARGET_NR_pselect6 twice; it must
 * also cover TARGET_NR_pselect6_time64, since do_pselect6() references
 * this helper in its non-time64 path regardless of which of the two
 * syscall numbers a target defines (see the fdset helper guard above).
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1160
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a 64-bit (kernel_timespec layout) timespec from guest memory
 * at @target_addr into the host @host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *tts;

    if (!lock_user_struct(VERIFY_READ, tts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &tts->tv_sec);
    __get_user(host_ts->tv_nsec, &tts->tv_nsec);
    /* For 32-bit guests the upper half of tv_nsec is padding: keep only
     * the (sign-extended) low abi_long. */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(tts, target_addr, 0);
    return 0;
}
#endif
1188
1189 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1190 struct timespec *host_ts)
1191 {
1192 struct target_timespec *target_ts;
1193
1194 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1195 return -TARGET_EFAULT;
1196 }
1197 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1198 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1199 unlock_user_struct(target_ts, target_addr, 1);
1200 return 0;
1201 }
1202
1203 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1204 struct timespec *host_ts)
1205 {
1206 struct target__kernel_timespec *target_ts;
1207
1208 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1209 return -TARGET_EFAULT;
1210 }
1211 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1212 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1213 unlock_user_struct(target_ts, target_addr, 1);
1214 return 0;
1215 }
1216
#if defined(TARGET_NR_gettimeofday)
/*
 * Copy a host struct timezone out to guest memory at @target_tz_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    /* Write-only access: both fields are overwritten below, so there is
     * no need to copy the guest's current contents in (copy flag 0, for
     * consistency with copy_to_user_timeval). */
    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
1235
#if defined(TARGET_NR_settimeofday)
/*
 * Read a struct timezone from guest memory at @target_tz_addr into the
 * host structure @tz.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *ttz;

    if (!lock_user_struct(VERIFY_READ, ttz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tz->tz_minuteswest, &ttz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &ttz->tz_dsttime);
    unlock_user_struct(ttz, target_tz_addr, 0);
    return 0;
}
#endif
1254
1255 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1256 #include <mqueue.h>
1257
1258 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1259 abi_ulong target_mq_attr_addr)
1260 {
1261 struct target_mq_attr *target_mq_attr;
1262
1263 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1264 target_mq_attr_addr, 1))
1265 return -TARGET_EFAULT;
1266
1267 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1268 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1269 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1270 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1271
1272 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1273
1274 return 0;
1275 }
1276
1277 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1278 const struct mq_attr *attr)
1279 {
1280 struct target_mq_attr *target_mq_attr;
1281
1282 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1283 target_mq_attr_addr, 0))
1284 return -TARGET_EFAULT;
1285
1286 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1287 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1288 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1289 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1290
1291 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1292
1293 return 0;
1294 }
1295 #endif
1296
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Emulate select(2): pull the three guest fd sets and the optional
 * timeval in, run the host pselect6, then write the (modified) sets
 * and remaining timeout back out to the guest. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL host set pointer. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* The host call takes a timespec; convert the guest's timeval. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Write the result sets back only where the guest passed one. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux updates the timeout with the time not slept; pass that
         * back to the guest as a timeval. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1354
1355 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1356 static abi_long do_old_select(abi_ulong arg1)
1357 {
1358 struct target_sel_arg_struct *sel;
1359 abi_ulong inp, outp, exp, tvp;
1360 long nsel;
1361
1362 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1363 return -TARGET_EFAULT;
1364 }
1365
1366 nsel = tswapal(sel->n);
1367 inp = tswapal(sel->inp);
1368 outp = tswapal(sel->outp);
1369 exp = tswapal(sel->exp);
1370 tvp = tswapal(sel->tvp);
1371
1372 unlock_user_struct(sel, arg1, 0);
1373
1374 return do_select(nsel, inp, outp, exp, tvp);
1375 }
1376 #endif
1377 #endif
1378
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/* Emulate pselect6(2)/pselect6_time64(2): convert the guest fd sets,
 * timespec (32- or 64-bit layout per @time64) and packed sigset
 * argument, run the host pselect6, and copy the results back.
 * Returns target values and target errnos. */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* A zero guest address yields a NULL host set pointer. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        /* arg6 points at { sigset pointer, sigset size } in guest memory. */
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        /* Copy back the result sets and the remaining timeout, each only
         * where the guest supplied a buffer. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
1500
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/* Emulate poll(2) (@ppoll == false, arg3 is a timeout in ms) and
 * ppoll(2)/ppoll_time64(2) (@ppoll == true, arg3 is a guest timespec
 * whose layout depends on @time64, arg4/arg5 an optional sigset).
 * Returns target values and target errnos. */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* Reject sizes that would overflow the lock_user length. */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        /* Build the host pollfd array from the guest one. */
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        /* A zero arg3 means wait forever (NULL host timespec). */
        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Like the kernel, enforce the exact sigset size. */
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        /* Pass the remaining timeout back to the guest.
         * NOTE(review): these early returns skip the unlock_user calls
         * on target_pfd/target_set below — harmless when unlock_user is
         * a no-op, but worth confirming for DEBUG_REMAP builds. */
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        /* Copy the revents results back into the guest array. */
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
1609
/* Create a pipe with @flags via pipe2(2).  Returns the raw host result
 * (0, or -1 with errno set), or -ENOSYS when the host lacks pipe2(). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1618
/* Emulate pipe(2)/pipe2(2).  @pipedes is the guest address of the
 * two-int result array; @flags selects pipe2 behaviour; @is_pipe2
 * distinguishes the two entry points because several targets return
 * the fds in registers for plain pipe() but not for pipe2(). */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd goes in a register; first is the return value. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default path: store both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1652
1653 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1654 abi_ulong target_addr,
1655 socklen_t len)
1656 {
1657 struct target_ip_mreqn *target_smreqn;
1658
1659 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1660 if (!target_smreqn)
1661 return -TARGET_EFAULT;
1662 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1663 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1664 if (len == sizeof(struct target_ip_mreqn))
1665 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1666 unlock_user(target_smreqn, target_addr, 0);
1667
1668 return 0;
1669 }
1670
/* Convert a guest sockaddr at @target_addr (length @len) into the host
 * structure @addr, fixing byte order of the family and of any
 * family-specific fields.  An fd-specific translator registered for
 * @fd takes precedence over the generic conversion.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Extend len by one if the final byte is non-NUL but the
             * byte just past it is NUL, so the path stays terminated. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    /* Raw copy first, then overwrite the fields that need swapping. */
    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1727
/* Copy a host sockaddr @addr (length @len) out to guest memory at
 * @target_addr, swapping the family and any family-specific fields the
 * given length actually covers.  A zero @len is a successful no-op.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only touch sa_family if the buffer is long enough to hold it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): unlike the other branches this one is not
         * length-gated, and casts to the host sockaddr_ll layout —
         * presumably the host and target layouts match here; verify. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1767
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * @target_msgh into the host chain of @msgh, walking both chains in
 * lockstep.  SCM_RIGHTS fds and SCM_CREDENTIALS are converted
 * field-by-field; any other payload is copied verbatim with a log
 * warning.  On return msgh->msg_controllen holds the space used.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* Less than one full header means no ancillary data at all. */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Guest payload length, excluding the header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* Translate the level; only SOL_SOCKET differs numerically. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each 32-bit fd. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Process credentials: convert pid/uid/gid individually. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: warn and copy the raw bytes through. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1853
1854 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1855 struct msghdr *msgh)
1856 {
1857 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1858 abi_long msg_controllen;
1859 abi_ulong target_cmsg_addr;
1860 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1861 socklen_t space = 0;
1862
1863 msg_controllen = tswapal(target_msgh->msg_controllen);
1864 if (msg_controllen < sizeof (struct target_cmsghdr))
1865 goto the_end;
1866 target_cmsg_addr = tswapal(target_msgh->msg_control);
1867 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1868 target_cmsg_start = target_cmsg;
1869 if (!target_cmsg)
1870 return -TARGET_EFAULT;
1871
1872 while (cmsg && target_cmsg) {
1873 void *data = CMSG_DATA(cmsg);
1874 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1875
1876 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1877 int tgt_len, tgt_space;
1878
1879 /* We never copy a half-header but may copy half-data;
1880 * this is Linux's behaviour in put_cmsg(). Note that
1881 * truncation here is a guest problem (which we report
1882 * to the guest via the CTRUNC bit), unlike truncation
1883 * in target_to_host_cmsg, which is a QEMU bug.
1884 */
1885 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1886 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1887 break;
1888 }
1889
1890 if (cmsg->cmsg_level == SOL_SOCKET) {
1891 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1892 } else {
1893 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1894 }
1895 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1896
1897 /* Payload types which need a different size of payload on
1898 * the target must adjust tgt_len here.
1899 */
1900 tgt_len = len;
1901 switch (cmsg->cmsg_level) {
1902 case SOL_SOCKET:
1903 switch (cmsg->cmsg_type) {
1904 case SO_TIMESTAMP:
1905 tgt_len = sizeof(struct target_timeval);
1906 break;
1907 default:
1908 break;
1909 }
1910 break;
1911 default:
1912 break;
1913 }
1914
1915 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1916 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1917 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1918 }
1919
1920 /* We must now copy-and-convert len bytes of payload
1921 * into tgt_len bytes of destination space. Bear in mind
1922 * that in both source and destination we may be dealing
1923 * with a truncated value!
1924 */
1925 switch (cmsg->cmsg_level) {
1926 case SOL_SOCKET:
1927 switch (cmsg->cmsg_type) {
1928 case SCM_RIGHTS:
1929 {
1930 int *fd = (int *)data;
1931 int *target_fd = (int *)target_data;
1932 int i, numfds = tgt_len / sizeof(int);
1933
1934 for (i = 0; i < numfds; i++) {
1935 __put_user(fd[i], target_fd + i);
1936 }
1937 break;
1938 }
1939 case SO_TIMESTAMP:
1940 {
1941 struct timeval *tv = (struct timeval *)data;
1942 struct target_timeval *target_tv =
1943 (struct target_timeval *)target_data;
1944
1945 if (len != sizeof(struct timeval) ||
1946 tgt_len != sizeof(struct target_timeval)) {
1947 goto unimplemented;
1948 }
1949
1950 /* copy struct timeval to target */
1951 __put_user(tv->tv_sec, &target_tv->tv_sec);
1952 __put_user(tv->tv_usec, &target_tv->tv_usec);
1953 break;
1954 }
1955 case SCM_CREDENTIALS:
1956 {
1957 struct ucred *cred = (struct ucred *)data;
1958 struct target_ucred *target_cred =
1959 (struct target_ucred *)target_data;
1960
1961 __put_user(cred->pid, &target_cred->pid);
1962 __put_user(cred->uid, &target_cred->uid);
1963 __put_user(cred->gid, &target_cred->gid);
1964 break;
1965 }
1966 default:
1967 goto unimplemented;
1968 }
1969 break;
1970
1971 case SOL_IP:
1972 switch (cmsg->cmsg_type) {
1973 case IP_TTL:
1974 {
1975 uint32_t *v = (uint32_t *)data;
1976 uint32_t *t_int = (uint32_t *)target_data;
1977
1978 if (len != sizeof(uint32_t) ||
1979 tgt_len != sizeof(uint32_t)) {
1980 goto unimplemented;
1981 }
1982 __put_user(*v, t_int);
1983 break;
1984 }
1985 case IP_RECVERR:
1986 {
1987 struct errhdr_t {
1988 struct sock_extended_err ee;
1989 struct sockaddr_in offender;
1990 };
1991 struct errhdr_t *errh = (struct errhdr_t *)data;
1992 struct errhdr_t *target_errh =
1993 (struct errhdr_t *)target_data;
1994
1995 if (len != sizeof(struct errhdr_t) ||
1996 tgt_len != sizeof(struct errhdr_t)) {
1997 goto unimplemented;
1998 }
1999 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2000 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2001 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2002 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2003 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2004 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2005 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2006 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2007 (void *) &errh->offender, sizeof(errh->offender));
2008 break;
2009 }
2010 default:
2011 goto unimplemented;
2012 }
2013 break;
2014
2015 case SOL_IPV6:
2016 switch (cmsg->cmsg_type) {
2017 case IPV6_HOPLIMIT:
2018 {
2019 uint32_t *v = (uint32_t *)data;
2020 uint32_t *t_int = (uint32_t *)target_data;
2021
2022 if (len != sizeof(uint32_t) ||
2023 tgt_len != sizeof(uint32_t)) {
2024 goto unimplemented;
2025 }
2026 __put_user(*v, t_int);
2027 break;
2028 }
2029 case IPV6_RECVERR:
2030 {
2031 struct errhdr6_t {
2032 struct sock_extended_err ee;
2033 struct sockaddr_in6 offender;
2034 };
2035 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2036 struct errhdr6_t *target_errh =
2037 (struct errhdr6_t *)target_data;
2038
2039 if (len != sizeof(struct errhdr6_t) ||
2040 tgt_len != sizeof(struct errhdr6_t)) {
2041 goto unimplemented;
2042 }
2043 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2044 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2045 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2046 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2047 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2048 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2049 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2050 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2051 (void *) &errh->offender, sizeof(errh->offender));
2052 break;
2053 }
2054 default:
2055 goto unimplemented;
2056 }
2057 break;
2058
2059 default:
2060 unimplemented:
2061 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2062 cmsg->cmsg_level, cmsg->cmsg_type);
2063 memcpy(target_data, data, MIN(len, tgt_len));
2064 if (tgt_len > len) {
2065 memset(target_data + len, 0, tgt_len - len);
2066 }
2067 }
2068
2069 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2070 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2071 if (msg_controllen < tgt_space) {
2072 tgt_space = msg_controllen;
2073 }
2074 msg_controllen -= tgt_space;
2075 space += tgt_space;
2076 cmsg = CMSG_NXTHDR(msgh, cmsg);
2077 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2078 target_cmsg_start);
2079 }
2080 unlock_user(target_cmsg, target_cmsg_addr, space);
2081 the_end:
2082 target_msgh->msg_controllen = tswapal(space);
2083 return 0;
2084 }
2085
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulates setsockopt(2) for the guest: reads the option value from the
 * guest address optval_addr (optlen bytes), converts it to the host
 * representation where the layouts differ (timevals, mreq structures,
 * BPF filter programs, byte order of 32-bit fields), then calls the host
 * setsockopt().  Levels/optnames that need no numeric translation are
 * passed through; TARGET_SOL_SOCKET/TARGET_SO_* are remapped to the
 * host SOL_SOCKET/SO_* values.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* Integer-valued options; accept either a 32-bit value or a
             * single byte from the guest.  val stays 0 when optlen == 0. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Guest may pass either ip_mreq or the larger ip_mreqn;
             * target_to_host_ip_mreq() converts whichever was given. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            /* ip_mreq_source contains only in_addr fields, which stay in
             * network byte order, so the guest bytes are passed through. */
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        /* All of these take a 32-bit integer value. */
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index needs byte swapping; the address
             * itself is kept in network byte order. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* As above: swap the interface index only. */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            /* A short filter is allowed; a long one is truncated. */
            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is an array of 8 32-bit words; byte-swap each. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            /* Key bytes are opaque; no conversion, just a bounce buffer.
             * NOTE(review): g_malloc(0) returns NULL, so a zero-length key
             * (valid per af_alg) would be rejected here as ENOMEM — verify
             * whether optlen == 0 should be special-cased. */
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* Value is carried in optlen itself; optval is unused. */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

            /* Shared tail for SO_RCVTIMEO/SO_SNDTIMEO: convert the guest
             * target_timeval into a host timeval. */
set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            /* Convert each BPF instruction field-by-field; jt/jf are
             * single bytes and need no swapping. */
            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            /* Interface name is copied into a fixed IFNAMSIZ buffer and
             * forcibly NUL-terminated. */
            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                            &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument: remap the TARGET_SO_* constant to
         * the host SO_* value, then fall out to the shared int handler
         * after the switch. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        /* Shared handler for all the plain-int SOL_SOCKET options above. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
            break;
        default:
            goto unimplemented;
        }
        /* All supported netlink options take a 32-bit integer. */
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2548
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2) for the guest: calls the host getsockopt() into
 * a local buffer, converts the result to the target representation
 * (timevals, ucred, linger, byte order of 32-bit values), and writes it
 * back to guest memory at optval_addr, updating the value-result length
 * word at guest address optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

            /* Shared tail for SO_RCVTIMEO/SO_SNDTIMEO: fetch a host
             * timeval and copy it out as a target_timeval. */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* Field-by-field copy so each member gets byte-swapped. */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            /* The security label is an opaque string; write the host
             * bytes straight into guest memory. */
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument: remap TARGET_SO_* to the host
         * SO_* value and jump to the shared int handler below. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): lv is seeded with sizeof(socklen_t), relying on
         * sizeof(socklen_t) == sizeof(int) on the host — confirm. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* The guest may ask for fewer than 4 bytes; write a single byte
         * in that case, else a full 32-bit value. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small values fit in a byte when the guest asked for less
             * than an int; otherwise clamp len to sizeof(int). */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Same byte-vs-int write-back logic as the SOL_IP cases. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            /* NOTE(review): this early return leaves 'results' locked —
             * confirm whether unlock_user is needed on this path. */
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2966
2967 /* Convert target low/high pair representing file offset into the host
2968 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2969 * as the kernel doesn't handle them either.
2970 */
2971 static void target_to_host_low_high(abi_ulong tlow,
2972 abi_ulong thigh,
2973 unsigned long *hlow,
2974 unsigned long *hhigh)
2975 {
2976 uint64_t off = tlow |
2977 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2978 TARGET_LONG_BITS / 2;
2979
2980 *hlow = off;
2981 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2982 }
2983
/* Build a host struct iovec array from a guest iovec array at
 * target_addr, locking each referenced guest buffer into host memory.
 *
 * type  - VERIFY_READ/VERIFY_WRITE access direction for the buffers
 * count - number of iovec entries
 * copy  - passed through to lock_user (copy guest data on lock)
 *
 * Returns the host vector (freed by unlock_iovec) or NULL with errno
 * set.  count == 0 returns NULL with errno == 0, which callers must
 * distinguish from failure.  After the first entry, an unreadable
 * buffer is tolerated: it and all later entries become zero-length so
 * the caller performs a partial transfer instead of faulting.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    /* All remaining entries are forced to length 0. */
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Cap the running total at max_len, truncating this entry. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unlock every buffer locked so far (entry i itself was not locked,
     * or failed to lock, when we arrive here). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3071
3072 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3073 abi_ulong count, int copy)
3074 {
3075 struct target_iovec *target_vec;
3076 int i;
3077
3078 target_vec = lock_user(VERIFY_READ, target_addr,
3079 count * sizeof(struct target_iovec), 1);
3080 if (target_vec) {
3081 for (i = 0; i < count; i++) {
3082 abi_ulong base = tswapal(target_vec[i].iov_base);
3083 abi_long len = tswapal(target_vec[i].iov_len);
3084 if (len < 0) {
3085 break;
3086 }
3087 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3088 }
3089 unlock_user(target_vec, target_addr, 0);
3090 }
3091
3092 g_free(vec);
3093 }
3094
3095 static inline int target_to_host_sock_type(int *type)
3096 {
3097 int host_type = 0;
3098 int target_type = *type;
3099
3100 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3101 case TARGET_SOCK_DGRAM:
3102 host_type = SOCK_DGRAM;
3103 break;
3104 case TARGET_SOCK_STREAM:
3105 host_type = SOCK_STREAM;
3106 break;
3107 default:
3108 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3109 break;
3110 }
3111 if (target_type & TARGET_SOCK_CLOEXEC) {
3112 #if defined(SOCK_CLOEXEC)
3113 host_type |= SOCK_CLOEXEC;
3114 #else
3115 return -TARGET_EINVAL;
3116 #endif
3117 }
3118 if (target_type & TARGET_SOCK_NONBLOCK) {
3119 #if defined(SOCK_NONBLOCK)
3120 host_type |= SOCK_NONBLOCK;
3121 #elif !defined(O_NONBLOCK)
3122 return -TARGET_EINVAL;
3123 #endif
3124 }
3125 *type = host_type;
3126 return 0;
3127 }
3128
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl() when the
 * guest asked for TARGET_SOCK_NONBLOCK.  On failure the fd is closed and
 * -TARGET_EINVAL returned; otherwise returns fd unchanged.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Fix: also bail out if F_GETFL itself failed (flags == -1);
         * previously -1 was OR-ed into the F_SETFL argument. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3143
/* do_socket() Must return target values and target errnos.
 *
 * Emulates socket(2): converts the guest socket type/flags to the host
 * encoding, restricts PF_NETLINK to the protocols this emulator can
 * translate, creates the host socket, and registers a per-fd data
 * translator (fd_trans) where messages on the socket need conversion.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    /* Keep the original guest type: sock_flags_fixup() needs the
     * untranslated TARGET_SOCK_* flag bits. */
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with a registered translator (or plain
     * string messages) are supported. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* Packet sockets take the protocol (an ethertype) in network byte
     * order; swap it from the guest's 16-bit representation. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the protocol check above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3197
3198 /* do_bind() Must return target values and target errnos. */
3199 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3200 socklen_t addrlen)
3201 {
3202 void *addr;
3203 abi_long ret;
3204
3205 if ((int)addrlen < 0) {
3206 return -TARGET_EINVAL;
3207 }
3208
3209 addr = alloca(addrlen+1);
3210
3211 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3212 if (ret)
3213 return ret;
3214
3215 return get_errno(bind(sockfd, addr, addrlen));
3216 }
3217
3218 /* do_connect() Must return target values and target errnos. */
3219 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3220 socklen_t addrlen)
3221 {
3222 void *addr;
3223 abi_long ret;
3224
3225 if ((int)addrlen < 0) {
3226 return -TARGET_EINVAL;
3227 }
3228
3229 addr = alloca(addrlen+1);
3230
3231 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3232 if (ret)
3233 return ret;
3234
3235 return get_errno(safe_connect(sockfd, addr, addrlen));
3236 }
3237
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Shared backend for sendmsg/recvmsg: converts the (already locked)
 * guest msghdr to a host one, performs the host syscall, and for
 * receives copies results (name, flags, cmsgs) back to the guest.
 * 'send' selects direction: non-zero for sendmsg, zero for recvmsg.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Control messages may grow when converted to host layout (e.g.
     * 32-bit guest on 64-bit host), so reserve twice the guest size. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate a private copy of the first iovec's data so
             * the guest buffer is left untouched.
             * NOTE(review): only iov[0] is translated here — presumably
             * translated fds are used with single-element iovecs. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 marks the deliberately-bad name installed
                 * above; nothing valid to copy back in that case. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success report the number of bytes transferred. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3344
3345 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3346 int flags, int send)
3347 {
3348 abi_long ret;
3349 struct target_msghdr *msgp;
3350
3351 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3352 msgp,
3353 target_msg,
3354 send ? 1 : 0)) {
3355 return -TARGET_EFAULT;
3356 }
3357 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3358 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3359 return ret;
3360 }
3361
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over
 * the guest's mmsghdr vector.  Returns the number of datagrams
 * processed if any succeeded, otherwise the first (target) error. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* The kernel clamps vlen to UIO_MAXIOV in the same silent way. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Record per-message byte count for the guest. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the first i entries were touched; write back just those. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3408
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* Map the guest's accept4 flag bits to host values using the
     * fcntl flag table (the flags share O_NONBLOCK/O_CLOEXEC style
     * encodings — see fcntl_flags_tbl). */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL peer-address pointer: the guest doesn't want the address. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy out at most what the guest buffer can hold, but report
         * the kernel's full length, matching Linux semantics. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
3448
3449 /* do_getpeername() Must return target values and target errnos. */
3450 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3451 abi_ulong target_addrlen_addr)
3452 {
3453 socklen_t addrlen, ret_addrlen;
3454 void *addr;
3455 abi_long ret;
3456
3457 if (get_user_u32(addrlen, target_addrlen_addr))
3458 return -TARGET_EFAULT;
3459
3460 if ((int)addrlen < 0) {
3461 return -TARGET_EINVAL;
3462 }
3463
3464 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3465 return -TARGET_EFAULT;
3466 }
3467
3468 addr = alloca(addrlen);
3469
3470 ret_addrlen = addrlen;
3471 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3472 if (!is_error(ret)) {
3473 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3474 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3475 ret = -TARGET_EFAULT;
3476 }
3477 }
3478 return ret;
3479 }
3480
3481 /* do_getsockname() Must return target values and target errnos. */
3482 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3483 abi_ulong target_addrlen_addr)
3484 {
3485 socklen_t addrlen, ret_addrlen;
3486 void *addr;
3487 abi_long ret;
3488
3489 if (get_user_u32(addrlen, target_addrlen_addr))
3490 return -TARGET_EFAULT;
3491
3492 if ((int)addrlen < 0) {
3493 return -TARGET_EINVAL;
3494 }
3495
3496 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3497 return -TARGET_EFAULT;
3498 }
3499
3500 addr = alloca(addrlen);
3501
3502 ret_addrlen = addrlen;
3503 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3504 if (!is_error(ret)) {
3505 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3506 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3507 ret = -TARGET_EFAULT;
3508 }
3509 }
3510 return ret;
3511 }
3512
3513 /* do_socketpair() Must return target values and target errnos. */
3514 static abi_long do_socketpair(int domain, int type, int protocol,
3515 abi_ulong target_tab_addr)
3516 {
3517 int tab[2];
3518 abi_long ret;
3519
3520 target_to_host_sock_type(&type);
3521
3522 ret = get_errno(socketpair(domain, type, protocol, tab));
3523 if (!is_error(ret)) {
3524 if (put_user_s32(tab[0], target_tab_addr)
3525 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3526 ret = -TARGET_EFAULT;
3527 }
3528 return ret;
3529 }
3530
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* original lock_user pointer while a
                             * translated copy is in host_msg */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* This fd has a data translator: translate a private copy so
         * the guest's buffer is left untouched. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 spare byte — presumably for target_to_host_sockaddr()'s
         * sockaddr fixups (e.g. AF_UNIX paths); matches do_bind(). */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* If a translated copy was substituted, free it and restore the
     * original pointer before unlocking the guest buffer. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3574
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        /* NULL data buffer is allowed; the kernel will fault or accept
         * it as appropriate. */
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Translate at most the bytes actually received. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy out at most the guest's buffer size, but report the
             * kernel's full length, as Linux does. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
        /* NOTE: the fail label lives in the error branch so success
         * falls through past it; error paths unlock without copying
         * any data back (length 0). */
    fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3635
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * socketcall(2) is the old multiplexed socket entry point used by some
 * ABIs: 'num' selects the operation and 'vptr' points at an array of
 * its arguments in guest memory.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3728
#define N_SHM_REGIONS 32

/* Fixed-size table tracking SysV shared-memory attachments in the
 * guest address space (presumably consulted by shmat/shmdt handling
 * elsewhere in this file — not visible in this chunk). */
static struct shm_region {
    abi_ulong start;   /* guest start address of the region */
    abi_ulong size;    /* region length in bytes */
    bool in_use;       /* slot describes a live attachment */
} shm_regions[N_SHM_REGIONS];
3736
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/* Guest-ABI layout of semid64_ds; field order and padding are ABI,
 * do not reorder. Targets with a non-generic layout define
 * TARGET_SEMID64_DS and supply their own. */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;   /* 32-bit ABI pads each time field to 64 bits */
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
3755
/* Convert the guest ipc_perm embedded at the start of a guest
 * semid64_ds (at target_addr) into host byte order.
 * Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit on the rest */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3783
/* Mirror of target_to_host_ipc_perm(): write a host ipc_perm into the
 * guest semid64_ds at target_addr in guest byte order.
 * Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit on the rest */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3811
3812 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3813 abi_ulong target_addr)
3814 {
3815 struct target_semid64_ds *target_sd;
3816
3817 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3818 return -TARGET_EFAULT;
3819 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3820 return -TARGET_EFAULT;
3821 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3822 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3823 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3824 unlock_user_struct(target_sd, target_addr, 0);
3825 return 0;
3826 }
3827
3828 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3829 struct semid_ds *host_sd)
3830 {
3831 struct target_semid64_ds *target_sd;
3832
3833 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3834 return -TARGET_EFAULT;
3835 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3836 return -TARGET_EFAULT;
3837 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3838 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3839 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3840 unlock_user_struct(target_sd, target_addr, 1);
3841 return 0;
3842 }
3843
/* Guest-ABI layout of struct seminfo (IPC_INFO/SEM_INFO results);
 * matches the host struct field-for-field, all plain ints. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3856
/* Copy a host seminfo result out to the guest, byteswapping each
 * field.  Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
3876
/* Host-side semctl() argument union, as described in semctl(2);
 * glibc requires callers to define it themselves. */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest view of the same union: pointer members are guest addresses
 * (abi_ulong), so the layout can differ from the host union. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3890
3891 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3892 abi_ulong target_addr)
3893 {
3894 int nsems;
3895 unsigned short *array;
3896 union semun semun;
3897 struct semid_ds semid_ds;
3898 int i, ret;
3899
3900 semun.buf = &semid_ds;
3901
3902 ret = semctl(semid, 0, IPC_STAT, semun);
3903 if (ret == -1)
3904 return get_errno(ret);
3905
3906 nsems = semid_ds.sem_nsems;
3907
3908 *host_array = g_try_new(unsigned short, nsems);
3909 if (!*host_array) {
3910 return -TARGET_ENOMEM;
3911 }
3912 array = lock_user(VERIFY_READ, target_addr,
3913 nsems*sizeof(unsigned short), 1);
3914 if (!array) {
3915 g_free(*host_array);
3916 return -TARGET_EFAULT;
3917 }
3918
3919 for(i=0; i<nsems; i++) {
3920 __get_user((*host_array)[i], &array[i]);
3921 }
3922 unlock_user(array, target_addr, 0);
3923
3924 return 0;
3925 }
3926
3927 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3928 unsigned short **host_array)
3929 {
3930 int nsems;
3931 unsigned short *array;
3932 union semun semun;
3933 struct semid_ds semid_ds;
3934 int i, ret;
3935
3936 semun.buf = &semid_ds;
3937
3938 ret = semctl(semid, 0, IPC_STAT, semun);
3939 if (ret == -1)
3940 return get_errno(ret);
3941
3942 nsems = semid_ds.sem_nsems;
3943
3944 array = lock_user(VERIFY_WRITE, target_addr,
3945 nsems*sizeof(unsigned short), 0);
3946 if (!array)
3947 return -TARGET_EFAULT;
3948
3949 for(i=0; i<nsems; i++) {
3950 __put_user((*host_array)[i], &array[i]);
3951 }
3952 g_free(*host_array);
3953 unlock_user(array, target_addr, 1);
3954
3955 return 0;
3956 }
3957
/* Emulate semctl(2): convert the guest's semun argument for the given
 * command, run the host semctl, and copy results back.
 * Returns the host result or a negative target errno. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip version bits (e.g. IPC_64) from the command word. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Array ownership: allocated here, freed by
         * host_to_target_semarray(). */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* Copy the (possibly updated) semid_ds back to the guest. */
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands take no semun argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
4027
/* Guest-ABI layout of struct sembuf (semop operation descriptor);
 * same field order and widths as the host struct. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4033
4034 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4035 abi_ulong target_addr,
4036 unsigned nsops)
4037 {
4038 struct target_sembuf *target_sembuf;
4039 int i;
4040
4041 target_sembuf = lock_user(VERIFY_READ, target_addr,
4042 nsops*sizeof(struct target_sembuf), 1);
4043 if (!target_sembuf)
4044 return -TARGET_EFAULT;
4045
4046 for(i=0; i<nsops; i++) {
4047 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4048 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4049 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4050 }
4051
4052 unlock_user(target_sembuf, target_addr, 0);
4053
4054 return 0;
4055 }
4056
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/* Emulate semop/semtimedop.  'timeout' is a guest pointer (0 means
 * plain semop semantics); 'time64' selects which guest timespec
 * layout to decode.  Falls back to the multiplexed ipc(2) syscall on
 * hosts without a dedicated semtimedop number. */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Match the kernel's SEMOPM limit before allocating. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        /* Host lacks semtimedop; go through the old ipc multiplexer. */
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4119
/* Guest-ABI layout of msqid64_ds; field order and padding are ABI,
 * do not reorder. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;   /* 32-bit ABI pads each time field to 64 bits */
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4143
4144 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4145 abi_ulong target_addr)
4146 {
4147 struct target_msqid_ds *target_md;
4148
4149 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4150 return -TARGET_EFAULT;
4151 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4152 return -TARGET_EFAULT;
4153 host_md->msg_stime = tswapal(target_md->msg_stime);
4154 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4155 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4156 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4157 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4158 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4159 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4160 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4161 unlock_user_struct(target_md, target_addr, 0);
4162 return 0;
4163 }
4164
4165 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4166 struct msqid_ds *host_md)
4167 {
4168 struct target_msqid_ds *target_md;
4169
4170 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4171 return -TARGET_EFAULT;
4172 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4173 return -TARGET_EFAULT;
4174 target_md->msg_stime = tswapal(host_md->msg_stime);
4175 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4176 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4177 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4178 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4179 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4180 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4181 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4182 unlock_user_struct(target_md, target_addr, 1);
4183 return 0;
4184 }
4185
/* Guest-ABI layout of struct msginfo (IPC_INFO/MSG_INFO results);
 * matches the host struct field-for-field. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4196
/* Copy a host msginfo result out to the guest, byteswapping each
 * field.  Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
4214
/* Emulate msgctl(2): convert the guest buffer for the given command,
 * run the host msgctl, and copy results back.
 * Returns the host result or a negative target errno. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip version bits (e.g. IPC_64) from the command word. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        /* Copy the (possibly updated) msqid_ds back to the guest. */
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* msgctl takes a msqid_ds pointer even for *_INFO; the kernel
         * actually fills a msginfo there, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4246
/*
 * Guest-layout msgbuf: the message type followed by the payload.
 * mtext is declared [1] but is accessed as variable-length data by
 * do_msgsnd()/do_msgrcv().
 */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
4251
/*
 * Emulate msgsnd(2): copy the guest message at @msgp into a host
 * msgbuf (byte-swapping mtype), then send it via the dedicated host
 * syscall if available, falling back to the multiplexed ipc(2).
 * Returns 0 on success or a -TARGET_* errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: a long mtype header plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* The s390x sys_ipc variant takes only five arguments. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4292
#ifdef __NR_ipc
/*
 * Packing of the trailing msgrcv arguments for the multiplexed
 * ipc(2) syscall differs per host architecture; MSGRCV_ARGS expands
 * to the correct final argument(s) for safe_ipc().
 */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
/* Generic variant: pointer/type pair in a kludge array plus a sixth 0.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4306
/*
 * Emulate msgrcv(2): receive into a host bounce buffer, then copy the
 * payload and the byte-swapped mtype back into the guest msgbuf at
 * @msgp.  Uses the dedicated host syscall when available, otherwise
 * the multiplexed ipc(2) (trailing arguments packed via MSGRCV_ARGS).
 * Returns the number of payload bytes or a -TARGET_* errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: long mtype header plus up to msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* Success: ret is the payload length; copy mtext to the guest. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4358
4359 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4360 abi_ulong target_addr)
4361 {
4362 struct target_shmid_ds *target_sd;
4363
4364 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4365 return -TARGET_EFAULT;
4366 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4367 return -TARGET_EFAULT;
4368 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4370 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375 unlock_user_struct(target_sd, target_addr, 0);
4376 return 0;
4377 }
4378
4379 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4380 struct shmid_ds *host_sd)
4381 {
4382 struct target_shmid_ds *target_sd;
4383
4384 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385 return -TARGET_EFAULT;
4386 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4387 return -TARGET_EFAULT;
4388 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4389 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4390 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4391 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4392 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4393 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4394 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4395 unlock_user_struct(target_sd, target_addr, 1);
4396 return 0;
4397 }
4398
/*
 * Guest-layout image of struct shminfo, filled in for
 * shmctl(IPC_INFO).  Field meanings follow shmctl(2).
 */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4406
4407 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4408 struct shminfo *host_shminfo)
4409 {
4410 struct target_shminfo *target_shminfo;
4411 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4412 return -TARGET_EFAULT;
4413 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4414 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4415 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4416 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4417 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4418 unlock_user_struct(target_shminfo, target_addr, 1);
4419 return 0;
4420 }
4421
/*
 * Guest-layout image of struct shm_info, filled in for
 * shmctl(SHM_INFO).  Field meanings follow shmctl(2).
 */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4430
4431 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4432 struct shm_info *host_shm_info)
4433 {
4434 struct target_shm_info *target_shm_info;
4435 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4436 return -TARGET_EFAULT;
4437 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4438 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4439 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4440 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4441 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4442 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4443 unlock_user_struct(target_shm_info, target_addr, 1);
4444 return 0;
4445 }
4446
4447 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4448 {
4449 struct shmid_ds dsarg;
4450 struct shminfo shminfo;
4451 struct shm_info shm_info;
4452 abi_long ret = -TARGET_EINVAL;
4453
4454 cmd &= 0xff;
4455
4456 switch(cmd) {
4457 case IPC_STAT:
4458 case IPC_SET:
4459 case SHM_STAT:
4460 if (target_to_host_shmid_ds(&dsarg, buf))
4461 return -TARGET_EFAULT;
4462 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4463 if (host_to_target_shmid_ds(buf, &dsarg))
4464 return -TARGET_EFAULT;
4465 break;
4466 case IPC_INFO:
4467 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4468 if (host_to_target_shminfo(buf, &shminfo))
4469 return -TARGET_EFAULT;
4470 break;
4471 case SHM_INFO:
4472 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4473 if (host_to_target_shm_info(buf, &shm_info))
4474 return -TARGET_EFAULT;
4475 break;
4476 case IPC_RMID:
4477 case SHM_LOCK:
4478 case SHM_UNLOCK:
4479 ret = get_errno(shmctl(shmid, cmd, NULL));
4480 break;
4481 }
4482
4483 return ret;
4484 }
4485
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default: the guest's SHMLBA is simply the guest page size. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4505
/*
 * Emulate shmat(2).  Validates/rounds the requested guest address
 * against the guest SHMLBA, attaches the host segment (finding a free
 * guest range when shmaddr is 0), marks the guest pages valid, and
 * records the mapping in shm_regions[] so do_shmdt() can undo it.
 * Returns the guest attach address or a -TARGET_* errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce guest SHMLBA alignment; round down only with SHM_RND. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid, read-only if SHM_RDONLY was given. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Record the mapping so do_shmdt() can clear page flags later. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4590
4591 static inline abi_long do_shmdt(abi_ulong shmaddr)
4592 {
4593 int i;
4594 abi_long rv;
4595
4596 /* shmdt pointers are always untagged */
4597
4598 mmap_lock();
4599
4600 for (i = 0; i < N_SHM_REGIONS; ++i) {
4601 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4602 shm_regions[i].in_use = false;
4603 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4604 break;
4605 }
4606 }
4607 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4608
4609 mmap_unlock();
4610
4611 return rv;
4612 }
4613
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the SysV ipc(2) syscall: the low 16 bits of @call select
 * the operation and the high 16 bits carry a calling-convention
 * "version" used by some legacy operations (msgrcv, shmat).
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Legacy convention: ptr points at a kludge struct that
                 * holds the real msgp and msgtyp. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned via the 'third' pointer. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4734
/* kernel structure types definitions */

/*
 * X-macro expansion of syscall_types.h: the first pass builds an enum
 * of STRUCT_<name> identifiers; the second emits, for each non-special
 * structure, an argtype descriptor array struct_<name>_def terminated
 * by TYPE_NULL for use by the thunking code.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Largest structure handled via the fixed-size ioctl bounce buffer. */
#define MAX_STRUCT_SIZE 4096
4753
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/*
 * FS_IOC_FIEMAP: convert the guest's struct fiemap plus its trailing
 * extent array to host format, perform the ioctl, and convert the
 * result back.  Oversized requests get a temporary heap buffer.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so the size computation below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4842
4843 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4844 int fd, int cmd, abi_long arg)
4845 {
4846 const argtype *arg_type = ie->arg_type;
4847 int target_size;
4848 void *argptr;
4849 int ret;
4850 struct ifconf *host_ifconf;
4851 uint32_t outbufsz;
4852 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4853 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4854 int target_ifreq_size;
4855 int nb_ifreq;
4856 int free_buf = 0;
4857 int i;
4858 int target_ifc_len;
4859 abi_long target_ifc_buf;
4860 int host_ifc_len;
4861 char *host_ifc_buf;
4862
4863 assert(arg_type[0] == TYPE_PTR);
4864 assert(ie->access == IOC_RW);
4865
4866 arg_type++;
4867 target_size = thunk_type_size(arg_type, 0);
4868
4869 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4870 if (!argptr)
4871 return -TARGET_EFAULT;
4872 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4873 unlock_user(argptr, arg, 0);
4874
4875 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4876 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4877 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4878
4879 if (target_ifc_buf != 0) {
4880 target_ifc_len = host_ifconf->ifc_len;
4881 nb_ifreq = target_ifc_len / target_ifreq_size;
4882 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4883
4884 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4885 if (outbufsz > MAX_STRUCT_SIZE) {
4886 /*
4887 * We can't fit all the extents into the fixed size buffer.
4888 * Allocate one that is large enough and use it instead.
4889 */
4890 host_ifconf = g_try_malloc(outbufsz);
4891 if (!host_ifconf) {
4892 return -TARGET_ENOMEM;
4893 }
4894 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4895 free_buf = 1;
4896 }
4897 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4898
4899 host_ifconf->ifc_len = host_ifc_len;
4900 } else {
4901 host_ifc_buf = NULL;
4902 }
4903 host_ifconf->ifc_buf = host_ifc_buf;
4904
4905 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4906 if (!is_error(ret)) {
4907 /* convert host ifc_len to target ifc_len */
4908
4909 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4910 target_ifc_len = nb_ifreq * target_ifreq_size;
4911 host_ifconf->ifc_len = target_ifc_len;
4912
4913 /* restore target ifc_buf */
4914
4915 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4916
4917 /* copy struct ifconf to target user */
4918
4919 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4920 if (!argptr)
4921 return -TARGET_EFAULT;
4922 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4923 unlock_user(argptr, arg, target_size);
4924
4925 if (target_ifc_buf != 0) {
4926 /* copy ifreq[] to target user */
4927 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4928 for (i = 0; i < nb_ifreq ; i++) {
4929 thunk_convert(argptr + i * target_ifreq_size,
4930 host_ifc_buf + i * sizeof(struct ifreq),
4931 ifreq_arg_type, THUNK_TARGET);
4932 }
4933 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4934 }
4935 }
4936
4937 if (free_buf) {
4938 g_free(host_ifconf);
4939 }
4940
4941 return ret;
4942 }
4943
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/*
 * Bookkeeping for one in-flight USBDEVFS URB: the guest URB address
 * (also the hash key), the guest buffer address, our locked host view
 * of that buffer, and the host-format urb handed to the kernel.
 */
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
4954
4955 static GHashTable *usbdevfs_urb_hashtable(void)
4956 {
4957 static GHashTable *urb_hashtable;
4958
4959 if (!urb_hashtable) {
4960 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4961 }
4962 return urb_hashtable;
4963 }
4964
/* Track a submitted URB; the urb doubles as both key and value. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4970
4971 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4972 {
4973 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4974 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4975 }
4976
/* Stop tracking a URB (it was reaped or its submission failed). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4982
/*
 * USBDEVFS_REAPURB{,NDELAY}: reap a completed URB from the kernel,
 * recover our struct live_urb wrapper from the returned host urb
 * pointer, copy the results back to the guest URB and data buffer,
 * and write the guest URB address to *arg as the reaped handle.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* The kernel hands back the host urb pointer; the wrapper is
     * recovered by subtracting the embedded member's offset. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Unlock (and write back) the guest data buffer locked at submit. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5042
5043 static abi_long
5044 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5045 uint8_t *buf_temp __attribute__((unused)),
5046 int fd, int cmd, abi_long arg)
5047 {
5048 struct live_urb *lurb;
5049
5050 /* map target address back to host URB with metadata. */
5051 lurb = urb_hashtable_lookup(arg);
5052 if (!lurb) {
5053 return -TARGET_EFAULT;
5054 }
5055 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5056 }
5057
/*
 * USBDEVFS_SUBMITURB: build a host-format URB from the guest's, lock
 * the guest data buffer for the duration of the transfer, submit it,
 * and track the wrapper in the hash table so it can be reaped or
 * discarded later.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* submission failed: drop the buffer lock and the wrapper */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
5119
/*
 * Device-mapper ioctls: every DM_* command passes a struct dm_ioctl
 * header followed by command-specific variable data at data_start.
 * Convert the guest payload to host format (per command), issue the
 * ioctl, then convert the (possibly resized) reply payload back.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed dm_ioctl header first. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the command-specific *input* payload. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* leading 64-bit sector number needs byte-swapping */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Walk the chain of dm_target_spec + parameter string entries,
         * converting each and repacking them contiguously on the host. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* Convert the command-specific *output* payload back. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): this lock_user result is used unchecked below --
         * a faulting guest address would be dereferenced; confirm. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of dm_name_list entries, each followed by a name. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec entries with trailing strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* A count followed (at offset 8) by an array of 64-bit devs. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions entries with trailing names. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Write the (possibly updated) dm_ioctl header back too. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
5349
/*
 * BLKPG ioctl: the argument is a struct blkpg_ioctl_arg whose 'data'
 * member points at a second, operation-specific payload in guest
 * memory, so the generic thunk cannot follow it.  Convert the outer
 * struct, then fetch and convert the payload by hand, redirect the
 * data pointer at the host copy, and issue the host ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg (skip the leading TYPE_PTR in arg_type) */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (a guest address after the thunk) */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5402
/*
 * SIOCADDRT/SIOCDELRT: struct rtentry contains an rt_dev member that
 * is a pointer to a device-name string in guest memory, which the
 * generic struct thunk cannot follow.  Convert the struct one field
 * at a time, intercepting rt_dev to lock the guest string into host
 * memory for the duration of the ioctl.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* This helper only handles write-only pointer-to-rtentry ioctls. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Lock the guest device-name string; released below. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above must always have found rt_dev in STRUCT_rtentry. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5468
5469 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5470 int fd, int cmd, abi_long arg)
5471 {
5472 int sig = target_to_host_signal(arg);
5473 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5474 }
5475
5476 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5477 int fd, int cmd, abi_long arg)
5478 {
5479 struct timeval tv;
5480 abi_long ret;
5481
5482 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5483 if (is_error(ret)) {
5484 return ret;
5485 }
5486
5487 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5488 if (copy_to_user_timeval(arg, &tv)) {
5489 return -TARGET_EFAULT;
5490 }
5491 } else {
5492 if (copy_to_user_timeval64(arg, &tv)) {
5493 return -TARGET_EFAULT;
5494 }
5495 }
5496
5497 return ret;
5498 }
5499
5500 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5501 int fd, int cmd, abi_long arg)
5502 {
5503 struct timespec ts;
5504 abi_long ret;
5505
5506 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5507 if (is_error(ret)) {
5508 return ret;
5509 }
5510
5511 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5512 if (host_to_target_timespec(arg, &ts)) {
5513 return -TARGET_EFAULT;
5514 }
5515 } else{
5516 if (host_to_target_timespec64(arg, &ts)) {
5517 return -TARGET_EFAULT;
5518 }
5519 }
5520
5521 return ret;
5522 }
5523
5524 #ifdef TIOCGPTPEER
5525 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5526 int fd, int cmd, abi_long arg)
5527 {
5528 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5529 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5530 }
5531 #endif
5532
5533 #ifdef HAVE_DRM_H
5534
/*
 * Release the guest buffers locked by target_to_host_drmversion().
 * When 'copy' is true the strings the host DRM_IOCTL_VERSION ioctl
 * filled in are written back to guest memory; otherwise the mappings
 * are dropped without a write-back.
 *
 * NOTE(review): target_ver->name/date/desc are guest addresses used
 * here (and in the lock path) without tswapal() — consistent within
 * this file, but verify for cross-endian hosts.
 */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}
5546
/*
 * Prepare a host struct drm_version from the guest's: copy the three
 * buffer lengths in and lock the corresponding guest buffers (when
 * non-empty) as host pointers so the kernel can fill them directly.
 * On failure, any buffers already locked are released via
 * unlock_drm_version() without write-back.  Returns 0 or -EFAULT.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                             struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* host_ver was zeroed above, so unlocking never-locked slots is safe */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5584
/*
 * Copy the results of a successful DRM_IOCTL_VERSION back to the
 * guest: the scalar version fields and actual string lengths, then
 * the locked string buffers themselves (copy=true write-back).
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5597
5598 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5599 int fd, int cmd, abi_long arg)
5600 {
5601 struct drm_version *ver;
5602 struct target_drm_version *target_ver;
5603 abi_long ret;
5604
5605 switch (ie->host_cmd) {
5606 case DRM_IOCTL_VERSION:
5607 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5608 return -TARGET_EFAULT;
5609 }
5610 ver = (struct drm_version *)buf_temp;
5611 ret = target_to_host_drmversion(ver, target_ver);
5612 if (!is_error(ret)) {
5613 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5614 if (is_error(ret)) {
5615 unlock_drm_version(ver, target_ver, false);
5616 } else {
5617 host_to_target_drmversion(target_ver, ver);
5618 }
5619 }
5620 unlock_user_struct(target_ver, arg, 0);
5621 return ret;
5622 }
5623 return -TARGET_ENOSYS;
5624 }
5625
5626 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5627 struct drm_i915_getparam *gparam,
5628 int fd, abi_long arg)
5629 {
5630 abi_long ret;
5631 int value;
5632 struct target_drm_i915_getparam *target_gparam;
5633
5634 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5635 return -TARGET_EFAULT;
5636 }
5637
5638 __get_user(gparam->param, &target_gparam->param);
5639 gparam->value = &value;
5640 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5641 put_user_s32(value, target_gparam->value);
5642
5643 unlock_user_struct(target_gparam, arg, 0);
5644 return ret;
5645 }
5646
5647 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5648 int fd, int cmd, abi_long arg)
5649 {
5650 switch (ie->host_cmd) {
5651 case DRM_IOCTL_I915_GETPARAM:
5652 return do_ioctl_drm_i915_getparam(ie,
5653 (struct drm_i915_getparam *)buf_temp,
5654 fd, arg);
5655 default:
5656 return -TARGET_ENOSYS;
5657 }
5658 }
5659
5660 #endif
5661
/*
 * TUNSETTXFILTER: struct tun_filter ends in a variable-length array
 * of MAC addresses ('count' entries of ETH_ALEN bytes), so the fixed
 * header and the address tail are copied from guest memory in two
 * steps.  The assembled host struct is built in buf_temp, whose size
 * (MAX_STRUCT_SIZE) bounds the accepted address count.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    /* Fixed header: flags and address count. */
    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* Reject filters that would overflow the buf_temp staging area. */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        /* Variable tail: the MAC address array. */
        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5697
/*
 * Table of every ioctl we know how to translate, generated by
 * expanding ioctls.h with the IOCTL* macros below.  IOCTL_IGNORE
 * entries get host_cmd == 0 (recognized but unimplemented); the
 * table is terminated by an all-zero sentinel entry.
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5708
/* ??? Implement proper locking for ioctls. */
/*
 * do_ioctl(): translate one guest ioctl to the host.  Must return
 * target values and target errnos.
 *
 * The command is looked up in ioctl_entries[]; entries with a
 * do_ioctl callback are handed off wholesale, entries with
 * host_cmd == 0 are BSD-only numbers Linux never implements.
 * Otherwise the argument is marshalled through buf_temp according
 * to the entry's argtype description and access mode.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table ends with a target_cmd == 0 sentinel. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Scalar argument: pass through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes: run the ioctl into buf_temp, then
               convert the result out to guest memory on success. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads: convert guest data in, then run. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Both directions: convert in, run, convert back on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
5800
/* Guest <-> host bit translation for termios input modes (c_iflag). */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
    { 0, 0, 0, 0 }
};
5819
/*
 * Guest <-> host bit translation for termios output modes (c_oflag).
 * The delay fields (NLDLY, CRDLY, ...) are multi-bit, hence one entry
 * per possible value under the same mask.
 */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
5847
/*
 * Guest <-> host bit translation for termios control modes (c_cflag).
 * Baud rates are values under the multi-bit CBAUD mask; character
 * size likewise under CSIZE.
 */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
5882
/* Guest <-> host bit translation for termios local modes (c_lflag). */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
    { 0, 0, 0, 0 }
};
5902
5903 static void target_to_host_termios (void *dst, const void *src)
5904 {
5905 struct host_termios *host = dst;
5906 const struct target_termios *target = src;
5907
5908 host->c_iflag =
5909 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5910 host->c_oflag =
5911 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5912 host->c_cflag =
5913 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5914 host->c_lflag =
5915 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5916 host->c_line = target->c_line;
5917
5918 memset(host->c_cc, 0, sizeof(host->c_cc));
5919 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5920 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5921 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5922 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5923 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5924 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5925 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5926 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5927 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5928 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5929 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5930 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5931 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5932 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5933 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5934 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5935 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5936 }
5937
5938 static void host_to_target_termios (void *dst, const void *src)
5939 {
5940 struct target_termios *target = dst;
5941 const struct host_termios *host = src;
5942
5943 target->c_iflag =
5944 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5945 target->c_oflag =
5946 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5947 target->c_cflag =
5948 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5949 target->c_lflag =
5950 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5951 target->c_line = host->c_line;
5952
5953 memset(target->c_cc, 0, sizeof(target->c_cc));
5954 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5955 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5956 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5957 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5958 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5959 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5960 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5961 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5962 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5963 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5964 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5965 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5966 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5967 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5968 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5969 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5970 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5971 }
5972
/*
 * Thunk descriptor for termios ioctls (TCGETS/TCSETS and friends):
 * uses the custom converters above because flag bits and c_cc indices
 * differ between guest and host.  Initializer order of .convert is
 * { host_to_target, target_to_host }.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5979
/* Guest <-> host translation of mmap(2) flag bits. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6002
6003 /*
6004 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6005 * TARGET_I386 is defined if TARGET_X86_64 is defined
6006 */
6007 #if defined(TARGET_I386)
6008
/* NOTE: there is really one LDT for all the threads */
/* Host pointer to the guest LDT; allocated lazily by write_ldt(). */
static uint8_t *ldt_table;
6011
6012 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6013 {
6014 int size;
6015 void *p;
6016
6017 if (!ldt_table)
6018 return 0;
6019 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6020 if (size > bytecount)
6021 size = bytecount;
6022 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6023 if (!p)
6024 return -TARGET_EFAULT;
6025 /* ??? Should this by byteswapped? */
6026 memcpy(p, ldt_table, size);
6027 unlock_user(p, ptr, size);
6028 return size;
6029 }
6030
/* XXX: add locking support */
/*
 * modify_ldt(func=1 or 0x11): install one LDT descriptor from the
 * guest's modify_ldt user_desc.  'oldmode' selects the legacy func=1
 * semantics (rejects contents == 3 and never sets the 'useable' bit).
 * The LDT is kept in guest-visible memory so the CPU emulation can
 * read it directly.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT lazily on first write */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit descriptor words as the hardware expects. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6122
6123 /* specific and weird i386 syscalls */
6124 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6125 unsigned long bytecount)
6126 {
6127 abi_long ret;
6128
6129 switch (func) {
6130 case 0:
6131 ret = read_ldt(ptr, bytecount);
6132 break;
6133 case 1:
6134 ret = write_ldt(env, ptr, bytecount, 1);
6135 break;
6136 case 0x11:
6137 ret = write_ldt(env, ptr, bytecount, 0);
6138 break;
6139 default:
6140 ret = -TARGET_ENOSYS;
6141 break;
6142 }
6143 return ret;
6144 }
6145
6146 #if defined(TARGET_ABI32)
/*
 * set_thread_area: install a TLS descriptor into the guest GDT.  An
 * entry_number of -1 asks us to pick the first free TLS slot and
 * report it back to the guest.  Descriptor packing mirrors the Linux
 * kernel's fill_ldt()/set_thread_area code.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Pick the first empty TLS slot and tell the guest which one. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    /* Also catches "no free slot found": entry_number stays -1. */
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit descriptor words as the hardware expects. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6231
/*
 * get_thread_area: read one TLS descriptor out of the guest GDT and
 * unpack the hardware descriptor words back into the user_desc
 * base/limit/flags representation (inverse of do_set_thread_area).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual flag bits from descriptor word 2. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the user_desc flags word, limit and base address. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6278
/* arch_prctl(2) is only meaningful for 64-bit x86 guests; the 32-bit
 * ABI build provides this stub that always fails with ENOSYS. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6283 #else
6284 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6285 {
6286 abi_long ret = 0;
6287 abi_ulong val;
6288 int idx;
6289
6290 switch(code) {
6291 case TARGET_ARCH_SET_GS:
6292 case TARGET_ARCH_SET_FS:
6293 if (code == TARGET_ARCH_SET_GS)
6294 idx = R_GS;
6295 else
6296 idx = R_FS;
6297 cpu_x86_load_seg(env, idx, 0);
6298 env->segs[idx].base = addr;
6299 break;
6300 case TARGET_ARCH_GET_GS:
6301 case TARGET_ARCH_GET_FS:
6302 if (code == TARGET_ARCH_GET_GS)
6303 idx = R_GS;
6304 else
6305 idx = R_FS;
6306 val = env->segs[idx].base;
6307 if (put_user(val, addr, abi_ulong))
6308 ret = -TARGET_EFAULT;
6309 break;
6310 default:
6311 ret = -TARGET_EINVAL;
6312 break;
6313 }
6314 return ret;
6315 }
6316 #endif /* defined(TARGET_ABI32 */
6317 #endif /* defined(TARGET_I386) */
6318
/*
 * These constants are generic. Supply any that are missing from the host.
 */
/* Values mirror those in the Linux UAPI <linux/prctl.h>. */
#ifndef PR_SET_NAME
# define PR_SET_NAME 15
# define PR_GET_NAME 16
#endif
#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR (1 << 0)
# define PR_FP_MODE_FRE (1 << 1)
#endif
#ifndef PR_SVE_SET_VL
# define PR_SVE_SET_VL 50
# define PR_SVE_GET_VL 51
# define PR_SVE_VL_LEN_MASK 0xffff
# define PR_SVE_VL_INHERIT (1 << 17)
#endif
#ifndef PR_PAC_RESET_KEYS
# define PR_PAC_RESET_KEYS 54
# define PR_PAC_APIAKEY (1 << 0)
# define PR_PAC_APIBKEY (1 << 1)
# define PR_PAC_APDAKEY (1 << 2)
# define PR_PAC_APDBKEY (1 << 3)
# define PR_PAC_APGAKEY (1 << 4)
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
# define PR_MTE_TCF_SHIFT 1
# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT 3
# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
#endif
#ifndef PR_SET_IO_FLUSHER
# define PR_SET_IO_FLUSHER 57
# define PR_GET_IO_FLUSHER 58
#endif
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
#endif
6367
6368 #include "target_prctl.h"
6369
/* Fallback for arch prctl hooks that take no argument: always EINVAL. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6374
/* Fallback for arch prctl hooks that take one argument: always EINVAL. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6379
/*
 * Targets that do not provide an implementation in target_prctl.h fall
 * back to the EINVAL stubs above.
 */
#ifndef do_prctl_get_fp_mode
#define do_prctl_get_fp_mode do_prctl_inval0
#endif
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
#ifndef do_prctl_get_vl
#define do_prctl_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_set_vl
#define do_prctl_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
#endif
#ifndef do_prctl_set_tagged_addr_ctrl
#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
#endif
#ifndef do_prctl_get_tagged_addr_ctrl
#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
#endif
#ifndef do_prctl_get_unalign
#define do_prctl_get_unalign do_prctl_inval1
#endif
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
6407
/*
 * Emulate prctl(2).  Options with pointer or signal-number arguments
 * are translated explicitly; arch-specific options dispatch to the
 * do_prctl_* hooks supplied by target_prctl.h; plain-integer options
 * are passed straight through to the host.  Options that would let the
 * guest perturb QEMU's own state are rejected with -TARGET_EINVAL.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            /* Convert the host signal number to the target's numbering. */
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* Task comm names are at most 16 bytes including the NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        /* The kernel requires the unused arguments to be zero. */
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6535
/* Host stack size allocated for each new guest thread (256 KiB). */
#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation so setup of a new cpu appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the child's clone_func(). */
typedef struct {
    CPUArchState *env;      /* CPU state for the new thread */
    pthread_mutex_t mutex;  /* protects the cond handshake below */
    pthread_cond_t cond;    /* broadcast by the child once it is ready */
    pthread_t thread;
    uint32_t tid;           /* child's host tid, filled in by the child */
    abi_ulong child_tidptr;   /* guest addr for CLONE_CHILD_SETTID store */
    abi_ulong parent_tidptr;  /* guest addr for CLONE_PARENT_SETTID store */
    sigset_t sigmask;       /* parent signal mask, restored in the child */
} new_thread_info;
6550
/*
 * Entry point of a new host thread created for a guest CLONE_VM clone.
 * Registers the thread with RCU/TCG, publishes its tid, re-enables
 * signals, signals readiness to the parent, then enters the guest CPU
 * loop and never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Store the tid wherever CLONE_CHILD_SETTID/CLONE_PARENT_SETTID asked. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6584
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Implements clone(2)/fork(2)/vfork(2) for the guest: CLONE_VM clones
   become host pthreads sharing this process; everything else becomes a
   host fork().  Returns the new tid/pid, or a negative target errno. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Threads must request the full canonical thread flag set. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the pthread_attr_* return values are overwritten
           without being checked; only pthread_create's result is used. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6729
/*
 * Translate a target fcntl(2) command number into the host's.
 * Warning: does not handle Linux-specific flags beyond those listed.
 * Returns the host command, or -TARGET_EINVAL for unknown commands.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These commands share numbering across all supported hosts. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    /* Plain record locks are routed through the 64-bit host variants. */
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6836
/*
 * Shared lock-type table used by both flock converters below; each
 * expands TRANSTBL_CONVERT to map in the appropriate direction.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6843
/* Map a target flock l_type (F_RDLCK/F_WRLCK/F_UNLCK) to the host's
 * value; unknown types fail with -TARGET_EINVAL. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6851
/* Map a host flock l_type back to the target's value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6862
6863 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6864 abi_ulong target_flock_addr)
6865 {
6866 struct target_flock *target_fl;
6867 int l_type;
6868
6869 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6870 return -TARGET_EFAULT;
6871 }
6872
6873 __get_user(l_type, &target_fl->l_type);
6874 l_type = target_to_host_flock(l_type);
6875 if (l_type < 0) {
6876 return l_type;
6877 }
6878 fl->l_type = l_type;
6879 __get_user(fl->l_whence, &target_fl->l_whence);
6880 __get_user(fl->l_start, &target_fl->l_start);
6881 __get_user(fl->l_len, &target_fl->l_len);
6882 __get_user(fl->l_pid, &target_fl->l_pid);
6883 unlock_user_struct(target_fl, target_flock_addr, 0);
6884 return 0;
6885 }
6886
/*
 * Write a host flock64 back to guest memory as a target 'struct flock',
 * converting the lock type to target numbering.
 * Returns 0 or -TARGET_EFAULT on a bad pointer.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6906
/* Function pointer types so do_fcntl64 can pick the right converter pair. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* Old-ABI ARM layout of flock64: packed, with no padding after l_whence. */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int l_pid;
} QEMU_PACKED;
6918
6919 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6920 abi_ulong target_flock_addr)
6921 {
6922 struct target_oabi_flock64 *target_fl;
6923 int l_type;
6924
6925 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6926 return -TARGET_EFAULT;
6927 }
6928
6929 __get_user(l_type, &target_fl->l_type);
6930 l_type = target_to_host_flock(l_type);
6931 if (l_type < 0) {
6932 return l_type;
6933 }
6934 fl->l_type = l_type;
6935 __get_user(fl->l_whence, &target_fl->l_whence);
6936 __get_user(fl->l_start, &target_fl->l_start);
6937 __get_user(fl->l_len, &target_fl->l_len);
6938 __get_user(fl->l_pid, &target_fl->l_pid);
6939 unlock_user_struct(target_fl, target_flock_addr, 0);
6940 return 0;
6941 }
6942
/*
 * Write a host flock64 back to guest memory in the old-ABI ARM layout.
 * Returns 0 or -TARGET_EFAULT on a bad pointer.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6962 #endif
6963
6964 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6965 abi_ulong target_flock_addr)
6966 {
6967 struct target_flock64 *target_fl;
6968 int l_type;
6969
6970 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6971 return -TARGET_EFAULT;
6972 }
6973
6974 __get_user(l_type, &target_fl->l_type);
6975 l_type = target_to_host_flock(l_type);
6976 if (l_type < 0) {
6977 return l_type;
6978 }
6979 fl->l_type = l_type;
6980 __get_user(fl->l_whence, &target_fl->l_whence);
6981 __get_user(fl->l_start, &target_fl->l_start);
6982 __get_user(fl->l_len, &target_fl->l_len);
6983 __get_user(fl->l_pid, &target_fl->l_pid);
6984 unlock_user_struct(target_fl, target_flock_addr, 0);
6985 return 0;
6986 }
6987
/*
 * Write a host flock64 back to guest memory as a target 'struct flock64'.
 * Returns 0 or -TARGET_EFAULT on a bad pointer.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
7007
/*
 * Emulate fcntl(2): translate the command and any pointed-to structures
 * (flock records, f_owner_ex, signal numbers, status flags) between
 * target and host representations and forward to the host.
 * Returns the host result or a -TARGET_* errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy the probe lock in, query the host, copy the result back. */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Status flags differ between targets; translate the bitmask. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        /* Signal numbers differ between host and target. */
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* Plain integer argument; no translation required. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
7126
7127 #ifdef USE_UID16
7128
/* Squash a 32-bit uid into the 16-bit ABI range; anything that does not
 * fit is reported as the overflow uid 65534 (matching kernel behavior). */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
7136
/* Squash a 32-bit gid into the 16-bit ABI range; anything that does not
 * fit is reported as the overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
7144
/* Widen a 16-bit uid: the 16-bit sentinel -1 (0xffff, "no change")
 * becomes the 32-bit sentinel -1; all other values pass through. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
7152
/* Widen a 16-bit gid: the 16-bit sentinel -1 (0xffff, "no change")
 * becomes the 32-bit sentinel -1; all other values pass through. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Swap a 16-bit uid/gid value between host and target byte order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a 16-bit id at a guest address (USE_UID16 variant). */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7166
7167 #else /* !USE_UID16 */
/* 32-bit uid/gid ABI: no narrowing is needed, so these are identities. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Swap a 32-bit uid/gid value between host and target byte order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* Store a 32-bit id at a guest address. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7190
7191 #endif /* USE_UID16 */
7192
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Generate raw-syscall wrappers (bypassing libc) per the note above. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7226
/*
 * One-time initialisation of the syscall layer: register all thunk
 * struct descriptions and fix up ioctl size fields that could not be
 * computed at compile time.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* Replace the all-ones placeholder with the thunk's size. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7271
#ifdef TARGET_NR_truncate64
/*
 * truncate64 for 32-bit guests: the 64-bit length arrives split across
 * two registers (arg2/arg3), shifted by one slot on ABIs that require
 * even/odd register-pair alignment.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
7285
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64 for 32-bit guests: same register-pair handling as
 * target_truncate64() above, but operating on an fd.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
7299
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a target_itimerspec (two timespecs) from guest memory into a
 * host struct itimerspec.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
7317
#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/*
 * As target_to_host_itimerspec(), but for the 64-bit time_t layout
 * (target__kernel_itimerspec) used by the *_time64 syscalls.
 */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
7335
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * Write a host struct itimerspec out to guest memory as a
 * target_itimerspec.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif
7353
#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/*
 * As host_to_target_itimerspec(), but for the 64-bit time_t layout
 * (target__kernel_itimerspec) used by the *_time64 syscalls.
 */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif
7373
7374 #if defined(TARGET_NR_adjtimex) || \
7375 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Convert a guest struct target_timex at @target_addr (32-bit time_t
 * ABI layout) into the host @host_tx for adjtimex()/clock_adjtime().
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest struct cannot be
 * locked for reading.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy; __get_user does size and endian conversion. */
    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    /* Read-only conversion: nothing to copy back to the guest. */
    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7410
/*
 * Copy the host struct timex @host_tx out to a guest struct target_timex
 * at @target_addr (32-bit time_t ABI layout), byteswapping each field.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest struct cannot be
 * locked for writing.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy; __put_user does size and endian conversion. */
    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    /* 1 == flush the written struct back to guest memory. */
    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7445 #endif
7446
7447
7448 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Convert a guest struct target__kernel_timex (64-bit time_t layout, used
 * by clock_adjtime64) at @target_addr into the host @host_tx.
 *
 * The embedded 'time' member uses a 64-bit timeval and is converted
 * separately via copy_from_user_timeval64(); the remaining fields are
 * copied under lock_user_struct().
 *
 * Returns 0 on success, -TARGET_EFAULT on any guest-memory fault.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    /* The 64-bit 'time' field needs its own conversion helper. */
    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* All remaining fields; __get_user handles size/endian conversion. */
    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    /* Read-only conversion: nothing to copy back to the guest. */
    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7487
/*
 * Copy the host @host_tx out to a guest struct target__kernel_timex
 * (64-bit time_t layout) at @target_addr.
 *
 * The embedded 'time' member is written via copy_to_user_timeval64();
 * the remaining fields are written under lock_user_struct().
 *
 * Returns 0 on success, -TARGET_EFAULT on any guest-memory fault.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    /* The 64-bit 'time' field needs its own conversion helper. */
    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* All remaining fields; __put_user handles size/endian conversion. */
    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    /* 1 == flush the written struct back to guest memory. */
    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7526 #endif
7527
7528 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7529 #define sigev_notify_thread_id _sigev_un._tid
7530 #endif
7531
/*
 * Convert a guest struct target_sigevent at @target_addr into the host
 * @host_sevp (used by timer_create() and friends).
 *
 * The signal number is remapped via target_to_host_signal(); the other
 * fields are byteswapped only.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest struct cannot be
 * locked for reading.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    /* _tid is the SIGEV_THREAD_ID member of the guest's notify union. */
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): unlock with copy=1 although nothing was written to
     * target_sevp — looks like it should be 0; behavior preserved here. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7557
7558 #if defined(TARGET_NR_mlockall)
7559 static inline int target_to_host_mlockall_arg(int arg)
7560 {
7561 int result = 0;
7562
7563 if (arg & TARGET_MCL_CURRENT) {
7564 result |= MCL_CURRENT;
7565 }
7566 if (arg & TARGET_MCL_FUTURE) {
7567 result |= MCL_FUTURE;
7568 }
7569 #ifdef MCL_ONFAULT
7570 if (arg & TARGET_MCL_ONFAULT) {
7571 result |= MCL_ONFAULT;
7572 }
7573 #endif
7574
7575 return result;
7576 }
7577 #endif
7578
7579 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7580 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7581 defined(TARGET_NR_newfstatat))
/*
 * Write the host struct stat @host_st out to guest memory at
 * @target_addr in the guest's stat64 layout.
 *
 * On 32-bit ARM the EABI variant (struct target_eabi_stat64) is used
 * when the vCPU runs in EABI mode; otherwise the generic
 * target_stat64/target_stat layout is used.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest struct cannot be
 * locked for writing.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding/unset fields don't leak host data. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some ABIs duplicate the inode in a secondary __st_ino field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond timestamps only when the host stat provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding/unset fields don't leak host data. */
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7654 #endif
7655
7656 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a statx result @host_stx out to guest memory at @target_addr.
 *
 * Note both sides use struct target_statx: the host result was already
 * produced in that layout, so only per-field byteswaps are needed here.
 * The guest struct is zeroed first so reserved fields are deterministic.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest struct cannot be
 * locked for writing.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7695 #endif
7696
/*
 * Issue a raw (non-interruptible-safe) host futex syscall, selecting
 * __NR_futex or __NR_futex_time64 according to the host's word size
 * and time_t width.
 *
 * On 64-bit hosts __NR_futex always takes a 64-bit time_t. On 32-bit
 * hosts, prefer the _time64 variant when the C library's timespec has a
 * 64-bit tv_sec, otherwise fall back to the classic syscall.
 * Reaching the end means no usable syscall number was compiled in.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                 const struct timespec *timeout, int *uaddr2,
                 int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7721
/*
 * Like do_sys_futex() but via the safe_*() wrappers, which make the
 * blocking syscall restartable by guest signal handling, and with the
 * host errno already converted to a (negative) target errno.
 *
 * Returns -TARGET_ENOSYS when neither futex syscall variant is
 * available on this host.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7746
7747 /* ??? Using host futex calls even when target atomic operations
7748 are not really atomic probably breaks things. However implementing
7749 futexes locally would make futexes shared between multiple processes
7750 tricky. However they're probably useless because guest atomic
7751 operations won't work either. */
7752 #if defined(TARGET_NR_futex)
7753 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7754 target_ulong timeout, target_ulong uaddr2, int val3)
7755 {
7756 struct timespec ts, *pts;
7757 int base_op;
7758
7759 /* ??? We assume FUTEX_* constants are the same on both host
7760 and target. */
7761 #ifdef FUTEX_CMD_MASK
7762 base_op = op & FUTEX_CMD_MASK;
7763 #else
7764 base_op = op;
7765 #endif
7766 switch (base_op) {
7767 case FUTEX_WAIT:
7768 case FUTEX_WAIT_BITSET:
7769 if (timeout) {
7770 pts = &ts;
7771 target_to_host_timespec(pts, timeout);
7772 } else {
7773 pts = NULL;
7774 }
7775 return do_safe_futex(g2h(cpu, uaddr),
7776 op, tswap32(val), pts, NULL, val3);
7777 case FUTEX_WAKE:
7778 return do_safe_futex(g2h(cpu, uaddr),
7779 op, val, NULL, NULL, 0);
7780 case FUTEX_FD:
7781 return do_safe_futex(g2h(cpu, uaddr),
7782 op, val, NULL, NULL, 0);
7783 case FUTEX_REQUEUE:
7784 case FUTEX_CMP_REQUEUE:
7785 case FUTEX_WAKE_OP:
7786 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7787 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7788 But the prototype takes a `struct timespec *'; insert casts
7789 to satisfy the compiler. We do not need to tswap TIMEOUT
7790 since it's not compared to guest memory. */
7791 pts = (struct timespec *)(uintptr_t) timeout;
7792 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7793 (base_op == FUTEX_CMP_REQUEUE
7794 ? tswap32(val3) : val3));
7795 default:
7796 return -TARGET_ENOSYS;
7797 }
7798 }
7799 #endif
7800
7801 #if defined(TARGET_NR_futex_time64)
/*
 * Implement the guest futex_time64() syscall (64-bit time_t timeout).
 * Identical to do_futex() above except the timeout is converted with
 * target_to_host_timespec64(), whose failure is reported as
 * -TARGET_EFAULT.
 */
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
7849 #endif
7850
7851 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement the guest name_to_handle_at() syscall.
 *
 * The guest-supplied struct file_handle begins with a handle_bytes
 * field giving the opaque-data capacity; we build a host file_handle of
 * matching size, call the host syscall, then copy the result (with the
 * two header words byteswapped) and the mount id back to the guest.
 *
 * Returns the host syscall result converted to a target errno, or
 * -TARGET_EFAULT on any guest-memory fault.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First word of the guest struct is the caller-provided capacity. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Opaque payload is copied verbatim; only the header is swapped. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7903 #endif
7904
7905 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement the guest open_by_handle_at() syscall.
 *
 * Reads the guest's struct file_handle (header plus opaque payload),
 * byteswaps the two header words for the host, translates the open
 * flags, and invokes the host syscall.
 *
 * Returns the new fd (or negative target errno), or -TARGET_EFAULT on
 * a guest-memory fault.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* First word of the guest struct is the opaque payload size. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate, then fix the byte order of the header fields only. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7937 #endif
7938
7939 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7940
/*
 * Implement the guest signalfd()/signalfd4() syscalls.
 *
 * Converts the guest sigset and the O_NONBLOCK/O_CLOEXEC flags to host
 * values, calls signalfd(), and registers a fd translator so siginfo
 * structs read from the new fd are converted back to the guest layout.
 *
 * Returns the new fd, -TARGET_EINVAL for unsupported flag bits, or
 * -TARGET_EFAULT if the guest mask is unreadable.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only O_NONBLOCK and O_CLOEXEC are valid signalfd4() flags. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd need host->target siginfo conversion. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7968 #endif
7969
7970 /* Map host to target signal numbers for the wait family of syscalls.
7971 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; keep the 0x7f stop marker. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* Normal exit (or other) statuses carry no signal number. */
    return status;
}
7983
/*
 * Back /proc/self/cmdline for the guest: write the original guest argv
 * strings, each including its NUL terminator, to @fd.
 *
 * Returns 0 on success, -1 if a write fails or is short.
 * NOTE(review): short writes are treated as errors rather than retried;
 * presumably fine for the small temp-file fds this is used with.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 keeps the NUL separator, as the real /proc file has. */
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
8000
/*
 * Back /proc/self/maps for the guest: walk the host's own mappings,
 * keep only those that correspond to guest address space, and emit
 * them in /proc/<pid>/maps format using guest addresses and guest page
 * permissions.
 *
 * Always returns 0.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        /* Skip host mappings that lie outside the guest address space. */
        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last valid guest address + 1. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            /* Report PAGE_WRITE_ORG so pages made read-only internally
             * (e.g. for TB invalidation) still show as writable. */
            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad to column 73, like the kernel's maps output. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
8062
/*
 * Back /proc/self/stat for the guest: emit the 44 space-separated
 * fields, filling in only pid, comm, ppid, starttime and the stack
 * start address; every other field is reported as 0.
 *
 * Returns 0 on success, -1 on a failed or short write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            /* comm is truncated to 15 chars, matching the kernel. */
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
8100
/*
 * Back /proc/self/auxv for the guest: copy the auxiliary vector that
 * was placed on the guest stack at exec time out to @fd, then rewind
 * the fd so the caller reads it from the start.
 *
 * Returns 0 even if the guest auxv could not be locked or the write
 * was short (best effort, matching the original behavior).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        /* Loop to tolerate partial writes. */
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced above, so this unlocks with
         * the moved pointer and residual length — harmless for a read-only
         * lock (no copy-back), but worth confirming. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8130
/*
 * Return 1 if @filename names @entry inside this process's own /proc
 * directory — either "/proc/self/<entry>" or "/proc/<own-pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    /* Must live under /proc/. */
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric directory: only our own pid counts as "myself". */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(filename, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        filename += strlen(pid_prefix);
    } else {
        return 0;
    }

    /* The remainder must match the requested entry exactly. */
    return strcmp(filename, entry) == 0;
}
8154
8155 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8156 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-path comparator used by the fake-/proc open table below. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8161 #endif
8162
8163 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Back /proc/net/route for the guest when host and target endianness
 * differ: copy the host file, byteswapping the destination, gateway and
 * mask columns, which the kernel emits in host byte order.
 *
 * Fixes vs. previous version:
 *  - the header getline() result was unchecked, so an empty file or
 *    read error passed a NULL 'line' to dprintf("%s", ...) (undefined
 *    behavior);
 *  - local 'read' renamed to 'nread' to stop shadowing read(2).
 *
 * Returns 0 on success, -1 if the host file cannot be opened or read.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t nread;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */
    nread = getline(&line, &len, fp);
    if (nread == -1) {
        /* Empty file or read error: 'line' may still be NULL. */
        free(line);
        fclose(fp);
        return -1;
    }
    dprintf(fd, "%s", line);

    /* read routes */
    while ((nread = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        /* Swap only the address columns; the rest are plain text. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
8206 #endif
8207
8208 #if defined(TARGET_SPARC)
/* Back /proc/cpuinfo for SPARC guests with a minimal sun4u entry. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char cpuinfo[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", cpuinfo);
    return 0;
}
8214 #endif
8215
8216 #if defined(TARGET_HPPA)
/* Back /proc/cpuinfo for HPPA guests, describing the emulated PA7300LC. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char cpuinfo[] =
        "cpu family\t: PA-RISC 1.1e\n"
        "cpu\t\t: PA7300LC (PCX-L2)\n"
        "capabilities\t: os32\n"
        "model\t\t: 9000/778/B160L\n"
        "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n";

    dprintf(fd, "%s", cpuinfo);
    return 0;
}
8226 #endif
8227
8228 #if defined(TARGET_M68K)
/* Back /proc/hardware for M68K guests with the emulated model name. */
static int open_hardware(void *cpu_env, int fd)
{
    static const char hardware[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", hardware);
    return 0;
}
8234 #endif
8235
/*
 * Implement the guest openat() syscall, interposing on the /proc paths
 * that must reflect the *guest* rather than the host.
 *
 * "/proc/self/exe" is redirected to the original exec fd or path.
 * A small table of other /proc entries is faked by generating their
 * content into an anonymous (already unlinked) temp file whose fd is
 * returned to the guest. Everything else goes to the host openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* name or full path */
        int (*fill)(void *cpu_env, int fd);         /* content generator */
        int (*cmp)(const char *s1, const char *s2); /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the loader gave us; fall back to the saved path. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, nothing else
         * can see it, and it vanishes when the guest closes it. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the generated content from offset 0. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
8301
8302 #define TIMER_MAGIC 0x0caf0000
8303 #define TIMER_MAGIC_MASK 0xffff0000
8304
8305 /* Convert QEMU provided timer ID back to internal 16bit index format */
8306 static target_timer_t get_timer_id(abi_long arg)
8307 {
8308 target_timer_t timerid = arg;
8309
8310 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8311 return -TARGET_EINVAL;
8312 }
8313
8314 timerid &= 0xffff;
8315
8316 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8317 return -TARGET_EINVAL;
8318 }
8319
8320 return timerid;
8321 }
8322
/*
 * Convert a guest CPU-affinity bitmask at @target_addr (@target_size
 * bytes of abi_ulong words) into the host @host_mask (@host_size bytes
 * of unsigned long words), re-packing bit by bit so differing word
 * sizes and endiannesses are handled.
 *
 * Requires host_size >= target_size. Returns 0 on success,
 * -TARGET_EFAULT if the guest mask is unreadable.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    /* Clear everything so host bits beyond the guest mask are 0. */
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        /* Scatter each set guest bit into the matching host word/bit. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
8356
8357 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8358 size_t host_size,
8359 abi_ulong target_addr,
8360 size_t target_size)
8361 {
8362 unsigned target_bits = sizeof(abi_ulong) * 8;
8363 unsigned host_bits = sizeof(*host_mask) * 8;
8364 abi_ulong *target_mask;
8365 unsigned i, j;
8366
8367 assert(host_size >= target_size);
8368
8369 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8370 if (!target_mask) {
8371 return -TARGET_EFAULT;
8372 }
8373
8374 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8375 unsigned bit = i * target_bits;
8376 abi_ulong val = 0;
8377
8378 for (j = 0; j < target_bits; j++, bit++) {
8379 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8380 val |= 1UL << j;
8381 }
8382 }
8383 __put_user(val, &target_mask[i]);
8384 }
8385
8386 unlock_user(target_mask, target_addr, target_size);
8387 return 0;
8388 }
8389
#ifdef TARGET_NR_getdents
/*
 * Implement getdents(2) for the guest: fetch host directory entries for
 * dirfd into a bounce buffer, then convert each record to the guest's
 * struct target_dirent layout in the guest buffer at arg2 ('count'
 * bytes).  Returns the number of bytes written to the guest buffer, or
 * a negative TARGET_* errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;  /* host bounce buffer, freed on return */
    void *tdirp;                    /* locked guest destination buffer */
    int hlen, hoff, toff;           /* host byte count; host/target offsets */
    int hreclen, treclen;           /* host/target record lengths */
    off64_t prev_diroff = 0;        /* d_off of the last record converted */

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    /* Read raw host records; which syscall depends on the build config. */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records and emit converted target records in lockstep. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: NUL terminator plus the trailing d_type byte (see below). */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        /* namelen + 1 copies the NUL terminator as well. */
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
8476
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Implement getdents64(2) for the guest: fetch host linux_dirent64
 * records for dirfd into a bounce buffer, then convert each one to the
 * guest's struct target_dirent64 layout in the guest buffer at arg2
 * ('count' bytes).  Returns the number of bytes written to the guest
 * buffer, or a negative TARGET_* errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;  /* host bounce buffer, freed on return */
    void *tdirp;                    /* locked guest destination buffer */
    int hlen, hoff, toff;           /* host byte count; host/target offsets */
    int hreclen, treclen;           /* host/target record lengths */
    off64_t prev_diroff = 0;        /* d_off of the last record converted */

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records and emit converted target records in lockstep. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen includes the NUL terminator here. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
8542
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* Generate a raw syscall wrapper for pivot_root(2) via the _syscall2 macro. */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
8546
8547 /* This is an internal helper for do_syscall so that it is easier
8548 * to have a single return point, so that actions, such as logging
8549 * of syscall results, can be performed.
8550 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8551 */
8552 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8553 abi_long arg2, abi_long arg3, abi_long arg4,
8554 abi_long arg5, abi_long arg6, abi_long arg7,
8555 abi_long arg8)
8556 {
8557 CPUState *cpu = env_cpu(cpu_env);
8558 abi_long ret;
8559 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8560 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8561 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8562 || defined(TARGET_NR_statx)
8563 struct stat st;
8564 #endif
8565 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8566 || defined(TARGET_NR_fstatfs)
8567 struct statfs stfs;
8568 #endif
8569 void *p;
8570
8571 switch(num) {
8572 case TARGET_NR_exit:
8573 /* In old applications this may be used to implement _exit(2).
8574 However in threaded applications it is used for thread termination,
8575 and _exit_group is used for application termination.
8576 Do thread termination if we have more then one thread. */
8577
8578 if (block_signals()) {
8579 return -QEMU_ERESTARTSYS;
8580 }
8581
8582 pthread_mutex_lock(&clone_lock);
8583
8584 if (CPU_NEXT(first_cpu)) {
8585 TaskState *ts = cpu->opaque;
8586
8587 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8588 object_unref(OBJECT(cpu));
8589 /*
8590 * At this point the CPU should be unrealized and removed
8591 * from cpu lists. We can clean-up the rest of the thread
8592 * data without the lock held.
8593 */
8594
8595 pthread_mutex_unlock(&clone_lock);
8596
8597 if (ts->child_tidptr) {
8598 put_user_u32(0, ts->child_tidptr);
8599 do_sys_futex(g2h(cpu, ts->child_tidptr),
8600 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8601 }
8602 thread_cpu = NULL;
8603 g_free(ts);
8604 rcu_unregister_thread();
8605 pthread_exit(NULL);
8606 }
8607
8608 pthread_mutex_unlock(&clone_lock);
8609 preexit_cleanup(cpu_env, arg1);
8610 _exit(arg1);
8611 return 0; /* avoid warning */
8612 case TARGET_NR_read:
8613 if (arg2 == 0 && arg3 == 0) {
8614 return get_errno(safe_read(arg1, 0, 0));
8615 } else {
8616 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8617 return -TARGET_EFAULT;
8618 ret = get_errno(safe_read(arg1, p, arg3));
8619 if (ret >= 0 &&
8620 fd_trans_host_to_target_data(arg1)) {
8621 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8622 }
8623 unlock_user(p, arg2, ret);
8624 }
8625 return ret;
8626 case TARGET_NR_write:
8627 if (arg2 == 0 && arg3 == 0) {
8628 return get_errno(safe_write(arg1, 0, 0));
8629 }
8630 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8631 return -TARGET_EFAULT;
8632 if (fd_trans_target_to_host_data(arg1)) {
8633 void *copy = g_malloc(arg3);
8634 memcpy(copy, p, arg3);
8635 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8636 if (ret >= 0) {
8637 ret = get_errno(safe_write(arg1, copy, ret));
8638 }
8639 g_free(copy);
8640 } else {
8641 ret = get_errno(safe_write(arg1, p, arg3));
8642 }
8643 unlock_user(p, arg2, 0);
8644 return ret;
8645
8646 #ifdef TARGET_NR_open
8647 case TARGET_NR_open:
8648 if (!(p = lock_user_string(arg1)))
8649 return -TARGET_EFAULT;
8650 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8651 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8652 arg3));
8653 fd_trans_unregister(ret);
8654 unlock_user(p, arg1, 0);
8655 return ret;
8656 #endif
8657 case TARGET_NR_openat:
8658 if (!(p = lock_user_string(arg2)))
8659 return -TARGET_EFAULT;
8660 ret = get_errno(do_openat(cpu_env, arg1, p,
8661 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8662 arg4));
8663 fd_trans_unregister(ret);
8664 unlock_user(p, arg2, 0);
8665 return ret;
8666 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8667 case TARGET_NR_name_to_handle_at:
8668 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8669 return ret;
8670 #endif
8671 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8672 case TARGET_NR_open_by_handle_at:
8673 ret = do_open_by_handle_at(arg1, arg2, arg3);
8674 fd_trans_unregister(ret);
8675 return ret;
8676 #endif
8677 case TARGET_NR_close:
8678 fd_trans_unregister(arg1);
8679 return get_errno(close(arg1));
8680
8681 case TARGET_NR_brk:
8682 return do_brk(arg1);
8683 #ifdef TARGET_NR_fork
8684 case TARGET_NR_fork:
8685 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8686 #endif
8687 #ifdef TARGET_NR_waitpid
8688 case TARGET_NR_waitpid:
8689 {
8690 int status;
8691 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8692 if (!is_error(ret) && arg2 && ret
8693 && put_user_s32(host_to_target_waitstatus(status), arg2))
8694 return -TARGET_EFAULT;
8695 }
8696 return ret;
8697 #endif
8698 #ifdef TARGET_NR_waitid
8699 case TARGET_NR_waitid:
8700 {
8701 siginfo_t info;
8702 info.si_pid = 0;
8703 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8704 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8705 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8706 return -TARGET_EFAULT;
8707 host_to_target_siginfo(p, &info);
8708 unlock_user(p, arg3, sizeof(target_siginfo_t));
8709 }
8710 }
8711 return ret;
8712 #endif
8713 #ifdef TARGET_NR_creat /* not on alpha */
8714 case TARGET_NR_creat:
8715 if (!(p = lock_user_string(arg1)))
8716 return -TARGET_EFAULT;
8717 ret = get_errno(creat(p, arg2));
8718 fd_trans_unregister(ret);
8719 unlock_user(p, arg1, 0);
8720 return ret;
8721 #endif
8722 #ifdef TARGET_NR_link
8723 case TARGET_NR_link:
8724 {
8725 void * p2;
8726 p = lock_user_string(arg1);
8727 p2 = lock_user_string(arg2);
8728 if (!p || !p2)
8729 ret = -TARGET_EFAULT;
8730 else
8731 ret = get_errno(link(p, p2));
8732 unlock_user(p2, arg2, 0);
8733 unlock_user(p, arg1, 0);
8734 }
8735 return ret;
8736 #endif
8737 #if defined(TARGET_NR_linkat)
8738 case TARGET_NR_linkat:
8739 {
8740 void * p2 = NULL;
8741 if (!arg2 || !arg4)
8742 return -TARGET_EFAULT;
8743 p = lock_user_string(arg2);
8744 p2 = lock_user_string(arg4);
8745 if (!p || !p2)
8746 ret = -TARGET_EFAULT;
8747 else
8748 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8749 unlock_user(p, arg2, 0);
8750 unlock_user(p2, arg4, 0);
8751 }
8752 return ret;
8753 #endif
8754 #ifdef TARGET_NR_unlink
8755 case TARGET_NR_unlink:
8756 if (!(p = lock_user_string(arg1)))
8757 return -TARGET_EFAULT;
8758 ret = get_errno(unlink(p));
8759 unlock_user(p, arg1, 0);
8760 return ret;
8761 #endif
8762 #if defined(TARGET_NR_unlinkat)
8763 case TARGET_NR_unlinkat:
8764 if (!(p = lock_user_string(arg2)))
8765 return -TARGET_EFAULT;
8766 ret = get_errno(unlinkat(arg1, p, arg3));
8767 unlock_user(p, arg2, 0);
8768 return ret;
8769 #endif
8770 case TARGET_NR_execve:
8771 {
8772 char **argp, **envp;
8773 int argc, envc;
8774 abi_ulong gp;
8775 abi_ulong guest_argp;
8776 abi_ulong guest_envp;
8777 abi_ulong addr;
8778 char **q;
8779
8780 argc = 0;
8781 guest_argp = arg2;
8782 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8783 if (get_user_ual(addr, gp))
8784 return -TARGET_EFAULT;
8785 if (!addr)
8786 break;
8787 argc++;
8788 }
8789 envc = 0;
8790 guest_envp = arg3;
8791 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8792 if (get_user_ual(addr, gp))
8793 return -TARGET_EFAULT;
8794 if (!addr)
8795 break;
8796 envc++;
8797 }
8798
8799 argp = g_new0(char *, argc + 1);
8800 envp = g_new0(char *, envc + 1);
8801
8802 for (gp = guest_argp, q = argp; gp;
8803 gp += sizeof(abi_ulong), q++) {
8804 if (get_user_ual(addr, gp))
8805 goto execve_efault;
8806 if (!addr)
8807 break;
8808 if (!(*q = lock_user_string(addr)))
8809 goto execve_efault;
8810 }
8811 *q = NULL;
8812
8813 for (gp = guest_envp, q = envp; gp;
8814 gp += sizeof(abi_ulong), q++) {
8815 if (get_user_ual(addr, gp))
8816 goto execve_efault;
8817 if (!addr)
8818 break;
8819 if (!(*q = lock_user_string(addr)))
8820 goto execve_efault;
8821 }
8822 *q = NULL;
8823
8824 if (!(p = lock_user_string(arg1)))
8825 goto execve_efault;
8826 /* Although execve() is not an interruptible syscall it is
8827 * a special case where we must use the safe_syscall wrapper:
8828 * if we allow a signal to happen before we make the host
8829 * syscall then we will 'lose' it, because at the point of
8830 * execve the process leaves QEMU's control. So we use the
8831 * safe syscall wrapper to ensure that we either take the
8832 * signal as a guest signal, or else it does not happen
8833 * before the execve completes and makes it the other
8834 * program's problem.
8835 */
8836 ret = get_errno(safe_execve(p, argp, envp));
8837 unlock_user(p, arg1, 0);
8838
8839 goto execve_end;
8840
8841 execve_efault:
8842 ret = -TARGET_EFAULT;
8843
8844 execve_end:
8845 for (gp = guest_argp, q = argp; *q;
8846 gp += sizeof(abi_ulong), q++) {
8847 if (get_user_ual(addr, gp)
8848 || !addr)
8849 break;
8850 unlock_user(*q, addr, 0);
8851 }
8852 for (gp = guest_envp, q = envp; *q;
8853 gp += sizeof(abi_ulong), q++) {
8854 if (get_user_ual(addr, gp)
8855 || !addr)
8856 break;
8857 unlock_user(*q, addr, 0);
8858 }
8859
8860 g_free(argp);
8861 g_free(envp);
8862 }
8863 return ret;
8864 case TARGET_NR_chdir:
8865 if (!(p = lock_user_string(arg1)))
8866 return -TARGET_EFAULT;
8867 ret = get_errno(chdir(p));
8868 unlock_user(p, arg1, 0);
8869 return ret;
8870 #ifdef TARGET_NR_time
8871 case TARGET_NR_time:
8872 {
8873 time_t host_time;
8874 ret = get_errno(time(&host_time));
8875 if (!is_error(ret)
8876 && arg1
8877 && put_user_sal(host_time, arg1))
8878 return -TARGET_EFAULT;
8879 }
8880 return ret;
8881 #endif
8882 #ifdef TARGET_NR_mknod
8883 case TARGET_NR_mknod:
8884 if (!(p = lock_user_string(arg1)))
8885 return -TARGET_EFAULT;
8886 ret = get_errno(mknod(p, arg2, arg3));
8887 unlock_user(p, arg1, 0);
8888 return ret;
8889 #endif
8890 #if defined(TARGET_NR_mknodat)
8891 case TARGET_NR_mknodat:
8892 if (!(p = lock_user_string(arg2)))
8893 return -TARGET_EFAULT;
8894 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8895 unlock_user(p, arg2, 0);
8896 return ret;
8897 #endif
8898 #ifdef TARGET_NR_chmod
8899 case TARGET_NR_chmod:
8900 if (!(p = lock_user_string(arg1)))
8901 return -TARGET_EFAULT;
8902 ret = get_errno(chmod(p, arg2));
8903 unlock_user(p, arg1, 0);
8904 return ret;
8905 #endif
8906 #ifdef TARGET_NR_lseek
8907 case TARGET_NR_lseek:
8908 return get_errno(lseek(arg1, arg2, arg3));
8909 #endif
8910 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8911 /* Alpha specific */
8912 case TARGET_NR_getxpid:
8913 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8914 return get_errno(getpid());
8915 #endif
8916 #ifdef TARGET_NR_getpid
8917 case TARGET_NR_getpid:
8918 return get_errno(getpid());
8919 #endif
8920 case TARGET_NR_mount:
8921 {
8922 /* need to look at the data field */
8923 void *p2, *p3;
8924
8925 if (arg1) {
8926 p = lock_user_string(arg1);
8927 if (!p) {
8928 return -TARGET_EFAULT;
8929 }
8930 } else {
8931 p = NULL;
8932 }
8933
8934 p2 = lock_user_string(arg2);
8935 if (!p2) {
8936 if (arg1) {
8937 unlock_user(p, arg1, 0);
8938 }
8939 return -TARGET_EFAULT;
8940 }
8941
8942 if (arg3) {
8943 p3 = lock_user_string(arg3);
8944 if (!p3) {
8945 if (arg1) {
8946 unlock_user(p, arg1, 0);
8947 }
8948 unlock_user(p2, arg2, 0);
8949 return -TARGET_EFAULT;
8950 }
8951 } else {
8952 p3 = NULL;
8953 }
8954
8955 /* FIXME - arg5 should be locked, but it isn't clear how to
8956 * do that since it's not guaranteed to be a NULL-terminated
8957 * string.
8958 */
8959 if (!arg5) {
8960 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8961 } else {
8962 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8963 }
8964 ret = get_errno(ret);
8965
8966 if (arg1) {
8967 unlock_user(p, arg1, 0);
8968 }
8969 unlock_user(p2, arg2, 0);
8970 if (arg3) {
8971 unlock_user(p3, arg3, 0);
8972 }
8973 }
8974 return ret;
8975 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8976 #if defined(TARGET_NR_umount)
8977 case TARGET_NR_umount:
8978 #endif
8979 #if defined(TARGET_NR_oldumount)
8980 case TARGET_NR_oldumount:
8981 #endif
8982 if (!(p = lock_user_string(arg1)))
8983 return -TARGET_EFAULT;
8984 ret = get_errno(umount(p));
8985 unlock_user(p, arg1, 0);
8986 return ret;
8987 #endif
8988 #ifdef TARGET_NR_stime /* not on alpha */
8989 case TARGET_NR_stime:
8990 {
8991 struct timespec ts;
8992 ts.tv_nsec = 0;
8993 if (get_user_sal(ts.tv_sec, arg1)) {
8994 return -TARGET_EFAULT;
8995 }
8996 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8997 }
8998 #endif
8999 #ifdef TARGET_NR_alarm /* not on alpha */
9000 case TARGET_NR_alarm:
9001 return alarm(arg1);
9002 #endif
9003 #ifdef TARGET_NR_pause /* not on alpha */
9004 case TARGET_NR_pause:
9005 if (!block_signals()) {
9006 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9007 }
9008 return -TARGET_EINTR;
9009 #endif
9010 #ifdef TARGET_NR_utime
9011 case TARGET_NR_utime:
9012 {
9013 struct utimbuf tbuf, *host_tbuf;
9014 struct target_utimbuf *target_tbuf;
9015 if (arg2) {
9016 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9017 return -TARGET_EFAULT;
9018 tbuf.actime = tswapal(target_tbuf->actime);
9019 tbuf.modtime = tswapal(target_tbuf->modtime);
9020 unlock_user_struct(target_tbuf, arg2, 0);
9021 host_tbuf = &tbuf;
9022 } else {
9023 host_tbuf = NULL;
9024 }
9025 if (!(p = lock_user_string(arg1)))
9026 return -TARGET_EFAULT;
9027 ret = get_errno(utime(p, host_tbuf));
9028 unlock_user(p, arg1, 0);
9029 }
9030 return ret;
9031 #endif
9032 #ifdef TARGET_NR_utimes
9033 case TARGET_NR_utimes:
9034 {
9035 struct timeval *tvp, tv[2];
9036 if (arg2) {
9037 if (copy_from_user_timeval(&tv[0], arg2)
9038 || copy_from_user_timeval(&tv[1],
9039 arg2 + sizeof(struct target_timeval)))
9040 return -TARGET_EFAULT;
9041 tvp = tv;
9042 } else {
9043 tvp = NULL;
9044 }
9045 if (!(p = lock_user_string(arg1)))
9046 return -TARGET_EFAULT;
9047 ret = get_errno(utimes(p, tvp));
9048 unlock_user(p, arg1, 0);
9049 }
9050 return ret;
9051 #endif
9052 #if defined(TARGET_NR_futimesat)
9053 case TARGET_NR_futimesat:
9054 {
9055 struct timeval *tvp, tv[2];
9056 if (arg3) {
9057 if (copy_from_user_timeval(&tv[0], arg3)
9058 || copy_from_user_timeval(&tv[1],
9059 arg3 + sizeof(struct target_timeval)))
9060 return -TARGET_EFAULT;
9061 tvp = tv;
9062 } else {
9063 tvp = NULL;
9064 }
9065 if (!(p = lock_user_string(arg2))) {
9066 return -TARGET_EFAULT;
9067 }
9068 ret = get_errno(futimesat(arg1, path(p), tvp));
9069 unlock_user(p, arg2, 0);
9070 }
9071 return ret;
9072 #endif
9073 #ifdef TARGET_NR_access
9074 case TARGET_NR_access:
9075 if (!(p = lock_user_string(arg1))) {
9076 return -TARGET_EFAULT;
9077 }
9078 ret = get_errno(access(path(p), arg2));
9079 unlock_user(p, arg1, 0);
9080 return ret;
9081 #endif
9082 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9083 case TARGET_NR_faccessat:
9084 if (!(p = lock_user_string(arg2))) {
9085 return -TARGET_EFAULT;
9086 }
9087 ret = get_errno(faccessat(arg1, p, arg3, 0));
9088 unlock_user(p, arg2, 0);
9089 return ret;
9090 #endif
9091 #ifdef TARGET_NR_nice /* not on alpha */
9092 case TARGET_NR_nice:
9093 return get_errno(nice(arg1));
9094 #endif
9095 case TARGET_NR_sync:
9096 sync();
9097 return 0;
9098 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9099 case TARGET_NR_syncfs:
9100 return get_errno(syncfs(arg1));
9101 #endif
9102 case TARGET_NR_kill:
9103 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9104 #ifdef TARGET_NR_rename
9105 case TARGET_NR_rename:
9106 {
9107 void *p2;
9108 p = lock_user_string(arg1);
9109 p2 = lock_user_string(arg2);
9110 if (!p || !p2)
9111 ret = -TARGET_EFAULT;
9112 else
9113 ret = get_errno(rename(p, p2));
9114 unlock_user(p2, arg2, 0);
9115 unlock_user(p, arg1, 0);
9116 }
9117 return ret;
9118 #endif
9119 #if defined(TARGET_NR_renameat)
9120 case TARGET_NR_renameat:
9121 {
9122 void *p2;
9123 p = lock_user_string(arg2);
9124 p2 = lock_user_string(arg4);
9125 if (!p || !p2)
9126 ret = -TARGET_EFAULT;
9127 else
9128 ret = get_errno(renameat(arg1, p, arg3, p2));
9129 unlock_user(p2, arg4, 0);
9130 unlock_user(p, arg2, 0);
9131 }
9132 return ret;
9133 #endif
9134 #if defined(TARGET_NR_renameat2)
9135 case TARGET_NR_renameat2:
9136 {
9137 void *p2;
9138 p = lock_user_string(arg2);
9139 p2 = lock_user_string(arg4);
9140 if (!p || !p2) {
9141 ret = -TARGET_EFAULT;
9142 } else {
9143 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9144 }
9145 unlock_user(p2, arg4, 0);
9146 unlock_user(p, arg2, 0);
9147 }
9148 return ret;
9149 #endif
9150 #ifdef TARGET_NR_mkdir
9151 case TARGET_NR_mkdir:
9152 if (!(p = lock_user_string(arg1)))
9153 return -TARGET_EFAULT;
9154 ret = get_errno(mkdir(p, arg2));
9155 unlock_user(p, arg1, 0);
9156 return ret;
9157 #endif
9158 #if defined(TARGET_NR_mkdirat)
9159 case TARGET_NR_mkdirat:
9160 if (!(p = lock_user_string(arg2)))
9161 return -TARGET_EFAULT;
9162 ret = get_errno(mkdirat(arg1, p, arg3));
9163 unlock_user(p, arg2, 0);
9164 return ret;
9165 #endif
9166 #ifdef TARGET_NR_rmdir
9167 case TARGET_NR_rmdir:
9168 if (!(p = lock_user_string(arg1)))
9169 return -TARGET_EFAULT;
9170 ret = get_errno(rmdir(p));
9171 unlock_user(p, arg1, 0);
9172 return ret;
9173 #endif
9174 case TARGET_NR_dup:
9175 ret = get_errno(dup(arg1));
9176 if (ret >= 0) {
9177 fd_trans_dup(arg1, ret);
9178 }
9179 return ret;
9180 #ifdef TARGET_NR_pipe
9181 case TARGET_NR_pipe:
9182 return do_pipe(cpu_env, arg1, 0, 0);
9183 #endif
9184 #ifdef TARGET_NR_pipe2
9185 case TARGET_NR_pipe2:
9186 return do_pipe(cpu_env, arg1,
9187 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9188 #endif
9189 case TARGET_NR_times:
9190 {
9191 struct target_tms *tmsp;
9192 struct tms tms;
9193 ret = get_errno(times(&tms));
9194 if (arg1) {
9195 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9196 if (!tmsp)
9197 return -TARGET_EFAULT;
9198 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9199 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9200 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9201 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9202 }
9203 if (!is_error(ret))
9204 ret = host_to_target_clock_t(ret);
9205 }
9206 return ret;
9207 case TARGET_NR_acct:
9208 if (arg1 == 0) {
9209 ret = get_errno(acct(NULL));
9210 } else {
9211 if (!(p = lock_user_string(arg1))) {
9212 return -TARGET_EFAULT;
9213 }
9214 ret = get_errno(acct(path(p)));
9215 unlock_user(p, arg1, 0);
9216 }
9217 return ret;
9218 #ifdef TARGET_NR_umount2
9219 case TARGET_NR_umount2:
9220 if (!(p = lock_user_string(arg1)))
9221 return -TARGET_EFAULT;
9222 ret = get_errno(umount2(p, arg2));
9223 unlock_user(p, arg1, 0);
9224 return ret;
9225 #endif
9226 case TARGET_NR_ioctl:
9227 return do_ioctl(arg1, arg2, arg3);
9228 #ifdef TARGET_NR_fcntl
9229 case TARGET_NR_fcntl:
9230 return do_fcntl(arg1, arg2, arg3);
9231 #endif
9232 case TARGET_NR_setpgid:
9233 return get_errno(setpgid(arg1, arg2));
9234 case TARGET_NR_umask:
9235 return get_errno(umask(arg1));
9236 case TARGET_NR_chroot:
9237 if (!(p = lock_user_string(arg1)))
9238 return -TARGET_EFAULT;
9239 ret = get_errno(chroot(p));
9240 unlock_user(p, arg1, 0);
9241 return ret;
9242 #ifdef TARGET_NR_dup2
9243 case TARGET_NR_dup2:
9244 ret = get_errno(dup2(arg1, arg2));
9245 if (ret >= 0) {
9246 fd_trans_dup(arg1, arg2);
9247 }
9248 return ret;
9249 #endif
9250 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9251 case TARGET_NR_dup3:
9252 {
9253 int host_flags;
9254
9255 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9256 return -EINVAL;
9257 }
9258 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9259 ret = get_errno(dup3(arg1, arg2, host_flags));
9260 if (ret >= 0) {
9261 fd_trans_dup(arg1, arg2);
9262 }
9263 return ret;
9264 }
9265 #endif
9266 #ifdef TARGET_NR_getppid /* not on alpha */
9267 case TARGET_NR_getppid:
9268 return get_errno(getppid());
9269 #endif
9270 #ifdef TARGET_NR_getpgrp
9271 case TARGET_NR_getpgrp:
9272 return get_errno(getpgrp());
9273 #endif
9274 case TARGET_NR_setsid:
9275 return get_errno(setsid());
9276 #ifdef TARGET_NR_sigaction
9277 case TARGET_NR_sigaction:
9278 {
9279 #if defined(TARGET_MIPS)
9280 struct target_sigaction act, oact, *pact, *old_act;
9281
9282 if (arg2) {
9283 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9284 return -TARGET_EFAULT;
9285 act._sa_handler = old_act->_sa_handler;
9286 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9287 act.sa_flags = old_act->sa_flags;
9288 unlock_user_struct(old_act, arg2, 0);
9289 pact = &act;
9290 } else {
9291 pact = NULL;
9292 }
9293
9294 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9295
9296 if (!is_error(ret) && arg3) {
9297 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9298 return -TARGET_EFAULT;
9299 old_act->_sa_handler = oact._sa_handler;
9300 old_act->sa_flags = oact.sa_flags;
9301 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9302 old_act->sa_mask.sig[1] = 0;
9303 old_act->sa_mask.sig[2] = 0;
9304 old_act->sa_mask.sig[3] = 0;
9305 unlock_user_struct(old_act, arg3, 1);
9306 }
9307 #else
9308 struct target_old_sigaction *old_act;
9309 struct target_sigaction act, oact, *pact;
9310 if (arg2) {
9311 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9312 return -TARGET_EFAULT;
9313 act._sa_handler = old_act->_sa_handler;
9314 target_siginitset(&act.sa_mask, old_act->sa_mask);
9315 act.sa_flags = old_act->sa_flags;
9316 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9317 act.sa_restorer = old_act->sa_restorer;
9318 #endif
9319 unlock_user_struct(old_act, arg2, 0);
9320 pact = &act;
9321 } else {
9322 pact = NULL;
9323 }
9324 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9325 if (!is_error(ret) && arg3) {
9326 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9327 return -TARGET_EFAULT;
9328 old_act->_sa_handler = oact._sa_handler;
9329 old_act->sa_mask = oact.sa_mask.sig[0];
9330 old_act->sa_flags = oact.sa_flags;
9331 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9332 old_act->sa_restorer = oact.sa_restorer;
9333 #endif
9334 unlock_user_struct(old_act, arg3, 1);
9335 }
9336 #endif
9337 }
9338 return ret;
9339 #endif
9340 case TARGET_NR_rt_sigaction:
9341 {
9342 /*
9343 * For Alpha and SPARC this is a 5 argument syscall, with
9344 * a 'restorer' parameter which must be copied into the
9345 * sa_restorer field of the sigaction struct.
9346 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9347 * and arg5 is the sigsetsize.
9348 */
9349 #if defined(TARGET_ALPHA)
9350 target_ulong sigsetsize = arg4;
9351 target_ulong restorer = arg5;
9352 #elif defined(TARGET_SPARC)
9353 target_ulong restorer = arg4;
9354 target_ulong sigsetsize = arg5;
9355 #else
9356 target_ulong sigsetsize = arg4;
9357 target_ulong restorer = 0;
9358 #endif
9359 struct target_sigaction *act = NULL;
9360 struct target_sigaction *oact = NULL;
9361
9362 if (sigsetsize != sizeof(target_sigset_t)) {
9363 return -TARGET_EINVAL;
9364 }
9365 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9366 return -TARGET_EFAULT;
9367 }
9368 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9369 ret = -TARGET_EFAULT;
9370 } else {
9371 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9372 if (oact) {
9373 unlock_user_struct(oact, arg3, 1);
9374 }
9375 }
9376 if (act) {
9377 unlock_user_struct(act, arg2, 0);
9378 }
9379 }
9380 return ret;
9381 #ifdef TARGET_NR_sgetmask /* not on alpha */
9382 case TARGET_NR_sgetmask:
9383 {
9384 sigset_t cur_set;
9385 abi_ulong target_set;
9386 ret = do_sigprocmask(0, NULL, &cur_set);
9387 if (!ret) {
9388 host_to_target_old_sigset(&target_set, &cur_set);
9389 ret = target_set;
9390 }
9391 }
9392 return ret;
9393 #endif
9394 #ifdef TARGET_NR_ssetmask /* not on alpha */
9395 case TARGET_NR_ssetmask:
9396 {
9397 sigset_t set, oset;
9398 abi_ulong target_set = arg1;
9399 target_to_host_old_sigset(&set, &target_set);
9400 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9401 if (!ret) {
9402 host_to_target_old_sigset(&target_set, &oset);
9403 ret = target_set;
9404 }
9405 }
9406 return ret;
9407 #endif
9408 #ifdef TARGET_NR_sigprocmask
9409 case TARGET_NR_sigprocmask:
9410 {
9411 #if defined(TARGET_ALPHA)
9412 sigset_t set, oldset;
9413 abi_ulong mask;
9414 int how;
9415
9416 switch (arg1) {
9417 case TARGET_SIG_BLOCK:
9418 how = SIG_BLOCK;
9419 break;
9420 case TARGET_SIG_UNBLOCK:
9421 how = SIG_UNBLOCK;
9422 break;
9423 case TARGET_SIG_SETMASK:
9424 how = SIG_SETMASK;
9425 break;
9426 default:
9427 return -TARGET_EINVAL;
9428 }
9429 mask = arg2;
9430 target_to_host_old_sigset(&set, &mask);
9431
9432 ret = do_sigprocmask(how, &set, &oldset);
9433 if (!is_error(ret)) {
9434 host_to_target_old_sigset(&mask, &oldset);
9435 ret = mask;
9436 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9437 }
9438 #else
9439 sigset_t set, oldset, *set_ptr;
9440 int how;
9441
9442 if (arg2) {
9443 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9444 if (!p) {
9445 return -TARGET_EFAULT;
9446 }
9447 target_to_host_old_sigset(&set, p);
9448 unlock_user(p, arg2, 0);
9449 set_ptr = &set;
9450 switch (arg1) {
9451 case TARGET_SIG_BLOCK:
9452 how = SIG_BLOCK;
9453 break;
9454 case TARGET_SIG_UNBLOCK:
9455 how = SIG_UNBLOCK;
9456 break;
9457 case TARGET_SIG_SETMASK:
9458 how = SIG_SETMASK;
9459 break;
9460 default:
9461 return -TARGET_EINVAL;
9462 }
9463 } else {
9464 how = 0;
9465 set_ptr = NULL;
9466 }
9467 ret = do_sigprocmask(how, set_ptr, &oldset);
9468 if (!is_error(ret) && arg3) {
9469 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9470 return -TARGET_EFAULT;
9471 host_to_target_old_sigset(p, &oldset);
9472 unlock_user(p, arg3, sizeof(target_sigset_t));
9473 }
9474 #endif
9475 }
9476 return ret;
9477 #endif
9478 case TARGET_NR_rt_sigprocmask:
9479 {
9480 int how = arg1;
9481 sigset_t set, oldset, *set_ptr;
9482
/* rt_sigprocmask(how, set, oldset, sigsetsize): the kernel rejects
   any sigsetsize that does not match exactly, and so do we. */
9483 if (arg4 != sizeof(target_sigset_t)) {
9484 return -TARGET_EINVAL;
9485 }
9486
9487 if (arg2) {
9488 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9489 if (!p) {
9490 return -TARGET_EFAULT;
9491 }
9492 target_to_host_sigset(&set, p);
9493 unlock_user(p, arg2, 0);
9494 set_ptr = &set;
/* Translate the target's SIG_* "how" constant to the host's. */
9495 switch(how) {
9496 case TARGET_SIG_BLOCK:
9497 how = SIG_BLOCK;
9498 break;
9499 case TARGET_SIG_UNBLOCK:
9500 how = SIG_UNBLOCK;
9501 break;
9502 case TARGET_SIG_SETMASK:
9503 how = SIG_SETMASK;
9504 break;
9505 default:
9506 return -TARGET_EINVAL;
9507 }
9508 } else {
/* NULL new-set: query-only; "how" is ignored by the kernel. */
9509 how = 0;
9510 set_ptr = NULL;
9511 }
9512 ret = do_sigprocmask(how, set_ptr, &oldset);
9513 if (!is_error(ret) && arg3) {
/* Copy the previous mask back out if the guest asked for it. */
9514 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9515 return -TARGET_EFAULT;
9516 host_to_target_sigset(p, &oldset);
9517 unlock_user(p, arg3, sizeof(target_sigset_t));
9518 }
9519 }
9520 return ret;
9521 #ifdef TARGET_NR_sigpending
9522 case TARGET_NR_sigpending:
9523 {
/* Old-style sigpending: write the pending set to arg1 using the
   old (pre-rt) sigset layout. */
9524 sigset_t set;
9525 ret = get_errno(sigpending(&set));
9526 if (!is_error(ret)) {
9527 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9528 return -TARGET_EFAULT;
9529 host_to_target_old_sigset(p, &set);
9530 unlock_user(p, arg1, sizeof(target_sigset_t));
9531 }
9532 }
9533 return ret;
9534 #endif
9535 case TARGET_NR_rt_sigpending:
9536 {
9537 sigset_t set;
9538
9539 /* Yes, this check is >, not != like most. We follow the kernel's
9540 * logic and it does it like this because it implements
9541 * NR_sigpending through the same code path, and in that case
9542 * the old_sigset_t is smaller in size.
9543 */
9544 if (arg2 > sizeof(target_sigset_t)) {
9545 return -TARGET_EINVAL;
9546 }
9547
9548 ret = get_errno(sigpending(&set));
9549 if (!is_error(ret)) {
9550 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9551 return -TARGET_EFAULT;
9552 host_to_target_sigset(p, &set);
9553 unlock_user(p, arg1, sizeof(target_sigset_t));
9554 }
9555 }
9556 return ret;
9557 #ifdef TARGET_NR_sigsuspend
9558 case TARGET_NR_sigsuspend:
9559 {
/* The new mask is stashed in the TaskState so that the signal
   delivery path can restore the original mask afterwards
   (in_sigsuspend flags that state below). */
9560 TaskState *ts = cpu->opaque;
9561 #if defined(TARGET_ALPHA)
/* Alpha passes the mask by value in arg1, not via a pointer.
   target_to_host_old_sigset() expects target-memory byte order, so
   pre-swap with tswapal(); the conversion's own swap then cancels
   it out, which keeps this correct on big-endian hosts. */
9562 /* target_to_host_old_sigset will bswap back */
9563 abi_ulong mask = tswapal(arg1);
9564 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9565 #else
9566 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9567 return -TARGET_EFAULT;
9568 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9569 unlock_user(p, arg1, 0);
9570 #endif
9571 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9572 SIGSET_T_SIZE));
/* On ERESTARTSYS the syscall will be restarted, so the suspend
   state must not be recorded yet. */
9573 if (ret != -QEMU_ERESTARTSYS) {
9574 ts->in_sigsuspend = 1;
9575 }
9576 }
9577 return ret;
9578 #endif
9579 case TARGET_NR_rt_sigsuspend:
9580 {
/* rt variant: mask comes from guest memory (arg1) with an explicit
   sigsetsize (arg2) that must match exactly. */
9581 TaskState *ts = cpu->opaque;
9582
9583 if (arg2 != sizeof(target_sigset_t)) {
9584 return -TARGET_EINVAL;
9585 }
9586 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9587 return -TARGET_EFAULT;
9588 target_to_host_sigset(&ts->sigsuspend_mask, p);
9589 unlock_user(p, arg1, 0);
9590 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9591 SIGSET_T_SIZE));
/* Do not mark the suspend state if the syscall is to be restarted. */
9592 if (ret != -QEMU_ERESTARTSYS) {
9593 ts->in_sigsuspend = 1;
9594 }
9595 }
9596 return ret;
9597 #ifdef TARGET_NR_rt_sigtimedwait
9598 case TARGET_NR_rt_sigtimedwait:
9599 {
9600 sigset_t set;
9601 struct timespec uts, *puts;
9602 siginfo_t uinfo;
9603
9604 if (arg4 != sizeof(target_sigset_t)) {
9605 return -TARGET_EINVAL;
9606 }
9607
9608 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9609 return -TARGET_EFAULT;
9610 target_to_host_sigset(&set, p);
9611 unlock_user(p, arg1, 0);
9612 if (arg3) {
9613 puts = &uts;
9614 if (target_to_host_timespec(puts, arg3)) {
9615 return -TARGET_EFAULT;
9616 }
9617 } else {
9618 puts = NULL;
9619 }
9620 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9621 SIGSET_T_SIZE));
9622 if (!is_error(ret)) {
9623 if (arg2) {
9624 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9625 0);
9626 if (!p) {
9627 return -TARGET_EFAULT;
9628 }
9629 host_to_target_siginfo(p, &uinfo);
9630 unlock_user(p, arg2, sizeof(target_siginfo_t));
9631 }
9632 ret = host_to_target_signal(ret);
9633 }
9634 }
9635 return ret;
9636 #endif
9637 #ifdef TARGET_NR_rt_sigtimedwait_time64
9638 case TARGET_NR_rt_sigtimedwait_time64:
9639 {
9640 sigset_t set;
9641 struct timespec uts, *puts;
9642 siginfo_t uinfo;
9643
9644 if (arg4 != sizeof(target_sigset_t)) {
9645 return -TARGET_EINVAL;
9646 }
9647
9648 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9649 if (!p) {
9650 return -TARGET_EFAULT;
9651 }
9652 target_to_host_sigset(&set, p);
9653 unlock_user(p, arg1, 0);
9654 if (arg3) {
9655 puts = &uts;
9656 if (target_to_host_timespec64(puts, arg3)) {
9657 return -TARGET_EFAULT;
9658 }
9659 } else {
9660 puts = NULL;
9661 }
9662 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9663 SIGSET_T_SIZE));
9664 if (!is_error(ret)) {
9665 if (arg2) {
9666 p = lock_user(VERIFY_WRITE, arg2,
9667 sizeof(target_siginfo_t), 0);
9668 if (!p) {
9669 return -TARGET_EFAULT;
9670 }
9671 host_to_target_siginfo(p, &uinfo);
9672 unlock_user(p, arg2, sizeof(target_siginfo_t));
9673 }
9674 ret = host_to_target_signal(ret);
9675 }
9676 }
9677 return ret;
9678 #endif
9679 case TARGET_NR_rt_sigqueueinfo:
9680 {
/* rt_sigqueueinfo(pid=arg1, sig=arg2, siginfo at arg3): convert the
   guest siginfo to host layout, then queue it via the raw syscall. */
9681 siginfo_t uinfo;
9682
9683 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9684 if (!p) {
9685 return -TARGET_EFAULT;
9686 }
9687 target_to_host_siginfo(&uinfo, p);
9688 unlock_user(p, arg3, 0);
9689 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9690 }
9691 return ret;
9692 case TARGET_NR_rt_tgsigqueueinfo:
9693 {
/* Thread-directed variant: tgid=arg1, tid=arg2, sig=arg3, and the
   siginfo pointer shifts to arg4. */
9694 siginfo_t uinfo;
9695
9696 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9697 if (!p) {
9698 return -TARGET_EFAULT;
9699 }
9700 target_to_host_siginfo(&uinfo, p);
9701 unlock_user(p, arg4, 0);
9702 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9703 }
9704 return ret;
9705 #ifdef TARGET_NR_sigreturn
9706 case TARGET_NR_sigreturn:
/* Signals must be blocked while the CPU state is rewritten from the
   signal frame; if that fails, restart the syscall. */
9707 if (block_signals()) {
9708 return -QEMU_ERESTARTSYS;
9709 }
9710 return do_sigreturn(cpu_env);
9711 #endif
9712 case TARGET_NR_rt_sigreturn:
/* Same guard for the rt signal frame restore. */
9713 if (block_signals()) {
9714 return -QEMU_ERESTARTSYS;
9715 }
9716 return do_rt_sigreturn(cpu_env);
9717 case TARGET_NR_sethostname:
9718 if (!(p = lock_user_string(arg1)))
9719 return -TARGET_EFAULT;
9720 ret = get_errno(sethostname(p, arg2));
9721 unlock_user(p, arg1, 0);
9722 return ret;
9723 #ifdef TARGET_NR_setrlimit
9724 case TARGET_NR_setrlimit:
9725 {
9726 int resource = target_to_host_resource(arg1);
9727 struct target_rlimit *target_rlim;
9728 struct rlimit rlim;
9729 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9730 return -TARGET_EFAULT;
9731 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9732 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9733 unlock_user_struct(target_rlim, arg2, 0);
9734 /*
9735 * If we just passed through resource limit settings for memory then
9736 * they would also apply to QEMU's own allocations, and QEMU will
9737 * crash or hang or die if its allocations fail. Ideally we would
9738 * track the guest allocations in QEMU and apply the limits ourselves.
9739 * For now, just tell the guest the call succeeded but don't actually
9740 * limit anything.
9741 */
9742 if (resource != RLIMIT_AS &&
9743 resource != RLIMIT_DATA &&
9744 resource != RLIMIT_STACK) {
9745 return get_errno(setrlimit(resource, &rlim));
9746 } else {
9747 return 0;
9748 }
9749 }
9750 #endif
9751 #ifdef TARGET_NR_getrlimit
9752 case TARGET_NR_getrlimit:
9753 {
9754 int resource = target_to_host_resource(arg1);
9755 struct target_rlimit *target_rlim;
9756 struct rlimit rlim;
9757
9758 ret = get_errno(getrlimit(resource, &rlim));
9759 if (!is_error(ret)) {
9760 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9761 return -TARGET_EFAULT;
9762 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9763 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9764 unlock_user_struct(target_rlim, arg2, 1);
9765 }
9766 }
9767 return ret;
9768 #endif
9769 case TARGET_NR_getrusage:
9770 {
9771 struct rusage rusage;
9772 ret = get_errno(getrusage(arg1, &rusage));
9773 if (!is_error(ret)) {
9774 ret = host_to_target_rusage(arg2, &rusage);
9775 }
9776 }
9777 return ret;
9778 #if defined(TARGET_NR_gettimeofday)
9779 case TARGET_NR_gettimeofday:
9780 {
9781 struct timeval tv;
9782 struct timezone tz;
9783
9784 ret = get_errno(gettimeofday(&tv, &tz));
9785 if (!is_error(ret)) {
9786 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9787 return -TARGET_EFAULT;
9788 }
9789 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9790 return -TARGET_EFAULT;
9791 }
9792 }
9793 }
9794 return ret;
9795 #endif
9796 #if defined(TARGET_NR_settimeofday)
9797 case TARGET_NR_settimeofday:
9798 {
9799 struct timeval tv, *ptv = NULL;
9800 struct timezone tz, *ptz = NULL;
9801
9802 if (arg1) {
9803 if (copy_from_user_timeval(&tv, arg1)) {
9804 return -TARGET_EFAULT;
9805 }
9806 ptv = &tv;
9807 }
9808
9809 if (arg2) {
9810 if (copy_from_user_timezone(&tz, arg2)) {
9811 return -TARGET_EFAULT;
9812 }
9813 ptz = &tz;
9814 }
9815
9816 return get_errno(settimeofday(ptv, ptz));
9817 }
9818 #endif
9819 #if defined(TARGET_NR_select)
9820 case TARGET_NR_select:
9821 #if defined(TARGET_WANT_NI_OLD_SELECT)
9822 /* some architectures used to have old_select here
9823 * but now ENOSYS it.
9824 */
9825 ret = -TARGET_ENOSYS;
9826 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9827 ret = do_old_select(arg1);
9828 #else
9829 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9830 #endif
9831 return ret;
9832 #endif
9833 #ifdef TARGET_NR_pselect6
9834 case TARGET_NR_pselect6:
9835 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9836 #endif
9837 #ifdef TARGET_NR_pselect6_time64
9838 case TARGET_NR_pselect6_time64:
9839 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9840 #endif
9841 #ifdef TARGET_NR_symlink
9842 case TARGET_NR_symlink:
9843 {
9844 void *p2;
9845 p = lock_user_string(arg1);
9846 p2 = lock_user_string(arg2);
9847 if (!p || !p2)
9848 ret = -TARGET_EFAULT;
9849 else
9850 ret = get_errno(symlink(p, p2));
9851 unlock_user(p2, arg2, 0);
9852 unlock_user(p, arg1, 0);
9853 }
9854 return ret;
9855 #endif
9856 #if defined(TARGET_NR_symlinkat)
9857 case TARGET_NR_symlinkat:
9858 {
9859 void *p2;
9860 p = lock_user_string(arg1);
9861 p2 = lock_user_string(arg3);
9862 if (!p || !p2)
9863 ret = -TARGET_EFAULT;
9864 else
9865 ret = get_errno(symlinkat(p, arg2, p2));
9866 unlock_user(p2, arg3, 0);
9867 unlock_user(p, arg1, 0);
9868 }
9869 return ret;
9870 #endif
9871 #ifdef TARGET_NR_readlink
9872 case TARGET_NR_readlink:
9873 {
9874 void *p2;
9875 p = lock_user_string(arg1);
9876 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9877 if (!p || !p2) {
9878 ret = -TARGET_EFAULT;
9879 } else if (!arg3) {
9880 /* Short circuit this for the magic exe check. */
9881 ret = -TARGET_EINVAL;
9882 } else if (is_proc_myself((const char *)p, "exe")) {
9883 char real[PATH_MAX], *temp;
9884 temp = realpath(exec_path, real);
9885 /* Return value is # of bytes that we wrote to the buffer. */
9886 if (temp == NULL) {
9887 ret = get_errno(-1);
9888 } else {
9889 /* Don't worry about sign mismatch as earlier mapping
9890 * logic would have thrown a bad address error. */
9891 ret = MIN(strlen(real), arg3);
9892 /* We cannot NUL terminate the string. */
9893 memcpy(p2, real, ret);
9894 }
9895 } else {
9896 ret = get_errno(readlink(path(p), p2, arg3));
9897 }
9898 unlock_user(p2, arg2, ret);
9899 unlock_user(p, arg1, 0);
9900 }
9901 return ret;
9902 #endif
9903 #if defined(TARGET_NR_readlinkat)
9904 case TARGET_NR_readlinkat:
9905 {
9906 void *p2;
9907 p = lock_user_string(arg2);
9908 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9909 if (!p || !p2) {
9910 ret = -TARGET_EFAULT;
9911 } else if (is_proc_myself((const char *)p, "exe")) {
9912 char real[PATH_MAX], *temp;
9913 temp = realpath(exec_path, real);
9914 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9915 snprintf((char *)p2, arg4, "%s", real);
9916 } else {
9917 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9918 }
9919 unlock_user(p2, arg3, ret);
9920 unlock_user(p, arg2, 0);
9921 }
9922 return ret;
9923 #endif
9924 #ifdef TARGET_NR_swapon
9925 case TARGET_NR_swapon:
9926 if (!(p = lock_user_string(arg1)))
9927 return -TARGET_EFAULT;
9928 ret = get_errno(swapon(p, arg2));
9929 unlock_user(p, arg1, 0);
9930 return ret;
9931 #endif
9932 case TARGET_NR_reboot:
/* reboot(2): arg4 is a command string only for RESTART2; for every
   other cmd it is not a pointer and must not be dereferenced. */
9933 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9934 /* arg4 must be ignored in all other cases */
9935 p = lock_user_string(arg4);
9936 if (!p) {
9937 return -TARGET_EFAULT;
9938 }
9939 ret = get_errno(reboot(arg1, arg2, arg3, p));
9940 unlock_user(p, arg4, 0);
9941 } else {
9942 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9943 }
9944 return ret;
9945 #ifdef TARGET_NR_mmap
9946 case TARGET_NR_mmap:
9947 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9948 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9949 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9950 || defined(TARGET_S390X)
9951 {
9952 abi_ulong *v;
9953 abi_ulong v1, v2, v3, v4, v5, v6;
9954 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9955 return -TARGET_EFAULT;
9956 v1 = tswapal(v[0]);
9957 v2 = tswapal(v[1]);
9958 v3 = tswapal(v[2]);
9959 v4 = tswapal(v[3]);
9960 v5 = tswapal(v[4]);
9961 v6 = tswapal(v[5]);
9962 unlock_user(v, arg1, 0);
9963 ret = get_errno(target_mmap(v1, v2, v3,
9964 target_to_host_bitmask(v4, mmap_flags_tbl),
9965 v5, v6));
9966 }
9967 #else
9968 /* mmap pointers are always untagged */
9969 ret = get_errno(target_mmap(arg1, arg2, arg3,
9970 target_to_host_bitmask(arg4, mmap_flags_tbl),
9971 arg5,
9972 arg6));
9973 #endif
9974 return ret;
9975 #endif
9976 #ifdef TARGET_NR_mmap2
9977 case TARGET_NR_mmap2:
9978 #ifndef MMAP_SHIFT
9979 #define MMAP_SHIFT 12
9980 #endif
9981 ret = target_mmap(arg1, arg2, arg3,
9982 target_to_host_bitmask(arg4, mmap_flags_tbl),
9983 arg5, arg6 << MMAP_SHIFT);
9984 return get_errno(ret);
9985 #endif
9986 case TARGET_NR_munmap:
9987 arg1 = cpu_untagged_addr(cpu, arg1);
9988 return get_errno(target_munmap(arg1, arg2));
9989 case TARGET_NR_mprotect:
9990 arg1 = cpu_untagged_addr(cpu, arg1);
9991 {
9992 TaskState *ts = cpu->opaque;
9993 /* Special hack to detect libc making the stack executable. */
9994 if ((arg3 & PROT_GROWSDOWN)
9995 && arg1 >= ts->info->stack_limit
9996 && arg1 <= ts->info->start_stack) {
9997 arg3 &= ~PROT_GROWSDOWN;
9998 arg2 = arg2 + arg1 - ts->info->stack_limit;
9999 arg1 = ts->info->stack_limit;
10000 }
10001 }
10002 return get_errno(target_mprotect(arg1, arg2, arg3));
10003 #ifdef TARGET_NR_mremap
10004 case TARGET_NR_mremap:
10005 arg1 = cpu_untagged_addr(cpu, arg1);
10006 /* mremap new_addr (arg5) is always untagged */
10007 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10008 #endif
10009 /* ??? msync/mlock/munlock are broken for softmmu. */
10010 #ifdef TARGET_NR_msync
10011 case TARGET_NR_msync:
10012 return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10013 #endif
10014 #ifdef TARGET_NR_mlock
10015 case TARGET_NR_mlock:
10016 return get_errno(mlock(g2h(cpu, arg1), arg2));
10017 #endif
10018 #ifdef TARGET_NR_munlock
10019 case TARGET_NR_munlock:
10020 return get_errno(munlock(g2h(cpu, arg1), arg2));
10021 #endif
10022 #ifdef TARGET_NR_mlockall
10023 case TARGET_NR_mlockall:
10024 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10025 #endif
10026 #ifdef TARGET_NR_munlockall
10027 case TARGET_NR_munlockall:
10028 return get_errno(munlockall());
10029 #endif
10030 #ifdef TARGET_NR_truncate
10031 case TARGET_NR_truncate:
10032 if (!(p = lock_user_string(arg1)))
10033 return -TARGET_EFAULT;
10034 ret = get_errno(truncate(p, arg2));
10035 unlock_user(p, arg1, 0);
10036 return ret;
10037 #endif
10038 #ifdef TARGET_NR_ftruncate
10039 case TARGET_NR_ftruncate:
10040 return get_errno(ftruncate(arg1, arg2));
10041 #endif
10042 case TARGET_NR_fchmod:
10043 return get_errno(fchmod(arg1, arg2));
10044 #if defined(TARGET_NR_fchmodat)
10045 case TARGET_NR_fchmodat:
10046 if (!(p = lock_user_string(arg2)))
10047 return -TARGET_EFAULT;
10048 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10049 unlock_user(p, arg2, 0);
10050 return ret;
10051 #endif
10052 case TARGET_NR_getpriority:
10053 /* Note that negative values are valid for getpriority, so we must
10054 differentiate based on errno settings. */
/* The libc call can legitimately return -1, so success/failure is
   decided by errno, not the return value. */
10055 errno = 0;
10056 ret = getpriority(arg1, arg2);
10057 if (ret == -1 && errno != 0) {
10058 return -host_to_target_errno(errno);
10059 }
10060 #ifdef TARGET_ALPHA
10061 /* Return value is the unbiased priority. Signal no error. */
10062 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
10063 #else
10064 /* Return value is a biased priority to avoid negative numbers. */
/* The kernel syscall (as opposed to libc) returns 20 - nice so the
   result is never mistaken for an error code. */
10065 ret = 20 - ret;
10066 #endif
10067 return ret;
10068 case TARGET_NR_setpriority:
/* setpriority(which, who, prio): values pass through unchanged. */
10069 return get_errno(setpriority(arg1, arg2, arg3));
10070 #ifdef TARGET_NR_statfs
10071 case TARGET_NR_statfs:
10072 if (!(p = lock_user_string(arg1))) {
10073 return -TARGET_EFAULT;
10074 }
10075 ret = get_errno(statfs(path(p), &stfs));
10076 unlock_user(p, arg1, 0);
10077 convert_statfs:
10078 if (!is_error(ret)) {
10079 struct target_statfs *target_stfs;
10080
10081 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10082 return -TARGET_EFAULT;
10083 __put_user(stfs.f_type, &target_stfs->f_type);
10084 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10085 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10086 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10087 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10088 __put_user(stfs.f_files, &target_stfs->f_files);
10089 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10090 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10091 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10092 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10093 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10094 #ifdef _STATFS_F_FLAGS
10095 __put_user(stfs.f_flags, &target_stfs->f_flags);
10096 #else
10097 __put_user(0, &target_stfs->f_flags);
10098 #endif
10099 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10100 unlock_user_struct(target_stfs, arg2, 1);
10101 }
10102 return ret;
10103 #endif
10104 #ifdef TARGET_NR_fstatfs
10105 case TARGET_NR_fstatfs:
10106 ret = get_errno(fstatfs(arg1, &stfs));
10107 goto convert_statfs;
10108 #endif
10109 #ifdef TARGET_NR_statfs64
10110 case TARGET_NR_statfs64:
10111 if (!(p = lock_user_string(arg1))) {
10112 return -TARGET_EFAULT;
10113 }
10114 ret = get_errno(statfs(path(p), &stfs));
10115 unlock_user(p, arg1, 0);
10116 convert_statfs64:
10117 if (!is_error(ret)) {
10118 struct target_statfs64 *target_stfs;
10119
10120 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10121 return -TARGET_EFAULT;
10122 __put_user(stfs.f_type, &target_stfs->f_type);
10123 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10124 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10125 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10126 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10127 __put_user(stfs.f_files, &target_stfs->f_files);
10128 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10129 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10130 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10131 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10132 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10133 #ifdef _STATFS_F_FLAGS
10134 __put_user(stfs.f_flags, &target_stfs->f_flags);
10135 #else
10136 __put_user(0, &target_stfs->f_flags);
10137 #endif
10138 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10139 unlock_user_struct(target_stfs, arg3, 1);
10140 }
10141 return ret;
10142 case TARGET_NR_fstatfs64:
10143 ret = get_errno(fstatfs(arg1, &stfs));
10144 goto convert_statfs64;
10145 #endif
10146 #ifdef TARGET_NR_socketcall
10147 case TARGET_NR_socketcall:
10148 return do_socketcall(arg1, arg2);
10149 #endif
10150 #ifdef TARGET_NR_accept
10151 case TARGET_NR_accept:
10152 return do_accept4(arg1, arg2, arg3, 0);
10153 #endif
10154 #ifdef TARGET_NR_accept4
10155 case TARGET_NR_accept4:
10156 return do_accept4(arg1, arg2, arg3, arg4);
10157 #endif
10158 #ifdef TARGET_NR_bind
10159 case TARGET_NR_bind:
10160 return do_bind(arg1, arg2, arg3);
10161 #endif
10162 #ifdef TARGET_NR_connect
10163 case TARGET_NR_connect:
10164 return do_connect(arg1, arg2, arg3);
10165 #endif
10166 #ifdef TARGET_NR_getpeername
10167 case TARGET_NR_getpeername:
10168 return do_getpeername(arg1, arg2, arg3);
10169 #endif
10170 #ifdef TARGET_NR_getsockname
10171 case TARGET_NR_getsockname:
10172 return do_getsockname(arg1, arg2, arg3);
10173 #endif
10174 #ifdef TARGET_NR_getsockopt
10175 case TARGET_NR_getsockopt:
10176 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10177 #endif
10178 #ifdef TARGET_NR_listen
10179 case TARGET_NR_listen:
10180 return get_errno(listen(arg1, arg2));
10181 #endif
10182 #ifdef TARGET_NR_recv
10183 case TARGET_NR_recv:
10184 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10185 #endif
10186 #ifdef TARGET_NR_recvfrom
10187 case TARGET_NR_recvfrom:
10188 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10189 #endif
10190 #ifdef TARGET_NR_recvmsg
10191 case TARGET_NR_recvmsg:
10192 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10193 #endif
10194 #ifdef TARGET_NR_send
10195 case TARGET_NR_send:
10196 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10197 #endif
10198 #ifdef TARGET_NR_sendmsg
10199 case TARGET_NR_sendmsg:
10200 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10201 #endif
10202 #ifdef TARGET_NR_sendmmsg
10203 case TARGET_NR_sendmmsg:
10204 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10205 #endif
10206 #ifdef TARGET_NR_recvmmsg
10207 case TARGET_NR_recvmmsg:
10208 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10209 #endif
10210 #ifdef TARGET_NR_sendto
10211 case TARGET_NR_sendto:
10212 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10213 #endif
10214 #ifdef TARGET_NR_shutdown
10215 case TARGET_NR_shutdown:
10216 return get_errno(shutdown(arg1, arg2));
10217 #endif
10218 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10219 case TARGET_NR_getrandom:
/* getrandom(buf=arg1, buflen=arg2, flags=arg3): fill guest memory
   directly; only the bytes actually produced are written back. */
10220 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10221 if (!p) {
10222 return -TARGET_EFAULT;
10223 }
10224 ret = get_errno(getrandom(p, arg2, arg3));
/* ret is the byte count on success; unlock_user copies that many. */
10225 unlock_user(p, arg1, ret);
10226 return ret;
10227 #endif
10228 #ifdef TARGET_NR_socket
10229 case TARGET_NR_socket:
10230 return do_socket(arg1, arg2, arg3);
10231 #endif
10232 #ifdef TARGET_NR_socketpair
10233 case TARGET_NR_socketpair:
10234 return do_socketpair(arg1, arg2, arg3, arg4);
10235 #endif
10236 #ifdef TARGET_NR_setsockopt
10237 case TARGET_NR_setsockopt:
10238 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10239 #endif
10240 #if defined(TARGET_NR_syslog)
10241 case TARGET_NR_syslog:
10242 {
/* syslog(2)/klogctl: arg1 = action, arg2 = buffer, arg3 = length.
   NOTE(review): 'len' is initialized from arg2 (the buffer pointer)
   yet the size used for lock_user and sys_syslog below is arg3, so
   the <0 / ==0 checks appear to test the wrong argument — confirm
   against the kernel's syslog(type, bufp, len) ordering. */
10243 int len = arg2;
10244
10245 switch (arg1) {
/* Actions that take no buffer: pass through directly. */
10246 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10247 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10248 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10249 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10250 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10251 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10252 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10253 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10254 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
/* Actions that copy kernel log data into the guest buffer. */
10255 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10256 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10257 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10258 {
10259 if (len < 0) {
10260 return -TARGET_EINVAL;
10261 }
10262 if (len == 0) {
10263 return 0;
10264 }
10265 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10266 if (!p) {
10267 return -TARGET_EFAULT;
10268 }
10269 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10270 unlock_user(p, arg2, arg3);
10271 }
10272 return ret;
10273 default:
10274 return -TARGET_EINVAL;
10275 }
10276 }
10277 break;
10278 #endif
10279 case TARGET_NR_setitimer:
10280 {
10281 struct itimerval value, ovalue, *pvalue;
10282
10283 if (arg2) {
10284 pvalue = &value;
10285 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10286 || copy_from_user_timeval(&pvalue->it_value,
10287 arg2 + sizeof(struct target_timeval)))
10288 return -TARGET_EFAULT;
10289 } else {
10290 pvalue = NULL;
10291 }
10292 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10293 if (!is_error(ret) && arg3) {
10294 if (copy_to_user_timeval(arg3,
10295 &ovalue.it_interval)
10296 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10297 &ovalue.it_value))
10298 return -TARGET_EFAULT;
10299 }
10300 }
10301 return ret;
10302 case TARGET_NR_getitimer:
10303 {
10304 struct itimerval value;
10305
10306 ret = get_errno(getitimer(arg1, &value));
10307 if (!is_error(ret) && arg2) {
10308 if (copy_to_user_timeval(arg2,
10309 &value.it_interval)
10310 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10311 &value.it_value))
10312 return -TARGET_EFAULT;
10313 }
10314 }
10315 return ret;
10316 #ifdef TARGET_NR_stat
10317 case TARGET_NR_stat:
10318 if (!(p = lock_user_string(arg1))) {
10319 return -TARGET_EFAULT;
10320 }
10321 ret = get_errno(stat(path(p), &st));
10322 unlock_user(p, arg1, 0);
10323 goto do_stat;
10324 #endif
10325 #ifdef TARGET_NR_lstat
10326 case TARGET_NR_lstat:
10327 if (!(p = lock_user_string(arg1))) {
10328 return -TARGET_EFAULT;
10329 }
10330 ret = get_errno(lstat(path(p), &st));
10331 unlock_user(p, arg1, 0);
10332 goto do_stat;
10333 #endif
10334 #ifdef TARGET_NR_fstat
10335 case TARGET_NR_fstat:
10336 {
10337 ret = get_errno(fstat(arg1, &st));
10338 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10339 do_stat:
10340 #endif
10341 if (!is_error(ret)) {
10342 struct target_stat *target_st;
10343
10344 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10345 return -TARGET_EFAULT;
10346 memset(target_st, 0, sizeof(*target_st));
10347 __put_user(st.st_dev, &target_st->st_dev);
10348 __put_user(st.st_ino, &target_st->st_ino);
10349 __put_user(st.st_mode, &target_st->st_mode);
10350 __put_user(st.st_uid, &target_st->st_uid);
10351 __put_user(st.st_gid, &target_st->st_gid);
10352 __put_user(st.st_nlink, &target_st->st_nlink);
10353 __put_user(st.st_rdev, &target_st->st_rdev);
10354 __put_user(st.st_size, &target_st->st_size);
10355 __put_user(st.st_blksize, &target_st->st_blksize);
10356 __put_user(st.st_blocks, &target_st->st_blocks);
10357 __put_user(st.st_atime, &target_st->target_st_atime);
10358 __put_user(st.st_mtime, &target_st->target_st_mtime);
10359 __put_user(st.st_ctime, &target_st->target_st_ctime);
10360 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10361 __put_user(st.st_atim.tv_nsec,
10362 &target_st->target_st_atime_nsec);
10363 __put_user(st.st_mtim.tv_nsec,
10364 &target_st->target_st_mtime_nsec);
10365 __put_user(st.st_ctim.tv_nsec,
10366 &target_st->target_st_ctime_nsec);
10367 #endif
10368 unlock_user_struct(target_st, arg2, 1);
10369 }
10370 }
10371 return ret;
10372 #endif
10373 case TARGET_NR_vhangup:
10374 return get_errno(vhangup());
10375 #ifdef TARGET_NR_syscall
10376 case TARGET_NR_syscall:
10377 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10378 arg6, arg7, arg8, 0);
10379 #endif
10380 #if defined(TARGET_NR_wait4)
10381 case TARGET_NR_wait4:
10382 {
10383 int status;
10384 abi_long status_ptr = arg2;
10385 struct rusage rusage, *rusage_ptr;
10386 abi_ulong target_rusage = arg4;
10387 abi_long rusage_err;
10388 if (target_rusage)
10389 rusage_ptr = &rusage;
10390 else
10391 rusage_ptr = NULL;
10392 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10393 if (!is_error(ret)) {
10394 if (status_ptr && ret) {
10395 status = host_to_target_waitstatus(status);
10396 if (put_user_s32(status, status_ptr))
10397 return -TARGET_EFAULT;
10398 }
10399 if (target_rusage) {
10400 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10401 if (rusage_err) {
10402 ret = rusage_err;
10403 }
10404 }
10405 }
10406 }
10407 return ret;
10408 #endif
10409 #ifdef TARGET_NR_swapoff
10410 case TARGET_NR_swapoff:
10411 if (!(p = lock_user_string(arg1)))
10412 return -TARGET_EFAULT;
10413 ret = get_errno(swapoff(p));
10414 unlock_user(p, arg1, 0);
10415 return ret;
10416 #endif
10417 case TARGET_NR_sysinfo:
10418 {
10419 struct target_sysinfo *target_value;
10420 struct sysinfo value;
10421 ret = get_errno(sysinfo(&value));
10422 if (!is_error(ret) && arg1)
10423 {
10424 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10425 return -TARGET_EFAULT;
10426 __put_user(value.uptime, &target_value->uptime);
10427 __put_user(value.loads[0], &target_value->loads[0]);
10428 __put_user(value.loads[1], &target_value->loads[1]);
10429 __put_user(value.loads[2], &target_value->loads[2]);
10430 __put_user(value.totalram, &target_value->totalram);
10431 __put_user(value.freeram, &target_value->freeram);
10432 __put_user(value.sharedram, &target_value->sharedram);
10433 __put_user(value.bufferram, &target_value->bufferram);
10434 __put_user(value.totalswap, &target_value->totalswap);
10435 __put_user(value.freeswap, &target_value->freeswap);
10436 __put_user(value.procs, &target_value->procs);
10437 __put_user(value.totalhigh, &target_value->totalhigh);
10438 __put_user(value.freehigh, &target_value->freehigh);
10439 __put_user(value.mem_unit, &target_value->mem_unit);
10440 unlock_user_struct(target_value, arg1, 1);
10441 }
10442 }
10443 return ret;
10444 #ifdef TARGET_NR_ipc
10445 case TARGET_NR_ipc:
10446 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10447 #endif
10448 #ifdef TARGET_NR_semget
10449 case TARGET_NR_semget:
10450 return get_errno(semget(arg1, arg2, arg3));
10451 #endif
10452 #ifdef TARGET_NR_semop
10453 case TARGET_NR_semop:
10454 return do_semtimedop(arg1, arg2, arg3, 0, false);
10455 #endif
10456 #ifdef TARGET_NR_semtimedop
10457 case TARGET_NR_semtimedop:
10458 return do_semtimedop(arg1, arg2, arg3, arg4, false);
10459 #endif
10460 #ifdef TARGET_NR_semtimedop_time64
10461 case TARGET_NR_semtimedop_time64:
10462 return do_semtimedop(arg1, arg2, arg3, arg4, true);
10463 #endif
10464 #ifdef TARGET_NR_semctl
10465 case TARGET_NR_semctl:
10466 return do_semctl(arg1, arg2, arg3, arg4);
10467 #endif
10468 #ifdef TARGET_NR_msgctl
10469 case TARGET_NR_msgctl:
10470 return do_msgctl(arg1, arg2, arg3);
10471 #endif
10472 #ifdef TARGET_NR_msgget
10473 case TARGET_NR_msgget:
10474 return get_errno(msgget(arg1, arg2));
10475 #endif
10476 #ifdef TARGET_NR_msgrcv
10477 case TARGET_NR_msgrcv:
10478 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10479 #endif
10480 #ifdef TARGET_NR_msgsnd
10481 case TARGET_NR_msgsnd:
10482 return do_msgsnd(arg1, arg2, arg3, arg4);
10483 #endif
10484 #ifdef TARGET_NR_shmget
10485 case TARGET_NR_shmget:
10486 return get_errno(shmget(arg1, arg2, arg3));
10487 #endif
10488 #ifdef TARGET_NR_shmctl
10489 case TARGET_NR_shmctl:
10490 return do_shmctl(arg1, arg2, arg3);
10491 #endif
10492 #ifdef TARGET_NR_shmat
10493 case TARGET_NR_shmat:
10494 return do_shmat(cpu_env, arg1, arg2, arg3);
10495 #endif
10496 #ifdef TARGET_NR_shmdt
10497 case TARGET_NR_shmdt:
10498 return do_shmdt(arg1);
10499 #endif
10500 case TARGET_NR_fsync:
10501 return get_errno(fsync(arg1));
10502 case TARGET_NR_clone:
10503 /* Linux manages to have three different orderings for its
10504 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10505 * match the kernel's CONFIG_CLONE_* settings.
10506 * Microblaze is further special in that it uses a sixth
10507 * implicit argument to clone for the TLS pointer.
10508 */
10509 #if defined(TARGET_MICROBLAZE)
10510 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10511 #elif defined(TARGET_CLONE_BACKWARDS)
10512 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10513 #elif defined(TARGET_CLONE_BACKWARDS2)
10514 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10515 #else
10516 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10517 #endif
10518 return ret;
10519 #ifdef __NR_exit_group
10520 /* new thread calls */
10521 case TARGET_NR_exit_group:
10522 preexit_cleanup(cpu_env, arg1);
10523 return get_errno(exit_group(arg1));
10524 #endif
10525 case TARGET_NR_setdomainname:
10526 if (!(p = lock_user_string(arg1)))
10527 return -TARGET_EFAULT;
10528 ret = get_errno(setdomainname(p, arg2));
10529 unlock_user(p, arg1, 0);
10530 return ret;
10531 case TARGET_NR_uname:
10532 /* no need to transcode because we use the linux syscall */
10533 {
10534 struct new_utsname * buf;
10535
10536 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10537 return -TARGET_EFAULT;
10538 ret = get_errno(sys_uname(buf));
10539 if (!is_error(ret)) {
10540 /* Overwrite the native machine name with whatever is being
10541 emulated. */
10542 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10543 sizeof(buf->machine));
10544 /* Allow the user to override the reported release. */
10545 if (qemu_uname_release && *qemu_uname_release) {
10546 g_strlcpy(buf->release, qemu_uname_release,
10547 sizeof(buf->release));
10548 }
10549 }
10550 unlock_user_struct(buf, arg1, 1);
10551 }
10552 return ret;
10553 #ifdef TARGET_I386
10554 case TARGET_NR_modify_ldt:
10555 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10556 #if !defined(TARGET_X86_64)
10557 case TARGET_NR_vm86:
10558 return do_vm86(cpu_env, arg1, arg2);
10559 #endif
10560 #endif
10561 #if defined(TARGET_NR_adjtimex)
10562 case TARGET_NR_adjtimex:
10563 {
10564 struct timex host_buf;
10565
10566 if (target_to_host_timex(&host_buf, arg1) != 0) {
10567 return -TARGET_EFAULT;
10568 }
10569 ret = get_errno(adjtimex(&host_buf));
10570 if (!is_error(ret)) {
10571 if (host_to_target_timex(arg1, &host_buf) != 0) {
10572 return -TARGET_EFAULT;
10573 }
10574 }
10575 }
10576 return ret;
10577 #endif
10578 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10579 case TARGET_NR_clock_adjtime:
10580 {
10581 struct timex htx, *phtx = &htx;
10582
10583 if (target_to_host_timex(phtx, arg2) != 0) {
10584 return -TARGET_EFAULT;
10585 }
10586 ret = get_errno(clock_adjtime(arg1, phtx));
10587 if (!is_error(ret) && phtx) {
10588 if (host_to_target_timex(arg2, phtx) != 0) {
10589 return -TARGET_EFAULT;
10590 }
10591 }
10592 }
10593 return ret;
10594 #endif
10595 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10596 case TARGET_NR_clock_adjtime64:
10597 {
10598 struct timex htx;
10599
10600 if (target_to_host_timex64(&htx, arg2) != 0) {
10601 return -TARGET_EFAULT;
10602 }
10603 ret = get_errno(clock_adjtime(arg1, &htx));
10604 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10605 return -TARGET_EFAULT;
10606 }
10607 }
10608 return ret;
10609 #endif
10610 case TARGET_NR_getpgid:
10611 return get_errno(getpgid(arg1));
10612 case TARGET_NR_fchdir:
10613 return get_errno(fchdir(arg1));
10614 case TARGET_NR_personality:
10615 return get_errno(personality(arg1));
10616 #ifdef TARGET_NR__llseek /* Not on alpha */
10617 case TARGET_NR__llseek:
10618 {
10619 int64_t res;
10620 #if !defined(__NR_llseek)
10621 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10622 if (res == -1) {
10623 ret = get_errno(res);
10624 } else {
10625 ret = 0;
10626 }
10627 #else
10628 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10629 #endif
10630 if ((ret == 0) && put_user_s64(res, arg4)) {
10631 return -TARGET_EFAULT;
10632 }
10633 }
10634 return ret;
10635 #endif
10636 #ifdef TARGET_NR_getdents
10637 case TARGET_NR_getdents:
10638 return do_getdents(arg1, arg2, arg3);
10639 #endif /* TARGET_NR_getdents */
10640 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10641 case TARGET_NR_getdents64:
10642 return do_getdents64(arg1, arg2, arg3);
10643 #endif /* TARGET_NR_getdents64 */
10644 #if defined(TARGET_NR__newselect)
10645 case TARGET_NR__newselect:
10646 return do_select(arg1, arg2, arg3, arg4, arg5);
10647 #endif
10648 #ifdef TARGET_NR_poll
10649 case TARGET_NR_poll:
10650 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10651 #endif
10652 #ifdef TARGET_NR_ppoll
10653 case TARGET_NR_ppoll:
10654 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10655 #endif
10656 #ifdef TARGET_NR_ppoll_time64
10657 case TARGET_NR_ppoll_time64:
10658 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10659 #endif
10660 case TARGET_NR_flock:
10661 /* NOTE: the flock constant seems to be the same for every
10662 Linux platform */
10663 return get_errno(safe_flock(arg1, arg2));
10664 case TARGET_NR_readv:
10665 {
10666 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10667 if (vec != NULL) {
10668 ret = get_errno(safe_readv(arg1, vec, arg3));
10669 unlock_iovec(vec, arg2, arg3, 1);
10670 } else {
10671 ret = -host_to_target_errno(errno);
10672 }
10673 }
10674 return ret;
10675 case TARGET_NR_writev:
10676 {
10677 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10678 if (vec != NULL) {
10679 ret = get_errno(safe_writev(arg1, vec, arg3));
10680 unlock_iovec(vec, arg2, arg3, 0);
10681 } else {
10682 ret = -host_to_target_errno(errno);
10683 }
10684 }
10685 return ret;
10686 #if defined(TARGET_NR_preadv)
10687 case TARGET_NR_preadv:
10688 {
10689 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10690 if (vec != NULL) {
10691 unsigned long low, high;
10692
10693 target_to_host_low_high(arg4, arg5, &low, &high);
10694 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10695 unlock_iovec(vec, arg2, arg3, 1);
10696 } else {
10697 ret = -host_to_target_errno(errno);
10698 }
10699 }
10700 return ret;
10701 #endif
10702 #if defined(TARGET_NR_pwritev)
10703 case TARGET_NR_pwritev:
10704 {
10705 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10706 if (vec != NULL) {
10707 unsigned long low, high;
10708
10709 target_to_host_low_high(arg4, arg5, &low, &high);
10710 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10711 unlock_iovec(vec, arg2, arg3, 0);
10712 } else {
10713 ret = -host_to_target_errno(errno);
10714 }
10715 }
10716 return ret;
10717 #endif
10718 case TARGET_NR_getsid:
10719 return get_errno(getsid(arg1));
10720 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10721 case TARGET_NR_fdatasync:
10722 return get_errno(fdatasync(arg1));
10723 #endif
10724 case TARGET_NR_sched_getaffinity:
10725 {
10726 unsigned int mask_size;
10727 unsigned long *mask;
10728
10729 /*
10730 * sched_getaffinity needs multiples of ulong, so need to take
10731 * care of mismatches between target ulong and host ulong sizes.
10732 */
10733 if (arg2 & (sizeof(abi_ulong) - 1)) {
10734 return -TARGET_EINVAL;
10735 }
10736 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10737
10738 mask = alloca(mask_size);
10739 memset(mask, 0, mask_size);
10740 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10741
10742 if (!is_error(ret)) {
10743 if (ret > arg2) {
10744 /* More data returned than the caller's buffer will fit.
10745 * This only happens if sizeof(abi_long) < sizeof(long)
10746 * and the caller passed us a buffer holding an odd number
10747 * of abi_longs. If the host kernel is actually using the
10748 * extra 4 bytes then fail EINVAL; otherwise we can just
10749 * ignore them and only copy the interesting part.
10750 */
10751 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10752 if (numcpus > arg2 * 8) {
10753 return -TARGET_EINVAL;
10754 }
10755 ret = arg2;
10756 }
10757
10758 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10759 return -TARGET_EFAULT;
10760 }
10761 }
10762 }
10763 return ret;
10764 case TARGET_NR_sched_setaffinity:
10765 {
10766 unsigned int mask_size;
10767 unsigned long *mask;
10768
10769 /*
10770 * sched_setaffinity needs multiples of ulong, so need to take
10771 * care of mismatches between target ulong and host ulong sizes.
10772 */
10773 if (arg2 & (sizeof(abi_ulong) - 1)) {
10774 return -TARGET_EINVAL;
10775 }
10776 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10777 mask = alloca(mask_size);
10778
10779 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10780 if (ret) {
10781 return ret;
10782 }
10783
10784 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10785 }
10786 case TARGET_NR_getcpu:
10787 {
10788 unsigned cpu, node;
10789 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10790 arg2 ? &node : NULL,
10791 NULL));
10792 if (is_error(ret)) {
10793 return ret;
10794 }
10795 if (arg1 && put_user_u32(cpu, arg1)) {
10796 return -TARGET_EFAULT;
10797 }
10798 if (arg2 && put_user_u32(node, arg2)) {
10799 return -TARGET_EFAULT;
10800 }
10801 }
10802 return ret;
10803 case TARGET_NR_sched_setparam:
10804 {
10805 struct target_sched_param *target_schp;
10806 struct sched_param schp;
10807
10808 if (arg2 == 0) {
10809 return -TARGET_EINVAL;
10810 }
10811 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10812 return -TARGET_EFAULT;
10813 }
10814 schp.sched_priority = tswap32(target_schp->sched_priority);
10815 unlock_user_struct(target_schp, arg2, 0);
10816 return get_errno(sys_sched_setparam(arg1, &schp));
10817 }
10818 case TARGET_NR_sched_getparam:
10819 {
10820 struct target_sched_param *target_schp;
10821 struct sched_param schp;
10822
10823 if (arg2 == 0) {
10824 return -TARGET_EINVAL;
10825 }
10826 ret = get_errno(sys_sched_getparam(arg1, &schp));
10827 if (!is_error(ret)) {
10828 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10829 return -TARGET_EFAULT;
10830 }
10831 target_schp->sched_priority = tswap32(schp.sched_priority);
10832 unlock_user_struct(target_schp, arg2, 1);
10833 }
10834 }
10835 return ret;
10836 case TARGET_NR_sched_setscheduler:
10837 {
10838 struct target_sched_param *target_schp;
10839 struct sched_param schp;
10840 if (arg3 == 0) {
10841 return -TARGET_EINVAL;
10842 }
10843 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10844 return -TARGET_EFAULT;
10845 }
10846 schp.sched_priority = tswap32(target_schp->sched_priority);
10847 unlock_user_struct(target_schp, arg3, 0);
10848 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10849 }
10850 case TARGET_NR_sched_getscheduler:
10851 return get_errno(sys_sched_getscheduler(arg1));
10852 case TARGET_NR_sched_getattr:
10853 {
10854 struct target_sched_attr *target_scha;
10855 struct sched_attr scha;
10856 if (arg2 == 0) {
10857 return -TARGET_EINVAL;
10858 }
10859 if (arg3 > sizeof(scha)) {
10860 arg3 = sizeof(scha);
10861 }
10862 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10863 if (!is_error(ret)) {
10864 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10865 if (!target_scha) {
10866 return -TARGET_EFAULT;
10867 }
10868 target_scha->size = tswap32(scha.size);
10869 target_scha->sched_policy = tswap32(scha.sched_policy);
10870 target_scha->sched_flags = tswap64(scha.sched_flags);
10871 target_scha->sched_nice = tswap32(scha.sched_nice);
10872 target_scha->sched_priority = tswap32(scha.sched_priority);
10873 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10874 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10875 target_scha->sched_period = tswap64(scha.sched_period);
10876 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10877 target_scha->sched_util_min = tswap32(scha.sched_util_min);
10878 target_scha->sched_util_max = tswap32(scha.sched_util_max);
10879 }
10880 unlock_user(target_scha, arg2, arg3);
10881 }
10882 return ret;
10883 }
10884 case TARGET_NR_sched_setattr:
10885 {
10886 struct target_sched_attr *target_scha;
10887 struct sched_attr scha;
10888 uint32_t size;
10889 int zeroed;
10890 if (arg2 == 0) {
10891 return -TARGET_EINVAL;
10892 }
10893 if (get_user_u32(size, arg2)) {
10894 return -TARGET_EFAULT;
10895 }
10896 if (!size) {
10897 size = offsetof(struct target_sched_attr, sched_util_min);
10898 }
10899 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10900 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10901 return -TARGET_EFAULT;
10902 }
10903 return -TARGET_E2BIG;
10904 }
10905
10906 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10907 if (zeroed < 0) {
10908 return zeroed;
10909 } else if (zeroed == 0) {
10910 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10911 return -TARGET_EFAULT;
10912 }
10913 return -TARGET_E2BIG;
10914 }
10915 if (size > sizeof(struct target_sched_attr)) {
10916 size = sizeof(struct target_sched_attr);
10917 }
10918
10919 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10920 if (!target_scha) {
10921 return -TARGET_EFAULT;
10922 }
10923 scha.size = size;
10924 scha.sched_policy = tswap32(target_scha->sched_policy);
10925 scha.sched_flags = tswap64(target_scha->sched_flags);
10926 scha.sched_nice = tswap32(target_scha->sched_nice);
10927 scha.sched_priority = tswap32(target_scha->sched_priority);
10928 scha.sched_runtime = tswap64(target_scha->sched_runtime);
10929 scha.sched_deadline = tswap64(target_scha->sched_deadline);
10930 scha.sched_period = tswap64(target_scha->sched_period);
10931 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10932 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10933 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10934 }
10935 unlock_user(target_scha, arg2, 0);
10936 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10937 }
10938 case TARGET_NR_sched_yield:
10939 return get_errno(sched_yield());
10940 case TARGET_NR_sched_get_priority_max:
10941 return get_errno(sched_get_priority_max(arg1));
10942 case TARGET_NR_sched_get_priority_min:
10943 return get_errno(sched_get_priority_min(arg1));
10944 #ifdef TARGET_NR_sched_rr_get_interval
10945 case TARGET_NR_sched_rr_get_interval:
10946 {
10947 struct timespec ts;
10948 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10949 if (!is_error(ret)) {
10950 ret = host_to_target_timespec(arg2, &ts);
10951 }
10952 }
10953 return ret;
10954 #endif
10955 #ifdef TARGET_NR_sched_rr_get_interval_time64
10956 case TARGET_NR_sched_rr_get_interval_time64:
10957 {
10958 struct timespec ts;
10959 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10960 if (!is_error(ret)) {
10961 ret = host_to_target_timespec64(arg2, &ts);
10962 }
10963 }
10964 return ret;
10965 #endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /*
             * Fail with EFAULT if the guest request pointer cannot be
             * read, instead of sleeping on an uninitialized timespec.
             */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            /*
             * If interrupted, report the remaining time back to the
             * guest; that copy-out can itself fault on a bad arg2.
             */
            if (is_error(ret) && arg2 &&
                host_to_target_timespec(arg2, &rem)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
10978 case TARGET_NR_prctl:
10979 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10980 break;
10981 #ifdef TARGET_NR_arch_prctl
10982 case TARGET_NR_arch_prctl:
10983 return do_arch_prctl(cpu_env, arg1, arg2);
10984 #endif
10985 #ifdef TARGET_NR_pread64
10986 case TARGET_NR_pread64:
10987 if (regpairs_aligned(cpu_env, num)) {
10988 arg4 = arg5;
10989 arg5 = arg6;
10990 }
10991 if (arg2 == 0 && arg3 == 0) {
10992 /* Special-case NULL buffer and zero length, which should succeed */
10993 p = 0;
10994 } else {
10995 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10996 if (!p) {
10997 return -TARGET_EFAULT;
10998 }
10999 }
11000 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11001 unlock_user(p, arg2, ret);
11002 return ret;
11003 case TARGET_NR_pwrite64:
11004 if (regpairs_aligned(cpu_env, num)) {
11005 arg4 = arg5;
11006 arg5 = arg6;
11007 }
11008 if (arg2 == 0 && arg3 == 0) {
11009 /* Special-case NULL buffer and zero length, which should succeed */
11010 p = 0;
11011 } else {
11012 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11013 if (!p) {
11014 return -TARGET_EFAULT;
11015 }
11016 }
11017 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11018 unlock_user(p, arg2, 0);
11019 return ret;
11020 #endif
11021 case TARGET_NR_getcwd:
11022 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11023 return -TARGET_EFAULT;
11024 ret = get_errno(sys_getcwd1(p, arg2));
11025 unlock_user(p, arg1, ret);
11026 return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        /*
         * Shared implementation for capget and capset: both take a
         * capability header (arg1) and an optional data pointer (arg2),
         * and both need the header byte-swapped in each direction.
         */
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        /* VERIFY_WRITE: the kernel updates header->version on return. */
        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            /* capget writes the data block; capset only reads it. */
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                /* Drop the header lock without copying anything back. */
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                /* Swap the guest-supplied capability sets for the host. */
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* Copy the retrieved capability sets back to the guest. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
11097 case TARGET_NR_sigaltstack:
11098 return do_sigaltstack(arg1, arg2, cpu_env);
11099
11100 #ifdef CONFIG_SENDFILE
11101 #ifdef TARGET_NR_sendfile
11102 case TARGET_NR_sendfile:
11103 {
11104 off_t *offp = NULL;
11105 off_t off;
11106 if (arg3) {
11107 ret = get_user_sal(off, arg3);
11108 if (is_error(ret)) {
11109 return ret;
11110 }
11111 offp = &off;
11112 }
11113 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11114 if (!is_error(ret) && arg3) {
11115 abi_long ret2 = put_user_sal(off, arg3);
11116 if (is_error(ret2)) {
11117 ret = ret2;
11118 }
11119 }
11120 return ret;
11121 }
11122 #endif
11123 #ifdef TARGET_NR_sendfile64
11124 case TARGET_NR_sendfile64:
11125 {
11126 off_t *offp = NULL;
11127 off_t off;
11128 if (arg3) {
11129 ret = get_user_s64(off, arg3);
11130 if (is_error(ret)) {
11131 return ret;
11132 }
11133 offp = &off;
11134 }
11135 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11136 if (!is_error(ret) && arg3) {
11137 abi_long ret2 = put_user_s64(off, arg3);
11138 if (is_error(ret2)) {
11139 ret = ret2;
11140 }
11141 }
11142 return ret;
11143 }
11144 #endif
11145 #endif
11146 #ifdef TARGET_NR_vfork
11147 case TARGET_NR_vfork:
11148 return get_errno(do_fork(cpu_env,
11149 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11150 0, 0, 0, 0));
11151 #endif
11152 #ifdef TARGET_NR_ugetrlimit
11153 case TARGET_NR_ugetrlimit:
11154 {
11155 struct rlimit rlim;
11156 int resource = target_to_host_resource(arg1);
11157 ret = get_errno(getrlimit(resource, &rlim));
11158 if (!is_error(ret)) {
11159 struct target_rlimit *target_rlim;
11160 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11161 return -TARGET_EFAULT;
11162 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11163 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11164 unlock_user_struct(target_rlim, arg2, 1);
11165 }
11166 return ret;
11167 }
11168 #endif
11169 #ifdef TARGET_NR_truncate64
11170 case TARGET_NR_truncate64:
11171 if (!(p = lock_user_string(arg1)))
11172 return -TARGET_EFAULT;
11173 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11174 unlock_user(p, arg1, 0);
11175 return ret;
11176 #endif
11177 #ifdef TARGET_NR_ftruncate64
11178 case TARGET_NR_ftruncate64:
11179 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11180 #endif
11181 #ifdef TARGET_NR_stat64
11182 case TARGET_NR_stat64:
11183 if (!(p = lock_user_string(arg1))) {
11184 return -TARGET_EFAULT;
11185 }
11186 ret = get_errno(stat(path(p), &st));
11187 unlock_user(p, arg1, 0);
11188 if (!is_error(ret))
11189 ret = host_to_target_stat64(cpu_env, arg2, &st);
11190 return ret;
11191 #endif
11192 #ifdef TARGET_NR_lstat64
11193 case TARGET_NR_lstat64:
11194 if (!(p = lock_user_string(arg1))) {
11195 return -TARGET_EFAULT;
11196 }
11197 ret = get_errno(lstat(path(p), &st));
11198 unlock_user(p, arg1, 0);
11199 if (!is_error(ret))
11200 ret = host_to_target_stat64(cpu_env, arg2, &st);
11201 return ret;
11202 #endif
11203 #ifdef TARGET_NR_fstat64
11204 case TARGET_NR_fstat64:
11205 ret = get_errno(fstat(arg1, &st));
11206 if (!is_error(ret))
11207 ret = host_to_target_stat64(cpu_env, arg2, &st);
11208 return ret;
11209 #endif
11210 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11211 #ifdef TARGET_NR_fstatat64
11212 case TARGET_NR_fstatat64:
11213 #endif
11214 #ifdef TARGET_NR_newfstatat
11215 case TARGET_NR_newfstatat:
11216 #endif
11217 if (!(p = lock_user_string(arg2))) {
11218 return -TARGET_EFAULT;
11219 }
11220 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11221 unlock_user(p, arg2, 0);
11222 if (!is_error(ret))
11223 ret = host_to_target_stat64(cpu_env, arg3, &st);
11224 return ret;
11225 #endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                /* Preferred path: use the host statx syscall directly. */
                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /*
                 * Only ENOSYS (host kernel lacks statx) falls through to
                 * the fstatat emulation below; all other results are
                 * final.
                 */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: synthesize a statx result from fstatat(). */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Zero first: fields stat can't provide stay cleared. */
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                /* Only whole-second timestamps; tv_nsec remains zero. */
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
11287 #ifdef TARGET_NR_lchown
11288 case TARGET_NR_lchown:
11289 if (!(p = lock_user_string(arg1)))
11290 return -TARGET_EFAULT;
11291 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11292 unlock_user(p, arg1, 0);
11293 return ret;
11294 #endif
11295 #ifdef TARGET_NR_getuid
11296 case TARGET_NR_getuid:
11297 return get_errno(high2lowuid(getuid()));
11298 #endif
11299 #ifdef TARGET_NR_getgid
11300 case TARGET_NR_getgid:
11301 return get_errno(high2lowgid(getgid()));
11302 #endif
11303 #ifdef TARGET_NR_geteuid
11304 case TARGET_NR_geteuid:
11305 return get_errno(high2lowuid(geteuid()));
11306 #endif
11307 #ifdef TARGET_NR_getegid
11308 case TARGET_NR_getegid:
11309 return get_errno(high2lowgid(getegid()));
11310 #endif
11311 case TARGET_NR_setreuid:
11312 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11313 case TARGET_NR_setregid:
11314 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11315 case TARGET_NR_getgroups:
11316 {
11317 int gidsetsize = arg1;
11318 target_id *target_grouplist;
11319 gid_t *grouplist;
11320 int i;
11321
11322 grouplist = alloca(gidsetsize * sizeof(gid_t));
11323 ret = get_errno(getgroups(gidsetsize, grouplist));
11324 if (gidsetsize == 0)
11325 return ret;
11326 if (!is_error(ret)) {
11327 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11328 if (!target_grouplist)
11329 return -TARGET_EFAULT;
11330 for(i = 0;i < ret; i++)
11331 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11332 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11333 }
11334 }
11335 return ret;
11336 case TARGET_NR_setgroups:
11337 {
11338 int gidsetsize = arg1;
11339 target_id *target_grouplist;
11340 gid_t *grouplist = NULL;
11341 int i;
11342 if (gidsetsize) {
11343 grouplist = alloca(gidsetsize * sizeof(gid_t));
11344 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11345 if (!target_grouplist) {
11346 return -TARGET_EFAULT;
11347 }
11348 for (i = 0; i < gidsetsize; i++) {
11349 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11350 }
11351 unlock_user(target_grouplist, arg2, 0);
11352 }
11353 return get_errno(setgroups(gidsetsize, grouplist));
11354 }
11355 case TARGET_NR_fchown:
11356 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11357 #if defined(TARGET_NR_fchownat)
11358 case TARGET_NR_fchownat:
11359 if (!(p = lock_user_string(arg2)))
11360 return -TARGET_EFAULT;
11361 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11362 low2highgid(arg4), arg5));
11363 unlock_user(p, arg2, 0);
11364 return ret;
11365 #endif
11366 #ifdef TARGET_NR_setresuid
11367 case TARGET_NR_setresuid:
11368 return get_errno(sys_setresuid(low2highuid(arg1),
11369 low2highuid(arg2),
11370 low2highuid(arg3)));
11371 #endif
11372 #ifdef TARGET_NR_getresuid
11373 case TARGET_NR_getresuid:
11374 {
11375 uid_t ruid, euid, suid;
11376 ret = get_errno(getresuid(&ruid, &euid, &suid));
11377 if (!is_error(ret)) {
11378 if (put_user_id(high2lowuid(ruid), arg1)
11379 || put_user_id(high2lowuid(euid), arg2)
11380 || put_user_id(high2lowuid(suid), arg3))
11381 return -TARGET_EFAULT;
11382 }
11383 }
11384 return ret;
11385 #endif
11386 #ifdef TARGET_NR_getresgid
11387 case TARGET_NR_setresgid:
11388 return get_errno(sys_setresgid(low2highgid(arg1),
11389 low2highgid(arg2),
11390 low2highgid(arg3)));
11391 #endif
11392 #ifdef TARGET_NR_getresgid
11393 case TARGET_NR_getresgid:
11394 {
11395 gid_t rgid, egid, sgid;
11396 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11397 if (!is_error(ret)) {
11398 if (put_user_id(high2lowgid(rgid), arg1)
11399 || put_user_id(high2lowgid(egid), arg2)
11400 || put_user_id(high2lowgid(sgid), arg3))
11401 return -TARGET_EFAULT;
11402 }
11403 }
11404 return ret;
11405 #endif
11406 #ifdef TARGET_NR_chown
11407 case TARGET_NR_chown:
11408 if (!(p = lock_user_string(arg1)))
11409 return -TARGET_EFAULT;
11410 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11411 unlock_user(p, arg1, 0);
11412 return ret;
11413 #endif
11414 case TARGET_NR_setuid:
11415 return get_errno(sys_setuid(low2highuid(arg1)));
11416 case TARGET_NR_setgid:
11417 return get_errno(sys_setgid(low2highgid(arg1)));
11418 case TARGET_NR_setfsuid:
11419 return get_errno(setfsuid(arg1));
11420 case TARGET_NR_setfsgid:
11421 return get_errno(setfsgid(arg1));
11422
11423 #ifdef TARGET_NR_lchown32
11424 case TARGET_NR_lchown32:
11425 if (!(p = lock_user_string(arg1)))
11426 return -TARGET_EFAULT;
11427 ret = get_errno(lchown(p, arg2, arg3));
11428 unlock_user(p, arg1, 0);
11429 return ret;
11430 #endif
11431 #ifdef TARGET_NR_getuid32
11432 case TARGET_NR_getuid32:
11433 return get_errno(getuid());
11434 #endif
11435
11436 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11437 /* Alpha specific */
11438 case TARGET_NR_getxuid:
11439 {
11440 uid_t euid;
11441 euid=geteuid();
11442 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11443 }
11444 return get_errno(getuid());
11445 #endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            /*
             * Alpha's getxgid hands back two values: the real gid as
             * the normal syscall result and the effective gid in the
             * a4 register.  Use gid_t (not uid_t) for a group ID.
             */
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
11456 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11457 /* Alpha specific */
11458 case TARGET_NR_osf_getsysinfo:
11459 ret = -TARGET_EOPNOTSUPP;
11460 switch (arg1) {
11461 case TARGET_GSI_IEEE_FP_CONTROL:
11462 {
11463 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11464 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11465
11466 swcr &= ~SWCR_STATUS_MASK;
11467 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11468
11469 if (put_user_u64 (swcr, arg2))
11470 return -TARGET_EFAULT;
11471 ret = 0;
11472 }
11473 break;
11474
11475 /* case GSI_IEEE_STATE_AT_SIGNAL:
11476 -- Not implemented in linux kernel.
11477 case GSI_UACPROC:
11478 -- Retrieves current unaligned access state; not much used.
11479 case GSI_PROC_TYPE:
11480 -- Retrieves implver information; surely not used.
11481 case GSI_GET_HWRPB:
11482 -- Grabs a copy of the HWRPB; surely not used.
11483 */
11484 }
11485 return ret;
11486 #endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        /* Alpha's setsysinfo: configure per-process IEEE FP state.
           Unhandled sub-operations fall through to EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                /* Set the software completion control word (SWCR) from
                   the 64-bit value the guest passes in *arg2. */
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Preserve only the rounding-mode field of the hardware
                   FPCR; everything else is derived from the new SWCR. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                /* Software-raise IEEE exceptions given in *arg2, and
                   deliver SIGFPE if any newly-raised exception has its
                   trap enabled in the SWCR. */
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Later checks override earlier ones, so the checks
                       are ordered from lowest to highest priority. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    /* Fault address is the current guest PC. */
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
           -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            /* OSF/1-style sigprocmask: the mask is passed by value in
               arg2 and the previous mask is returned as the syscall's
               result rather than through a pointer. */
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* On success, return the old mask in the guest's
                   old-style sigset encoding. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
11612
#ifdef TARGET_NR_getgid32
    /* 32-bit UID/GID syscall variants: the IDs fit in 32 bits, so the
       host calls can be used directly with no translation. */
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* 32-bit getgroups(): report up to arg1 supplementary group
               IDs into the guest array at arg2, 32 bits per entry. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            /* Validate the count before allocating: the guest controls
               gidsetsize, and the old alloca() here could overflow the
               stack for huge or negative values.  The kernel rejects
               out-of-range sizes with EINVAL. */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                /* Size query: just report the number of groups. */
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* 32-bit setgroups(): install arg1 supplementary group IDs
               read from the guest array at arg2. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            /* Bound the guest-supplied count before allocating; the old
               unchecked alloca() could overflow the stack.  The kernel
               returns EINVAL for sizes above NGROUPS_MAX or negative. */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            /* Report real/effective/saved UIDs through three guest
               pointers; any unwritable pointer yields EFAULT. */
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            /* Same as getresuid32, but for group IDs. */
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        /* Path-based chown; the guest path string must be readable. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* mincore(addr, length, vec): query page residency for the
               guest range [arg1, arg1+arg2); one status byte per page
               is written to the guest buffer at arg3. */
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /* NOTE(review): arg3 is an output vector, yet it is locked
               with lock_user_string() (read-oriented, sized by string
               length) and unlocked with length 'ret' (0 on success), so
               the result bytes may not be copied back when a bounce
               buffer is in use -- verify against current upstream. */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise() returns the error directly, not via errno. */
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the advice argument from position 2 to position 6 so
           the common call below can be shared.  'ret' is used only as
           a scratch variable here. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants from the generic ABI;
           remap them to the host's POSIX_FADV_* values. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
11833
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /* fcntl with 64-bit file locks: the F_*LK64 commands need
           struct flock64 marshalled between guest and host; all other
           commands are delegated to the common do_fcntl() helper. */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM lays out struct flock64 differently. */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* GETLK writes the conflicting lock back to the guest. */
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the guest's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* The 64-bit offset arrives as a pair of 32-bit registers; some
           ABIs require the pair to start on an even register. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        /* List extended-attribute names of a path into the guest buffer
           at arg2 (size arg3); a zero arg2 queries the required size.
           llistxattr does not follow a trailing symlink. */
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        /* As listxattr, but on an open file descriptor. */
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            /* Set attribute arg2 on path arg1 to the arg4-byte value at
               arg3; arg5 carries the XATTR_CREATE/REPLACE flags. */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            /* As setxattr, but on an open file descriptor. */
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            /* Read attribute arg2 of path arg1 into the arg4-byte guest
               buffer at arg3; a zero arg3 queries the value's size. */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            /* As getxattr, but on an open file descriptor. */
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            /* Remove attribute arg2 from path arg1; lremovexattr does
               not follow a trailing symlink. */
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            /* As removexattr, but on an open file descriptor. */
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
        /* Install the thread-local-storage pointer; the mechanism is
           entirely architecture specific. */
#if defined(TARGET_MIPS)
        /* MIPS keeps the TLS pointer in the UserLocal cop0 register. */
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        /* CRIS requires the low 8 bits of the TLS value to be clear. */
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            /* m68k stores the TLS value in the per-thread TaskState. */
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
12118
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
        {
            /* Set clock arg1 from the guest timespec at arg2. */
            struct timespec ts;

            ret = target_to_host_timespec(&ts, arg2);
            if (!is_error(ret)) {
                ret = get_errno(clock_settime(arg1, &ts));
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
        {
            /* As clock_settime, but the guest passes a 64-bit-time
               (__kernel_timespec) structure. */
            struct timespec ts;

            ret = target_to_host_timespec64(&ts, arg2);
            if (!is_error(ret)) {
                ret = get_errno(clock_settime(arg1, &ts));
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
        {
            /* Read clock arg1 into the guest timespec at arg2. */
            struct timespec ts;
            ret = get_errno(clock_gettime(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
        {
            /* As clock_gettime, with a 64-bit-time guest structure. */
            struct timespec ts;
            ret = get_errno(clock_gettime(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
        {
            /* Report the resolution of clock arg1 into the guest
               timespec at arg2.  The kernel permits a NULL result
               pointer, in which case only the return status matters. */
            struct timespec ts;
            ret = get_errno(clock_getres(arg1, &ts));
            if (!is_error(ret) && arg2) {
                /* Previously a copy-out failure was silently ignored;
                   report it as EFAULT like the kernel does. */
                if (host_to_target_timespec(arg2, &ts)) {
                    return -TARGET_EFAULT;
                }
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
        {
            /* As clock_getres, but the guest result structure uses
               64-bit time (__kernel_timespec). */
            struct timespec ts;
            ret = get_errno(clock_getres(arg1, &ts));
            if (!is_error(ret) && arg2) {
                /* Propagate copy-out failures instead of dropping them. */
                if (host_to_target_timespec64(arg2, &ts)) {
                    return -TARGET_EFAULT;
                }
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
        {
            /* Sleep on clock arg1 with flags arg2 until/for the time at
               arg3; arg4 optionally receives the remaining time. */
            struct timespec ts;
            if (target_to_host_timespec(&ts, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                                 &ts, arg4 ? &ts : NULL));
            /*
             * if the call is interrupted by a signal handler, it fails
             * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
             * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
             */
            if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
                host_to_target_timespec(arg4, &ts)) {
                return -TARGET_EFAULT;
            }

            return ret;
        }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
        {
            /* As clock_nanosleep, with 64-bit-time guest structures. */
            struct timespec ts;

            if (target_to_host_timespec64(&ts, arg3)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                                 &ts, arg4 ? &ts : NULL));

            /* On EINTR with a relative sleep, report the unslept time. */
            if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
                host_to_target_timespec64(arg4, &ts)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#endif
12228
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* The kernel stores this pointer and clears/wakes it on thread
           exit; pass the host view of the guest address through. */
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12258
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* Set access/modification timestamps of a path relative to
               dirfd arg1.  A NULL times pointer (arg3 == 0) means "set
               both to now"; a NULL path applies to the fd itself. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* As utimensat, but the guest passes 64-bit-time
               (__kernel_timespec) structures. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                              sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* Full futex emulation lives in do_futex(). */
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        /* As futex, with a 64-bit-time timeout structure. */
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        /* Register the new fd so inotify events read from it get their
           layout translated to the guest's expectations. */
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        /* As inotify_init, with O_NONBLOCK/O_CLOEXEC-style flags that
           must be converted to host bit values. */
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Watch the path named by the guest string at arg2 with event
           mask arg3 on the inotify fd arg1. */
        p = lock_user_string(arg2);
        if (!p) {
            /* Previously an unreadable path pointer fed NULL into
               path(); fail with EFAULT like the kernel instead. */
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Watch descriptors are plain ints; pass straight through. */
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
12354
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            /* Open/create a POSIX message queue.  arg4, if non-zero,
               points to a guest mq_attr with the queue limits. */
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): 'arg1 - 1' presumably recovers the leading
               '/' that the guest libc strips before invoking the kernel
               syscall (the host mq_open() wants the full "/name") --
               this relies on the guest buffer layout; confirm against
               upstream before changing. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* Remove a POSIX message queue by name; see the 'arg1 - 1'
           note on mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            /* Send the arg3-byte message at arg2 with priority arg4 to
               queue fd arg1, with an optional absolute timeout at arg5. */
            struct timespec ts;

            /* NOTE(review): the lock_user() result is not checked for
               NULL here; a bad guest pointer relies on the host call
               failing with EFAULT -- verify against upstream. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* As mq_timedsend, with a 64-bit-time timeout structure. */
            struct timespec ts;

            /* NOTE(review): lock_user() result unchecked, as above. */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
12430
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            /* Receive a message from queue fd arg1 into the arg3-byte
               guest buffer at arg2; arg4, if non-zero, receives the
               message priority; arg5 is an optional absolute timeout. */
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): lock_user() result is not checked for NULL,
               and the buffer is locked VERIFY_READ although it is an
               output buffer; also the put_user_u32() below ignores its
               failure status -- verify against upstream. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            /* As mq_timedreceive, with a 64-bit-time timeout. */
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): same unchecked lock_user()/put_user_u32()
               pattern as mq_timedreceive above. */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
12484
12485 /* Not implemented for now... */
12486 /* case TARGET_NR_mq_notify: */
12487 /* break; */
12488
12489 case TARGET_NR_mq_getsetattr:
12490 {
12491 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12492 ret = 0;
12493 if (arg2 != 0) {
12494 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12495 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12496 &posix_mq_attr_out));
12497 } else if (arg3 != 0) {
12498 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12499 }
12500 if (ret == 0 && arg3 != 0) {
12501 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12502 }
12503 }
12504 return ret;
12505 #endif
12506
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* Duplicate up to arg3 bytes between two pipe fds; flags in
               arg4.  All arguments are scalars, so pass straight through. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* Move data between an fd and a pipe.  arg2/arg4 optionally
               point to 64-bit offsets that are read before and written
               back after the host call. */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            /* Splice guest iovecs into a pipe; the iovec array at arg2
               (arg3 entries) is translated to host addresses first. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec reports its failure reason via errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Register the fd so the 8-byte counter reads/writes get
           byte-swapped for the guest when needed. */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        {
            /* As eventfd, translating the guest's O_NONBLOCK/O_CLOEXEC
               flag bits to host values (other bits pass unchanged). */
            int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
            if (arg2 & TARGET_O_NONBLOCK) {
                host_flags |= O_NONBLOCK;
            }
            if (arg2 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }
            ret = get_errno(eventfd(arg1, host_flags));
            if (ret >= 0) {
                fd_trans_register(ret, &target_eventfd_trans);
            }
            return ret;
        }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        /* On 32-bit ABIs the two 64-bit arguments (offset, len) each
           arrive as a pair of 32-bit registers. */
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts a padding register, shifting the 64-bit
           pairs to (3,4) and (5,6) with the flags in arg7. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* signalfd4 takes flags in arg4; the sigset size (arg3) is
           validated inside do_signalfd4(). */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd is signalfd4 with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
        {
            /* Add/modify/remove fd arg3 on epoll fd arg1; arg4 points
               to the guest epoll_event for ADD/MOD. */
            struct epoll_event ep;
            struct epoll_event *epp = 0;
            if (arg4) {
                if (arg2 != EPOLL_CTL_DEL) {
                    struct target_epoll_event *target_ep;
                    if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                        return -TARGET_EFAULT;
                    }
                    ep.events = tswap32(target_ep->events);
                    /*
                     * The epoll_data_t union is just opaque data to the kernel,
                     * so we transfer all 64 bits across and need not worry what
                     * actual data type it is.
                     */
                    ep.data.u64 = tswap64(target_ep->data.u64);
                    unlock_user_struct(target_ep, arg4, 0);
                }
                /*
                 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
                 * non-null pointer, even though this argument is ignored.
                 *
                 */
                epp = &ep;
            }
            return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        }
#endif
12678
12679 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12680 #if defined(TARGET_NR_epoll_wait)
12681 case TARGET_NR_epoll_wait:
12682 #endif
12683 #if defined(TARGET_NR_epoll_pwait)
12684 case TARGET_NR_epoll_pwait:
12685 #endif
12686 {
12687 struct target_epoll_event *target_ep;
12688 struct epoll_event *ep;
12689 int epfd = arg1;
12690 int maxevents = arg3;
12691 int timeout = arg4;
12692
12693 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12694 return -TARGET_EINVAL;
12695 }
12696
12697 target_ep = lock_user(VERIFY_WRITE, arg2,
12698 maxevents * sizeof(struct target_epoll_event), 1);
12699 if (!target_ep) {
12700 return -TARGET_EFAULT;
12701 }
12702
12703 ep = g_try_new(struct epoll_event, maxevents);
12704 if (!ep) {
12705 unlock_user(target_ep, arg2, 0);
12706 return -TARGET_ENOMEM;
12707 }
12708
12709 switch (num) {
12710 #if defined(TARGET_NR_epoll_pwait)
12711 case TARGET_NR_epoll_pwait:
12712 {
12713 target_sigset_t *target_set;
12714 sigset_t _set, *set = &_set;
12715
12716 if (arg5) {
12717 if (arg6 != sizeof(target_sigset_t)) {
12718 ret = -TARGET_EINVAL;
12719 break;
12720 }
12721
12722 target_set = lock_user(VERIFY_READ, arg5,
12723 sizeof(target_sigset_t), 1);
12724 if (!target_set) {
12725 ret = -TARGET_EFAULT;
12726 break;
12727 }
12728 target_to_host_sigset(set, target_set);
12729 unlock_user(target_set, arg5, 0);
12730 } else {
12731 set = NULL;
12732 }
12733
12734 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12735 set, SIGSET_T_SIZE));
12736 break;
12737 }
12738 #endif
12739 #if defined(TARGET_NR_epoll_wait)
12740 case TARGET_NR_epoll_wait:
12741 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12742 NULL, 0));
12743 break;
12744 #endif
12745 default:
12746 ret = -TARGET_ENOSYS;
12747 }
12748 if (!is_error(ret)) {
12749 int i;
12750 for (i = 0; i < ret; i++) {
12751 target_ep[i].events = tswap32(ep[i].events);
12752 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12753 }
12754 unlock_user(target_ep, arg2,
12755 ret * sizeof(struct target_epoll_event));
12756 } else {
12757 unlock_user(target_ep, arg2, 0);
12758 }
12759 g_free(ep);
12760 return ret;
12761 }
12762 #endif
12763 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* NOTE(review): new limits for AS/DATA/STACK are deliberately not
         * passed through to the host — presumably because they would
         * constrain QEMU's own address space, not just the guest's; the
         * old limits are still read back below. Confirm intent. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back out to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* gethostname(name, len): map the guest buffer writable, fill it
         * from the host, and hand it back. */
        char *hostbuf = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!hostbuf) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(hostbuf, arg2));
        unlock_user(hostbuf, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Userspace compare-and-exchange helper:
         *   old = *(u32 *)arg6; if (old == arg2) *(u32 *)arg6 = arg1;
         *   return old;
         * Note: not atomic with respect to other vCPUs —
         * should use start_exclusive from main.c.
         */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Guest address is unreadable: raise SIGSEGV and bail out.
             * The previous code fell through here and compared an
             * uninitialized mem_value (undefined behavior) and could
             * store to the faulting address. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            /* Return value is moot: the queued SIGSEGV is delivered
             * before the guest can observe it. */
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            /* Best-effort store: the word was just readable above;
             * a write fault here would be inherently racy to report. */
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
12836
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Claim a free slot in the g_posix_timers table. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            /* Table full: no free host timer slots. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                /* Convert the guest sigevent; NULL sevp means the host
                 * default (SIGEV_SIGNAL/SIGALRM) applies. */
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* The guest-visible timer id is a tagged table index
                 * (TIMER_MAGIC | index), not the raw host timer_t. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
12872
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returns a target errno on failure. */
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is optional; copy it back only if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12900
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* 64-bit-time_t variant of timer_settime: identical flow, but the
         * guest structs are converted with the *_itimerspec64 helpers. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returns a target errno on failure. */
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is optional; copy it back only if requested. */
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12926
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returns a target errno on failure. */
            ret = timerid;
        } else if (!arg2) {
            /* curr_value must be a valid pointer. */
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12949
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value
         * 64-bit-time_t variant of timer_gettime. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returns a target errno on failure. */
            ret = timerid;
        } else if (!arg2) {
            /* curr_value must be a valid pointer. */
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12972
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returns a target errno on failure. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
12988
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returns a target errno on failure. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Release the table slot so next_free_host_timer() can
             * hand it out again. */
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
13005
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Translate guest TFD_* flag bits to host values. */
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        /* Copy the current setting back out if the guest gave a buffer. */
        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            return -TARGET_EFAULT;
        }
    }
    return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
    {
        /* 64-bit-time_t variant of timerfd_gettime. */
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
            return -TARGET_EFAULT;
        }
    }
    return ret;
#endif
13039
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                return -TARGET_EFAULT;
            }
            p_new = &its_new;
        } else {
            /* NULL new_value is passed straight through to the host,
             * which rejects it with EFAULT/EINVAL as appropriate. */
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        /* old_value is optional; copy it back only if requested. */
        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            return -TARGET_EFAULT;
        }
    }
    return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
    {
        /* 64-bit-time_t variant of timerfd_settime. */
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec64(&its_new, arg3)) {
                return -TARGET_EFAULT;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        /* old_value is optional; copy it back only if requested. */
        if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
            return -TARGET_EFAULT;
        }
    }
    return ret;
#endif
13085
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* Direct pass-through: which/who have identical encodings. */
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* Drop any stale fd translator registered for this fd number. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
13128
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
    {
        /* copy_file_range(fd_in, *off_in, fd_out, *off_out, len, flags):
         * the offset pointers are optional; when present they are read
         * before the call and written back after a successful copy. */
        loff_t inoff, outoff;
        loff_t *pinoff = NULL, *poutoff = NULL;

        if (arg2) {
            if (get_user_u64(inoff, arg2)) {
                return -TARGET_EFAULT;
            }
            pinoff = &inoff;
        }
        if (arg4) {
            if (get_user_u64(outoff, arg4)) {
                return -TARGET_EFAULT;
            }
            poutoff = &outoff;
        }
        /* Do not sign-extend the count parameter. */
        ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                             (abi_ulong)arg5, arg6));
        if (!is_error(ret) && ret > 0) {
            /* The kernel advanced the offsets; mirror that to the guest. */
            if (arg2) {
                if (put_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    }
    return ret;
#endif
13165
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
    {
        void *p2;
        p = lock_user_string(arg1); /* new_root */
        p2 = lock_user_string(arg2); /* put_old */
        if (!p || !p2) {
            ret = -TARGET_EFAULT;
        } else {
            ret = get_errno(pivot_root(p, p2));
        }
        /* NOTE(review): on a failed lock the corresponding pointer is
         * NULL here — relies on unlock_user() tolerating that; confirm. */
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
    }
    return ret;
#endif

    default:
        /* No emulation exists for this syscall number. */
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
13189
/*
 * Guest syscall entry point: wraps do_syscall1() with record/replay and
 * -strace tracing hooks.  arg1..arg8 are the raw guest syscall arguments;
 * the return value is the result in target-errno encoding.
 */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    /* Record/replay hook: log the call and its arguments. */
    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    /* -strace logging of the call... */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    /* ...and of its result. */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}