/*
 * Linux syscalls
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include "qemu/plugin.h"
#include "target_mman.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"
#include "cpu_loop-common.h"

#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

#ifndef CLONE_PIDFD
# define CLONE_PIDFD 0x00001000
#endif

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS                       \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The bit 0x1000, which used to be CLONE_PID, is now CLONE_PIDFD and is
 * accepted via CLONE_OPTIONAL_FORK_FLAGS above.)
 */
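/* For example (an illustration, not taken from this file): glibc's
 * pthread_create() typically issues clone() with
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS and otherwise only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so do_fork() classifies it as a thread
 * creation rather than a fork.
 */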

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
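
/* As an illustration of the macros above, the declaration
 *     _syscall2(int, capget, struct __user_cap_header_struct *, header,
 *               struct __user_cap_data_struct *, data);
 * used further down expands to
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     {
 *         return syscall(__NR_capget, header, data);
 *     }
 * i.e. a direct wrapper around the raw host syscall that shadows any
 * glibc wrapper of the same name within this file.
 */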


#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC (1U << 2)
#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
          unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
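
/* Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: when translating target->host, a flag word that matches
 * target_bits under target_mask contributes host_bits to the result,
 * and symmetrically in the other direction. E.g. the two
 * TARGET_O_ACCMODE rows above translate the 2-bit access mode as a
 * field rather than as independent flag bits.
 */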

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif
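
/* The qatomic_xchg() above is a lock-free test-and-set: a slot is free
 * iff the exchange returns 0, so two threads can never claim the same
 * timer slot. free_host_timer_slot() pairs with it using a store-release
 * so that earlier writes to the slot are ordered before it becomes
 * reusable.
 */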

static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
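
/* errnos.c.inc is an X-macro list of E(ERRNO) entries; including it
 * twice with the two opposite definitions of E() above generates both
 * directions of the host<->target errno mapping from a single list.
 */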

abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}

static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;
    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}
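
/* check_zeroed_user() mirrors the kernel helper of the same name: for a
 * guest struct larger than the version QEMU knows about (usize > ksize),
 * it returns 1 if every excess byte is zero (and trivially 1 when
 * usize <= ksize), 0 if some excess byte is set, and -TARGET_EFAULT if
 * the guest memory cannot be read. This is how extensible-struct
 * syscalls such as sched_setattr() stay forward compatible.
 */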

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

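/* Unlike the raw _syscallN wrappers above, the safe_* wrappers generated
 * below go through safe_syscall() (see user/safe-syscall.h), which
 * cooperates with the signal code: a pending guest signal is noticed
 * before blocking, and one arriving during the host call interrupts it,
 * in both cases failing with QEMU_ERESTARTSYS so the syscall can be
 * restarted after the signal is delivered.
 */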
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_brk = new_brk;
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
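
/* target_brk is the break value the guest sees; brk_page is the
 * host-page-aligned end of the memory actually mapped for the heap.
 * The two can differ by up to a host page, and do_brk() below zeroes
 * that slack so stale heap bytes never become visible to the guest
 * again after a shrink-then-grow sequence.
 */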

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;
    abi_ulong new_brk, new_host_brk_page;

    /* brk pointers are always untagged */

    /* return old brk value if brk_val unchanged or zero */
    if (!brk_val || brk_val == target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

    /* brk_val and old target_brk might be on the same page */
    if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
        if (brk_val > target_brk) {
            /* empty remaining bytes in (possibly larger) host page */
            memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
        }
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < target_brk) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);

        /* free unused host pages and set new brk_page */
        target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
        brk_page = new_host_brk_page;

        target_brk = brk_val;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = new_host_brk_page - brk_page;
    if (new_alloc_size) {
        mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                            PROT_READ|PROT_WRITE,
                                            MAP_ANON|MAP_PRIVATE, 0, 0));
    } else {
        mapped_addr = brk_page;
    }

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = brk_val;
        brk_page = new_host_brk_page;
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
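
/* Worked example (hypothetical numbers): with n = 70 descriptors and a
 * 32-bit target ABI, nw = DIV_ROUND_UP(70, 32) = 3 guest words are
 * copied, and descriptor k is bit (k % 32) of word (k / 32); hence both
 * loops above walk k across all nw * TARGET_ABI_BITS bit positions.
 */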

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
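
/* E.g. for an Alpha target (where TARGET_HZ is 1024) on a typical
 * 100 Hz host, 250 host ticks convert to 250 * 1024 / 100 = 2560 target
 * ticks; the int64_t cast keeps the intermediate product from
 * overflowing long on 32-bit hosts.
 */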

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
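
/* Both rlimit converters above saturate rather than truncate: any limit
 * that does not fit in the destination type, and the infinity constant
 * itself, map to (TARGET_)RLIM_INFINITY, since a silently truncated
 * limit would be more restrictive than what was asked for.
 */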

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32-bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
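
/* The kernel ABI for pselect6 packs the sigmask argument as a pair
 *     { const sigset_t *ss; size_t ss_len; }
 * whose address is passed as the sixth argument; the two tswapal()
 * reads of arg7[0] and arg7[1] above unpack exactly that pair from
 * guest memory before the mask is converted and applied.
 */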

#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
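
/* For the plain poll() path above, a millisecond timeout is converted
 * to a timespec before reaching the common safe_ppoll() call: e.g.
 * arg3 = 1500 becomes { .tv_sec = 1, .tv_nsec = 500000000 }, and any
 * negative value selects the "wait forever" NULL timeout.
 */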

static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);
    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
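
/* Concretely: if a guest passes an AF_UNIX address with
 *     len = offsetof(struct sockaddr_un, sun_path) + strlen(path)
 * (one byte short of the NUL terminator), the fixup above sees
 * cp[len - 1] != 0 while cp[len] == 0 and extends len by one so the
 * host kernel receives a properly terminated sun_path.
 */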

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
1767 target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1768 }
1769 unlock_user(target_saddr, target_addr, len);
1770
1771 return 0;
1772 }
1773
1774 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1775 struct target_msghdr *target_msgh)
1776 {
1777 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1778 abi_long msg_controllen;
1779 abi_ulong target_cmsg_addr;
1780 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1781 socklen_t space = 0;
1782
1783 msg_controllen = tswapal(target_msgh->msg_controllen);
1784 if (msg_controllen < sizeof (struct target_cmsghdr))
1785 goto the_end;
1786 target_cmsg_addr = tswapal(target_msgh->msg_control);
1787 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1788 target_cmsg_start = target_cmsg;
1789 if (!target_cmsg)
1790 return -TARGET_EFAULT;
1791
1792 while (cmsg && target_cmsg) {
1793 void *data = CMSG_DATA(cmsg);
1794 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1795
1796 int len = tswapal(target_cmsg->cmsg_len)
1797 - sizeof(struct target_cmsghdr);
1798
1799 space += CMSG_SPACE(len);
1800 if (space > msgh->msg_controllen) {
1801 space -= CMSG_SPACE(len);
1802 /* This is a QEMU bug, since we allocated the payload
1803 * area ourselves (unlike overflow in host-to-target
1804 * conversion, which is just the guest giving us a buffer
1805 * that's too small). It can't happen for the payload types
1806 * we currently support; if it becomes an issue in future
1807 * we would need to improve our allocation strategy to
1808 * something more intelligent than "twice the size of the
1809 * target buffer we're reading from".
1810 */
1811 qemu_log_mask(LOG_UNIMP,
1812 ("Unsupported ancillary data %d/%d: "
1813 "unhandled msg size\n"),
1814 tswap32(target_cmsg->cmsg_level),
1815 tswap32(target_cmsg->cmsg_type));
1816 break;
1817 }
1818
1819 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1820 cmsg->cmsg_level = SOL_SOCKET;
1821 } else {
1822 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1823 }
1824 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1825 cmsg->cmsg_len = CMSG_LEN(len);
1826
1827 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1828 int *fd = (int *)data;
1829 int *target_fd = (int *)target_data;
1830 int i, numfds = len / sizeof(int);
1831
1832 for (i = 0; i < numfds; i++) {
1833 __get_user(fd[i], target_fd + i);
1834 }
1835 } else if (cmsg->cmsg_level == SOL_SOCKET
1836 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1837 struct ucred *cred = (struct ucred *)data;
1838 struct target_ucred *target_cred =
1839 (struct target_ucred *)target_data;
1840
1841 __get_user(cred->pid, &target_cred->pid);
1842 __get_user(cred->uid, &target_cred->uid);
1843 __get_user(cred->gid, &target_cred->gid);
1844 } else if (cmsg->cmsg_level == SOL_ALG) {
1845 uint32_t *dst = (uint32_t *)data;
1846
1847 memcpy(dst, target_data, len);
1848 /* fix endianness of the first 32-bit word */
1849 if (len >= sizeof(uint32_t)) {
1850 *dst = tswap32(*dst);
1851 }
1852 } else {
1853 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1854 cmsg->cmsg_level, cmsg->cmsg_type);
1855 memcpy(data, target_data, len);
1856 }
1857
1858 cmsg = CMSG_NXTHDR(msgh, cmsg);
1859 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1860 target_cmsg_start);
1861 }
1862 unlock_user(target_cmsg, target_cmsg_addr, 0);
1863 the_end:
1864 msgh->msg_controllen = space;
1865 return 0;
1866 }
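/*
 * Sketch of the usual SCM_RIGHTS flow through the loop above (the
 * snippet is illustrative, not from this file): a guest builds
 *     cm->cmsg_level = SOL_SOCKET;
 *     cm->cmsg_type  = SCM_RIGHTS;
 *     cm->cmsg_len   = CMSG_LEN(n * sizeof(int));
 * and the fd array is copied element by element with __get_user(), so
 * each descriptor number is byte-swapped from guest to host order before
 * sendmsg() ever sees the control buffer.
 */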
1867
1868 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1869 struct msghdr *msgh)
1870 {
1871 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1872 abi_long msg_controllen;
1873 abi_ulong target_cmsg_addr;
1874 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1875 socklen_t space = 0;
1876
1877 msg_controllen = tswapal(target_msgh->msg_controllen);
1878 if (msg_controllen < sizeof (struct target_cmsghdr))
1879 goto the_end;
1880 target_cmsg_addr = tswapal(target_msgh->msg_control);
1881 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1882 target_cmsg_start = target_cmsg;
1883 if (!target_cmsg)
1884 return -TARGET_EFAULT;
1885
1886 while (cmsg && target_cmsg) {
1887 void *data = CMSG_DATA(cmsg);
1888 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1889
1890 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1891 int tgt_len, tgt_space;
1892
1893 /* We never copy a half-header but may copy half-data;
1894 * this is Linux's behaviour in put_cmsg(). Note that
1895 * truncation here is a guest problem (which we report
1896 * to the guest via the CTRUNC bit), unlike truncation
1897 * in target_to_host_cmsg, which is a QEMU bug.
1898 */
1899 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1900 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1901 break;
1902 }
1903
1904 if (cmsg->cmsg_level == SOL_SOCKET) {
1905 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1906 } else {
1907 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1908 }
1909 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1910
1911 /* Payload types which need a different size of payload on
1912 * the target must adjust tgt_len here.
1913 */
1914 tgt_len = len;
1915 switch (cmsg->cmsg_level) {
1916 case SOL_SOCKET:
1917 switch (cmsg->cmsg_type) {
1918 case SO_TIMESTAMP:
1919 tgt_len = sizeof(struct target_timeval);
1920 break;
1921 default:
1922 break;
1923 }
1924 break;
1925 default:
1926 break;
1927 }
1928
1929 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1930 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1931 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1932 }
1933
1934 /* We must now copy-and-convert len bytes of payload
1935 * into tgt_len bytes of destination space. Bear in mind
1936 * that in both source and destination we may be dealing
1937 * with a truncated value!
1938 */
1939 switch (cmsg->cmsg_level) {
1940 case SOL_SOCKET:
1941 switch (cmsg->cmsg_type) {
1942 case SCM_RIGHTS:
1943 {
1944 int *fd = (int *)data;
1945 int *target_fd = (int *)target_data;
1946 int i, numfds = tgt_len / sizeof(int);
1947
1948 for (i = 0; i < numfds; i++) {
1949 __put_user(fd[i], target_fd + i);
1950 }
1951 break;
1952 }
1953 case SO_TIMESTAMP:
1954 {
1955 struct timeval *tv = (struct timeval *)data;
1956 struct target_timeval *target_tv =
1957 (struct target_timeval *)target_data;
1958
1959 if (len != sizeof(struct timeval) ||
1960 tgt_len != sizeof(struct target_timeval)) {
1961 goto unimplemented;
1962 }
1963
1964 /* copy struct timeval to target */
1965 __put_user(tv->tv_sec, &target_tv->tv_sec);
1966 __put_user(tv->tv_usec, &target_tv->tv_usec);
1967 break;
1968 }
1969 case SCM_CREDENTIALS:
1970 {
1971 struct ucred *cred = (struct ucred *)data;
1972 struct target_ucred *target_cred =
1973 (struct target_ucred *)target_data;
1974
1975 __put_user(cred->pid, &target_cred->pid);
1976 __put_user(cred->uid, &target_cred->uid);
1977 __put_user(cred->gid, &target_cred->gid);
1978 break;
1979 }
1980 default:
1981 goto unimplemented;
1982 }
1983 break;
1984
1985 case SOL_IP:
1986 switch (cmsg->cmsg_type) {
1987 case IP_TTL:
1988 {
1989 uint32_t *v = (uint32_t *)data;
1990 uint32_t *t_int = (uint32_t *)target_data;
1991
1992 if (len != sizeof(uint32_t) ||
1993 tgt_len != sizeof(uint32_t)) {
1994 goto unimplemented;
1995 }
1996 __put_user(*v, t_int);
1997 break;
1998 }
1999 case IP_RECVERR:
2000 {
2001 struct errhdr_t {
2002 struct sock_extended_err ee;
2003 struct sockaddr_in offender;
2004 };
2005 struct errhdr_t *errh = (struct errhdr_t *)data;
2006 struct errhdr_t *target_errh =
2007 (struct errhdr_t *)target_data;
2008
2009 if (len != sizeof(struct errhdr_t) ||
2010 tgt_len != sizeof(struct errhdr_t)) {
2011 goto unimplemented;
2012 }
2013 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2014 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2015 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2016 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2017 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2018 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2019 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2020 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2021 (void *) &errh->offender, sizeof(errh->offender));
2022 break;
2023 }
2024 default:
2025 goto unimplemented;
2026 }
2027 break;
2028
2029 case SOL_IPV6:
2030 switch (cmsg->cmsg_type) {
2031 case IPV6_HOPLIMIT:
2032 {
2033 uint32_t *v = (uint32_t *)data;
2034 uint32_t *t_int = (uint32_t *)target_data;
2035
2036 if (len != sizeof(uint32_t) ||
2037 tgt_len != sizeof(uint32_t)) {
2038 goto unimplemented;
2039 }
2040 __put_user(*v, t_int);
2041 break;
2042 }
2043 case IPV6_RECVERR:
2044 {
2045 struct errhdr6_t {
2046 struct sock_extended_err ee;
2047 struct sockaddr_in6 offender;
2048 };
2049 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2050 struct errhdr6_t *target_errh =
2051 (struct errhdr6_t *)target_data;
2052
2053 if (len != sizeof(struct errhdr6_t) ||
2054 tgt_len != sizeof(struct errhdr6_t)) {
2055 goto unimplemented;
2056 }
2057 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2058 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2059 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2060 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2061 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2062 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2063 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2064 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2065 (void *) &errh->offender, sizeof(errh->offender));
2066 break;
2067 }
2068 default:
2069 goto unimplemented;
2070 }
2071 break;
2072
2073 default:
2074 unimplemented:
2075 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2076 cmsg->cmsg_level, cmsg->cmsg_type);
2077 memcpy(target_data, data, MIN(len, tgt_len));
2078 if (tgt_len > len) {
2079 memset(target_data + len, 0, tgt_len - len);
2080 }
2081 }
2082
2083 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2084 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2085 if (msg_controllen < tgt_space) {
2086 tgt_space = msg_controllen;
2087 }
2088 msg_controllen -= tgt_space;
2089 space += tgt_space;
2090 cmsg = CMSG_NXTHDR(msgh, cmsg);
2091 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2092 target_cmsg_start);
2093 }
2094 unlock_user(target_cmsg, target_cmsg_addr, space);
2095 the_end:
2096 target_msgh->msg_controllen = tswapal(space);
2097 return 0;
2098 }
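/*
 * Truncation rules applied by the loop above: a control buffer too small
 * for a converted header sets MSG_CTRUNC and stops (half-headers are
 * never written, mirroring the kernel's put_cmsg()), while a payload may
 * be copied partially; tgt_space is clamped so the msg_controllen
 * written back never exceeds what the guest supplied.
 */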
2099
2100 /* do_setsockopt() Must return target values and target errnos. */
2101 static abi_long do_setsockopt(int sockfd, int level, int optname,
2102 abi_ulong optval_addr, socklen_t optlen)
2103 {
2104 abi_long ret;
2105 int val;
2106 struct ip_mreqn *ip_mreq;
2107 struct ip_mreq_source *ip_mreq_source;
2108
2109 switch(level) {
2110 case SOL_TCP:
2111 case SOL_UDP:
2112 /* TCP and UDP options all take an 'int' value. */
2113 if (optlen < sizeof(uint32_t))
2114 return -TARGET_EINVAL;
2115
2116 if (get_user_u32(val, optval_addr))
2117 return -TARGET_EFAULT;
2118 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2119 break;
2120 case SOL_IP:
2121 switch(optname) {
2122 case IP_TOS:
2123 case IP_TTL:
2124 case IP_HDRINCL:
2125 case IP_ROUTER_ALERT:
2126 case IP_RECVOPTS:
2127 case IP_RETOPTS:
2128 case IP_PKTINFO:
2129 case IP_MTU_DISCOVER:
2130 case IP_RECVERR:
2131 case IP_RECVTTL:
2132 case IP_RECVTOS:
2133 #ifdef IP_FREEBIND
2134 case IP_FREEBIND:
2135 #endif
2136 case IP_MULTICAST_TTL:
2137 case IP_MULTICAST_LOOP:
2138 val = 0;
2139 if (optlen >= sizeof(uint32_t)) {
2140 if (get_user_u32(val, optval_addr))
2141 return -TARGET_EFAULT;
2142 } else if (optlen >= 1) {
2143 if (get_user_u8(val, optval_addr))
2144 return -TARGET_EFAULT;
2145 }
2146 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2147 break;
2148 case IP_ADD_MEMBERSHIP:
2149 case IP_DROP_MEMBERSHIP:
2150 if (optlen < sizeof (struct target_ip_mreq) ||
2151 optlen > sizeof (struct target_ip_mreqn))
2152 return -TARGET_EINVAL;
2153
2154 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2155 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2156 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2157 break;
2158
2159 case IP_BLOCK_SOURCE:
2160 case IP_UNBLOCK_SOURCE:
2161 case IP_ADD_SOURCE_MEMBERSHIP:
2162 case IP_DROP_SOURCE_MEMBERSHIP:
2163 if (optlen != sizeof (struct target_ip_mreq_source))
2164 return -TARGET_EINVAL;
2165
2166 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2167 if (!ip_mreq_source) {
2168 return -TARGET_EFAULT;
2169 }
2170 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2171 unlock_user (ip_mreq_source, optval_addr, 0);
2172 break;
2173
2174 default:
2175 goto unimplemented;
2176 }
2177 break;
2178 case SOL_IPV6:
2179 switch (optname) {
2180 case IPV6_MTU_DISCOVER:
2181 case IPV6_MTU:
2182 case IPV6_V6ONLY:
2183 case IPV6_RECVPKTINFO:
2184 case IPV6_UNICAST_HOPS:
2185 case IPV6_MULTICAST_HOPS:
2186 case IPV6_MULTICAST_LOOP:
2187 case IPV6_RECVERR:
2188 case IPV6_RECVHOPLIMIT:
2189 case IPV6_2292HOPLIMIT:
2190 case IPV6_CHECKSUM:
2191 case IPV6_ADDRFORM:
2192 case IPV6_2292PKTINFO:
2193 case IPV6_RECVTCLASS:
2194 case IPV6_RECVRTHDR:
2195 case IPV6_2292RTHDR:
2196 case IPV6_RECVHOPOPTS:
2197 case IPV6_2292HOPOPTS:
2198 case IPV6_RECVDSTOPTS:
2199 case IPV6_2292DSTOPTS:
2200 case IPV6_TCLASS:
2201 case IPV6_ADDR_PREFERENCES:
2202 #ifdef IPV6_RECVPATHMTU
2203 case IPV6_RECVPATHMTU:
2204 #endif
2205 #ifdef IPV6_TRANSPARENT
2206 case IPV6_TRANSPARENT:
2207 #endif
2208 #ifdef IPV6_FREEBIND
2209 case IPV6_FREEBIND:
2210 #endif
2211 #ifdef IPV6_RECVORIGDSTADDR
2212 case IPV6_RECVORIGDSTADDR:
2213 #endif
2214 val = 0;
2215 if (optlen < sizeof(uint32_t)) {
2216 return -TARGET_EINVAL;
2217 }
2218 if (get_user_u32(val, optval_addr)) {
2219 return -TARGET_EFAULT;
2220 }
2221 ret = get_errno(setsockopt(sockfd, level, optname,
2222 &val, sizeof(val)));
2223 break;
2224 case IPV6_PKTINFO:
2225 {
2226 struct in6_pktinfo pki;
2227
2228 if (optlen < sizeof(pki)) {
2229 return -TARGET_EINVAL;
2230 }
2231
2232 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2233 return -TARGET_EFAULT;
2234 }
2235
2236 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2237
2238 ret = get_errno(setsockopt(sockfd, level, optname,
2239 &pki, sizeof(pki)));
2240 break;
2241 }
2242 case IPV6_ADD_MEMBERSHIP:
2243 case IPV6_DROP_MEMBERSHIP:
2244 {
2245 struct ipv6_mreq ipv6mreq;
2246
2247 if (optlen < sizeof(ipv6mreq)) {
2248 return -TARGET_EINVAL;
2249 }
2250
2251 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2252 return -TARGET_EFAULT;
2253 }
2254
2255 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2256
2257 ret = get_errno(setsockopt(sockfd, level, optname,
2258 &ipv6mreq, sizeof(ipv6mreq)));
2259 break;
2260 }
2261 default:
2262 goto unimplemented;
2263 }
2264 break;
2265 case SOL_ICMPV6:
2266 switch (optname) {
2267 case ICMPV6_FILTER:
2268 {
2269 struct icmp6_filter icmp6f;
2270
2271 if (optlen > sizeof(icmp6f)) {
2272 optlen = sizeof(icmp6f);
2273 }
2274
2275 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2276 return -TARGET_EFAULT;
2277 }
2278
2279 for (val = 0; val < 8; val++) {
2280 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2281 }
2282
2283 ret = get_errno(setsockopt(sockfd, level, optname,
2284 &icmp6f, optlen));
2285 break;
2286 }
2287 default:
2288 goto unimplemented;
2289 }
2290 break;
2291 case SOL_RAW:
2292 switch (optname) {
2293 case ICMP_FILTER:
2294 case IPV6_CHECKSUM:
2295 /* these options take a u32 value */
2296 if (optlen < sizeof(uint32_t)) {
2297 return -TARGET_EINVAL;
2298 }
2299
2300 if (get_user_u32(val, optval_addr)) {
2301 return -TARGET_EFAULT;
2302 }
2303 ret = get_errno(setsockopt(sockfd, level, optname,
2304 &val, sizeof(val)));
2305 break;
2306
2307 default:
2308 goto unimplemented;
2309 }
2310 break;
2311 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2312 case SOL_ALG:
2313 switch (optname) {
2314 case ALG_SET_KEY:
2315 {
2316 char *alg_key = g_malloc(optlen);
2317
2318 if (!alg_key) {
2319 return -TARGET_ENOMEM;
2320 }
2321 if (copy_from_user(alg_key, optval_addr, optlen)) {
2322 g_free(alg_key);
2323 return -TARGET_EFAULT;
2324 }
2325 ret = get_errno(setsockopt(sockfd, level, optname,
2326 alg_key, optlen));
2327 g_free(alg_key);
2328 break;
2329 }
2330 case ALG_SET_AEAD_AUTHSIZE:
2331 {
2332 ret = get_errno(setsockopt(sockfd, level, optname,
2333 NULL, optlen));
2334 break;
2335 }
2336 default:
2337 goto unimplemented;
2338 }
2339 break;
2340 #endif
2341 case TARGET_SOL_SOCKET:
2342 switch (optname) {
2343 case TARGET_SO_RCVTIMEO:
2344 {
2345 struct timeval tv;
2346
2347 optname = SO_RCVTIMEO;
2348
2349 set_timeout:
2350 if (optlen != sizeof(struct target_timeval)) {
2351 return -TARGET_EINVAL;
2352 }
2353
2354 if (copy_from_user_timeval(&tv, optval_addr)) {
2355 return -TARGET_EFAULT;
2356 }
2357
2358 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2359 &tv, sizeof(tv)));
2360 return ret;
2361 }
2362 case TARGET_SO_SNDTIMEO:
2363 optname = SO_SNDTIMEO;
2364 goto set_timeout;
2365 case TARGET_SO_ATTACH_FILTER:
2366 {
2367 struct target_sock_fprog *tfprog;
2368 struct target_sock_filter *tfilter;
2369 struct sock_fprog fprog;
2370 struct sock_filter *filter;
2371 int i;
2372
2373 if (optlen != sizeof(*tfprog)) {
2374 return -TARGET_EINVAL;
2375 }
2376 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2377 return -TARGET_EFAULT;
2378 }
2379 if (!lock_user_struct(VERIFY_READ, tfilter,
2380 tswapal(tfprog->filter), 0)) {
2381 unlock_user_struct(tfprog, optval_addr, 1);
2382 return -TARGET_EFAULT;
2383 }
2384
2385 fprog.len = tswap16(tfprog->len);
2386 filter = g_try_new(struct sock_filter, fprog.len);
2387 if (filter == NULL) {
2388 unlock_user_struct(tfilter, tfprog->filter, 1);
2389 unlock_user_struct(tfprog, optval_addr, 1);
2390 return -TARGET_ENOMEM;
2391 }
2392 for (i = 0; i < fprog.len; i++) {
2393 filter[i].code = tswap16(tfilter[i].code);
2394 filter[i].jt = tfilter[i].jt;
2395 filter[i].jf = tfilter[i].jf;
2396 filter[i].k = tswap32(tfilter[i].k);
2397 }
2398 fprog.filter = filter;
2399
2400 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2401 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2402 g_free(filter);
2403
2404 unlock_user_struct(tfilter, tfprog->filter, 1);
2405 unlock_user_struct(tfprog, optval_addr, 1);
2406 return ret;
2407 }
2408 case TARGET_SO_BINDTODEVICE:
2409 {
2410 char *dev_ifname, *addr_ifname;
2411
2412 if (optlen > IFNAMSIZ - 1) {
2413 optlen = IFNAMSIZ - 1;
2414 }
2415 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2416 if (!dev_ifname) {
2417 return -TARGET_EFAULT;
2418 }
2419 optname = SO_BINDTODEVICE;
2420 addr_ifname = alloca(IFNAMSIZ);
2421 memcpy(addr_ifname, dev_ifname, optlen);
2422 addr_ifname[optlen] = 0;
2423 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2424 addr_ifname, optlen));
2425 unlock_user (dev_ifname, optval_addr, 0);
2426 return ret;
2427 }
2428 case TARGET_SO_LINGER:
2429 {
2430 struct linger lg;
2431 struct target_linger *tlg;
2432
2433 if (optlen != sizeof(struct target_linger)) {
2434 return -TARGET_EINVAL;
2435 }
2436 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2437 return -TARGET_EFAULT;
2438 }
2439 __get_user(lg.l_onoff, &tlg->l_onoff);
2440 __get_user(lg.l_linger, &tlg->l_linger);
2441 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2442 &lg, sizeof(lg)));
2443 unlock_user_struct(tlg, optval_addr, 0);
2444 return ret;
2445 }
2446 /* Options with 'int' argument. */
2447 case TARGET_SO_DEBUG:
2448 optname = SO_DEBUG;
2449 break;
2450 case TARGET_SO_REUSEADDR:
2451 optname = SO_REUSEADDR;
2452 break;
2453 #ifdef SO_REUSEPORT
2454 case TARGET_SO_REUSEPORT:
2455 optname = SO_REUSEPORT;
2456 break;
2457 #endif
2458 case TARGET_SO_TYPE:
2459 optname = SO_TYPE;
2460 break;
2461 case TARGET_SO_ERROR:
2462 optname = SO_ERROR;
2463 break;
2464 case TARGET_SO_DONTROUTE:
2465 optname = SO_DONTROUTE;
2466 break;
2467 case TARGET_SO_BROADCAST:
2468 optname = SO_BROADCAST;
2469 break;
2470 case TARGET_SO_SNDBUF:
2471 optname = SO_SNDBUF;
2472 break;
2473 case TARGET_SO_SNDBUFFORCE:
2474 optname = SO_SNDBUFFORCE;
2475 break;
2476 case TARGET_SO_RCVBUF:
2477 optname = SO_RCVBUF;
2478 break;
2479 case TARGET_SO_RCVBUFFORCE:
2480 optname = SO_RCVBUFFORCE;
2481 break;
2482 case TARGET_SO_KEEPALIVE:
2483 optname = SO_KEEPALIVE;
2484 break;
2485 case TARGET_SO_OOBINLINE:
2486 optname = SO_OOBINLINE;
2487 break;
2488 case TARGET_SO_NO_CHECK:
2489 optname = SO_NO_CHECK;
2490 break;
2491 case TARGET_SO_PRIORITY:
2492 optname = SO_PRIORITY;
2493 break;
2494 #ifdef SO_BSDCOMPAT
2495 case TARGET_SO_BSDCOMPAT:
2496 optname = SO_BSDCOMPAT;
2497 break;
2498 #endif
2499 case TARGET_SO_PASSCRED:
2500 optname = SO_PASSCRED;
2501 break;
2502 case TARGET_SO_PASSSEC:
2503 optname = SO_PASSSEC;
2504 break;
2505 case TARGET_SO_TIMESTAMP:
2506 optname = SO_TIMESTAMP;
2507 break;
2508 case TARGET_SO_RCVLOWAT:
2509 optname = SO_RCVLOWAT;
2510 break;
2511 default:
2512 goto unimplemented;
2513 }
2514 if (optlen < sizeof(uint32_t))
2515 return -TARGET_EINVAL;
2516
2517 if (get_user_u32(val, optval_addr))
2518 return -TARGET_EFAULT;
2519 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2520 break;
2521 #ifdef SOL_NETLINK
2522 case SOL_NETLINK:
2523 switch (optname) {
2524 case NETLINK_PKTINFO:
2525 case NETLINK_ADD_MEMBERSHIP:
2526 case NETLINK_DROP_MEMBERSHIP:
2527 case NETLINK_BROADCAST_ERROR:
2528 case NETLINK_NO_ENOBUFS:
2529 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2530 case NETLINK_LISTEN_ALL_NSID:
2531 case NETLINK_CAP_ACK:
2532 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2533 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2534 case NETLINK_EXT_ACK:
2535 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2536 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2537 case NETLINK_GET_STRICT_CHK:
2538 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2539 break;
2540 default:
2541 goto unimplemented;
2542 }
2543 val = 0;
2544 if (optlen < sizeof(uint32_t)) {
2545 return -TARGET_EINVAL;
2546 }
2547 if (get_user_u32(val, optval_addr)) {
2548 return -TARGET_EFAULT;
2549 }
2550 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2551 sizeof(val)));
2552 break;
2553 #endif /* SOL_NETLINK */
2554 default:
2555 unimplemented:
2556 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2557 level, optname);
2558 ret = -TARGET_ENOPROTOOPT;
2559 }
2560 return ret;
2561 }
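/*
 * Illustrative guest call exercising the int path above (the option is
 * only an example): setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one,
 * sizeof(one)) lands in the TARGET_SOL_SOCKET branch, which maps
 * TARGET_SO_REUSEADDR to the host SO_REUSEADDR, fetches the 32-bit value
 * with get_user_u32() (byte-swapping it if host and target endianness
 * differ) and forwards it to the host setsockopt().
 */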
2562
2563 /* do_getsockopt() Must return target values and target errnos. */
2564 static abi_long do_getsockopt(int sockfd, int level, int optname,
2565 abi_ulong optval_addr, abi_ulong optlen)
2566 {
2567 abi_long ret;
2568 int len, val;
2569 socklen_t lv;
2570
2571 switch(level) {
2572 case TARGET_SOL_SOCKET:
2573 level = SOL_SOCKET;
2574 switch (optname) {
2575 /* These don't just return a single integer */
2576 case TARGET_SO_PEERNAME:
2577 goto unimplemented;
2578 case TARGET_SO_RCVTIMEO: {
2579 struct timeval tv;
2580 socklen_t tvlen;
2581
2582 optname = SO_RCVTIMEO;
2583
2584 get_timeout:
2585 if (get_user_u32(len, optlen)) {
2586 return -TARGET_EFAULT;
2587 }
2588 if (len < 0) {
2589 return -TARGET_EINVAL;
2590 }
2591
2592 tvlen = sizeof(tv);
2593 ret = get_errno(getsockopt(sockfd, level, optname,
2594 &tv, &tvlen));
2595 if (ret < 0) {
2596 return ret;
2597 }
2598 if (len > sizeof(struct target_timeval)) {
2599 len = sizeof(struct target_timeval);
2600 }
2601 if (copy_to_user_timeval(optval_addr, &tv)) {
2602 return -TARGET_EFAULT;
2603 }
2604 if (put_user_u32(len, optlen)) {
2605 return -TARGET_EFAULT;
2606 }
2607 break;
2608 }
2609 case TARGET_SO_SNDTIMEO:
2610 optname = SO_SNDTIMEO;
2611 goto get_timeout;
2612 case TARGET_SO_PEERCRED: {
2613 struct ucred cr;
2614 socklen_t crlen;
2615 struct target_ucred *tcr;
2616
2617 if (get_user_u32(len, optlen)) {
2618 return -TARGET_EFAULT;
2619 }
2620 if (len < 0) {
2621 return -TARGET_EINVAL;
2622 }
2623
2624 crlen = sizeof(cr);
2625 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2626 &cr, &crlen));
2627 if (ret < 0) {
2628 return ret;
2629 }
2630 if (len > crlen) {
2631 len = crlen;
2632 }
2633 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2634 return -TARGET_EFAULT;
2635 }
2636 __put_user(cr.pid, &tcr->pid);
2637 __put_user(cr.uid, &tcr->uid);
2638 __put_user(cr.gid, &tcr->gid);
2639 unlock_user_struct(tcr, optval_addr, 1);
2640 if (put_user_u32(len, optlen)) {
2641 return -TARGET_EFAULT;
2642 }
2643 break;
2644 }
2645 case TARGET_SO_PEERSEC: {
2646 char *name;
2647
2648 if (get_user_u32(len, optlen)) {
2649 return -TARGET_EFAULT;
2650 }
2651 if (len < 0) {
2652 return -TARGET_EINVAL;
2653 }
2654 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2655 if (!name) {
2656 return -TARGET_EFAULT;
2657 }
2658 lv = len;
2659 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2660 name, &lv));
2661 if (put_user_u32(lv, optlen)) {
2662 ret = -TARGET_EFAULT;
2663 }
2664 unlock_user(name, optval_addr, lv);
2665 break;
2666 }
2667 case TARGET_SO_LINGER:
2668 {
2669 struct linger lg;
2670 socklen_t lglen;
2671 struct target_linger *tlg;
2672
2673 if (get_user_u32(len, optlen)) {
2674 return -TARGET_EFAULT;
2675 }
2676 if (len < 0) {
2677 return -TARGET_EINVAL;
2678 }
2679
2680 lglen = sizeof(lg);
2681 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2682 &lg, &lglen));
2683 if (ret < 0) {
2684 return ret;
2685 }
2686 if (len > lglen) {
2687 len = lglen;
2688 }
2689 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2690 return -TARGET_EFAULT;
2691 }
2692 __put_user(lg.l_onoff, &tlg->l_onoff);
2693 __put_user(lg.l_linger, &tlg->l_linger);
2694 unlock_user_struct(tlg, optval_addr, 1);
2695 if (put_user_u32(len, optlen)) {
2696 return -TARGET_EFAULT;
2697 }
2698 break;
2699 }
2700 /* Options with 'int' argument. */
2701 case TARGET_SO_DEBUG:
2702 optname = SO_DEBUG;
2703 goto int_case;
2704 case TARGET_SO_REUSEADDR:
2705 optname = SO_REUSEADDR;
2706 goto int_case;
2707 #ifdef SO_REUSEPORT
2708 case TARGET_SO_REUSEPORT:
2709 optname = SO_REUSEPORT;
2710 goto int_case;
2711 #endif
2712 case TARGET_SO_TYPE:
2713 optname = SO_TYPE;
2714 goto int_case;
2715 case TARGET_SO_ERROR:
2716 optname = SO_ERROR;
2717 goto int_case;
2718 case TARGET_SO_DONTROUTE:
2719 optname = SO_DONTROUTE;
2720 goto int_case;
2721 case TARGET_SO_BROADCAST:
2722 optname = SO_BROADCAST;
2723 goto int_case;
2724 case TARGET_SO_SNDBUF:
2725 optname = SO_SNDBUF;
2726 goto int_case;
2727 case TARGET_SO_RCVBUF:
2728 optname = SO_RCVBUF;
2729 goto int_case;
2730 case TARGET_SO_KEEPALIVE:
2731 optname = SO_KEEPALIVE;
2732 goto int_case;
2733 case TARGET_SO_OOBINLINE:
2734 optname = SO_OOBINLINE;
2735 goto int_case;
2736 case TARGET_SO_NO_CHECK:
2737 optname = SO_NO_CHECK;
2738 goto int_case;
2739 case TARGET_SO_PRIORITY:
2740 optname = SO_PRIORITY;
2741 goto int_case;
2742 #ifdef SO_BSDCOMPAT
2743 case TARGET_SO_BSDCOMPAT:
2744 optname = SO_BSDCOMPAT;
2745 goto int_case;
2746 #endif
2747 case TARGET_SO_PASSCRED:
2748 optname = SO_PASSCRED;
2749 goto int_case;
2750 case TARGET_SO_TIMESTAMP:
2751 optname = SO_TIMESTAMP;
2752 goto int_case;
2753 case TARGET_SO_RCVLOWAT:
2754 optname = SO_RCVLOWAT;
2755 goto int_case;
2756 case TARGET_SO_ACCEPTCONN:
2757 optname = SO_ACCEPTCONN;
2758 goto int_case;
2759 case TARGET_SO_PROTOCOL:
2760 optname = SO_PROTOCOL;
2761 goto int_case;
2762 case TARGET_SO_DOMAIN:
2763 optname = SO_DOMAIN;
2764 goto int_case;
2765 default:
2766 goto int_case;
2767 }
2768 break;
2769 case SOL_TCP:
2770 case SOL_UDP:
2771 /* TCP and UDP options all take an 'int' value. */
2772 int_case:
2773 if (get_user_u32(len, optlen))
2774 return -TARGET_EFAULT;
2775 if (len < 0)
2776 return -TARGET_EINVAL;
2777 lv = sizeof(lv);
2778 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2779 if (ret < 0)
2780 return ret;
2781 switch (optname) {
2782 case SO_TYPE:
2783 val = host_to_target_sock_type(val);
2784 break;
2785 case SO_ERROR:
2786 val = host_to_target_errno(val);
2787 break;
2788 }
2789 if (len > lv)
2790 len = lv;
2791 if (len == 4) {
2792 if (put_user_u32(val, optval_addr))
2793 return -TARGET_EFAULT;
2794 } else {
2795 if (put_user_u8(val, optval_addr))
2796 return -TARGET_EFAULT;
2797 }
2798 if (put_user_u32(len, optlen))
2799 return -TARGET_EFAULT;
2800 break;
2801 case SOL_IP:
2802 switch(optname) {
2803 case IP_TOS:
2804 case IP_TTL:
2805 case IP_HDRINCL:
2806 case IP_ROUTER_ALERT:
2807 case IP_RECVOPTS:
2808 case IP_RETOPTS:
2809 case IP_PKTINFO:
2810 case IP_MTU_DISCOVER:
2811 case IP_RECVERR:
2812 case IP_RECVTOS:
2813 #ifdef IP_FREEBIND
2814 case IP_FREEBIND:
2815 #endif
2816 case IP_MULTICAST_TTL:
2817 case IP_MULTICAST_LOOP:
2818 if (get_user_u32(len, optlen))
2819 return -TARGET_EFAULT;
2820 if (len < 0)
2821 return -TARGET_EINVAL;
2822 lv = sizeof(lv);
2823 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2824 if (ret < 0)
2825 return ret;
2826 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2827 len = 1;
2828 if (put_user_u32(len, optlen)
2829 || put_user_u8(val, optval_addr))
2830 return -TARGET_EFAULT;
2831 } else {
2832 if (len > sizeof(int))
2833 len = sizeof(int);
2834 if (put_user_u32(len, optlen)
2835 || put_user_u32(val, optval_addr))
2836 return -TARGET_EFAULT;
2837 }
2838 break;
2839 default:
2840 ret = -TARGET_ENOPROTOOPT;
2841 break;
2842 }
2843 break;
2844 case SOL_IPV6:
2845 switch (optname) {
2846 case IPV6_MTU_DISCOVER:
2847 case IPV6_MTU:
2848 case IPV6_V6ONLY:
2849 case IPV6_RECVPKTINFO:
2850 case IPV6_UNICAST_HOPS:
2851 case IPV6_MULTICAST_HOPS:
2852 case IPV6_MULTICAST_LOOP:
2853 case IPV6_RECVERR:
2854 case IPV6_RECVHOPLIMIT:
2855 case IPV6_2292HOPLIMIT:
2856 case IPV6_CHECKSUM:
2857 case IPV6_ADDRFORM:
2858 case IPV6_2292PKTINFO:
2859 case IPV6_RECVTCLASS:
2860 case IPV6_RECVRTHDR:
2861 case IPV6_2292RTHDR:
2862 case IPV6_RECVHOPOPTS:
2863 case IPV6_2292HOPOPTS:
2864 case IPV6_RECVDSTOPTS:
2865 case IPV6_2292DSTOPTS:
2866 case IPV6_TCLASS:
2867 case IPV6_ADDR_PREFERENCES:
2868 #ifdef IPV6_RECVPATHMTU
2869 case IPV6_RECVPATHMTU:
2870 #endif
2871 #ifdef IPV6_TRANSPARENT
2872 case IPV6_TRANSPARENT:
2873 #endif
2874 #ifdef IPV6_FREEBIND
2875 case IPV6_FREEBIND:
2876 #endif
2877 #ifdef IPV6_RECVORIGDSTADDR
2878 case IPV6_RECVORIGDSTADDR:
2879 #endif
2880 if (get_user_u32(len, optlen))
2881 return -TARGET_EFAULT;
2882 if (len < 0)
2883 return -TARGET_EINVAL;
2884 lv = sizeof(lv);
2885 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2886 if (ret < 0)
2887 return ret;
2888 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2889 len = 1;
2890 if (put_user_u32(len, optlen)
2891 || put_user_u8(val, optval_addr))
2892 return -TARGET_EFAULT;
2893 } else {
2894 if (len > sizeof(int))
2895 len = sizeof(int);
2896 if (put_user_u32(len, optlen)
2897 || put_user_u32(val, optval_addr))
2898 return -TARGET_EFAULT;
2899 }
2900 break;
2901 default:
2902 ret = -TARGET_ENOPROTOOPT;
2903 break;
2904 }
2905 break;
2906 #ifdef SOL_NETLINK
2907 case SOL_NETLINK:
2908 switch (optname) {
2909 case NETLINK_PKTINFO:
2910 case NETLINK_BROADCAST_ERROR:
2911 case NETLINK_NO_ENOBUFS:
2912 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2913 case NETLINK_LISTEN_ALL_NSID:
2914 case NETLINK_CAP_ACK:
2915 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2916 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2917 case NETLINK_EXT_ACK:
2918 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2919 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2920 case NETLINK_GET_STRICT_CHK:
2921 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2922 if (get_user_u32(len, optlen)) {
2923 return -TARGET_EFAULT;
2924 }
2925 if (len != sizeof(val)) {
2926 return -TARGET_EINVAL;
2927 }
2928 lv = len;
2929 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2930 if (ret < 0) {
2931 return ret;
2932 }
2933 if (put_user_u32(lv, optlen)
2934 || put_user_u32(val, optval_addr)) {
2935 return -TARGET_EFAULT;
2936 }
2937 break;
2938 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2939 case NETLINK_LIST_MEMBERSHIPS:
2940 {
2941 uint32_t *results;
2942 int i;
2943 if (get_user_u32(len, optlen)) {
2944 return -TARGET_EFAULT;
2945 }
2946 if (len < 0) {
2947 return -TARGET_EINVAL;
2948 }
2949 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2950 if (!results && len > 0) {
2951 return -TARGET_EFAULT;
2952 }
2953 lv = len;
2954 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2955 if (ret < 0) {
2956 unlock_user(results, optval_addr, 0);
2957 return ret;
2958 }
2959 /* swap host endianness to target endianness. */
2960 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2961 results[i] = tswap32(results[i]);
2962 }
2963 unlock_user(results, optval_addr, len);
2964 if (put_user_u32(lv, optlen)) {
2965 return -TARGET_EFAULT;
2966 }
2967 break;
2968 }
2969 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2970 default:
2971 goto unimplemented;
2972 }
2973 break;
2974 #endif /* SOL_NETLINK */
2975 default:
2976 unimplemented:
2977 qemu_log_mask(LOG_UNIMP,
2978 "getsockopt level=%d optname=%d not yet supported\n",
2979 level, optname);
2980 ret = -TARGET_EOPNOTSUPP;
2981 break;
2982 }
2983 return ret;
2984 }
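/*
 * Note on the paths above: the host getsockopt() is always queried with
 * a full-sized int, but the result is narrowed to a single byte when the
 * guest supplied a shorter buffer (and, for SOL_IP/SOL_IPV6, only when
 * the value fits in one byte), mirroring the kernel's handling of its
 * byte-sized options.
 */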
2985
2986 /* Convert target low/high pair representing file offset into the host
2987 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2988 * as the kernel doesn't handle them either.
2989 */
2990 static void target_to_host_low_high(abi_ulong tlow,
2991 abi_ulong thigh,
2992 unsigned long *hlow,
2993 unsigned long *hhigh)
2994 {
2995 uint64_t off = tlow |
2996 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2997 TARGET_LONG_BITS / 2;
2998
2999 *hlow = off;
3000 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3001 }
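/*
 * Worked example (assuming a 32-bit target on a 64-bit host): for the
 * file offset 0x123456789 the guest passes tlow = 0x23456789 and
 * thigh = 0x1. The two half-width shifts rebuild off = 0x123456789
 * without ever shifting by a full word width (undefined behaviour in C),
 * and the same trick splits it again: *hlow = 0x123456789, *hhigh = 0.
 */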
3002
3003 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3004 abi_ulong count, int copy)
3005 {
3006 struct target_iovec *target_vec;
3007 struct iovec *vec;
3008 abi_ulong total_len, max_len;
3009 int i;
3010 int err = 0;
3011 bool bad_address = false;
3012
3013 if (count == 0) {
3014 errno = 0;
3015 return NULL;
3016 }
3017 if (count > IOV_MAX) {
3018 errno = EINVAL;
3019 return NULL;
3020 }
3021
3022 vec = g_try_new0(struct iovec, count);
3023 if (vec == NULL) {
3024 errno = ENOMEM;
3025 return NULL;
3026 }
3027
3028 target_vec = lock_user(VERIFY_READ, target_addr,
3029 count * sizeof(struct target_iovec), 1);
3030 if (target_vec == NULL) {
3031 err = EFAULT;
3032 goto fail2;
3033 }
3034
3035 /* ??? If host page size > target page size, this will result in a
3036 value larger than what we can actually support. */
3037 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3038 total_len = 0;
3039
3040 for (i = 0; i < count; i++) {
3041 abi_ulong base = tswapal(target_vec[i].iov_base);
3042 abi_long len = tswapal(target_vec[i].iov_len);
3043
3044 if (len < 0) {
3045 err = EINVAL;
3046 goto fail;
3047 } else if (len == 0) {
3048 /* A zero-length entry is ignored. */
3049 vec[i].iov_base = 0;
3050 } else {
3051 vec[i].iov_base = lock_user(type, base, len, copy);
3052 /* If the first buffer pointer is bad, this is a fault. But
3053 * subsequent bad buffers will result in a partial write; this
3054 * is realized by filling the vector with null pointers and
3055 * zero lengths. */
3056 if (!vec[i].iov_base) {
3057 if (i == 0) {
3058 err = EFAULT;
3059 goto fail;
3060 } else {
3061 bad_address = true;
3062 }
3063 }
3064 if (bad_address) {
3065 len = 0;
3066 }
3067 if (len > max_len - total_len) {
3068 len = max_len - total_len;
3069 }
3070 }
3071 vec[i].iov_len = len;
3072 total_len += len;
3073 }
3074
3075 unlock_user(target_vec, target_addr, 0);
3076 return vec;
3077
3078 fail:
3079 while (--i >= 0) {
3080 if (tswapal(target_vec[i].iov_len) > 0) {
3081 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3082 }
3083 }
3084 unlock_user(target_vec, target_addr, 0);
3085 fail2:
3086 g_free(vec);
3087 errno = err;
3088 return NULL;
3089 }
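/*
 * Example of the partial-write behaviour above: if the second of three
 * iovec entries points at an unmapped guest page, the vector is still
 * returned, but from that entry onwards the lengths are forced to zero
 * (and the bad entry's base is NULL), so a later writev()/sendmsg()
 * performs a short transfer instead of failing outright.
 */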
3090
3091 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3092 abi_ulong count, int copy)
3093 {
3094 struct target_iovec *target_vec;
3095 int i;
3096
3097 target_vec = lock_user(VERIFY_READ, target_addr,
3098 count * sizeof(struct target_iovec), 1);
3099 if (target_vec) {
3100 for (i = 0; i < count; i++) {
3101 abi_ulong base = tswapal(target_vec[i].iov_base);
3102 abi_long len = tswapal(target_vec[i].iov_len);
3103 if (len < 0) {
3104 break;
3105 }
3106 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3107 }
3108 unlock_user(target_vec, target_addr, 0);
3109 }
3110
3111 g_free(vec);
3112 }
3113
3114 static inline int target_to_host_sock_type(int *type)
3115 {
3116 int host_type = 0;
3117 int target_type = *type;
3118
3119 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3120 case TARGET_SOCK_DGRAM:
3121 host_type = SOCK_DGRAM;
3122 break;
3123 case TARGET_SOCK_STREAM:
3124 host_type = SOCK_STREAM;
3125 break;
3126 default:
3127 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3128 break;
3129 }
3130 if (target_type & TARGET_SOCK_CLOEXEC) {
3131 #if defined(SOCK_CLOEXEC)
3132 host_type |= SOCK_CLOEXEC;
3133 #else
3134 return -TARGET_EINVAL;
3135 #endif
3136 }
3137 if (target_type & TARGET_SOCK_NONBLOCK) {
3138 #if defined(SOCK_NONBLOCK)
3139 host_type |= SOCK_NONBLOCK;
3140 #elif !defined(O_NONBLOCK)
3141 return -TARGET_EINVAL;
3142 #endif
3143 }
3144 *type = host_type;
3145 return 0;
3146 }
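/*
 * Illustrative mapping done above: a guest
 *     socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)
 * has its type flags translated to the host's SOCK_* constants; if the
 * host lacks SOCK_NONBLOCK but has O_NONBLOCK, the flag is left unset
 * here and emulated afterwards via fcntl() in sock_flags_fixup() below.
 */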
3147
3148 /* Try to emulate socket type flags after socket creation. */
3149 static int sock_flags_fixup(int fd, int target_type)
3150 {
3151 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3152 if (target_type & TARGET_SOCK_NONBLOCK) {
3153 int flags = fcntl(fd, F_GETFL);
3154 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3155 close(fd);
3156 return -TARGET_EINVAL;
3157 }
3158 }
3159 #endif
3160 return fd;
3161 }
3162
3163 /* do_socket() Must return target values and target errnos. */
3164 static abi_long do_socket(int domain, int type, int protocol)
3165 {
3166 int target_type = type;
3167 int ret;
3168
3169 ret = target_to_host_sock_type(&type);
3170 if (ret) {
3171 return ret;
3172 }
3173
3174 if (domain == PF_NETLINK && !(
3175 #ifdef CONFIG_RTNETLINK
3176 protocol == NETLINK_ROUTE ||
3177 #endif
3178 protocol == NETLINK_KOBJECT_UEVENT ||
3179 protocol == NETLINK_AUDIT)) {
3180 return -TARGET_EPROTONOSUPPORT;
3181 }
3182
3183 if (domain == AF_PACKET ||
3184 (domain == AF_INET && type == SOCK_PACKET)) {
3185 protocol = tswap16(protocol);
3186 }
3187
3188 ret = get_errno(socket(domain, type, protocol));
3189 if (ret >= 0) {
3190 ret = sock_flags_fixup(ret, target_type);
3191 if (type == SOCK_PACKET) {
3192 /* Handle an obsolete case:
3193 * if the socket type is SOCK_PACKET, bind by name
3194 */
3195 fd_trans_register(ret, &target_packet_trans);
3196 } else if (domain == PF_NETLINK) {
3197 switch (protocol) {
3198 #ifdef CONFIG_RTNETLINK
3199 case NETLINK_ROUTE:
3200 fd_trans_register(ret, &target_netlink_route_trans);
3201 break;
3202 #endif
3203 case NETLINK_KOBJECT_UEVENT:
3204 /* nothing to do: messages are strings */
3205 break;
3206 case NETLINK_AUDIT:
3207 fd_trans_register(ret, &target_netlink_audit_trans);
3208 break;
3209 default:
3210 g_assert_not_reached();
3211 }
3212 }
3213 }
3214 return ret;
3215 }
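/*
 * Note on the protocol byte-swap above: for AF_PACKET sockets the
 * protocol argument carries an Ethernet protocol number in network byte
 * order as produced by the guest (e.g. a guest-side htons(ETH_P_ALL)),
 * so tswap16() is needed for the host kernel to see the same
 * network-order value when guest and host endianness differ.
 */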
3216
3217 /* do_bind() Must return target values and target errnos. */
3218 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3219 socklen_t addrlen)
3220 {
3221 void *addr;
3222 abi_long ret;
3223
3224 if ((int)addrlen < 0) {
3225 return -TARGET_EINVAL;
3226 }
3227
3228 addr = alloca(addrlen+1);
3229
3230 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3231 if (ret)
3232 return ret;
3233
3234 return get_errno(bind(sockfd, addr, addrlen));
3235 }
3236
3237 /* do_connect() Must return target values and target errnos. */
3238 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3239 socklen_t addrlen)
3240 {
3241 void *addr;
3242 abi_long ret;
3243
3244 if ((int)addrlen < 0) {
3245 return -TARGET_EINVAL;
3246 }
3247
3248 addr = alloca(addrlen+1);
3249
3250 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3251 if (ret)
3252 return ret;
3253
3254 return get_errno(safe_connect(sockfd, addr, addrlen));
3255 }
3256
3257 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3258 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3259 int flags, int send)
3260 {
3261 abi_long ret, len;
3262 struct msghdr msg;
3263 abi_ulong count;
3264 struct iovec *vec;
3265 abi_ulong target_vec;
3266
3267 if (msgp->msg_name) {
3268 msg.msg_namelen = tswap32(msgp->msg_namelen);
3269 msg.msg_name = alloca(msg.msg_namelen+1);
3270 ret = target_to_host_sockaddr(fd, msg.msg_name,
3271 tswapal(msgp->msg_name),
3272 msg.msg_namelen);
3273 if (ret == -TARGET_EFAULT) {
3274 /* For connected sockets msg_name and msg_namelen must
3275 * be ignored, so returning EFAULT immediately is wrong.
3276 * Instead, pass a bad msg_name to the host kernel, and
3277 * let it decide whether to return EFAULT or not.
3278 */
3279 msg.msg_name = (void *)-1;
3280 } else if (ret) {
3281 goto out2;
3282 }
3283 } else {
3284 msg.msg_name = NULL;
3285 msg.msg_namelen = 0;
3286 }
3287 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3288 msg.msg_control = alloca(msg.msg_controllen);
3289 memset(msg.msg_control, 0, msg.msg_controllen);
3290
3291 msg.msg_flags = tswap32(msgp->msg_flags);
3292
3293 count = tswapal(msgp->msg_iovlen);
3294 target_vec = tswapal(msgp->msg_iov);
3295
3296 if (count > IOV_MAX) {
3297 /* sendmsg/recvmsg return a different errno for this condition than
3298 * readv/writev, so we must catch it here before lock_iovec() does.
3299 */
3300 ret = -TARGET_EMSGSIZE;
3301 goto out2;
3302 }
3303
3304 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3305 target_vec, count, send);
3306 if (vec == NULL) {
3307 ret = -host_to_target_errno(errno);
3308 /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3309 if (!send || ret) {
3310 goto out2;
3311 }
3312 }
3313 msg.msg_iovlen = count;
3314 msg.msg_iov = vec;
3315
3316 if (send) {
3317 if (fd_trans_target_to_host_data(fd)) {
3318 void *host_msg;
3319
3320 host_msg = g_malloc(msg.msg_iov->iov_len);
3321 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3322 ret = fd_trans_target_to_host_data(fd)(host_msg,
3323 msg.msg_iov->iov_len);
3324 if (ret >= 0) {
3325 msg.msg_iov->iov_base = host_msg;
3326 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3327 }
3328 g_free(host_msg);
3329 } else {
3330 ret = target_to_host_cmsg(&msg, msgp);
3331 if (ret == 0) {
3332 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3333 }
3334 }
3335 } else {
3336 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3337 if (!is_error(ret)) {
3338 len = ret;
3339 if (fd_trans_host_to_target_data(fd)) {
3340 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3341 MIN(msg.msg_iov->iov_len, len));
3342 }
3343 if (!is_error(ret)) {
3344 ret = host_to_target_cmsg(msgp, &msg);
3345 }
3346 if (!is_error(ret)) {
3347 msgp->msg_namelen = tswap32(msg.msg_namelen);
3348 msgp->msg_flags = tswap32(msg.msg_flags);
3349 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3350 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3351 msg.msg_name, msg.msg_namelen);
3352 if (ret) {
3353 goto out;
3354 }
3355 }
3356
3357 ret = len;
3358 }
3359 }
3360 }
3361
3362 out:
3363 if (vec) {
3364 unlock_iovec(vec, target_vec, count, !send);
3365 }
3366 out2:
3367 return ret;
3368 }
3369
3370 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3371 int flags, int send)
3372 {
3373 abi_long ret;
3374 struct target_msghdr *msgp;
3375
3376 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3377 msgp,
3378 target_msg,
3379 send ? 1 : 0)) {
3380 return -TARGET_EFAULT;
3381 }
3382 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3383 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3384 return ret;
3385 }
3386
3387 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3388 * so it might not have this *mmsg-specific flag either.
3389 */
3390 #ifndef MSG_WAITFORONE
3391 #define MSG_WAITFORONE 0x10000
3392 #endif
3393
3394 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3395 unsigned int vlen, unsigned int flags,
3396 int send)
3397 {
3398 struct target_mmsghdr *mmsgp;
3399 abi_long ret = 0;
3400 int i;
3401
3402 if (vlen > UIO_MAXIOV) {
3403 vlen = UIO_MAXIOV;
3404 }
3405
3406 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3407 if (!mmsgp) {
3408 return -TARGET_EFAULT;
3409 }
3410
3411 for (i = 0; i < vlen; i++) {
3412 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3413 if (is_error(ret)) {
3414 break;
3415 }
3416 mmsgp[i].msg_len = tswap32(ret);
3417 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3418 if (flags & MSG_WAITFORONE) {
3419 flags |= MSG_DONTWAIT;
3420 }
3421 }
3422
3423 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3424
3425 /* Return number of datagrams sent if we sent any at all;
3426 * otherwise return the error.
3427 */
3428 if (i) {
3429 return i;
3430 }
3431 return ret;
3432 }
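/*
 * Semantics note: like the kernel's sendmmsg()/recvmmsg(), an error on
 * the very first datagram is returned as an error, while an error after
 * at least one completed datagram is swallowed and the count of
 * completed datagrams is returned instead; MSG_WAITFORONE makes the
 * remaining iterations non-blocking by OR-ing in MSG_DONTWAIT.
 */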
3433
3434 /* do_accept4() Must return target values and target errnos. */
3435 static abi_long do_accept4(int fd, abi_ulong target_addr,
3436 abi_ulong target_addrlen_addr, int flags)
3437 {
3438 socklen_t addrlen, ret_addrlen;
3439 void *addr;
3440 abi_long ret;
3441 int host_flags;
3442
3443 if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3444 return -TARGET_EINVAL;
3445 }
3446
3447 host_flags = 0;
3448 if (flags & TARGET_SOCK_NONBLOCK) {
3449 host_flags |= SOCK_NONBLOCK;
3450 }
3451 if (flags & TARGET_SOCK_CLOEXEC) {
3452 host_flags |= SOCK_CLOEXEC;
3453 }
3454
3455 if (target_addr == 0) {
3456 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3457 }
3458
3459 /* Linux returns EFAULT if the addrlen pointer is invalid */
3460 if (get_user_u32(addrlen, target_addrlen_addr))
3461 return -TARGET_EFAULT;
3462
3463 if ((int)addrlen < 0) {
3464 return -TARGET_EINVAL;
3465 }
3466
3467 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3468 return -TARGET_EFAULT;
3469 }
3470
3471 addr = alloca(addrlen);
3472
3473 ret_addrlen = addrlen;
3474 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3475 if (!is_error(ret)) {
3476 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3477 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3478 ret = -TARGET_EFAULT;
3479 }
3480 }
3481 return ret;
3482 }
3483
3484 /* do_getpeername() Must return target values and target errnos. */
3485 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3486 abi_ulong target_addrlen_addr)
3487 {
3488 socklen_t addrlen, ret_addrlen;
3489 void *addr;
3490 abi_long ret;
3491
3492 if (get_user_u32(addrlen, target_addrlen_addr))
3493 return -TARGET_EFAULT;
3494
3495 if ((int)addrlen < 0) {
3496 return -TARGET_EINVAL;
3497 }
3498
3499 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3500 return -TARGET_EFAULT;
3501 }
3502
3503 addr = alloca(addrlen);
3504
3505 ret_addrlen = addrlen;
3506 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3507 if (!is_error(ret)) {
3508 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3509 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3510 ret = -TARGET_EFAULT;
3511 }
3512 }
3513 return ret;
3514 }
3515
3516 /* do_getsockname() Must return target values and target errnos. */
3517 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3518 abi_ulong target_addrlen_addr)
3519 {
3520 socklen_t addrlen, ret_addrlen;
3521 void *addr;
3522 abi_long ret;
3523
3524 if (get_user_u32(addrlen, target_addrlen_addr))
3525 return -TARGET_EFAULT;
3526
3527 if ((int)addrlen < 0) {
3528 return -TARGET_EINVAL;
3529 }
3530
3531 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3532 return -TARGET_EFAULT;
3533 }
3534
3535 addr = alloca(addrlen);
3536
3537 ret_addrlen = addrlen;
3538 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3539 if (!is_error(ret)) {
3540 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3541 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3542 ret = -TARGET_EFAULT;
3543 }
3544 }
3545 return ret;
3546 }
3547
3548 /* do_socketpair() Must return target values and target errnos. */
3549 static abi_long do_socketpair(int domain, int type, int protocol,
3550 abi_ulong target_tab_addr)
3551 {
3552 int tab[2];
3553 abi_long ret;
3554
3555 target_to_host_sock_type(&type);
3556
3557 ret = get_errno(socketpair(domain, type, protocol, tab));
3558 if (!is_error(ret)) {
3559 if (put_user_s32(tab[0], target_tab_addr)
3560 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3561 ret = -TARGET_EFAULT;
3562 }
3563 return ret;
3564 }
3565
3566 /* do_sendto() Must return target values and target errnos. */
3567 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3568 abi_ulong target_addr, socklen_t addrlen)
3569 {
3570 void *addr;
3571 void *host_msg;
3572 void *copy_msg = NULL;
3573 abi_long ret;
3574
3575 if ((int)addrlen < 0) {
3576 return -TARGET_EINVAL;
3577 }
3578
3579 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3580 if (!host_msg)
3581 return -TARGET_EFAULT;
3582 if (fd_trans_target_to_host_data(fd)) {
3583 copy_msg = host_msg;
3584 host_msg = g_malloc(len);
3585 memcpy(host_msg, copy_msg, len);
3586 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3587 if (ret < 0) {
3588 goto fail;
3589 }
3590 }
3591 if (target_addr) {
3592 addr = alloca(addrlen+1);
3593 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3594 if (ret) {
3595 goto fail;
3596 }
3597 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3598 } else {
3599 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3600 }
3601 fail:
3602 if (copy_msg) {
3603 g_free(host_msg);
3604 host_msg = copy_msg;
3605 }
3606 unlock_user(host_msg, msg, 0);
3607 return ret;
3608 }
3609
3610 /* do_recvfrom() Must return target values and target errnos. */
3611 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3612 abi_ulong target_addr,
3613 abi_ulong target_addrlen)
3614 {
3615 socklen_t addrlen, ret_addrlen;
3616 void *addr;
3617 void *host_msg;
3618 abi_long ret;
3619
3620 if (!msg) {
3621 host_msg = NULL;
3622 } else {
3623 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3624 if (!host_msg) {
3625 return -TARGET_EFAULT;
3626 }
3627 }
3628 if (target_addr) {
3629 if (get_user_u32(addrlen, target_addrlen)) {
3630 ret = -TARGET_EFAULT;
3631 goto fail;
3632 }
3633 if ((int)addrlen < 0) {
3634 ret = -TARGET_EINVAL;
3635 goto fail;
3636 }
3637 addr = alloca(addrlen);
3638 ret_addrlen = addrlen;
3639 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3640 addr, &ret_addrlen));
3641 } else {
3642 addr = NULL; /* To keep compiler quiet. */
3643 addrlen = 0; /* To keep compiler quiet. */
3644 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3645 }
3646 if (!is_error(ret)) {
3647 if (fd_trans_host_to_target_data(fd)) {
3648 abi_long trans;
3649 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3650 if (is_error(trans)) {
3651 ret = trans;
3652 goto fail;
3653 }
3654 }
3655 if (target_addr) {
3656 host_to_target_sockaddr(target_addr, addr,
3657 MIN(addrlen, ret_addrlen));
3658 if (put_user_u32(ret_addrlen, target_addrlen)) {
3659 ret = -TARGET_EFAULT;
3660 goto fail;
3661 }
3662 }
3663 unlock_user(host_msg, msg, len);
3664 } else {
3665 fail:
3666 unlock_user(host_msg, msg, 0);
3667 }
3668 return ret;
3669 }
3670
3671 #ifdef TARGET_NR_socketcall
3672 /* do_socketcall() must return target values and target errnos. */
3673 static abi_long do_socketcall(int num, abi_ulong vptr)
3674 {
3675 static const unsigned nargs[] = { /* number of arguments per operation */
3676 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3677 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3678 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3679 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3680 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3681 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3682 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3683 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3684 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3685 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3686 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3687 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3688 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3689 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3690 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3691 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3692 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3693 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3694 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3695 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3696 };
3697 abi_long a[6]; /* max 6 args */
3698 unsigned i;
3699
3700 /* check the range of the first argument num */
3701 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3702 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3703 return -TARGET_EINVAL;
3704 }
3705 /* ensure we have space for args */
3706 if (nargs[num] > ARRAY_SIZE(a)) {
3707 return -TARGET_EINVAL;
3708 }
3709 /* collect the arguments in a[] according to nargs[] */
3710 for (i = 0; i < nargs[num]; ++i) {
3711 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3712 return -TARGET_EFAULT;
3713 }
3714 }
3715 /* now when we have the args, invoke the appropriate underlying function */
3716 switch (num) {
3717 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3718 return do_socket(a[0], a[1], a[2]);
3719 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3720 return do_bind(a[0], a[1], a[2]);
3721 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3722 return do_connect(a[0], a[1], a[2]);
3723 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3724 return get_errno(listen(a[0], a[1]));
3725 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3726 return do_accept4(a[0], a[1], a[2], 0);
3727 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3728 return do_getsockname(a[0], a[1], a[2]);
3729 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3730 return do_getpeername(a[0], a[1], a[2]);
3731 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3732 return do_socketpair(a[0], a[1], a[2], a[3]);
3733 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3734 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3735 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3736 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3737 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3738 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3739 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3740 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3741 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3742 return get_errno(shutdown(a[0], a[1]));
3743 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3744 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3745 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3746 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3747 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3748 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3749 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3750 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3751 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3752 return do_accept4(a[0], a[1], a[2], a[3]);
3753 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3754 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3755 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3756 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3757 default:
3758 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3759 return -TARGET_EINVAL;
3760 }
3761 }
3762 #endif
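/*
 * Illustrative guest-side use of the multiplexer above: on targets such
 * as 32-bit x86 the C library issues socketcall(SYS_CONNECT, args) with
 * args pointing at { fd, addr, addrlen } in guest memory; the dispatcher
 * reads nargs[TARGET_SYS_CONNECT] == 3 words via get_user_ual() and
 * hands them to do_connect().
 */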
3763
3764 #define N_SHM_REGIONS 32
3765
3766 static struct shm_region {
3767 abi_ulong start;
3768 abi_ulong size;
3769 bool in_use;
3770 } shm_regions[N_SHM_REGIONS];
3771
3772 #ifndef TARGET_SEMID64_DS
3773 /* asm-generic version of this struct */
3774 struct target_semid64_ds
3775 {
3776 struct target_ipc_perm sem_perm;
3777 abi_ulong sem_otime;
3778 #if TARGET_ABI_BITS == 32
3779 abi_ulong __unused1;
3780 #endif
3781 abi_ulong sem_ctime;
3782 #if TARGET_ABI_BITS == 32
3783 abi_ulong __unused2;
3784 #endif
3785 abi_ulong sem_nsems;
3786 abi_ulong __unused3;
3787 abi_ulong __unused4;
3788 };
3789 #endif
3790
3791 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3792 abi_ulong target_addr)
3793 {
3794 struct target_ipc_perm *target_ip;
3795 struct target_semid64_ds *target_sd;
3796
3797 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3798 return -TARGET_EFAULT;
3799 target_ip = &(target_sd->sem_perm);
3800 host_ip->__key = tswap32(target_ip->__key);
3801 host_ip->uid = tswap32(target_ip->uid);
3802 host_ip->gid = tswap32(target_ip->gid);
3803 host_ip->cuid = tswap32(target_ip->cuid);
3804 host_ip->cgid = tswap32(target_ip->cgid);
3805 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3806 host_ip->mode = tswap32(target_ip->mode);
3807 #else
3808 host_ip->mode = tswap16(target_ip->mode);
3809 #endif
3810 #if defined(TARGET_PPC)
3811 host_ip->__seq = tswap32(target_ip->__seq);
3812 #else
3813 host_ip->__seq = tswap16(target_ip->__seq);
3814 #endif
3815 unlock_user_struct(target_sd, target_addr, 0);
3816 return 0;
3817 }
3818
3819 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3820 struct ipc_perm *host_ip)
3821 {
3822 struct target_ipc_perm *target_ip;
3823 struct target_semid64_ds *target_sd;
3824
3825 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3826 return -TARGET_EFAULT;
3827 target_ip = &(target_sd->sem_perm);
3828 target_ip->__key = tswap32(host_ip->__key);
3829 target_ip->uid = tswap32(host_ip->uid);
3830 target_ip->gid = tswap32(host_ip->gid);
3831 target_ip->cuid = tswap32(host_ip->cuid);
3832 target_ip->cgid = tswap32(host_ip->cgid);
3833 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3834 target_ip->mode = tswap32(host_ip->mode);
3835 #else
3836 target_ip->mode = tswap16(host_ip->mode);
3837 #endif
3838 #if defined(TARGET_PPC)
3839 target_ip->__seq = tswap32(host_ip->__seq);
3840 #else
3841 target_ip->__seq = tswap16(host_ip->__seq);
3842 #endif
3843 unlock_user_struct(target_sd, target_addr, 1);
3844 return 0;
3845 }
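/*
 * Note on the converters above: tswap16()/tswap32() byte-swap only when
 * target and host endianness differ, so on a same-endian pair both
 * functions reduce to plain field copies.  Round-trip sketch:
 *
 *     struct ipc_perm host;
 *     target_to_host_ipc_perm(&host, guest_addr);   // guest -> host order
 *     host_to_target_ipc_perm(guest_addr, &host);   // host -> guest order
 */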
3846
3847 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3848 abi_ulong target_addr)
3849 {
3850 struct target_semid64_ds *target_sd;
3851
3852 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3853 return -TARGET_EFAULT;
3854 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3855 return -TARGET_EFAULT;
3856 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3857 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3858 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3859 unlock_user_struct(target_sd, target_addr, 0);
3860 return 0;
3861 }
3862
3863 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3864 struct semid_ds *host_sd)
3865 {
3866 struct target_semid64_ds *target_sd;
3867
3868 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3869 return -TARGET_EFAULT;
3870 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3871 return -TARGET_EFAULT;
3872 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3873 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3874 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3875 unlock_user_struct(target_sd, target_addr, 1);
3876 return 0;
3877 }
3878
3879 struct target_seminfo {
3880 int semmap;
3881 int semmni;
3882 int semmns;
3883 int semmnu;
3884 int semmsl;
3885 int semopm;
3886 int semume;
3887 int semusz;
3888 int semvmx;
3889 int semaem;
3890 };
3891
3892 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3893 struct seminfo *host_seminfo)
3894 {
3895 struct target_seminfo *target_seminfo;
3896 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3897 return -TARGET_EFAULT;
3898 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3899 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3900 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3901 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3902 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3903 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3904 __put_user(host_seminfo->semume, &target_seminfo->semume);
3905 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3906 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3907 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3908 unlock_user_struct(target_seminfo, target_addr, 1);
3909 return 0;
3910 }
3911
3912 union semun {
3913 int val;
3914 struct semid_ds *buf;
3915 unsigned short *array;
3916 struct seminfo *__buf;
3917 };
3918
3919 union target_semun {
3920 int val;
3921 abi_ulong buf;
3922 abi_ulong array;
3923 abi_ulong __buf;
3924 };
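/*
 * In the target view of semun each host pointer member is flattened to an
 * abi_ulong holding a guest address, so the union has the guest's size no
 * matter how wide host pointers are.  Usage sketch (illustrative; the
 * guest_array_addr name is hypothetical):
 *
 *     union target_semun su = { .array = guest_array_addr };  // abi_ulong
 *     do_semctl(semid, 0, GETALL, su.array);   // handler locks the guest
 *                                              // memory behind the address
 */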
3925
3926 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3927 abi_ulong target_addr)
3928 {
3929 int nsems;
3930 unsigned short *array;
3931 union semun semun;
3932 struct semid_ds semid_ds;
3933 int i, ret;
3934
3935 semun.buf = &semid_ds;
3936
3937 ret = semctl(semid, 0, IPC_STAT, semun);
3938 if (ret == -1)
3939 return get_errno(ret);
3940
3941 nsems = semid_ds.sem_nsems;
3942
3943 *host_array = g_try_new(unsigned short, nsems);
3944 if (!*host_array) {
3945 return -TARGET_ENOMEM;
3946 }
3947 array = lock_user(VERIFY_READ, target_addr,
3948 nsems * sizeof(unsigned short), 1);
3949 if (!array) {
3950 g_free(*host_array);
3951 return -TARGET_EFAULT;
3952 }
3953
3954 for (i = 0; i < nsems; i++) {
3955 __get_user((*host_array)[i], &array[i]);
3956 }
3957 unlock_user(array, target_addr, 0);
3958
3959 return 0;
3960 }
3961
3962 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3963 unsigned short **host_array)
3964 {
3965 int nsems;
3966 unsigned short *array;
3967 union semun semun;
3968 struct semid_ds semid_ds;
3969 int i, ret;
3970
3971 semun.buf = &semid_ds;
3972
3973 ret = semctl(semid, 0, IPC_STAT, semun);
3974 if (ret == -1)
3975 return get_errno(ret);
3976
3977 nsems = semid_ds.sem_nsems;
3978
3979 array = lock_user(VERIFY_WRITE, target_addr,
3980 nsems * sizeof(unsigned short), 0);
3981 if (!array)
3982 return -TARGET_EFAULT;
3983
3984 for (i = 0; i < nsems; i++) {
3985 __put_user((*host_array)[i], &array[i]);
3986 }
3987 g_free(*host_array);
3988 unlock_user(array, target_addr, 1);
3989
3990 return 0;
3991 }
3992
3993 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3994 abi_ulong target_arg)
3995 {
3996 union target_semun target_su = { .buf = target_arg };
3997 union semun arg;
3998 struct semid_ds dsarg;
3999 unsigned short *array = NULL;
4000 struct seminfo seminfo;
4001 abi_long ret = -TARGET_EINVAL;
4002 abi_long err;
4003 cmd &= 0xff;
4004
4005 switch (cmd) {
4006 case GETVAL:
4007 case SETVAL:
4008 /* In 64 bit cross-endian situations, we will erroneously pick up
4009 * the wrong half of the union for the "val" element. To rectify
4010 * this, the entire 8-byte structure is byteswapped, followed by
4011 * a swap of the 4 byte val field. In other cases, the data is
4012 * already in proper host byte order. */
4013 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4014 target_su.buf = tswapal(target_su.buf);
4015 arg.val = tswap32(target_su.val);
4016 } else {
4017 arg.val = target_su.val;
4018 }
4019 ret = get_errno(semctl(semid, semnum, cmd, arg));
4020 break;
4021 case GETALL:
4022 case SETALL:
4023 err = target_to_host_semarray(semid, &array, target_su.array);
4024 if (err)
4025 return err;
4026 arg.array = array;
4027 ret = get_errno(semctl(semid, semnum, cmd, arg));
4028 err = host_to_target_semarray(semid, target_su.array, &array);
4029 if (err)
4030 return err;
4031 break;
4032 case IPC_STAT:
4033 case IPC_SET:
4034 case SEM_STAT:
4035 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4036 if (err)
4037 return err;
4038 arg.buf = &dsarg;
4039 ret = get_errno(semctl(semid, semnum, cmd, arg));
4040 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4041 if (err)
4042 return err;
4043 break;
4044 case IPC_INFO:
4045 case SEM_INFO:
4046 arg.__buf = &seminfo;
4047 ret = get_errno(semctl(semid, semnum, cmd, arg));
4048 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4049 if (err)
4050 return err;
4051 break;
4052 case IPC_RMID:
4053 case GETPID:
4054 case GETNCNT:
4055 case GETZCNT:
4056 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4057 break;
4058 }
4059
4060 return ret;
4061 }
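/*
 * Worked example of the GETVAL/SETVAL fixup above, assuming a big-endian
 * 64-bit guest on a little-endian host with val == 42:
 *
 *     guest union bytes:           00 00 00 2a ?? ?? ?? ??
 *     after the 8-byte load+swap:  ?? ?? ?? ?? 2a 00 00 00  (val in wrong half)
 *     tswapal(target_su.buf):      00 00 00 2a ?? ?? ?? ??  (layout restored)
 *     tswap32(target_su.val):      arg.val == 0x2a == 42 on the host
 */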
4062
4063 struct target_sembuf {
4064 unsigned short sem_num;
4065 short sem_op;
4066 short sem_flg;
4067 };
4068
4069 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4070 abi_ulong target_addr,
4071 unsigned nsops)
4072 {
4073 struct target_sembuf *target_sembuf;
4074 int i;
4075
4076 target_sembuf = lock_user(VERIFY_READ, target_addr,
4077 nsops * sizeof(struct target_sembuf), 1);
4078 if (!target_sembuf)
4079 return -TARGET_EFAULT;
4080
4081 for (i = 0; i < nsops; i++) {
4082 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4083 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4084 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4085 }
4086
4087 unlock_user(target_sembuf, target_addr, 0);
4088
4089 return 0;
4090 }
4091
4092 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4093 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4094
4095 /*
4096 * This macro is required to handle the s390 variants, which pass the
4097 * arguments in a different order from the default.
4098 */
4099 #ifdef __s390x__
4100 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4101 (__nsops), (__timeout), (__sops)
4102 #else
4103 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4104 (__nsops), 0, (__sops), (__timeout)
4105 #endif
4106
4107 static inline abi_long do_semtimedop(int semid,
4108 abi_long ptr,
4109 unsigned nsops,
4110 abi_long timeout, bool time64)
4111 {
4112 struct sembuf *sops;
4113 struct timespec ts, *pts = NULL;
4114 abi_long ret;
4115
4116 if (timeout) {
4117 pts = &ts;
4118 if (time64) {
4119 if (target_to_host_timespec64(pts, timeout)) {
4120 return -TARGET_EFAULT;
4121 }
4122 } else {
4123 if (target_to_host_timespec(pts, timeout)) {
4124 return -TARGET_EFAULT;
4125 }
4126 }
4127 }
4128
4129 if (nsops > TARGET_SEMOPM) {
4130 return -TARGET_E2BIG;
4131 }
4132
4133 sops = g_new(struct sembuf, nsops);
4134
4135 if (target_to_host_sembuf(sops, ptr, nsops)) {
4136 g_free(sops);
4137 return -TARGET_EFAULT;
4138 }
4139
4140 ret = -TARGET_ENOSYS;
4141 #ifdef __NR_semtimedop
4142 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4143 #endif
4144 #ifdef __NR_ipc
4145 if (ret == -TARGET_ENOSYS) {
4146 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4147 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4148 }
4149 #endif
4150 g_free(sops);
4151 return ret;
4152 }
4153 #endif
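/*
 * Usage sketch (illustrative): a guest semop(semid, sops, 1) with no
 * timeout arrives here as do_semtimedop(semid, ptr, 1, 0, false); the
 * single target_sembuf is converted by target_to_host_sembuf() and the
 * host is entered via semtimedop(2) or, if that syscall is absent, via
 * the multiplexed ipc(2) with SEMTIMEDOP_IPC_ARGS ordering the arguments.
 */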
4154
4155 struct target_msqid_ds
4156 {
4157 struct target_ipc_perm msg_perm;
4158 abi_ulong msg_stime;
4159 #if TARGET_ABI_BITS == 32
4160 abi_ulong __unused1;
4161 #endif
4162 abi_ulong msg_rtime;
4163 #if TARGET_ABI_BITS == 32
4164 abi_ulong __unused2;
4165 #endif
4166 abi_ulong msg_ctime;
4167 #if TARGET_ABI_BITS == 32
4168 abi_ulong __unused3;
4169 #endif
4170 abi_ulong __msg_cbytes;
4171 abi_ulong msg_qnum;
4172 abi_ulong msg_qbytes;
4173 abi_ulong msg_lspid;
4174 abi_ulong msg_lrpid;
4175 abi_ulong __unused4;
4176 abi_ulong __unused5;
4177 };
4178
4179 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4180 abi_ulong target_addr)
4181 {
4182 struct target_msqid_ds *target_md;
4183
4184 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4185 return -TARGET_EFAULT;
4186 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4187 return -TARGET_EFAULT;
4188 host_md->msg_stime = tswapal(target_md->msg_stime);
4189 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4190 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4191 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4192 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4193 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4194 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4195 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4196 unlock_user_struct(target_md, target_addr, 0);
4197 return 0;
4198 }
4199
4200 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4201 struct msqid_ds *host_md)
4202 {
4203 struct target_msqid_ds *target_md;
4204
4205 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4206 return -TARGET_EFAULT;
4207 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4208 return -TARGET_EFAULT;
4209 target_md->msg_stime = tswapal(host_md->msg_stime);
4210 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4211 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4212 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4213 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4214 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4215 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4216 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4217 unlock_user_struct(target_md, target_addr, 1);
4218 return 0;
4219 }
4220
4221 struct target_msginfo {
4222 int msgpool;
4223 int msgmap;
4224 int msgmax;
4225 int msgmnb;
4226 int msgmni;
4227 int msgssz;
4228 int msgtql;
4229 unsigned short int msgseg;
4230 };
4231
4232 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4233 struct msginfo *host_msginfo)
4234 {
4235 struct target_msginfo *target_msginfo;
4236 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4237 return -TARGET_EFAULT;
4238 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4239 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4240 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4241 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4242 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4243 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4244 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4245 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4246 unlock_user_struct(target_msginfo, target_addr, 1);
4247 return 0;
4248 }
4249
4250 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4251 {
4252 struct msqid_ds dsarg;
4253 struct msginfo msginfo;
4254 abi_long ret = -TARGET_EINVAL;
4255
4256 cmd &= 0xff;
4257
4258 switch (cmd) {
4259 case IPC_STAT:
4260 case IPC_SET:
4261 case MSG_STAT:
4262 if (target_to_host_msqid_ds(&dsarg,ptr))
4263 return -TARGET_EFAULT;
4264 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4265 if (host_to_target_msqid_ds(ptr,&dsarg))
4266 return -TARGET_EFAULT;
4267 break;
4268 case IPC_RMID:
4269 ret = get_errno(msgctl(msgid, cmd, NULL));
4270 break;
4271 case IPC_INFO:
4272 case MSG_INFO:
4273 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4274 if (host_to_target_msginfo(ptr, &msginfo))
4275 return -TARGET_EFAULT;
4276 break;
4277 }
4278
4279 return ret;
4280 }
4281
4282 struct target_msgbuf {
4283 abi_long mtype;
4284 char mtext[1];
4285 };
4286
4287 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4288 ssize_t msgsz, int msgflg)
4289 {
4290 struct target_msgbuf *target_mb;
4291 struct msgbuf *host_mb;
4292 abi_long ret = 0;
4293
4294 if (msgsz < 0) {
4295 return -TARGET_EINVAL;
4296 }
4297
4298 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4299 return -TARGET_EFAULT;
4300 host_mb = g_try_malloc(msgsz + sizeof(long));
4301 if (!host_mb) {
4302 unlock_user_struct(target_mb, msgp, 0);
4303 return -TARGET_ENOMEM;
4304 }
4305 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4306 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4307 ret = -TARGET_ENOSYS;
4308 #ifdef __NR_msgsnd
4309 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4310 #endif
4311 #ifdef __NR_ipc
4312 if (ret == -TARGET_ENOSYS) {
4313 #ifdef __s390x__
4314 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4315 host_mb));
4316 #else
4317 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4318 host_mb, 0));
4319 #endif
4320 }
4321 #endif
4322 g_free(host_mb);
4323 unlock_user_struct(target_mb, msgp, 0);
4324
4325 return ret;
4326 }
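/*
 * Buffer layout note (illustrative): msgsz counts only the payload, so
 * the host buffer is msgsz + sizeof(long) bytes, matching the kernel's
 *
 *     struct msgbuf { long mtype; char mtext[1]; };
 *
 * For a 5-byte guest message, do_msgsnd() above allocates 5 + sizeof(long)
 * bytes, stores the byte-swapped mtype first and copies mtext verbatim.
 */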
4327
4328 #ifdef __NR_ipc
4329 #if defined(__sparc__)
4330 /* SPARC does not use the kludge on the final 2 arguments for msgrcv. */
4331 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4332 #elif defined(__s390x__)
4333 /* The s390 sys_ipc variant has only five parameters. */
4334 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4335 ((long int[]){(long int)__msgp, __msgtyp})
4336 #else
4337 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4338 ((long int[]){(long int)__msgp, __msgtyp}), 0
4339 #endif
4340 #endif
4341
4342 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4343 ssize_t msgsz, abi_long msgtyp,
4344 int msgflg)
4345 {
4346 struct target_msgbuf *target_mb;
4347 char *target_mtext;
4348 struct msgbuf *host_mb;
4349 abi_long ret = 0;
4350
4351 if (msgsz < 0) {
4352 return -TARGET_EINVAL;
4353 }
4354
4355 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4356 return -TARGET_EFAULT;
4357
4358 host_mb = g_try_malloc(msgsz + sizeof(long));
4359 if (!host_mb) {
4360 ret = -TARGET_ENOMEM;
4361 goto end;
4362 }
4363 ret = -TARGET_ENOSYS;
4364 #ifdef __NR_msgrcv
4365 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4366 #endif
4367 #ifdef __NR_ipc
4368 if (ret == -TARGET_ENOSYS) {
4369 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4370 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4371 }
4372 #endif
4373
4374 if (ret > 0) {
4375 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4376 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4377 if (!target_mtext) {
4378 ret = -TARGET_EFAULT;
4379 goto end;
4380 }
4381 memcpy(target_mb->mtext, host_mb->mtext, ret);
4382 unlock_user(target_mtext, target_mtext_addr, ret);
4383 }
4384
4385 target_mb->mtype = tswapal(host_mb->mtype);
4386
4387 end:
4388 if (target_mb)
4389 unlock_user_struct(target_mb, msgp, 1);
4390 g_free(host_mb);
4391 return ret;
4392 }
4393
4394 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4395 abi_ulong target_addr)
4396 {
4397 struct target_shmid_ds *target_sd;
4398
4399 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4400 return -TARGET_EFAULT;
4401 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4402 return -TARGET_EFAULT;
4403 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4404 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4405 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4406 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4407 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4408 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4409 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4410 unlock_user_struct(target_sd, target_addr, 0);
4411 return 0;
4412 }
4413
4414 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4415 struct shmid_ds *host_sd)
4416 {
4417 struct target_shmid_ds *target_sd;
4418
4419 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4420 return -TARGET_EFAULT;
4421 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4422 return -TARGET_EFAULT;
4423 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4424 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4425 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4426 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4427 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4428 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4429 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4430 unlock_user_struct(target_sd, target_addr, 1);
4431 return 0;
4432 }
4433
4434 struct target_shminfo {
4435 abi_ulong shmmax;
4436 abi_ulong shmmin;
4437 abi_ulong shmmni;
4438 abi_ulong shmseg;
4439 abi_ulong shmall;
4440 };
4441
4442 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4443 struct shminfo *host_shminfo)
4444 {
4445 struct target_shminfo *target_shminfo;
4446 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4447 return -TARGET_EFAULT;
4448 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4449 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4450 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4451 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4452 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4453 unlock_user_struct(target_shminfo, target_addr, 1);
4454 return 0;
4455 }
4456
4457 struct target_shm_info {
4458 int used_ids;
4459 abi_ulong shm_tot;
4460 abi_ulong shm_rss;
4461 abi_ulong shm_swp;
4462 abi_ulong swap_attempts;
4463 abi_ulong swap_successes;
4464 };
4465
4466 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4467 struct shm_info *host_shm_info)
4468 {
4469 struct target_shm_info *target_shm_info;
4470 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4471 return -TARGET_EFAULT;
4472 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4473 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4474 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4475 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4476 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4477 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4478 unlock_user_struct(target_shm_info, target_addr, 1);
4479 return 0;
4480 }
4481
4482 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4483 {
4484 struct shmid_ds dsarg;
4485 struct shminfo shminfo;
4486 struct shm_info shm_info;
4487 abi_long ret = -TARGET_EINVAL;
4488
4489 cmd &= 0xff;
4490
4491 switch (cmd) {
4492 case IPC_STAT:
4493 case IPC_SET:
4494 case SHM_STAT:
4495 if (target_to_host_shmid_ds(&dsarg, buf))
4496 return -TARGET_EFAULT;
4497 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4498 if (host_to_target_shmid_ds(buf, &dsarg))
4499 return -TARGET_EFAULT;
4500 break;
4501 case IPC_INFO:
4502 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4503 if (host_to_target_shminfo(buf, &shminfo))
4504 return -TARGET_EFAULT;
4505 break;
4506 case SHM_INFO:
4507 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4508 if (host_to_target_shm_info(buf, &shm_info))
4509 return -TARGET_EFAULT;
4510 break;
4511 case IPC_RMID:
4512 case SHM_LOCK:
4513 case SHM_UNLOCK:
4514 ret = get_errno(shmctl(shmid, cmd, NULL));
4515 break;
4516 }
4517
4518 return ret;
4519 }
4520
4521 #ifndef TARGET_FORCE_SHMLBA
4522 /* For most architectures, SHMLBA is the same as the page size;
4523 * some architectures have larger values, in which case they should
4524 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4525 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4526 * and defining its own value for SHMLBA.
4527 *
4528 * The kernel also permits SHMLBA to be set by the architecture to a
4529 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4530 * this means that addresses are rounded to the large size if
4531 * SHM_RND is set but addresses not aligned to that size are not rejected
4532 * as long as they are at least page-aligned. Since the only architecture
4533 * which uses this is ia64 this code doesn't provide for that oddity.
4534 */
4535 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4536 {
4537 return TARGET_PAGE_SIZE;
4538 }
4539 #endif
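/*
 * Worked example of the alignment check in do_shmat() below, assuming a
 * 4 KiB shmlba and shmaddr == 0x40001234:
 *
 *     shmaddr & (shmlba - 1)     -> 0x234, misaligned: -TARGET_EINVAL
 *     with SHM_RND set instead:
 *     shmaddr &= ~(shmlba - 1)   -> 0x40001000, rounded down and accepted
 */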
4540
4541 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4542 int shmid, abi_ulong shmaddr, int shmflg)
4543 {
4544 CPUState *cpu = env_cpu(cpu_env);
4545 abi_long raddr;
4546 void *host_raddr;
4547 struct shmid_ds shm_info;
4548 int i, ret;
4549 abi_ulong shmlba;
4550
4551 /* shmat pointers are always untagged */
4552
4553 /* find out the length of the shared memory segment */
4554 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4555 if (is_error(ret)) {
4556 /* can't get length, bail out */
4557 return ret;
4558 }
4559
4560 shmlba = target_shmlba(cpu_env);
4561
4562 if (shmaddr & (shmlba - 1)) {
4563 if (shmflg & SHM_RND) {
4564 shmaddr &= ~(shmlba - 1);
4565 } else {
4566 return -TARGET_EINVAL;
4567 }
4568 }
4569 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4570 return -TARGET_EINVAL;
4571 }
4572
4573 mmap_lock();
4574
4575 /*
4576 * We're mapping shared memory, so ensure we generate code for parallel
4577 * execution and flush old translations. This will work up to the level
4578 * supported by the host -- anything that requires EXCP_ATOMIC will not
4579 * be atomic with respect to an external process.
4580 */
4581 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4582 cpu->tcg_cflags |= CF_PARALLEL;
4583 tb_flush(cpu);
4584 }
4585
4586 if (shmaddr)
4587 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4588 else {
4589 abi_ulong mmap_start;
4590
4591 /* In order to use the host shmat, we need to honor host SHMLBA. */
4592 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4593
4594 if (mmap_start == -1) {
4595 errno = ENOMEM;
4596 host_raddr = (void *)-1;
4597 } else
4598 host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4599 shmflg | SHM_REMAP);
4600 }
4601
4602 if (host_raddr == (void *)-1) {
4603 mmap_unlock();
4604 return get_errno((long)host_raddr);
4605 }
4606 raddr = h2g((unsigned long)host_raddr);
4607
4608 page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
4609 PAGE_VALID | PAGE_RESET | PAGE_READ |
4610 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4611
4612 for (i = 0; i < N_SHM_REGIONS; i++) {
4613 if (!shm_regions[i].in_use) {
4614 shm_regions[i].in_use = true;
4615 shm_regions[i].start = raddr;
4616 shm_regions[i].size = shm_info.shm_segsz;
4617 break;
4618 }
4619 }
4620
4621 mmap_unlock();
4622 return raddr;
4623
4624 }
4625
4626 static inline abi_long do_shmdt(abi_ulong shmaddr)
4627 {
4628 int i;
4629 abi_long rv;
4630
4631 /* shmdt pointers are always untagged */
4632
4633 mmap_lock();
4634
4635 for (i = 0; i < N_SHM_REGIONS; ++i) {
4636 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4637 shm_regions[i].in_use = false;
4638 page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4639 break;
4640 }
4641 }
4642 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4643
4644 mmap_unlock();
4645
4646 return rv;
4647 }
4648
4649 #ifdef TARGET_NR_ipc
4650 /* ??? This only works with linear mappings. */
4651 /* do_ipc() must return target values and target errnos. */
4652 static abi_long do_ipc(CPUArchState *cpu_env,
4653 unsigned int call, abi_long first,
4654 abi_long second, abi_long third,
4655 abi_long ptr, abi_long fifth)
4656 {
4657 int version;
4658 abi_long ret = 0;
4659
4660 version = call >> 16;
4661 call &= 0xffff;
4662
4663 switch (call) {
4664 case IPCOP_semop:
4665 ret = do_semtimedop(first, ptr, second, 0, false);
4666 break;
4667 case IPCOP_semtimedop:
4668 /*
4669 * The s390 sys_ipc variant has only five parameters instead of six
4670 * (as in the default variant); the only difference is the handling of
4671 * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4672 * to a struct timespec while the generic variant uses the fifth parameter.
4673 */
4674 #if defined(TARGET_S390X)
4675 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4676 #else
4677 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4678 #endif
4679 break;
4680
4681 case IPCOP_semget:
4682 ret = get_errno(semget(first, second, third));
4683 break;
4684
4685 case IPCOP_semctl: {
4686 /* The semun argument to semctl is passed by value, so dereference the
4687 * ptr argument. */
4688 abi_ulong atptr;
4689 get_user_ual(atptr, ptr);
4690 ret = do_semctl(first, second, third, atptr);
4691 break;
4692 }
4693
4694 case IPCOP_msgget:
4695 ret = get_errno(msgget(first, second));
4696 break;
4697
4698 case IPCOP_msgsnd:
4699 ret = do_msgsnd(first, ptr, second, third);
4700 break;
4701
4702 case IPCOP_msgctl:
4703 ret = do_msgctl(first, second, ptr);
4704 break;
4705
4706 case IPCOP_msgrcv:
4707 switch (version) {
4708 case 0:
4709 {
4710 struct target_ipc_kludge {
4711 abi_long msgp;
4712 abi_long msgtyp;
4713 } *tmp;
4714
4715 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4716 ret = -TARGET_EFAULT;
4717 break;
4718 }
4719
4720 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4721
4722 unlock_user_struct(tmp, ptr, 0);
4723 break;
4724 }
4725 default:
4726 ret = do_msgrcv(first, ptr, second, fifth, third);
4727 }
4728 break;
4729
4730 case IPCOP_shmat:
4731 switch (version) {
4732 default:
4733 {
4734 abi_ulong raddr;
4735 raddr = do_shmat(cpu_env, first, ptr, second);
4736 if (is_error(raddr))
4737 return get_errno(raddr);
4738 if (put_user_ual(raddr, third))
4739 return -TARGET_EFAULT;
4740 break;
4741 }
4742 case 1:
4743 ret = -TARGET_EINVAL;
4744 break;
4745 }
4746 break;
4747 case IPCOP_shmdt:
4748 ret = do_shmdt(ptr);
4749 break;
4750
4751 case IPCOP_shmget:
4752 /* IPC_* flag values are the same on all linux platforms */
4753 ret = get_errno(shmget(first, second, third));
4754 break;
4755
4756 /* IPC_* and SHM_* command values are the same on all linux platforms */
4757 case IPCOP_shmctl:
4758 ret = do_shmctl(first, second, ptr);
4759 break;
4760 default:
4761 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4762 call, version);
4763 ret = -TARGET_ENOSYS;
4764 break;
4765 }
4766 return ret;
4767 }
4768 #endif
4769
4770 /* kernel structure types definitions */
4771
4772 #define STRUCT(name, ...) STRUCT_ ## name,
4773 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4774 enum {
4775 #include "syscall_types.h"
4776 STRUCT_MAX
4777 };
4778 #undef STRUCT
4779 #undef STRUCT_SPECIAL
4780
4781 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4782 #define STRUCT_SPECIAL(name)
4783 #include "syscall_types.h"
4784 #undef STRUCT
4785 #undef STRUCT_SPECIAL
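/*
 * Illustrative expansion of the X-macro pattern above: a hypothetical
 * entry STRUCT(foo, TYPE_INT, TYPE_LONG) in syscall_types.h produces the
 * enum constant STRUCT_foo on the first include and the descriptor
 *
 *     static const argtype struct_foo_def[] = { TYPE_INT, TYPE_LONG,
 *                                               TYPE_NULL };
 *
 * on the second; the thunk code walks such tables to convert structures
 * field by field between target and host layouts.
 */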
4786
4787 #define MAX_STRUCT_SIZE 4096
4788
4789 #ifdef CONFIG_FIEMAP
4790 /* So fiemap access checks don't overflow on 32 bit systems.
4791 * This is very slightly smaller than the limit imposed by
4792 * the underlying kernel.
4793 */
4794 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4795 / sizeof(struct fiemap_extent))
4796
4797 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4798 int fd, int cmd, abi_long arg)
4799 {
4800 /* The parameter for this ioctl is a struct fiemap followed
4801 * by an array of struct fiemap_extent whose size is set
4802 * in fiemap->fm_extent_count. The array is filled in by the
4803 * ioctl.
4804 */
4805 int target_size_in, target_size_out;
4806 struct fiemap *fm;
4807 const argtype *arg_type = ie->arg_type;
4808 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4809 void *argptr, *p;
4810 abi_long ret;
4811 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4812 uint32_t outbufsz;
4813 int free_fm = 0;
4814
4815 assert(arg_type[0] == TYPE_PTR);
4816 assert(ie->access == IOC_RW);
4817 arg_type++;
4818 target_size_in = thunk_type_size(arg_type, 0);
4819 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4820 if (!argptr) {
4821 return -TARGET_EFAULT;
4822 }
4823 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4824 unlock_user(argptr, arg, 0);
4825 fm = (struct fiemap *)buf_temp;
4826 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4827 return -TARGET_EINVAL;
4828 }
4829
4830 outbufsz = sizeof(*fm) +
4831 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4832
4833 if (outbufsz > MAX_STRUCT_SIZE) {
4834 /* We can't fit all the extents into the fixed size buffer.
4835 * Allocate one that is large enough and use it instead.
4836 */
4837 fm = g_try_malloc(outbufsz);
4838 if (!fm) {
4839 return -TARGET_ENOMEM;
4840 }
4841 memcpy(fm, buf_temp, sizeof(struct fiemap));
4842 free_fm = 1;
4843 }
4844 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4845 if (!is_error(ret)) {
4846 target_size_out = target_size_in;
4847 /* An extent_count of 0 means we were only counting the extents
4848 * so there are no structs to copy
4849 */
4850 if (fm->fm_extent_count != 0) {
4851 target_size_out += fm->fm_mapped_extents * extent_size;
4852 }
4853 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4854 if (!argptr) {
4855 ret = -TARGET_EFAULT;
4856 } else {
4857 /* Convert the struct fiemap */
4858 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4859 if (fm->fm_extent_count != 0) {
4860 p = argptr + target_size_in;
4861 /* ...and then all the struct fiemap_extents */
4862 for (i = 0; i < fm->fm_mapped_extents; i++) {
4863 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4864 THUNK_TARGET);
4865 p += extent_size;
4866 }
4867 }
4868 unlock_user(argptr, arg, target_size_out);
4869 }
4870 }
4871 if (free_fm) {
4872 g_free(fm);
4873 }
4874 return ret;
4875 }
4876 #endif
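/*
 * Sizing sketch for the variable-length fiemap ioctl above (illustrative):
 * a guest requesting room for 8 extents needs
 *
 *     outbufsz = sizeof(struct fiemap) + 8 * sizeof(struct fiemap_extent)
 *
 * bytes; only when that exceeds MAX_STRUCT_SIZE does the handler fall
 * back from the fixed buf_temp to a heap allocation.
 */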
4877
4878 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4879 int fd, int cmd, abi_long arg)
4880 {
4881 const argtype *arg_type = ie->arg_type;
4882 int target_size;
4883 void *argptr;
4884 int ret;
4885 struct ifconf *host_ifconf;
4886 uint32_t outbufsz;
4887 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4888 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4889 int target_ifreq_size;
4890 int nb_ifreq;
4891 int free_buf = 0;
4892 int i;
4893 int target_ifc_len;
4894 abi_long target_ifc_buf;
4895 int host_ifc_len;
4896 char *host_ifc_buf;
4897
4898 assert(arg_type[0] == TYPE_PTR);
4899 assert(ie->access == IOC_RW);
4900
4901 arg_type++;
4902 target_size = thunk_type_size(arg_type, 0);
4903
4904 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4905 if (!argptr)
4906 return -TARGET_EFAULT;
4907 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4908 unlock_user(argptr, arg, 0);
4909
4910 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4911 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4912 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4913
4914 if (target_ifc_buf != 0) {
4915 target_ifc_len = host_ifconf->ifc_len;
4916 nb_ifreq = target_ifc_len / target_ifreq_size;
4917 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4918
4919 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4920 if (outbufsz > MAX_STRUCT_SIZE) {
4921 /*
4922 * We can't fit all the ifreq entries into the fixed size buffer.
4923 * Allocate one that is large enough and use it instead.
4924 */
4925 host_ifconf = g_try_malloc(outbufsz);
4926 if (!host_ifconf) {
4927 return -TARGET_ENOMEM;
4928 }
4929 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4930 free_buf = 1;
4931 }
4932 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4933
4934 host_ifconf->ifc_len = host_ifc_len;
4935 } else {
4936 host_ifc_buf = NULL;
4937 }
4938 host_ifconf->ifc_buf = host_ifc_buf;
4939
4940 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4941 if (!is_error(ret)) {
4942 /* convert host ifc_len to target ifc_len */
4943
4944 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4945 target_ifc_len = nb_ifreq * target_ifreq_size;
4946 host_ifconf->ifc_len = target_ifc_len;
4947
4948 /* restore target ifc_buf */
4949
4950 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4951
4952 /* copy struct ifconf to target user */
4953
4954 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4955 if (!argptr)
4956 return -TARGET_EFAULT;
4957 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4958 unlock_user(argptr, arg, target_size);
4959
4960 if (target_ifc_buf != 0) {
4961 /* copy ifreq[] to target user */
4962 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4963 for (i = 0; i < nb_ifreq ; i++) {
4964 thunk_convert(argptr + i * target_ifreq_size,
4965 host_ifc_buf + i * sizeof(struct ifreq),
4966 ifreq_arg_type, THUNK_TARGET);
4967 }
4968 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4969 }
4970 }
4971
4972 if (free_buf) {
4973 g_free(host_ifconf);
4974 }
4975
4976 return ret;
4977 }
4978
4979 #if defined(CONFIG_USBFS)
4980 #if HOST_LONG_BITS > 64
4981 #error USBDEVFS thunks do not support >64 bit hosts yet.
4982 #endif
4983 struct live_urb {
4984 uint64_t target_urb_adr;
4985 uint64_t target_buf_adr;
4986 char *target_buf_ptr;
4987 struct usbdevfs_urb host_urb;
4988 };
4989
4990 static GHashTable *usbdevfs_urb_hashtable(void)
4991 {
4992 static GHashTable *urb_hashtable;
4993
4994 if (!urb_hashtable) {
4995 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4996 }
4997 return urb_hashtable;
4998 }
4999
5000 static void urb_hashtable_insert(struct live_urb *urb)
5001 {
5002 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5003 g_hash_table_insert(urb_hashtable, urb, urb);
5004 }
5005
5006 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5007 {
5008 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5009 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5010 }
5011
5012 static void urb_hashtable_remove(struct live_urb *urb)
5013 {
5014 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5015 g_hash_table_remove(urb_hashtable, urb);
5016 }
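/*
 * Keying trick in the helpers above: target_urb_adr is the first member
 * of struct live_urb, so a pointer to the struct is also a pointer to a
 * valid 8-byte key for g_int64_hash.  Sketch:
 *
 *     g_hash_table_insert(tbl, urb, urb);        // key aliases
 *                                                // &urb->target_urb_adr
 *     g_hash_table_lookup(tbl, &target_urb_adr); // plain uint64_t address
 */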
5017
5018 static abi_long
5019 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5020 int fd, int cmd, abi_long arg)
5021 {
5022 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5023 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5024 struct live_urb *lurb;
5025 void *argptr;
5026 uint64_t hurb;
5027 int target_size;
5028 uintptr_t target_urb_adr;
5029 abi_long ret;
5030
5031 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5032
5033 memset(buf_temp, 0, sizeof(uint64_t));
5034 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5035 if (is_error(ret)) {
5036 return ret;
5037 }
5038
5039 memcpy(&hurb, buf_temp, sizeof(uint64_t));
5040 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5041 if (!lurb->target_urb_adr) {
5042 return -TARGET_EFAULT;
5043 }
5044 urb_hashtable_remove(lurb);
5045 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5046 lurb->host_urb.buffer_length);
5047 lurb->target_buf_ptr = NULL;
5048
5049 /* restore the guest buffer pointer */
5050 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5051
5052 /* update the guest urb struct */
5053 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5054 if (!argptr) {
5055 g_free(lurb);
5056 return -TARGET_EFAULT;
5057 }
5058 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5059 unlock_user(argptr, lurb->target_urb_adr, target_size);
5060
5061 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5062 /* write back the urb handle */
5063 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5064 if (!argptr) {
5065 g_free(lurb);
5066 return -TARGET_EFAULT;
5067 }
5068
5069 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5070 target_urb_adr = lurb->target_urb_adr;
5071 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5072 unlock_user(argptr, arg, target_size);
5073
5074 g_free(lurb);
5075 return ret;
5076 }
5077
5078 static abi_long
5079 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5080 uint8_t *buf_temp __attribute__((unused)),
5081 int fd, int cmd, abi_long arg)
5082 {
5083 struct live_urb *lurb;
5084
5085 /* map target address back to host URB with metadata. */
5086 lurb = urb_hashtable_lookup(arg);
5087 if (!lurb) {
5088 return -TARGET_EFAULT;
5089 }
5090 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5091 }
5092
5093 static abi_long
5094 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5095 int fd, int cmd, abi_long arg)
5096 {
5097 const argtype *arg_type = ie->arg_type;
5098 int target_size;
5099 abi_long ret;
5100 void *argptr;
5101 int rw_dir;
5102 struct live_urb *lurb;
5103
5104 /*
5105 * Each submitted URB needs to map to a unique ID for the
5106 * kernel, and that unique ID needs to be a pointer to
5107 * host memory; hence we allocate separately for each URB.
5108 * Isochronous transfers have a variable-length struct.
5109 */
5110 arg_type++;
5111 target_size = thunk_type_size(arg_type, THUNK_TARGET);
5112
5113 /* construct host copy of urb and metadata */
5114 lurb = g_try_new0(struct live_urb, 1);
5115 if (!lurb) {
5116 return -TARGET_ENOMEM;
5117 }
5118
5119 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5120 if (!argptr) {
5121 g_free(lurb);
5122 return -TARGET_EFAULT;
5123 }
5124 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5125 unlock_user(argptr, arg, 0);
5126
5127 lurb->target_urb_adr = arg;
5128 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5129
5130 /* buffer space used depends on endpoint type so lock the entire buffer */
5131 /* control type urbs should check the buffer contents for true direction */
5132 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5133 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5134 lurb->host_urb.buffer_length, 1);
5135 if (lurb->target_buf_ptr == NULL) {
5136 g_free(lurb);
5137 return -TARGET_EFAULT;
5138 }
5139
5140 /* update buffer pointer in host copy */
5141 lurb->host_urb.buffer = lurb->target_buf_ptr;
5142
5143 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5144 if (is_error(ret)) {
5145 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5146 g_free(lurb);
5147 } else {
5148 urb_hashtable_insert(lurb);
5149 }
5150
5151 return ret;
5152 }
5153 #endif /* CONFIG_USBFS */
5154
5155 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5156 int cmd, abi_long arg)
5157 {
5158 void *argptr;
5159 struct dm_ioctl *host_dm;
5160 abi_long guest_data;
5161 uint32_t guest_data_size;
5162 int target_size;
5163 const argtype *arg_type = ie->arg_type;
5164 abi_long ret;
5165 void *big_buf = NULL;
5166 char *host_data;
5167
5168 arg_type++;
5169 target_size = thunk_type_size(arg_type, 0);
5170 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5171 if (!argptr) {
5172 ret = -TARGET_EFAULT;
5173 goto out;
5174 }
5175 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5176 unlock_user(argptr, arg, 0);
5177
5178 /* buf_temp is too small, so fetch things into a bigger buffer */
5179 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5180 memcpy(big_buf, buf_temp, target_size);
5181 buf_temp = big_buf;
5182 host_dm = big_buf;
5183
5184 guest_data = arg + host_dm->data_start;
5185 if ((guest_data - arg) < 0) {
5186 ret = -TARGET_EINVAL;
5187 goto out;
5188 }
5189 guest_data_size = host_dm->data_size - host_dm->data_start;
5190 host_data = (char*)host_dm + host_dm->data_start;
5191
5192 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5193 if (!argptr) {
5194 ret = -TARGET_EFAULT;
5195 goto out;
5196 }
5197
5198 switch (ie->host_cmd) {
5199 case DM_REMOVE_ALL:
5200 case DM_LIST_DEVICES:
5201 case DM_DEV_CREATE:
5202 case DM_DEV_REMOVE:
5203 case DM_DEV_SUSPEND:
5204 case DM_DEV_STATUS:
5205 case DM_DEV_WAIT:
5206 case DM_TABLE_STATUS:
5207 case DM_TABLE_CLEAR:
5208 case DM_TABLE_DEPS:
5209 case DM_LIST_VERSIONS:
5210 /* no input data */
5211 break;
5212 case DM_DEV_RENAME:
5213 case DM_DEV_SET_GEOMETRY:
5214 /* data contains only strings */
5215 memcpy(host_data, argptr, guest_data_size);
5216 break;
5217 case DM_TARGET_MSG:
5218 memcpy(host_data, argptr, guest_data_size);
5219 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5220 break;
5221 case DM_TABLE_LOAD:
5222 {
5223 void *gspec = argptr;
5224 void *cur_data = host_data;
5225 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5226 int spec_size = thunk_type_size(arg_type, 0);
5227 int i;
5228
5229 for (i = 0; i < host_dm->target_count; i++) {
5230 struct dm_target_spec *spec = cur_data;
5231 uint32_t next;
5232 int slen;
5233
5234 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5235 slen = strlen((char*)gspec + spec_size) + 1;
5236 next = spec->next;
5237 spec->next = sizeof(*spec) + slen;
5238 strcpy((char*)&spec[1], gspec + spec_size);
5239 gspec += next;
5240 cur_data += spec->next;
5241 }
5242 break;
5243 }
5244 default:
5245 ret = -TARGET_EINVAL;
5246 unlock_user(argptr, guest_data, 0);
5247 goto out;
5248 }
5249 unlock_user(argptr, guest_data, 0);
5250
5251 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5252 if (!is_error(ret)) {
5253 guest_data = arg + host_dm->data_start;
5254 guest_data_size = host_dm->data_size - host_dm->data_start;
5255 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5256 switch (ie->host_cmd) {
5257 case DM_REMOVE_ALL:
5258 case DM_DEV_CREATE:
5259 case DM_DEV_REMOVE:
5260 case DM_DEV_RENAME:
5261 case DM_DEV_SUSPEND:
5262 case DM_DEV_STATUS:
5263 case DM_TABLE_LOAD:
5264 case DM_TABLE_CLEAR:
5265 case DM_TARGET_MSG:
5266 case DM_DEV_SET_GEOMETRY:
5267 /* no return data */
5268 break;
5269 case DM_LIST_DEVICES:
5270 {
5271 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5272 uint32_t remaining_data = guest_data_size;
5273 void *cur_data = argptr;
5274 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5275 int nl_size = 12; /* can't use thunk_size due to alignment */
5276
5277 while (1) {
5278 uint32_t next = nl->next;
5279 if (next) {
5280 nl->next = nl_size + (strlen(nl->name) + 1);
5281 }
5282 if (remaining_data < nl->next) {
5283 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5284 break;
5285 }
5286 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5287 strcpy(cur_data + nl_size, nl->name);
5288 cur_data += nl->next;
5289 remaining_data -= nl->next;
5290 if (!next) {
5291 break;
5292 }
5293 nl = (void*)nl + next;
5294 }
5295 break;
5296 }
5297 case DM_DEV_WAIT:
5298 case DM_TABLE_STATUS:
5299 {
5300 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5301 void *cur_data = argptr;
5302 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5303 int spec_size = thunk_type_size(arg_type, 0);
5304 int i;
5305
5306 for (i = 0; i < host_dm->target_count; i++) {
5307 uint32_t next = spec->next;
5308 int slen = strlen((char*)&spec[1]) + 1;
5309 spec->next = (cur_data - argptr) + spec_size + slen;
5310 if (guest_data_size < spec->next) {
5311 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5312 break;
5313 }
5314 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5315 strcpy(cur_data + spec_size, (char*)&spec[1]);
5316 cur_data = argptr + spec->next;
5317 spec = (void*)host_dm + host_dm->data_start + next;
5318 }
5319 break;
5320 }
5321 case DM_TABLE_DEPS:
5322 {
5323 void *hdata = (void*)host_dm + host_dm->data_start;
5324 int count = *(uint32_t*)hdata;
5325 uint64_t *hdev = hdata + 8;
5326 uint64_t *gdev = argptr + 8;
5327 int i;
5328
5329 *(uint32_t*)argptr = tswap32(count);
5330 for (i = 0; i < count; i++) {
5331 *gdev = tswap64(*hdev);
5332 gdev++;
5333 hdev++;
5334 }
5335 break;
5336 }
5337 case DM_LIST_VERSIONS:
5338 {
5339 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5340 uint32_t remaining_data = guest_data_size;
5341 void *cur_data = argptr;
5342 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5343 int vers_size = thunk_type_size(arg_type, 0);
5344
5345 while (1) {
5346 uint32_t next = vers->next;
5347 if (next) {
5348 vers->next = vers_size + (strlen(vers->name) + 1);
5349 }
5350 if (remaining_data < vers->next) {
5351 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5352 break;
5353 }
5354 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5355 strcpy(cur_data + vers_size, vers->name);
5356 cur_data += vers->next;
5357 remaining_data -= vers->next;
5358 if (!next) {
5359 break;
5360 }
5361 vers = (void*)vers + next;
5362 }
5363 break;
5364 }
5365 default:
5366 unlock_user(argptr, guest_data, 0);
5367 ret = -TARGET_EINVAL;
5368 goto out;
5369 }
5370 unlock_user(argptr, guest_data, guest_data_size);
5371
5372 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5373 if (!argptr) {
5374 ret = -TARGET_EFAULT;
5375 goto out;
5376 }
5377 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5378 unlock_user(argptr, arg, target_size);
5379 }
5380 out:
5381 g_free(big_buf);
5382 return ret;
5383 }
5384
5385 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5386 int cmd, abi_long arg)
5387 {
5388 void *argptr;
5389 int target_size;
5390 const argtype *arg_type = ie->arg_type;
5391 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5392 abi_long ret;
5393
5394 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5395 struct blkpg_partition host_part;
5396
5397 /* Read and convert blkpg */
5398 arg_type++;
5399 target_size = thunk_type_size(arg_type, 0);
5400 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5401 if (!argptr) {
5402 ret = -TARGET_EFAULT;
5403 goto out;
5404 }
5405 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5406 unlock_user(argptr, arg, 0);
5407
5408 switch (host_blkpg->op) {
5409 case BLKPG_ADD_PARTITION:
5410 case BLKPG_DEL_PARTITION:
5411 /* payload is struct blkpg_partition */
5412 break;
5413 default:
5414 /* Unknown opcode */
5415 ret = -TARGET_EINVAL;
5416 goto out;
5417 }
5418
5419 /* Read and convert blkpg->data */
5420 arg = (abi_long)(uintptr_t)host_blkpg->data;
5421 target_size = thunk_type_size(part_arg_type, 0);
5422 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5423 if (!argptr) {
5424 ret = -TARGET_EFAULT;
5425 goto out;
5426 }
5427 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5428 unlock_user(argptr, arg, 0);
5429
5430 /* Swizzle the data pointer to our local copy and call! */
5431 host_blkpg->data = &host_part;
5432 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5433
5434 out:
5435 return ret;
5436 }
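/*
 * Pattern note (illustrative): BLKPG is an ioctl whose argument itself
 * contains a pointer (blkpg_ioctl_arg.data -> struct blkpg_partition), so
 * the handler above converts in two passes -- the outer struct, then the
 * pointed-to payload -- and finally swizzles the pointer to the host copy:
 *
 *     host_blkpg->data = &host_part;             // host replaces guest ptr
 *     safe_ioctl(fd, ie->host_cmd, host_blkpg);
 */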
5437
5438 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5439 int fd, int cmd, abi_long arg)
5440 {
5441 const argtype *arg_type = ie->arg_type;
5442 const StructEntry *se;
5443 const argtype *field_types;
5444 const int *dst_offsets, *src_offsets;
5445 int target_size;
5446 void *argptr;
5447 abi_ulong *target_rt_dev_ptr = NULL;
5448 unsigned long *host_rt_dev_ptr = NULL;
5449 abi_long ret;
5450 int i;
5451
5452 assert(ie->access == IOC_W);
5453 assert(*arg_type == TYPE_PTR);
5454 arg_type++;
5455 assert(*arg_type == TYPE_STRUCT);
5456 target_size = thunk_type_size(arg_type, 0);
5457 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5458 if (!argptr) {
5459 return -TARGET_EFAULT;
5460 }
5461 arg_type++;
5462 assert(*arg_type == (int)STRUCT_rtentry);
5463 se = struct_entries + *arg_type++;
5464 assert(se->convert[0] == NULL);
5465 /* convert struct here to be able to catch rt_dev string */
5466 field_types = se->field_types;
5467 dst_offsets = se->field_offsets[THUNK_HOST];
5468 src_offsets = se->field_offsets[THUNK_TARGET];
5469 for (i = 0; i < se->nb_fields; i++) {
5470 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5471 assert(*field_types == TYPE_PTRVOID);
5472 target_rt_dev_ptr = argptr + src_offsets[i];
5473 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5474 if (*target_rt_dev_ptr != 0) {
5475 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5476 tswapal(*target_rt_dev_ptr));
5477 if (!*host_rt_dev_ptr) {
5478 unlock_user(argptr, arg, 0);
5479 return -TARGET_EFAULT;
5480 }
5481 } else {
5482 *host_rt_dev_ptr = 0;
5483 }
5484 field_types++;
5485 continue;
5486 }
5487 field_types = thunk_convert(buf_temp + dst_offsets[i],
5488 argptr + src_offsets[i],
5489 field_types, THUNK_HOST);
5490 }
5491 unlock_user(argptr, arg, 0);
5492
5493 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5494
5495 assert(host_rt_dev_ptr != NULL);
5496 assert(target_rt_dev_ptr != NULL);
5497 if (*host_rt_dev_ptr != 0) {
5498 unlock_user((void *)*host_rt_dev_ptr,
5499 *target_rt_dev_ptr, 0);
5500 }
5501 return ret;
5502 }
5503
5504 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5505 int fd, int cmd, abi_long arg)
5506 {
5507 int sig = target_to_host_signal(arg);
5508 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5509 }
5510
5511 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5512 int fd, int cmd, abi_long arg)
5513 {
5514 struct timeval tv;
5515 abi_long ret;
5516
5517 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5518 if (is_error(ret)) {
5519 return ret;
5520 }
5521
5522 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5523 if (copy_to_user_timeval(arg, &tv)) {
5524 return -TARGET_EFAULT;
5525 }
5526 } else {
5527 if (copy_to_user_timeval64(arg, &tv)) {
5528 return -TARGET_EFAULT;
5529 }
5530 }
5531
5532 return ret;
5533 }
5534
5535 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5536 int fd, int cmd, abi_long arg)
5537 {
5538 struct timespec ts;
5539 abi_long ret;
5540
5541 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5542 if (is_error(ret)) {
5543 return ret;
5544 }
5545
5546 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5547 if (host_to_target_timespec(arg, &ts)) {
5548 return -TARGET_EFAULT;
5549 }
5550 } else {
5551 if (host_to_target_timespec64(arg, &ts)) {
5552 return -TARGET_EFAULT;
5553 }
5554 }
5555
5556 return ret;
5557 }
5558
5559 #ifdef TIOCGPTPEER
5560 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5561 int fd, int cmd, abi_long arg)
5562 {
5563 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5564 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5565 }
5566 #endif
5567
5568 #ifdef HAVE_DRM_H
5569
5570 static void unlock_drm_version(struct drm_version *host_ver,
5571 struct target_drm_version *target_ver,
5572 bool copy)
5573 {
5574 unlock_user(host_ver->name, target_ver->name,
5575 copy ? host_ver->name_len : 0);
5576 unlock_user(host_ver->date, target_ver->date,
5577 copy ? host_ver->date_len : 0);
5578 unlock_user(host_ver->desc, target_ver->desc,
5579 copy ? host_ver->desc_len : 0);
5580 }
5581
5582 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5583 struct target_drm_version *target_ver)
5584 {
5585 memset(host_ver, 0, sizeof(*host_ver));
5586
5587 __get_user(host_ver->name_len, &target_ver->name_len);
5588 if (host_ver->name_len) {
5589 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5590 target_ver->name_len, 0);
5591 if (!host_ver->name) {
5592 return -EFAULT;
5593 }
5594 }
5595
5596 __get_user(host_ver->date_len, &target_ver->date_len);
5597 if (host_ver->date_len) {
5598 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5599 target_ver->date_len, 0);
5600 if (!host_ver->date) {
5601 goto err;
5602 }
5603 }
5604
5605 __get_user(host_ver->desc_len, &target_ver->desc_len);
5606 if (host_ver->desc_len) {
5607 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5608 target_ver->desc_len, 0);
5609 if (!host_ver->desc) {
5610 goto err;
5611 }
5612 }
5613
5614 return 0;
5615 err:
5616 unlock_drm_version(host_ver, target_ver, false);
5617 return -EFAULT;
5618 }
5619
5620 static inline void host_to_target_drmversion(
5621 struct target_drm_version *target_ver,
5622 struct drm_version *host_ver)
5623 {
5624 __put_user(host_ver->version_major, &target_ver->version_major);
5625 __put_user(host_ver->version_minor, &target_ver->version_minor);
5626 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5627 __put_user(host_ver->name_len, &target_ver->name_len);
5628 __put_user(host_ver->date_len, &target_ver->date_len);
5629 __put_user(host_ver->desc_len, &target_ver->desc_len);
5630 unlock_drm_version(host_ver, target_ver, true);
5631 }
5632
5633 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5634 int fd, int cmd, abi_long arg)
5635 {
5636 struct drm_version *ver;
5637 struct target_drm_version *target_ver;
5638 abi_long ret;
5639
5640 switch (ie->host_cmd) {
5641 case DRM_IOCTL_VERSION:
5642 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5643 return -TARGET_EFAULT;
5644 }
5645 ver = (struct drm_version *)buf_temp;
5646 ret = target_to_host_drmversion(ver, target_ver);
5647 if (!is_error(ret)) {
5648 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5649 if (is_error(ret)) {
5650 unlock_drm_version(ver, target_ver, false);
5651 } else {
5652 host_to_target_drmversion(target_ver, ver);
5653 }
5654 }
5655 unlock_user_struct(target_ver, arg, 0);
5656 return ret;
5657 }
5658 return -TARGET_ENOSYS;
5659 }
5660
5661 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5662 struct drm_i915_getparam *gparam,
5663 int fd, abi_long arg)
5664 {
5665 abi_long ret;
5666 int value;
5667 struct target_drm_i915_getparam *target_gparam;
5668
5669 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5670 return -TARGET_EFAULT;
5671 }
5672
5673 __get_user(gparam->param, &target_gparam->param);
5674 gparam->value = &value;
5675 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5676 put_user_s32(value, target_gparam->value);
5677
5678 unlock_user_struct(target_gparam, arg, 0);
5679 return ret;
5680 }
5681
5682 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5683 int fd, int cmd, abi_long arg)
5684 {
5685 switch (ie->host_cmd) {
5686 case DRM_IOCTL_I915_GETPARAM:
5687 return do_ioctl_drm_i915_getparam(ie,
5688 (struct drm_i915_getparam *)buf_temp,
5689 fd, arg);
5690 default:
5691 return -TARGET_ENOSYS;
5692 }
5693 }
5694
5695 #endif
5696
5697 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5698 int fd, int cmd, abi_long arg)
5699 {
5700 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5701 struct tun_filter *target_filter;
5702 char *target_addr;
5703
5704 assert(ie->access == IOC_W);
5705
5706 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5707 if (!target_filter) {
5708 return -TARGET_EFAULT;
5709 }
5710 filter->flags = tswap16(target_filter->flags);
5711 filter->count = tswap16(target_filter->count);
5712 unlock_user(target_filter, arg, 0);
5713
5714 if (filter->count) {
5715 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5716 MAX_STRUCT_SIZE) {
5717 return -TARGET_EFAULT;
5718 }
5719
5720 target_addr = lock_user(VERIFY_READ,
5721 arg + offsetof(struct tun_filter, addr),
5722 filter->count * ETH_ALEN, 1);
5723 if (!target_addr) {
5724 return -TARGET_EFAULT;
5725 }
5726 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5727 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5728 }
5729
5730 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5731 }
5732
5733 IOCTLEntry ioctl_entries[] = {
5734 #define IOCTL(cmd, access, ...) \
5735 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5736 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5737 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5738 #define IOCTL_IGNORE(cmd) \
5739 { TARGET_ ## cmd, 0, #cmd },
5740 #include "ioctls.h"
5741 { 0, 0, },
5742 };
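/*
 * Each line of ioctls.h expands through the macros above into one table
 * entry; illustratively, an entry along the lines of
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * records the target/host command numbers, the access mode and the
 * thunk type description.  IOCTL_SPECIAL additionally supplies a
 * do_ioctl callback, while IOCTL_IGNORE leaves host_cmd as zero so
 * that do_ioctl() fails the request with ENOTTY.
 */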
5743
5744 /* ??? Implement proper locking for ioctls. */
5745 /* do_ioctl() must return target values and target errnos. */
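/* Lookup walks the table for the target command, preferring a custom
   do_ioctl handler when one is registered.  Otherwise a TYPE_PTR
   argument is converted through the thunk layer according to the
   entry's access mode: IOC_R converts host->target after the call,
   IOC_W converts target->host before it, and IOC_RW does both. */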
5746 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5747 {
5748 const IOCTLEntry *ie;
5749 const argtype *arg_type;
5750 abi_long ret;
5751 uint8_t buf_temp[MAX_STRUCT_SIZE];
5752 int target_size;
5753 void *argptr;
5754
5755 ie = ioctl_entries;
5756 for (;;) {
5757 if (ie->target_cmd == 0) {
5758 qemu_log_mask(
5759 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5760 return -TARGET_ENOTTY;
5761 }
5762 if (ie->target_cmd == cmd)
5763 break;
5764 ie++;
5765 }
5766 arg_type = ie->arg_type;
5767 if (ie->do_ioctl) {
5768 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5769 } else if (!ie->host_cmd) {
5770 /* Some architectures define BSD ioctls in their headers
5771 that are not implemented in Linux. */
5772 return -TARGET_ENOTTY;
5773 }
5774
5775 switch (arg_type[0]) {
5776 case TYPE_NULL:
5777 /* no argument */
5778 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5779 break;
5780 case TYPE_PTRVOID:
5781 case TYPE_INT:
5782 case TYPE_LONG:
5783 case TYPE_ULONG:
5784 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5785 break;
5786 case TYPE_PTR:
5787 arg_type++;
5788 target_size = thunk_type_size(arg_type, 0);
5789 switch (ie->access) {
5790 case IOC_R:
5791 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5792 if (!is_error(ret)) {
5793 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5794 if (!argptr)
5795 return -TARGET_EFAULT;
5796 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5797 unlock_user(argptr, arg, target_size);
5798 }
5799 break;
5800 case IOC_W:
5801 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5802 if (!argptr)
5803 return -TARGET_EFAULT;
5804 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5805 unlock_user(argptr, arg, 0);
5806 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5807 break;
5808 default:
5809 case IOC_RW:
5810 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5811 if (!argptr)
5812 return -TARGET_EFAULT;
5813 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5814 unlock_user(argptr, arg, 0);
5815 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5816 if (!is_error(ret)) {
5817 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5818 if (!argptr)
5819 return -TARGET_EFAULT;
5820 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5821 unlock_user(argptr, arg, target_size);
5822 }
5823 break;
5824 }
5825 break;
5826 default:
5827 qemu_log_mask(LOG_UNIMP,
5828 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5829 (long)cmd, arg_type[0]);
5830 ret = -TARGET_ENOTTY;
5831 break;
5832 }
5833 return ret;
5834 }
5835
5836 static const bitmask_transtbl iflag_tbl[] = {
5837 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5838 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5839 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5840 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5841 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5842 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5843 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5844 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5845 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5846 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5847 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5848 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5849 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5850 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5851 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5852 { 0, 0, 0, 0 }
5853 };
5854
5855 static const bitmask_transtbl oflag_tbl[] = {
5856 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5857 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5858 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5859 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5860 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5861 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5862 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5863 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5864 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5865 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5866 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5867 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5868 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5869 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5870 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5871 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5872 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5873 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5874 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5875 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5876 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5877 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5878 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5879 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5880 { 0, 0, 0, 0 }
5881 };
5882
5883 static const bitmask_transtbl cflag_tbl[] = {
5884 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5885 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5886 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5887 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5888 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5889 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5890 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5891 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5892 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5893 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5894 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5895 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5896 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5897 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5898 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5899 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5900 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5901 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5902 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5903 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5904 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5905 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5906 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5907 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5908 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5909 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5910 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5911 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5912 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5913 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5914 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5915 { 0, 0, 0, 0 }
5916 };
5917
5918 static const bitmask_transtbl lflag_tbl[] = {
5919 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5920 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5921 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5922 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5923 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5924 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5925 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5926 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5927 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5928 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5929 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5930 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5931 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5932 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5933 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5934 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5935 { 0, 0, 0, 0 }
5936 };
5937
5938 static void target_to_host_termios (void *dst, const void *src)
5939 {
5940 struct host_termios *host = dst;
5941 const struct target_termios *target = src;
5942
5943 host->c_iflag =
5944 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5945 host->c_oflag =
5946 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5947 host->c_cflag =
5948 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5949 host->c_lflag =
5950 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5951 host->c_line = target->c_line;
5952
5953 memset(host->c_cc, 0, sizeof(host->c_cc));
5954 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5955 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5956 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5957 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5958 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5959 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5960 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5961 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5962 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5963 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5964 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5965 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5966 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5967 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5968 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5969 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5970 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5971 }
5972
5973 static void host_to_target_termios (void *dst, const void *src)
5974 {
5975 struct target_termios *target = dst;
5976 const struct host_termios *host = src;
5977
5978 target->c_iflag =
5979 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5980 target->c_oflag =
5981 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5982 target->c_cflag =
5983 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5984 target->c_lflag =
5985 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5986 target->c_line = host->c_line;
5987
5988 memset(target->c_cc, 0, sizeof(target->c_cc));
5989 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5990 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5991 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5992 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5993 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5994 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5995 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5996 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5997 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5998 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5999 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6000 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6001 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6002 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6003 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6004 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6005 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6006 }
6007
6008 static const StructEntry struct_termios_def = {
6009 .convert = { host_to_target_termios, target_to_host_termios },
6010 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6011 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6012 .print = print_termios,
6013 };
6014
6015 static const bitmask_transtbl mmap_flags_tbl[] = {
6016 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6017 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6018 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6019 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6020 MAP_ANONYMOUS, MAP_ANONYMOUS },
6021 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6022 MAP_GROWSDOWN, MAP_GROWSDOWN },
6023 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6024 MAP_DENYWRITE, MAP_DENYWRITE },
6025 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6026 MAP_EXECUTABLE, MAP_EXECUTABLE },
6027 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6028 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6029 MAP_NORESERVE, MAP_NORESERVE },
6030 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6031 /* MAP_STACK has been ignored by the kernel for quite some time.
6032 Recognize it for the target insofar as we do not want to pass
6033 it through to the host. */
6034 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6035 { 0, 0, 0, 0 }
6036 };
6037
6038 /*
6039 * NOTE: TARGET_I386 is also defined when TARGET_X86_64 is defined, but
6040 * TARGET_ABI32 is defined only for 32-bit i386 (not for TARGET_X86_64).
6041 */
6042 #if defined(TARGET_I386)
6043
6044 /* NOTE: there is really only one LDT, shared by all threads */
6045 static uint8_t *ldt_table;
6046
6047 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6048 {
6049 int size;
6050 void *p;
6051
6052 if (!ldt_table)
6053 return 0;
6054 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6055 if (size > bytecount)
6056 size = bytecount;
6057 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6058 if (!p)
6059 return -TARGET_EFAULT;
6060 /* ??? Should this be byteswapped? */
6061 memcpy(p, ldt_table, size);
6062 unlock_user(p, ptr, size);
6063 return size;
6064 }
6065
6066 /* XXX: add locking support */
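/*
 * Builds an x86 segment descriptor from the guest's request: entry_1
 * packs the low halves of base and limit, entry_2 the high bits plus
 * the access flags (0x7000 sets the S bit and DPL 3).  The LDT itself
 * is kept in guest memory so that descriptor loads by the emulated CPU
 * can read it directly.
 */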
6067 static abi_long write_ldt(CPUX86State *env,
6068 abi_ulong ptr, unsigned long bytecount, int oldmode)
6069 {
6070 struct target_modify_ldt_ldt_s ldt_info;
6071 struct target_modify_ldt_ldt_s *target_ldt_info;
6072 int seg_32bit, contents, read_exec_only, limit_in_pages;
6073 int seg_not_present, useable, lm;
6074 uint32_t *lp, entry_1, entry_2;
6075
6076 if (bytecount != sizeof(ldt_info))
6077 return -TARGET_EINVAL;
6078 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6079 return -TARGET_EFAULT;
6080 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6081 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6082 ldt_info.limit = tswap32(target_ldt_info->limit);
6083 ldt_info.flags = tswap32(target_ldt_info->flags);
6084 unlock_user_struct(target_ldt_info, ptr, 0);
6085
6086 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6087 return -TARGET_EINVAL;
6088 seg_32bit = ldt_info.flags & 1;
6089 contents = (ldt_info.flags >> 1) & 3;
6090 read_exec_only = (ldt_info.flags >> 3) & 1;
6091 limit_in_pages = (ldt_info.flags >> 4) & 1;
6092 seg_not_present = (ldt_info.flags >> 5) & 1;
6093 useable = (ldt_info.flags >> 6) & 1;
6094 #ifdef TARGET_ABI32
6095 lm = 0;
6096 #else
6097 lm = (ldt_info.flags >> 7) & 1;
6098 #endif
6099 if (contents == 3) {
6100 if (oldmode)
6101 return -TARGET_EINVAL;
6102 if (seg_not_present == 0)
6103 return -TARGET_EINVAL;
6104 }
6105 /* allocate the LDT */
6106 if (!ldt_table) {
6107 env->ldt.base = target_mmap(0,
6108 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6109 PROT_READ|PROT_WRITE,
6110 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6111 if (env->ldt.base == -1)
6112 return -TARGET_ENOMEM;
6113 memset(g2h_untagged(env->ldt.base), 0,
6114 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6115 env->ldt.limit = 0xffff;
6116 ldt_table = g2h_untagged(env->ldt.base);
6117 }
6118
6119 /* NOTE: same code as Linux kernel */
6120 /* Allow LDTs to be cleared by the user. */
6121 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6122 if (oldmode ||
6123 (contents == 0 &&
6124 read_exec_only == 1 &&
6125 seg_32bit == 0 &&
6126 limit_in_pages == 0 &&
6127 seg_not_present == 1 &&
6128 useable == 0)) {
6129 entry_1 = 0;
6130 entry_2 = 0;
6131 goto install;
6132 }
6133 }
6134
6135 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6136 (ldt_info.limit & 0x0ffff);
6137 entry_2 = (ldt_info.base_addr & 0xff000000) |
6138 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6139 (ldt_info.limit & 0xf0000) |
6140 ((read_exec_only ^ 1) << 9) |
6141 (contents << 10) |
6142 ((seg_not_present ^ 1) << 15) |
6143 (seg_32bit << 22) |
6144 (limit_in_pages << 23) |
6145 (lm << 21) |
6146 0x7000;
6147 if (!oldmode)
6148 entry_2 |= (useable << 20);
6149
6150 /* Install the new entry ... */
6151 install:
6152 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6153 lp[0] = tswap32(entry_1);
6154 lp[1] = tswap32(entry_2);
6155 return 0;
6156 }
6157
6158 /* specific and weird i386 syscalls */
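/*
 * The func codes follow the kernel's modify_ldt: 0 reads the table,
 * 1 writes an entry in the legacy format, and 0x11 writes in the
 * modern format that honours the "useable" bit.
 */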
6159 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6160 unsigned long bytecount)
6161 {
6162 abi_long ret;
6163
6164 switch (func) {
6165 case 0:
6166 ret = read_ldt(ptr, bytecount);
6167 break;
6168 case 1:
6169 ret = write_ldt(env, ptr, bytecount, 1);
6170 break;
6171 case 0x11:
6172 ret = write_ldt(env, ptr, bytecount, 0);
6173 break;
6174 default:
6175 ret = -TARGET_ENOSYS;
6176 break;
6177 }
6178 return ret;
6179 }
6180
6181 #if defined(TARGET_ABI32)
6182 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6183 {
6184 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6185 struct target_modify_ldt_ldt_s ldt_info;
6186 struct target_modify_ldt_ldt_s *target_ldt_info;
6187 int seg_32bit, contents, read_exec_only, limit_in_pages;
6188 int seg_not_present, useable, lm;
6189 uint32_t *lp, entry_1, entry_2;
6190 int i;
6191
6192 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6193 if (!target_ldt_info)
6194 return -TARGET_EFAULT;
6195 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6196 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6197 ldt_info.limit = tswap32(target_ldt_info->limit);
6198 ldt_info.flags = tswap32(target_ldt_info->flags);
6199 if (ldt_info.entry_number == -1) {
6200 for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
6201 if (gdt_table[i] == 0) {
6202 ldt_info.entry_number = i;
6203 target_ldt_info->entry_number = tswap32(i);
6204 break;
6205 }
6206 }
6207 }
6208 unlock_user_struct(target_ldt_info, ptr, 1);
6209
6210 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6211 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6212 return -TARGET_EINVAL;
6213 seg_32bit = ldt_info.flags & 1;
6214 contents = (ldt_info.flags >> 1) & 3;
6215 read_exec_only = (ldt_info.flags >> 3) & 1;
6216 limit_in_pages = (ldt_info.flags >> 4) & 1;
6217 seg_not_present = (ldt_info.flags >> 5) & 1;
6218 useable = (ldt_info.flags >> 6) & 1;
6219 #ifdef TARGET_ABI32
6220 lm = 0;
6221 #else
6222 lm = (ldt_info.flags >> 7) & 1;
6223 #endif
6224
6225 if (contents == 3) {
6226 if (seg_not_present == 0)
6227 return -TARGET_EINVAL;
6228 }
6229
6230 /* NOTE: same code as Linux kernel */
6231 /* Allow TLS entries to be cleared by the user. */
6232 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6233 if ((contents == 0 &&
6234 read_exec_only == 1 &&
6235 seg_32bit == 0 &&
6236 limit_in_pages == 0 &&
6237 seg_not_present == 1 &&
6238 useable == 0)) {
6239 entry_1 = 0;
6240 entry_2 = 0;
6241 goto install;
6242 }
6243 }
6244
6245 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6246 (ldt_info.limit & 0x0ffff);
6247 entry_2 = (ldt_info.base_addr & 0xff000000) |
6248 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6249 (ldt_info.limit & 0xf0000) |
6250 ((read_exec_only ^ 1) << 9) |
6251 (contents << 10) |
6252 ((seg_not_present ^ 1) << 15) |
6253 (seg_32bit << 22) |
6254 (limit_in_pages << 23) |
6255 (useable << 20) |
6256 (lm << 21) |
6257 0x7000;
6258
6259 /* Install the new entry ... */
6260 install:
6261 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6262 lp[0] = tswap32(entry_1);
6263 lp[1] = tswap32(entry_2);
6264 return 0;
6265 }
6266
6267 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6268 {
6269 struct target_modify_ldt_ldt_s *target_ldt_info;
6270 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6271 uint32_t base_addr, limit, flags;
6272 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6273 int seg_not_present, useable, lm;
6274 uint32_t *lp, entry_1, entry_2;
6275
6276 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6277 if (!target_ldt_info)
6278 return -TARGET_EFAULT;
6279 idx = tswap32(target_ldt_info->entry_number);
6280 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6281 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6282 unlock_user_struct(target_ldt_info, ptr, 1);
6283 return -TARGET_EINVAL;
6284 }
6285 lp = (uint32_t *)(gdt_table + idx);
6286 entry_1 = tswap32(lp[0]);
6287 entry_2 = tswap32(lp[1]);
6288
6289 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6290 contents = (entry_2 >> 10) & 3;
6291 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6292 seg_32bit = (entry_2 >> 22) & 1;
6293 limit_in_pages = (entry_2 >> 23) & 1;
6294 useable = (entry_2 >> 20) & 1;
6295 #ifdef TARGET_ABI32
6296 lm = 0;
6297 #else
6298 lm = (entry_2 >> 21) & 1;
6299 #endif
6300 flags = (seg_32bit << 0) | (contents << 1) |
6301 (read_exec_only << 3) | (limit_in_pages << 4) |
6302 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6303 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6304 base_addr = (entry_1 >> 16) |
6305 (entry_2 & 0xff000000) |
6306 ((entry_2 & 0xff) << 16);
6307 target_ldt_info->base_addr = tswapal(base_addr);
6308 target_ldt_info->limit = tswap32(limit);
6309 target_ldt_info->flags = tswap32(flags);
6310 unlock_user_struct(target_ldt_info, ptr, 1);
6311 return 0;
6312 }
6313
6314 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6315 {
6316 return -TARGET_ENOSYS;
6317 }
6318 #else
6319 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6320 {
6321 abi_long ret = 0;
6322 abi_ulong val;
6323 int idx;
6324
6325 switch (code) {
6326 case TARGET_ARCH_SET_GS:
6327 case TARGET_ARCH_SET_FS:
6328 if (code == TARGET_ARCH_SET_GS)
6329 idx = R_GS;
6330 else
6331 idx = R_FS;
6332 cpu_x86_load_seg(env, idx, 0);
6333 env->segs[idx].base = addr;
6334 break;
6335 case TARGET_ARCH_GET_GS:
6336 case TARGET_ARCH_GET_FS:
6337 if (code == TARGET_ARCH_GET_GS)
6338 idx = R_GS;
6339 else
6340 idx = R_FS;
6341 val = env->segs[idx].base;
6342 if (put_user(val, addr, abi_ulong))
6343 ret = -TARGET_EFAULT;
6344 break;
6345 default:
6346 ret = -TARGET_EINVAL;
6347 break;
6348 }
6349 return ret;
6350 }
6351 #endif /* defined(TARGET_ABI32) */
6352 #endif /* defined(TARGET_I386) */
6353
6354 /*
6355 * These constants are generic. Supply any that are missing from the host.
6356 */
6357 #ifndef PR_SET_NAME
6358 # define PR_SET_NAME 15
6359 # define PR_GET_NAME 16
6360 #endif
6361 #ifndef PR_SET_FP_MODE
6362 # define PR_SET_FP_MODE 45
6363 # define PR_GET_FP_MODE 46
6364 # define PR_FP_MODE_FR (1 << 0)
6365 # define PR_FP_MODE_FRE (1 << 1)
6366 #endif
6367 #ifndef PR_SVE_SET_VL
6368 # define PR_SVE_SET_VL 50
6369 # define PR_SVE_GET_VL 51
6370 # define PR_SVE_VL_LEN_MASK 0xffff
6371 # define PR_SVE_VL_INHERIT (1 << 17)
6372 #endif
6373 #ifndef PR_PAC_RESET_KEYS
6374 # define PR_PAC_RESET_KEYS 54
6375 # define PR_PAC_APIAKEY (1 << 0)
6376 # define PR_PAC_APIBKEY (1 << 1)
6377 # define PR_PAC_APDAKEY (1 << 2)
6378 # define PR_PAC_APDBKEY (1 << 3)
6379 # define PR_PAC_APGAKEY (1 << 4)
6380 #endif
6381 #ifndef PR_SET_TAGGED_ADDR_CTRL
6382 # define PR_SET_TAGGED_ADDR_CTRL 55
6383 # define PR_GET_TAGGED_ADDR_CTRL 56
6384 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6385 #endif
6386 #ifndef PR_MTE_TCF_SHIFT
6387 # define PR_MTE_TCF_SHIFT 1
6388 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6389 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6390 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6391 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6392 # define PR_MTE_TAG_SHIFT 3
6393 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6394 #endif
6395 #ifndef PR_SET_IO_FLUSHER
6396 # define PR_SET_IO_FLUSHER 57
6397 # define PR_GET_IO_FLUSHER 58
6398 #endif
6399 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6400 # define PR_SET_SYSCALL_USER_DISPATCH 59
6401 #endif
6402 #ifndef PR_SME_SET_VL
6403 # define PR_SME_SET_VL 63
6404 # define PR_SME_GET_VL 64
6405 # define PR_SME_VL_LEN_MASK 0xffff
6406 # define PR_SME_VL_INHERIT (1 << 17)
6407 #endif
6408
6409 #include "target_prctl.h"
6410
6411 static abi_long do_prctl_inval0(CPUArchState *env)
6412 {
6413 return -TARGET_EINVAL;
6414 }
6415
6416 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6417 {
6418 return -TARGET_EINVAL;
6419 }
6420
6421 #ifndef do_prctl_get_fp_mode
6422 #define do_prctl_get_fp_mode do_prctl_inval0
6423 #endif
6424 #ifndef do_prctl_set_fp_mode
6425 #define do_prctl_set_fp_mode do_prctl_inval1
6426 #endif
6427 #ifndef do_prctl_sve_get_vl
6428 #define do_prctl_sve_get_vl do_prctl_inval0
6429 #endif
6430 #ifndef do_prctl_sve_set_vl
6431 #define do_prctl_sve_set_vl do_prctl_inval1
6432 #endif
6433 #ifndef do_prctl_reset_keys
6434 #define do_prctl_reset_keys do_prctl_inval1
6435 #endif
6436 #ifndef do_prctl_set_tagged_addr_ctrl
6437 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6438 #endif
6439 #ifndef do_prctl_get_tagged_addr_ctrl
6440 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6441 #endif
6442 #ifndef do_prctl_get_unalign
6443 #define do_prctl_get_unalign do_prctl_inval1
6444 #endif
6445 #ifndef do_prctl_set_unalign
6446 #define do_prctl_set_unalign do_prctl_inval1
6447 #endif
6448 #ifndef do_prctl_sme_get_vl
6449 #define do_prctl_sme_get_vl do_prctl_inval0
6450 #endif
6451 #ifndef do_prctl_sme_set_vl
6452 #define do_prctl_sme_set_vl do_prctl_inval1
6453 #endif
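/*
 * Targets implement these hooks in their target_prctl.h; any option a
 * target leaves undefined falls back to the stubs above, which fail
 * with EINVAL regardless of the arguments.
 */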
6454
6455 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6456 abi_long arg3, abi_long arg4, abi_long arg5)
6457 {
6458 abi_long ret;
6459
6460 switch (option) {
6461 case PR_GET_PDEATHSIG:
6462 {
6463 int deathsig;
6464 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6465 arg3, arg4, arg5));
6466 if (!is_error(ret) &&
6467 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6468 return -TARGET_EFAULT;
6469 }
6470 return ret;
6471 }
6472 case PR_SET_PDEATHSIG:
6473 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6474 arg3, arg4, arg5));
6475 case PR_GET_NAME:
6476 {
6477 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6478 if (!name) {
6479 return -TARGET_EFAULT;
6480 }
6481 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6482 arg3, arg4, arg5));
6483 unlock_user(name, arg2, 16);
6484 return ret;
6485 }
6486 case PR_SET_NAME:
6487 {
6488 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6489 if (!name) {
6490 return -TARGET_EFAULT;
6491 }
6492 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6493 arg3, arg4, arg5));
6494 unlock_user(name, arg2, 0);
6495 return ret;
6496 }
6497 case PR_GET_FP_MODE:
6498 return do_prctl_get_fp_mode(env);
6499 case PR_SET_FP_MODE:
6500 return do_prctl_set_fp_mode(env, arg2);
6501 case PR_SVE_GET_VL:
6502 return do_prctl_sve_get_vl(env);
6503 case PR_SVE_SET_VL:
6504 return do_prctl_sve_set_vl(env, arg2);
6505 case PR_SME_GET_VL:
6506 return do_prctl_sme_get_vl(env);
6507 case PR_SME_SET_VL:
6508 return do_prctl_sme_set_vl(env, arg2);
6509 case PR_PAC_RESET_KEYS:
6510 if (arg3 || arg4 || arg5) {
6511 return -TARGET_EINVAL;
6512 }
6513 return do_prctl_reset_keys(env, arg2);
6514 case PR_SET_TAGGED_ADDR_CTRL:
6515 if (arg3 || arg4 || arg5) {
6516 return -TARGET_EINVAL;
6517 }
6518 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6519 case PR_GET_TAGGED_ADDR_CTRL:
6520 if (arg2 || arg3 || arg4 || arg5) {
6521 return -TARGET_EINVAL;
6522 }
6523 return do_prctl_get_tagged_addr_ctrl(env);
6524
6525 case PR_GET_UNALIGN:
6526 return do_prctl_get_unalign(env, arg2);
6527 case PR_SET_UNALIGN:
6528 return do_prctl_set_unalign(env, arg2);
6529
6530 case PR_CAP_AMBIENT:
6531 case PR_CAPBSET_READ:
6532 case PR_CAPBSET_DROP:
6533 case PR_GET_DUMPABLE:
6534 case PR_SET_DUMPABLE:
6535 case PR_GET_KEEPCAPS:
6536 case PR_SET_KEEPCAPS:
6537 case PR_GET_SECUREBITS:
6538 case PR_SET_SECUREBITS:
6539 case PR_GET_TIMING:
6540 case PR_SET_TIMING:
6541 case PR_GET_TIMERSLACK:
6542 case PR_SET_TIMERSLACK:
6543 case PR_MCE_KILL:
6544 case PR_MCE_KILL_GET:
6545 case PR_GET_NO_NEW_PRIVS:
6546 case PR_SET_NO_NEW_PRIVS:
6547 case PR_GET_IO_FLUSHER:
6548 case PR_SET_IO_FLUSHER:
6549 /* Some prctl options have no pointer arguments and we can pass them through. */
6550 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6551
6552 case PR_GET_CHILD_SUBREAPER:
6553 case PR_SET_CHILD_SUBREAPER:
6554 case PR_GET_SPECULATION_CTRL:
6555 case PR_SET_SPECULATION_CTRL:
6556 case PR_GET_TID_ADDRESS:
6557 /* TODO */
6558 return -TARGET_EINVAL;
6559
6560 case PR_GET_FPEXC:
6561 case PR_SET_FPEXC:
6562 /* Was used for SPE on PowerPC. */
6563 return -TARGET_EINVAL;
6564
6565 case PR_GET_ENDIAN:
6566 case PR_SET_ENDIAN:
6567 case PR_GET_FPEMU:
6568 case PR_SET_FPEMU:
6569 case PR_SET_MM:
6570 case PR_GET_SECCOMP:
6571 case PR_SET_SECCOMP:
6572 case PR_SET_SYSCALL_USER_DISPATCH:
6573 case PR_GET_THP_DISABLE:
6574 case PR_SET_THP_DISABLE:
6575 case PR_GET_TSC:
6576 case PR_SET_TSC:
6577 /* Disabled to prevent the target from disabling functionality we need. */
6578 return -TARGET_EINVAL;
6579
6580 default:
6581 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6582 option);
6583 return -TARGET_EINVAL;
6584 }
6585 }
6586
6587 #define NEW_STACK_SIZE 0x40000
6588
6589
6590 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6591 typedef struct {
6592 CPUArchState *env;
6593 pthread_mutex_t mutex;
6594 pthread_cond_t cond;
6595 pthread_t thread;
6596 uint32_t tid;
6597 abi_ulong child_tidptr;
6598 abi_ulong parent_tidptr;
6599 sigset_t sigmask;
6600 } new_thread_info;
6601
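/*
 * Entry point of a new guest thread: register with RCU and TCG,
 * publish the TID wherever CLONE_{CHILD,PARENT}_SETTID asked for it,
 * signal readiness to the parent via info->cond, and then block on
 * clone_lock until the parent has finished the TLS setup before
 * entering cpu_loop().
 */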
6602 static void *clone_func(void *arg)
6603 {
6604 new_thread_info *info = arg;
6605 CPUArchState *env;
6606 CPUState *cpu;
6607 TaskState *ts;
6608
6609 rcu_register_thread();
6610 tcg_register_thread();
6611 env = info->env;
6612 cpu = env_cpu(env);
6613 thread_cpu = cpu;
6614 ts = (TaskState *)cpu->opaque;
6615 info->tid = sys_gettid();
6616 task_settid(ts);
6617 if (info->child_tidptr)
6618 put_user_u32(info->tid, info->child_tidptr);
6619 if (info->parent_tidptr)
6620 put_user_u32(info->tid, info->parent_tidptr);
6621 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6622 /* Enable signals. */
6623 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6624 /* Signal to the parent that we're ready. */
6625 pthread_mutex_lock(&info->mutex);
6626 pthread_cond_broadcast(&info->cond);
6627 pthread_mutex_unlock(&info->mutex);
6628 /* Wait until the parent has finished initializing the tls state. */
6629 pthread_mutex_lock(&clone_lock);
6630 pthread_mutex_unlock(&clone_lock);
6631 cpu_loop(env);
6632 /* never exits */
6633 return NULL;
6634 }
6635
6636 /* do_fork() must return host values and target errnos (unlike most
6637 do_*() functions). */
6638 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6639 abi_ulong parent_tidptr, target_ulong newtls,
6640 abi_ulong child_tidptr)
6641 {
6642 CPUState *cpu = env_cpu(env);
6643 int ret;
6644 TaskState *ts;
6645 CPUState *new_cpu;
6646 CPUArchState *new_env;
6647 sigset_t sigmask;
6648
6649 flags &= ~CLONE_IGNORED_FLAGS;
6650
6651 /* Emulate vfork() with fork() */
6652 if (flags & CLONE_VFORK)
6653 flags &= ~(CLONE_VFORK | CLONE_VM);
6654
6655 if (flags & CLONE_VM) {
6656 TaskState *parent_ts = (TaskState *)cpu->opaque;
6657 new_thread_info info;
6658 pthread_attr_t attr;
6659
6660 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6661 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6662 return -TARGET_EINVAL;
6663 }
6664
6665 ts = g_new0(TaskState, 1);
6666 init_task_state(ts);
6667
6668 /* Grab a mutex so that thread setup appears atomic. */
6669 pthread_mutex_lock(&clone_lock);
6670
6671 /*
6672 * If this is our first additional thread, we need to ensure we
6673 * generate code for parallel execution and flush old translations.
6674 * Do this now so that the copy gets CF_PARALLEL too.
6675 */
6676 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6677 cpu->tcg_cflags |= CF_PARALLEL;
6678 tb_flush(cpu);
6679 }
6680
6681 /* Create a new CPU instance. */
6682 new_env = cpu_copy(env);
6683 /* Init regs that differ from the parent. */
6684 cpu_clone_regs_child(new_env, newsp, flags);
6685 cpu_clone_regs_parent(env, flags);
6686 new_cpu = env_cpu(new_env);
6687 new_cpu->opaque = ts;
6688 ts->bprm = parent_ts->bprm;
6689 ts->info = parent_ts->info;
6690 ts->signal_mask = parent_ts->signal_mask;
6691
6692 if (flags & CLONE_CHILD_CLEARTID) {
6693 ts->child_tidptr = child_tidptr;
6694 }
6695
6696 if (flags & CLONE_SETTLS) {
6697 cpu_set_tls (new_env, newtls);
6698 }
6699
6700 memset(&info, 0, sizeof(info));
6701 pthread_mutex_init(&info.mutex, NULL);
6702 pthread_mutex_lock(&info.mutex);
6703 pthread_cond_init(&info.cond, NULL);
6704 info.env = new_env;
6705 if (flags & CLONE_CHILD_SETTID) {
6706 info.child_tidptr = child_tidptr;
6707 }
6708 if (flags & CLONE_PARENT_SETTID) {
6709 info.parent_tidptr = parent_tidptr;
6710 }
6711
6712 ret = pthread_attr_init(&attr);
6713 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6714 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6715 /* It is not safe to deliver signals until the child has finished
6716 initializing, so temporarily block all signals. */
6717 sigfillset(&sigmask);
6718 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6719 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6720
6721 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6722 /* TODO: Free new CPU state if thread creation failed. */
6723
6724 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6725 pthread_attr_destroy(&attr);
6726 if (ret == 0) {
6727 /* Wait for the child to initialize. */
6728 pthread_cond_wait(&info.cond, &info.mutex);
6729 ret = info.tid;
6730 } else {
6731 ret = -1;
6732 }
6733 pthread_mutex_unlock(&info.mutex);
6734 pthread_cond_destroy(&info.cond);
6735 pthread_mutex_destroy(&info.mutex);
6736 pthread_mutex_unlock(&clone_lock);
6737 } else {
6738 /* Without CLONE_VM, we treat it as a fork. */
6739 if (flags & CLONE_INVALID_FORK_FLAGS) {
6740 return -TARGET_EINVAL;
6741 }
6742
6743 /* We can't support custom termination signals */
6744 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6745 return -TARGET_EINVAL;
6746 }
6747
6748 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6749 if (flags & CLONE_PIDFD) {
6750 return -TARGET_EINVAL;
6751 }
6752 #endif
6753
6754 /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID. */
6755 if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6756 return -TARGET_EINVAL;
6757 }
6758
6759 if (block_signals()) {
6760 return -QEMU_ERESTARTSYS;
6761 }
6762
6763 fork_start();
6764 ret = fork();
6765 if (ret == 0) {
6766 /* Child Process. */
6767 cpu_clone_regs_child(env, newsp, flags);
6768 fork_end(1);
6769 /* There is a race condition here. The parent process could
6770 theoretically read the TID in the child process before the child
6771 tid is set. This would require using either ptrace
6772 (not implemented) or having *_tidptr point at a shared memory
6773 mapping. We can't repeat the spinlock hack used above because
6774 the child process gets its own copy of the lock. */
6775 if (flags & CLONE_CHILD_SETTID)
6776 put_user_u32(sys_gettid(), child_tidptr);
6777 if (flags & CLONE_PARENT_SETTID)
6778 put_user_u32(sys_gettid(), parent_tidptr);
6779 ts = (TaskState *)cpu->opaque;
6780 if (flags & CLONE_SETTLS)
6781 cpu_set_tls (env, newtls);
6782 if (flags & CLONE_CHILD_CLEARTID)
6783 ts->child_tidptr = child_tidptr;
6784 } else {
6785 cpu_clone_regs_parent(env, flags);
6786 if (flags & CLONE_PIDFD) {
6787 int pid_fd = 0;
6788 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6789 int pid_child = ret;
6790 pid_fd = pidfd_open(pid_child, 0);
6791 if (pid_fd >= 0) {
6792 fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6793 | FD_CLOEXEC);
6794 } else {
6795 pid_fd = 0;
6796 }
6797 #endif
6798 put_user_u32(pid_fd, parent_tidptr);
6799 }
6800 fork_end(0);
6801 }
6802 g_assert(!cpu_in_exclusive_context(cpu));
6803 }
6804 return ret;
6805 }
6806
6807 /* Warning: doesn't handle Linux-specific flags... */
6808 static int target_to_host_fcntl_cmd(int cmd)
6809 {
6810 int ret;
6811
6812 switch (cmd) {
6813 case TARGET_F_DUPFD:
6814 case TARGET_F_GETFD:
6815 case TARGET_F_SETFD:
6816 case TARGET_F_GETFL:
6817 case TARGET_F_SETFL:
6818 case TARGET_F_OFD_GETLK:
6819 case TARGET_F_OFD_SETLK:
6820 case TARGET_F_OFD_SETLKW:
6821 ret = cmd;
6822 break;
6823 case TARGET_F_GETLK:
6824 ret = F_GETLK64;
6825 break;
6826 case TARGET_F_SETLK:
6827 ret = F_SETLK64;
6828 break;
6829 case TARGET_F_SETLKW:
6830 ret = F_SETLKW64;
6831 break;
6832 case TARGET_F_GETOWN:
6833 ret = F_GETOWN;
6834 break;
6835 case TARGET_F_SETOWN:
6836 ret = F_SETOWN;
6837 break;
6838 case TARGET_F_GETSIG:
6839 ret = F_GETSIG;
6840 break;
6841 case TARGET_F_SETSIG:
6842 ret = F_SETSIG;
6843 break;
6844 #if TARGET_ABI_BITS == 32
6845 case TARGET_F_GETLK64:
6846 ret = F_GETLK64;
6847 break;
6848 case TARGET_F_SETLK64:
6849 ret = F_SETLK64;
6850 break;
6851 case TARGET_F_SETLKW64:
6852 ret = F_SETLKW64;
6853 break;
6854 #endif
6855 case TARGET_F_SETLEASE:
6856 ret = F_SETLEASE;
6857 break;
6858 case TARGET_F_GETLEASE:
6859 ret = F_GETLEASE;
6860 break;
6861 #ifdef F_DUPFD_CLOEXEC
6862 case TARGET_F_DUPFD_CLOEXEC:
6863 ret = F_DUPFD_CLOEXEC;
6864 break;
6865 #endif
6866 case TARGET_F_NOTIFY:
6867 ret = F_NOTIFY;
6868 break;
6869 #ifdef F_GETOWN_EX
6870 case TARGET_F_GETOWN_EX:
6871 ret = F_GETOWN_EX;
6872 break;
6873 #endif
6874 #ifdef F_SETOWN_EX
6875 case TARGET_F_SETOWN_EX:
6876 ret = F_SETOWN_EX;
6877 break;
6878 #endif
6879 #ifdef F_SETPIPE_SZ
6880 case TARGET_F_SETPIPE_SZ:
6881 ret = F_SETPIPE_SZ;
6882 break;
6883 case TARGET_F_GETPIPE_SZ:
6884 ret = F_GETPIPE_SZ;
6885 break;
6886 #endif
6887 #ifdef F_ADD_SEALS
6888 case TARGET_F_ADD_SEALS:
6889 ret = F_ADD_SEALS;
6890 break;
6891 case TARGET_F_GET_SEALS:
6892 ret = F_GET_SEALS;
6893 break;
6894 #endif
6895 default:
6896 ret = -TARGET_EINVAL;
6897 break;
6898 }
6899
6900 #if defined(__powerpc64__)
6901 /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6902 * the kernel does not support. The glibc fcntl call actually adjusts
6903 * them to 5, 6 and 7 before making the syscall(). Since we make the
6904 * syscall directly, adjust to what the kernel supports.
6905 */
6906 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6907 ret -= F_GETLK64 - 5;
6908 }
6909 #endif
6910
6911 return ret;
6912 }
6913
6914 #define FLOCK_TRANSTBL \
6915 switch (type) { \
6916 TRANSTBL_CONVERT(F_RDLCK); \
6917 TRANSTBL_CONVERT(F_WRLCK); \
6918 TRANSTBL_CONVERT(F_UNLCK); \
6919 }
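/*
 * FLOCK_TRANSTBL is an X-macro: the same switch body is expanded twice
 * below with opposite definitions of TRANSTBL_CONVERT, generating both
 * directions of the l_type translation from one table.
 */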
6920
6921 static int target_to_host_flock(int type)
6922 {
6923 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6924 FLOCK_TRANSTBL
6925 #undef TRANSTBL_CONVERT
6926 return -TARGET_EINVAL;
6927 }
6928
6929 static int host_to_target_flock(int type)
6930 {
6931 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6932 FLOCK_TRANSTBL
6933 #undef TRANSTBL_CONVERT
6934 /* If we don't know how to convert the value coming from the host,
6935 * we copy it to the target field as-is.
6936 */
6937 return type;
6938 }
6939
6940 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6941 abi_ulong target_flock_addr)
6942 {
6943 struct target_flock *target_fl;
6944 int l_type;
6945
6946 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6947 return -TARGET_EFAULT;
6948 }
6949
6950 __get_user(l_type, &target_fl->l_type);
6951 l_type = target_to_host_flock(l_type);
6952 if (l_type < 0) {
6953 return l_type;
6954 }
6955 fl->l_type = l_type;
6956 __get_user(fl->l_whence, &target_fl->l_whence);
6957 __get_user(fl->l_start, &target_fl->l_start);
6958 __get_user(fl->l_len, &target_fl->l_len);
6959 __get_user(fl->l_pid, &target_fl->l_pid);
6960 unlock_user_struct(target_fl, target_flock_addr, 0);
6961 return 0;
6962 }
6963
6964 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6965 const struct flock64 *fl)
6966 {
6967 struct target_flock *target_fl;
6968 short l_type;
6969
6970 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6971 return -TARGET_EFAULT;
6972 }
6973
6974 l_type = host_to_target_flock(fl->l_type);
6975 __put_user(l_type, &target_fl->l_type);
6976 __put_user(fl->l_whence, &target_fl->l_whence);
6977 __put_user(fl->l_start, &target_fl->l_start);
6978 __put_user(fl->l_len, &target_fl->l_len);
6979 __put_user(fl->l_pid, &target_fl->l_pid);
6980 unlock_user_struct(target_fl, target_flock_addr, 1);
6981 return 0;
6982 }
6983
6984 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6985 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6986
6987 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6988 struct target_oabi_flock64 {
6989 abi_short l_type;
6990 abi_short l_whence;
6991 abi_llong l_start;
6992 abi_llong l_len;
6993 abi_int l_pid;
6994 } QEMU_PACKED;
6995
6996 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6997 abi_ulong target_flock_addr)
6998 {
6999 struct target_oabi_flock64 *target_fl;
7000 int l_type;
7001
7002 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7003 return -TARGET_EFAULT;
7004 }
7005
7006 __get_user(l_type, &target_fl->l_type);
7007 l_type = target_to_host_flock(l_type);
7008 if (l_type < 0) {
7009 return l_type;
7010 }
7011 fl->l_type = l_type;
7012 __get_user(fl->l_whence, &target_fl->l_whence);
7013 __get_user(fl->l_start, &target_fl->l_start);
7014 __get_user(fl->l_len, &target_fl->l_len);
7015 __get_user(fl->l_pid, &target_fl->l_pid);
7016 unlock_user_struct(target_fl, target_flock_addr, 0);
7017 return 0;
7018 }
7019
7020 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7021 const struct flock64 *fl)
7022 {
7023 struct target_oabi_flock64 *target_fl;
7024 short l_type;
7025
7026 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7027 return -TARGET_EFAULT;
7028 }
7029
7030 l_type = host_to_target_flock(fl->l_type);
7031 __put_user(l_type, &target_fl->l_type);
7032 __put_user(fl->l_whence, &target_fl->l_whence);
7033 __put_user(fl->l_start, &target_fl->l_start);
7034 __put_user(fl->l_len, &target_fl->l_len);
7035 __put_user(fl->l_pid, &target_fl->l_pid);
7036 unlock_user_struct(target_fl, target_flock_addr, 1);
7037 return 0;
7038 }
7039 #endif
7040
7041 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7042 abi_ulong target_flock_addr)
7043 {
7044 struct target_flock64 *target_fl;
7045 int l_type;
7046
7047 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7048 return -TARGET_EFAULT;
7049 }
7050
7051 __get_user(l_type, &target_fl->l_type);
7052 l_type = target_to_host_flock(l_type);
7053 if (l_type < 0) {
7054 return l_type;
7055 }
7056 fl->l_type = l_type;
7057 __get_user(fl->l_whence, &target_fl->l_whence);
7058 __get_user(fl->l_start, &target_fl->l_start);
7059 __get_user(fl->l_len, &target_fl->l_len);
7060 __get_user(fl->l_pid, &target_fl->l_pid);
7061 unlock_user_struct(target_fl, target_flock_addr, 0);
7062 return 0;
7063 }
7064
7065 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7066 const struct flock64 *fl)
7067 {
7068 struct target_flock64 *target_fl;
7069 short l_type;
7070
7071 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7072 return -TARGET_EFAULT;
7073 }
7074
7075 l_type = host_to_target_flock(fl->l_type);
7076 __put_user(l_type, &target_fl->l_type);
7077 __put_user(fl->l_whence, &target_fl->l_whence);
7078 __put_user(fl->l_start, &target_fl->l_start);
7079 __put_user(fl->l_len, &target_fl->l_len);
7080 __put_user(fl->l_pid, &target_fl->l_pid);
7081 unlock_user_struct(target_fl, target_flock_addr, 1);
7082 return 0;
7083 }
7084
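/*
 * All lock commands funnel through a host struct flock64, so one code
 * path serves F_GETLK/F_SETLK and their 64-bit variants; only the
 * guest-side copy helpers differ (32-bit, 64-bit or ARM OABI layout).
 */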
7085 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7086 {
7087 struct flock64 fl64;
7088 #ifdef F_GETOWN_EX
7089 struct f_owner_ex fox;
7090 struct target_f_owner_ex *target_fox;
7091 #endif
7092 abi_long ret;
7093 int host_cmd = target_to_host_fcntl_cmd(cmd);
7094
7095 if (host_cmd == -TARGET_EINVAL)
7096 return host_cmd;
7097
7098 switch (cmd) {
7099 case TARGET_F_GETLK:
7100 ret = copy_from_user_flock(&fl64, arg);
7101 if (ret) {
7102 return ret;
7103 }
7104 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7105 if (ret == 0) {
7106 ret = copy_to_user_flock(arg, &fl64);
7107 }
7108 break;
7109
7110 case TARGET_F_SETLK:
7111 case TARGET_F_SETLKW:
7112 ret = copy_from_user_flock(&fl64, arg);
7113 if (ret) {
7114 return ret;
7115 }
7116 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7117 break;
7118
7119 case TARGET_F_GETLK64:
7120 case TARGET_F_OFD_GETLK:
7121 ret = copy_from_user_flock64(&fl64, arg);
7122 if (ret) {
7123 return ret;
7124 }
7125 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7126 if (ret == 0) {
7127 ret = copy_to_user_flock64(arg, &fl64);
7128 }
7129 break;
7130 case TARGET_F_SETLK64:
7131 case TARGET_F_SETLKW64:
7132 case TARGET_F_OFD_SETLK:
7133 case TARGET_F_OFD_SETLKW:
7134 ret = copy_from_user_flock64(&fl64, arg);
7135 if (ret) {
7136 return ret;
7137 }
7138 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7139 break;
7140
7141 case TARGET_F_GETFL:
7142 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7143 if (ret >= 0) {
7144 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7145 /* Tell 32-bit guests that the fd uses large-file support on 64-bit hosts. */
7146 if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7147 ret |= TARGET_O_LARGEFILE;
7148 }
7149 }
7150 break;
7151
7152 case TARGET_F_SETFL:
7153 ret = get_errno(safe_fcntl(fd, host_cmd,
7154 target_to_host_bitmask(arg,
7155 fcntl_flags_tbl)));
7156 break;
7157
7158 #ifdef F_GETOWN_EX
7159 case TARGET_F_GETOWN_EX:
7160 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7161 if (ret >= 0) {
7162 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7163 return -TARGET_EFAULT;
7164 target_fox->type = tswap32(fox.type);
7165 target_fox->pid = tswap32(fox.pid);
7166 unlock_user_struct(target_fox, arg, 1);
7167 }
7168 break;
7169 #endif
7170
7171 #ifdef F_SETOWN_EX
7172 case TARGET_F_SETOWN_EX:
7173 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7174 return -TARGET_EFAULT;
7175 fox.type = tswap32(target_fox->type);
7176 fox.pid = tswap32(target_fox->pid);
7177 unlock_user_struct(target_fox, arg, 0);
7178 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7179 break;
7180 #endif
7181
7182 case TARGET_F_SETSIG:
7183 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7184 break;
7185
7186 case TARGET_F_GETSIG:
7187 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7188 break;
7189
7190 case TARGET_F_SETOWN:
7191 case TARGET_F_GETOWN:
7192 case TARGET_F_SETLEASE:
7193 case TARGET_F_GETLEASE:
7194 case TARGET_F_SETPIPE_SZ:
7195 case TARGET_F_GETPIPE_SZ:
7196 case TARGET_F_ADD_SEALS:
7197 case TARGET_F_GET_SEALS:
7198 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7199 break;
7200
7201 default:
7202 ret = get_errno(safe_fcntl(fd, cmd, arg));
7203 break;
7204 }
7205 return ret;
7206 }
7207
7208 #ifdef USE_UID16
7209
7210 static inline int high2lowuid(int uid)
7211 {
7212 if (uid > 65535)
7213 return 65534;
7214 else
7215 return uid;
7216 }
7217
7218 static inline int high2lowgid(int gid)
7219 {
7220 if (gid > 65535)
7221 return 65534;
7222 else
7223 return gid;
7224 }
7225
7226 static inline int low2highuid(int uid)
7227 {
7228 if ((int16_t)uid == -1)
7229 return -1;
7230 else
7231 return uid;
7232 }
7233
7234 static inline int low2highgid(int gid)
7235 {
7236 if ((int16_t)gid == -1)
7237 return -1;
7238 else
7239 return gid;
7240 }
7241 static inline int tswapid(int id)
7242 {
7243 return tswap16(id);
7244 }
7245
7246 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7247
7248 #else /* !USE_UID16 */
7249 static inline int high2lowuid(int uid)
7250 {
7251 return uid;
7252 }
7253 static inline int high2lowgid(int gid)
7254 {
7255 return gid;
7256 }
7257 static inline int low2highuid(int uid)
7258 {
7259 return uid;
7260 }
7261 static inline int low2highgid(int gid)
7262 {
7263 return gid;
7264 }
7265 static inline int tswapid(int id)
7266 {
7267 return tswap32(id);
7268 }
7269
7270 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7271
7272 #endif /* USE_UID16 */
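/*
 * With 16-bit UID syscalls, IDs above 65535 are squashed to 65534 (the
 * kernel's overflowuid) on the way to the guest, while a guest-supplied
 * 16-bit -1 must stay -1 so the "leave unchanged" convention of chown()
 * and the set*id() family keeps working.
 */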
7273
7274 /* We must do direct syscalls for setting UID/GID, because we want to
7275 * implement the Linux system call semantics of "change only for this thread",
7276 * not the libc/POSIX semantics of "change for all threads in process".
7277 * (See http://ewontfix.com/17/ for more details.)
7278 * We use the 32-bit version of the syscalls if present; if it is not
7279 * then either the host architecture supports 32-bit UIDs natively with
7280 * the standard syscall, or the 16-bit UID is the best we can do.
7281 */
7282 #ifdef __NR_setuid32
7283 #define __NR_sys_setuid __NR_setuid32
7284 #else
7285 #define __NR_sys_setuid __NR_setuid
7286 #endif
7287 #ifdef __NR_setgid32
7288 #define __NR_sys_setgid __NR_setgid32
7289 #else
7290 #define __NR_sys_setgid __NR_setgid
7291 #endif
7292 #ifdef __NR_setresuid32
7293 #define __NR_sys_setresuid __NR_setresuid32
7294 #else
7295 #define __NR_sys_setresuid __NR_setresuid
7296 #endif
7297 #ifdef __NR_setresgid32
7298 #define __NR_sys_setresgid __NR_setresgid32
7299 #else
7300 #define __NR_sys_setresgid __NR_setresgid
7301 #endif
7302
7303 _syscall1(int, sys_setuid, uid_t, uid)
7304 _syscall1(int, sys_setgid, gid_t, gid)
7305 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7306 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7307
7308 void syscall_init(void)
7309 {
7310 IOCTLEntry *ie;
7311 const argtype *arg_type;
7312 int size;
7313
7314 thunk_init(STRUCT_MAX);
7315
7316 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7317 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7318 #include "syscall_types.h"
7319 #undef STRUCT
7320 #undef STRUCT_SPECIAL
7321
7322 /* We patch the ioctl size if necessary. We rely on the fact that
7323 no ioctl has all bits set to '1' in the size field. */
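/* A size field of all ones marks a placeholder: the real target-side
   struct size is computed through the thunk layer at runtime and
   spliced back into target_cmd, so the lookup in do_ioctl() matches
   the command number the guest actually passes. */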
7324 ie = ioctl_entries;
7325 while (ie->target_cmd != 0) {
7326 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7327 TARGET_IOC_SIZEMASK) {
7328 arg_type = ie->arg_type;
7329 if (arg_type[0] != TYPE_PTR) {
7330 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7331 ie->target_cmd);
7332 exit(1);
7333 }
7334 arg_type++;
7335 size = thunk_type_size(arg_type, 0);
7336 ie->target_cmd = (ie->target_cmd &
7337 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7338 (size << TARGET_IOC_SIZESHIFT);
7339 }
7340
7341 /* automatic consistency check if same arch */
7342 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7343 (defined(__x86_64__) && defined(TARGET_X86_64))
7344 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7345 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7346 ie->name, ie->target_cmd, ie->host_cmd);
7347 }
7348 #endif
7349 ie++;
7350 }
7351 }
7352
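/*
 * On ABIs where a 64-bit syscall argument must start in an even
 * register (regpairs_aligned()), a padding argument precedes the pair,
 * so the low/high halves really arrive in arg3/arg4 rather than
 * arg2/arg3; the helpers below shift them back before reassembling the
 * 64-bit offset with target_offset64().
 */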
7353 #ifdef TARGET_NR_truncate64
7354 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7355 abi_long arg2,
7356 abi_long arg3,
7357 abi_long arg4)
7358 {
7359 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7360 arg2 = arg3;
7361 arg3 = arg4;
7362 }
7363 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7364 }
7365 #endif
7366
7367 #ifdef TARGET_NR_ftruncate64
7368 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7369 abi_long arg2,
7370 abi_long arg3,
7371 abi_long arg4)
7372 {
7373 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7374 arg2 = arg3;
7375 arg3 = arg4;
7376 }
7377 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7378 }
7379 #endif
7380
7381 #if defined(TARGET_NR_timer_settime) || \
7382 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7383 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7384 abi_ulong target_addr)
7385 {
7386 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7387 offsetof(struct target_itimerspec,
7388 it_interval)) ||
7389 target_to_host_timespec(&host_its->it_value, target_addr +
7390 offsetof(struct target_itimerspec,
7391 it_value))) {
7392 return -TARGET_EFAULT;
7393 }
7394
7395 return 0;
7396 }
7397 #endif
7398
7399 #if defined(TARGET_NR_timer_settime64) || \
7400 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7401 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7402 abi_ulong target_addr)
7403 {
7404 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7405 offsetof(struct target__kernel_itimerspec,
7406 it_interval)) ||
7407 target_to_host_timespec64(&host_its->it_value, target_addr +
7408 offsetof(struct target__kernel_itimerspec,
7409 it_value))) {
7410 return -TARGET_EFAULT;
7411 }
7412
7413 return 0;
7414 }
7415 #endif
7416
7417 #if ((defined(TARGET_NR_timerfd_gettime) || \
7418 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7419 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7420 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7421 struct itimerspec *host_its)
7422 {
7423 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7424 it_interval),
7425 &host_its->it_interval) ||
7426 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7427 it_value),
7428 &host_its->it_value)) {
7429 return -TARGET_EFAULT;
7430 }
7431 return 0;
7432 }
7433 #endif
7434
7435 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7436 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7437 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7438 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7439 struct itimerspec *host_its)
7440 {
7441 if (host_to_target_timespec64(target_addr +
7442 offsetof(struct target__kernel_itimerspec,
7443 it_interval),
7444 &host_its->it_interval) ||
7445 host_to_target_timespec64(target_addr +
7446 offsetof(struct target__kernel_itimerspec,
7447 it_value),
7448 &host_its->it_value)) {
7449 return -TARGET_EFAULT;
7450 }
7451 return 0;
7452 }
7453 #endif
7454
7455 #if defined(TARGET_NR_adjtimex) || \
7456 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7457 static inline abi_long target_to_host_timex(struct timex *host_tx,
7458 abi_long target_addr)
7459 {
7460 struct target_timex *target_tx;
7461
7462 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7463 return -TARGET_EFAULT;
7464 }
7465
7466 __get_user(host_tx->modes, &target_tx->modes);
7467 __get_user(host_tx->offset, &target_tx->offset);
7468 __get_user(host_tx->freq, &target_tx->freq);
7469 __get_user(host_tx->maxerror, &target_tx->maxerror);
7470 __get_user(host_tx->esterror, &target_tx->esterror);
7471 __get_user(host_tx->status, &target_tx->status);
7472 __get_user(host_tx->constant, &target_tx->constant);
7473 __get_user(host_tx->precision, &target_tx->precision);
7474 __get_user(host_tx->tolerance, &target_tx->tolerance);
7475 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7476 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7477 __get_user(host_tx->tick, &target_tx->tick);
7478 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7479 __get_user(host_tx->jitter, &target_tx->jitter);
7480 __get_user(host_tx->shift, &target_tx->shift);
7481 __get_user(host_tx->stabil, &target_tx->stabil);
7482 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7483 __get_user(host_tx->calcnt, &target_tx->calcnt);
7484 __get_user(host_tx->errcnt, &target_tx->errcnt);
7485 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7486 __get_user(host_tx->tai, &target_tx->tai);
7487
7488 unlock_user_struct(target_tx, target_addr, 0);
7489 return 0;
7490 }
7491
7492 static inline abi_long host_to_target_timex(abi_long target_addr,
7493 struct timex *host_tx)
7494 {
7495 struct target_timex *target_tx;
7496
7497 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7498 return -TARGET_EFAULT;
7499 }
7500
7501 __put_user(host_tx->modes, &target_tx->modes);
7502 __put_user(host_tx->offset, &target_tx->offset);
7503 __put_user(host_tx->freq, &target_tx->freq);
7504 __put_user(host_tx->maxerror, &target_tx->maxerror);
7505 __put_user(host_tx->esterror, &target_tx->esterror);
7506 __put_user(host_tx->status, &target_tx->status);
7507 __put_user(host_tx->constant, &target_tx->constant);
7508 __put_user(host_tx->precision, &target_tx->precision);
7509 __put_user(host_tx->tolerance, &target_tx->tolerance);
7510 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7511 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7512 __put_user(host_tx->tick, &target_tx->tick);
7513 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7514 __put_user(host_tx->jitter, &target_tx->jitter);
7515 __put_user(host_tx->shift, &target_tx->shift);
7516 __put_user(host_tx->stabil, &target_tx->stabil);
7517 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7518 __put_user(host_tx->calcnt, &target_tx->calcnt);
7519 __put_user(host_tx->errcnt, &target_tx->errcnt);
7520 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7521 __put_user(host_tx->tai, &target_tx->tai);
7522
7523 unlock_user_struct(target_tx, target_addr, 1);
7524 return 0;
7525 }
7526 #endif
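/*
 * Illustrative note (not from the original source): the converters above
 * back the guest-visible adjtimex(2)/clock_adjtime(2) interfaces.  A guest
 * doing e.g.
 *
 *     struct timex tx = { .modes = 0 };   // modes == 0: read-only query
 *     adjtimex(&tx);
 *
 * has every scalar field byte-swapped individually, since guest and host
 * may differ in both endianness and the width of long.
 */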
7527
7528
7529 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7530 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7531 abi_long target_addr)
7532 {
7533 struct target__kernel_timex *target_tx;
7534
7535 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7536 offsetof(struct target__kernel_timex,
7537 time))) {
7538 return -TARGET_EFAULT;
7539 }
7540
7541 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7542 return -TARGET_EFAULT;
7543 }
7544
7545 __get_user(host_tx->modes, &target_tx->modes);
7546 __get_user(host_tx->offset, &target_tx->offset);
7547 __get_user(host_tx->freq, &target_tx->freq);
7548 __get_user(host_tx->maxerror, &target_tx->maxerror);
7549 __get_user(host_tx->esterror, &target_tx->esterror);
7550 __get_user(host_tx->status, &target_tx->status);
7551 __get_user(host_tx->constant, &target_tx->constant);
7552 __get_user(host_tx->precision, &target_tx->precision);
7553 __get_user(host_tx->tolerance, &target_tx->tolerance);
7554 __get_user(host_tx->tick, &target_tx->tick);
7555 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7556 __get_user(host_tx->jitter, &target_tx->jitter);
7557 __get_user(host_tx->shift, &target_tx->shift);
7558 __get_user(host_tx->stabil, &target_tx->stabil);
7559 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7560 __get_user(host_tx->calcnt, &target_tx->calcnt);
7561 __get_user(host_tx->errcnt, &target_tx->errcnt);
7562 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7563 __get_user(host_tx->tai, &target_tx->tai);
7564
7565 unlock_user_struct(target_tx, target_addr, 0);
7566 return 0;
7567 }
7568
7569 static inline abi_long host_to_target_timex64(abi_long target_addr,
7570 struct timex *host_tx)
7571 {
7572 struct target__kernel_timex *target_tx;
7573
7574 if (copy_to_user_timeval64(target_addr +
7575 offsetof(struct target__kernel_timex, time),
7576 &host_tx->time)) {
7577 return -TARGET_EFAULT;
7578 }
7579
7580 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7581 return -TARGET_EFAULT;
7582 }
7583
7584 __put_user(host_tx->modes, &target_tx->modes);
7585 __put_user(host_tx->offset, &target_tx->offset);
7586 __put_user(host_tx->freq, &target_tx->freq);
7587 __put_user(host_tx->maxerror, &target_tx->maxerror);
7588 __put_user(host_tx->esterror, &target_tx->esterror);
7589 __put_user(host_tx->status, &target_tx->status);
7590 __put_user(host_tx->constant, &target_tx->constant);
7591 __put_user(host_tx->precision, &target_tx->precision);
7592 __put_user(host_tx->tolerance, &target_tx->tolerance);
7593 __put_user(host_tx->tick, &target_tx->tick);
7594 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7595 __put_user(host_tx->jitter, &target_tx->jitter);
7596 __put_user(host_tx->shift, &target_tx->shift);
7597 __put_user(host_tx->stabil, &target_tx->stabil);
7598 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7599 __put_user(host_tx->calcnt, &target_tx->calcnt);
7600 __put_user(host_tx->errcnt, &target_tx->errcnt);
7601 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7602 __put_user(host_tx->tai, &target_tx->tai);
7603
7604 unlock_user_struct(target_tx, target_addr, 1);
7605 return 0;
7606 }
7607 #endif
7608
7609 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7610 #define sigev_notify_thread_id _sigev_un._tid
7611 #endif
7612
7613 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7614 abi_ulong target_addr)
7615 {
7616 struct target_sigevent *target_sevp;
7617
7618 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7619 return -TARGET_EFAULT;
7620 }
7621
7622 /* This union is awkward on 64 bit systems because it has a 32 bit
7623 * integer and a pointer in it; we follow the conversion approach
7624 * used for handling sigval types in signal.c so the guest should get
7625 * the correct value back even if we did a 64 bit byteswap and it's
7626 * using the 32 bit integer.
7627 */
7628 host_sevp->sigev_value.sival_ptr =
7629 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7630 host_sevp->sigev_signo =
7631 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7632 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7633 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7634
7635 unlock_user_struct(target_sevp, target_addr, 1);
7636 return 0;
7637 }
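/*
 * Illustrative reading of the comment above: if a guest uses sival_int
 * but QEMU byteswaps the union as a pointer-sized value, the bytes can
 * land in the "wrong" half of the host pointer; because signal.c applies
 * the mirror-image conversion when delivering the value back, the round
 * trip is self-consistent and the guest still reads its original integer.
 */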
7638
7639 #if defined(TARGET_NR_mlockall)
7640 static inline int target_to_host_mlockall_arg(int arg)
7641 {
7642 int result = 0;
7643
7644 if (arg & TARGET_MCL_CURRENT) {
7645 result |= MCL_CURRENT;
7646 }
7647 if (arg & TARGET_MCL_FUTURE) {
7648 result |= MCL_FUTURE;
7649 }
7650 #ifdef MCL_ONFAULT
7651 if (arg & TARGET_MCL_ONFAULT) {
7652 result |= MCL_ONFAULT;
7653 }
7654 #endif
7655
7656 return result;
7657 }
7658 #endif
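/*
 * Note (illustrative): the explicit flag translation above matters because
 * MCL_* values are not universal; on some architectures (e.g. alpha and
 * sparc) MCL_CURRENT/MCL_FUTURE are different bit values from the common
 * 1/2, so passing the guest value straight through could be wrong when
 * host and target differ.
 */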
7659
7660 static inline int target_to_host_msync_arg(abi_long arg)
7661 {
7662 return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7663 ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7664 ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7665 (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7666 }
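/*
 * Note (illustrative): unknown bits are deliberately passed through
 * untranslated in the last term above, so the host kernel, not QEMU,
 * gets to reject invalid msync(2) flags with EINVAL.
 */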
7667
7668 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7669 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7670 defined(TARGET_NR_newfstatat))
7671 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7672 abi_ulong target_addr,
7673 struct stat *host_st)
7674 {
7675 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7676 if (cpu_env->eabi) {
7677 struct target_eabi_stat64 *target_st;
7678
7679 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7680 return -TARGET_EFAULT;
7681 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7682 __put_user(host_st->st_dev, &target_st->st_dev);
7683 __put_user(host_st->st_ino, &target_st->st_ino);
7684 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7685 __put_user(host_st->st_ino, &target_st->__st_ino);
7686 #endif
7687 __put_user(host_st->st_mode, &target_st->st_mode);
7688 __put_user(host_st->st_nlink, &target_st->st_nlink);
7689 __put_user(host_st->st_uid, &target_st->st_uid);
7690 __put_user(host_st->st_gid, &target_st->st_gid);
7691 __put_user(host_st->st_rdev, &target_st->st_rdev);
7692 __put_user(host_st->st_size, &target_st->st_size);
7693 __put_user(host_st->st_blksize, &target_st->st_blksize);
7694 __put_user(host_st->st_blocks, &target_st->st_blocks);
7695 __put_user(host_st->st_atime, &target_st->target_st_atime);
7696 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7697 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7698 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7699 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7700 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7701 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7702 #endif
7703 unlock_user_struct(target_st, target_addr, 1);
7704 } else
7705 #endif
7706 {
7707 #if defined(TARGET_HAS_STRUCT_STAT64)
7708 struct target_stat64 *target_st;
7709 #else
7710 struct target_stat *target_st;
7711 #endif
7712
7713 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7714 return -TARGET_EFAULT;
7715 memset(target_st, 0, sizeof(*target_st));
7716 __put_user(host_st->st_dev, &target_st->st_dev);
7717 __put_user(host_st->st_ino, &target_st->st_ino);
7718 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7719 __put_user(host_st->st_ino, &target_st->__st_ino);
7720 #endif
7721 __put_user(host_st->st_mode, &target_st->st_mode);
7722 __put_user(host_st->st_nlink, &target_st->st_nlink);
7723 __put_user(host_st->st_uid, &target_st->st_uid);
7724 __put_user(host_st->st_gid, &target_st->st_gid);
7725 __put_user(host_st->st_rdev, &target_st->st_rdev);
7726 /* XXX: better use of kernel struct */
7727 __put_user(host_st->st_size, &target_st->st_size);
7728 __put_user(host_st->st_blksize, &target_st->st_blksize);
7729 __put_user(host_st->st_blocks, &target_st->st_blocks);
7730 __put_user(host_st->st_atime, &target_st->target_st_atime);
7731 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7732 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7733 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7734 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7735 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7736 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7737 #endif
7738 unlock_user_struct(target_st, target_addr, 1);
7739 }
7740
7741 return 0;
7742 }
7743 #endif
7744
7745 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7746 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7747 abi_ulong target_addr)
7748 {
7749 struct target_statx *target_stx;
7750
7751 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7752 return -TARGET_EFAULT;
7753 }
7754 memset(target_stx, 0, sizeof(*target_stx));
7755
7756 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7757 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7758 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7759 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7760 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7761 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7762 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7763 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7764 __put_user(host_stx->stx_size, &target_stx->stx_size);
7765 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7766 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7767 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7768 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7769 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7770 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7771 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7772 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7773 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7774 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7775 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7776 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7777 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7778 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7779
7780 unlock_user_struct(target_stx, target_addr, 1);
7781
7782 return 0;
7783 }
7784 #endif
7785
7786 static int do_sys_futex(int *uaddr, int op, int val,
7787 const struct timespec *timeout, int *uaddr2,
7788 int val3)
7789 {
7790 #if HOST_LONG_BITS == 64
7791 #if defined(__NR_futex)
7792 /* The host time_t is always 64-bit here; no _time64 variant is defined. */
7793 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7794
7795 #endif
7796 #else /* HOST_LONG_BITS == 64 */
7797 #if defined(__NR_futex_time64)
7798 if (sizeof(timeout->tv_sec) == 8) {
7799 /* _time64 function on 32bit arch */
7800 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7801 }
7802 #endif
7803 #if defined(__NR_futex)
7804 /* old function on 32bit arch */
7805 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7806 #endif
7807 #endif /* HOST_LONG_BITS == 64 */
7808 g_assert_not_reached();
7809 }
7810
7811 static int do_safe_futex(int *uaddr, int op, int val,
7812 const struct timespec *timeout, int *uaddr2,
7813 int val3)
7814 {
7815 #if HOST_LONG_BITS == 64
7816 #if defined(__NR_futex)
7817 /* The host time_t is always 64-bit here; no _time64 variant is defined. */
7818 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7819 #endif
7820 #else /* HOST_LONG_BITS == 64 */
7821 #if defined(__NR_futex_time64)
7822 if (sizeof(timeout->tv_sec) == 8) {
7823 /* _time64 function on 32bit arch */
7824 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7825 val3));
7826 }
7827 #endif
7828 #if defined(__NR_futex)
7829 /* old function on 32bit arch */
7830 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7831 #endif
7832 #endif /* HOST_LONG_BITS == 64 */
7833 return -TARGET_ENOSYS;
7834 }
7835
7836 /* ??? Using host futex calls even when target atomic operations
7837    are not really atomic probably breaks things. However, implementing
7838    futexes locally would make futexes shared between multiple processes
7839    tricky; such local futexes would probably be useless anyway, since
7840    guest atomic operations would not work either. */
7841 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7842 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7843 int op, int val, target_ulong timeout,
7844 target_ulong uaddr2, int val3)
7845 {
7846 struct timespec ts, *pts = NULL;
7847 void *haddr2 = NULL;
7848 int base_op;
7849
7850 /* We assume FUTEX_* constants are the same on both host and target. */
7851 #ifdef FUTEX_CMD_MASK
7852 base_op = op & FUTEX_CMD_MASK;
7853 #else
7854 base_op = op;
7855 #endif
7856 switch (base_op) {
7857 case FUTEX_WAIT:
7858 case FUTEX_WAIT_BITSET:
7859 val = tswap32(val);
7860 break;
7861 case FUTEX_WAIT_REQUEUE_PI:
7862 val = tswap32(val);
7863 haddr2 = g2h(cpu, uaddr2);
7864 break;
7865 case FUTEX_LOCK_PI:
7866 case FUTEX_LOCK_PI2:
7867 break;
7868 case FUTEX_WAKE:
7869 case FUTEX_WAKE_BITSET:
7870 case FUTEX_TRYLOCK_PI:
7871 case FUTEX_UNLOCK_PI:
7872 timeout = 0;
7873 break;
7874 case FUTEX_FD:
7875 val = target_to_host_signal(val);
7876 timeout = 0;
7877 break;
7878 case FUTEX_CMP_REQUEUE:
7879 case FUTEX_CMP_REQUEUE_PI:
7880 val3 = tswap32(val3);
7881 /* fall through */
7882 case FUTEX_REQUEUE:
7883 case FUTEX_WAKE_OP:
7884 /*
7885 * For these, the 4th argument is not TIMEOUT, but VAL2.
7886 * But the prototype of do_safe_futex takes a pointer, so
7887 * insert casts to satisfy the compiler. We do not need
7888 * to tswap VAL2 since it's not compared to guest memory.
7889 */
7890 pts = (struct timespec *)(uintptr_t)timeout;
7891 timeout = 0;
7892 haddr2 = g2h(cpu, uaddr2);
7893 break;
7894 default:
7895 return -TARGET_ENOSYS;
7896 }
7897 if (timeout) {
7898 pts = &ts;
7899 if (time64
7900 ? target_to_host_timespec64(pts, timeout)
7901 : target_to_host_timespec(pts, timeout)) {
7902 return -TARGET_EFAULT;
7903 }
7904 }
7905 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7906 }
7907 #endif
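/*
 * Worked example (illustrative): for FUTEX_WAIT the host kernel compares
 * *uaddr, which is guest memory and therefore target-endian, against VAL.
 * Hence the tswap32(val) above: on a cross-endian setup a guest value of
 * 0x00000001 is stored as bytes the host reads back as 0x01000000, so VAL
 * must be presented to the host kernel in that same byte order for the
 * comparison to come out right.
 */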
7908
7909 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7910 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7911 abi_long handle, abi_long mount_id,
7912 abi_long flags)
7913 {
7914 struct file_handle *target_fh;
7915 struct file_handle *fh;
7916 int mid = 0;
7917 abi_long ret;
7918 char *name;
7919 unsigned int size, total_size;
7920
7921 if (get_user_s32(size, handle)) {
7922 return -TARGET_EFAULT;
7923 }
7924
7925 name = lock_user_string(pathname);
7926 if (!name) {
7927 return -TARGET_EFAULT;
7928 }
7929
7930 total_size = sizeof(struct file_handle) + size;
7931 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7932 if (!target_fh) {
7933 unlock_user(name, pathname, 0);
7934 return -TARGET_EFAULT;
7935 }
7936
7937 fh = g_malloc0(total_size);
7938 fh->handle_bytes = size;
7939
7940 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7941 unlock_user(name, pathname, 0);
7942
7943 /* man name_to_handle_at(2):
7944 * Other than the use of the handle_bytes field, the caller should treat
7945 * the file_handle structure as an opaque data type
7946 */
7947
7948 memcpy(target_fh, fh, total_size);
7949 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7950 target_fh->handle_type = tswap32(fh->handle_type);
7951 g_free(fh);
7952 unlock_user(target_fh, handle, total_size);
7953
7954 if (put_user_s32(mid, mount_id)) {
7955 return -TARGET_EFAULT;
7956 }
7957
7958 return ret;
7959
7960 }
7961 #endif
7962
7963 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7964 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7965 abi_long flags)
7966 {
7967 struct file_handle *target_fh;
7968 struct file_handle *fh;
7969 unsigned int size, total_size;
7970 abi_long ret;
7971
7972 if (get_user_s32(size, handle)) {
7973 return -TARGET_EFAULT;
7974 }
7975
7976 total_size = sizeof(struct file_handle) + size;
7977 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7978 if (!target_fh) {
7979 return -TARGET_EFAULT;
7980 }
7981
7982 fh = g_memdup(target_fh, total_size);
7983 fh->handle_bytes = size;
7984 fh->handle_type = tswap32(target_fh->handle_type);
7985
7986 ret = get_errno(open_by_handle_at(mount_fd, fh,
7987 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7988
7989 g_free(fh);
7990
7991 unlock_user(target_fh, handle, total_size);
7992
7993 return ret;
7994 }
7995 #endif
7996
7997 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7998
7999 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
8000 {
8001 int host_flags;
8002 target_sigset_t *target_mask;
8003 sigset_t host_mask;
8004 abi_long ret;
8005
8006 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
8007 return -TARGET_EINVAL;
8008 }
8009 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
8010 return -TARGET_EFAULT;
8011 }
8012
8013 target_to_host_sigset(&host_mask, target_mask);
8014
8015 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8016
8017 ret = get_errno(signalfd(fd, &host_mask, host_flags));
8018 if (ret >= 0) {
8019 fd_trans_register(ret, &target_signalfd_trans);
8020 }
8021
8022 unlock_user_struct(target_mask, mask, 0);
8023
8024 return ret;
8025 }
8026 #endif
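/*
 * Note (illustrative): TARGET_O_NONBLOCK/TARGET_O_CLOEXEC are translated
 * through fcntl_flags_tbl because on Linux SFD_NONBLOCK and SFD_CLOEXEC
 * are defined to equal O_NONBLOCK and O_CLOEXEC, so the generic fcntl
 * flag mapping also yields valid signalfd4(2) flags.
 */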
8027
8028 /* Map host to target signal numbers for the wait family of syscalls.
8029 Assume all other status bits are the same. */
8030 int host_to_target_waitstatus(int status)
8031 {
8032 if (WIFSIGNALED(status)) {
8033 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8034 }
8035 if (WIFSTOPPED(status)) {
8036 return (host_to_target_signal(WSTOPSIG(status)) << 8)
8037 | (status & 0xff);
8038 }
8039 return status;
8040 }
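/*
 * Worked example (illustrative): a host status of 0x0086 decodes as
 * WIFSIGNALED with WTERMSIG == 6 plus the core-dump bit 0x80; only the
 * low 7 signal bits are remapped above, so the core-dump bit and any
 * exit-code bits survive the translation untouched.
 */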
8041
8042 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8043 {
8044 CPUState *cpu = env_cpu(cpu_env);
8045 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8046 int i;
8047
8048 for (i = 0; i < bprm->argc; i++) {
8049 size_t len = strlen(bprm->argv[i]) + 1;
8050
8051 if (write(fd, bprm->argv[i], len) != len) {
8052 return -1;
8053 }
8054 }
8055
8056 return 0;
8057 }
8058
8059 static void show_smaps(int fd, unsigned long size)
8060 {
8061 unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8062 unsigned long size_kb = size >> 10;
8063
8064 dprintf(fd, "Size: %lu kB\n"
8065 "KernelPageSize: %lu kB\n"
8066 "MMUPageSize: %lu kB\n"
8067 "Rss: 0 kB\n"
8068 "Pss: 0 kB\n"
8069 "Pss_Dirty: 0 kB\n"
8070 "Shared_Clean: 0 kB\n"
8071 "Shared_Dirty: 0 kB\n"
8072 "Private_Clean: 0 kB\n"
8073 "Private_Dirty: 0 kB\n"
8074 "Referenced: 0 kB\n"
8075 "Anonymous: 0 kB\n"
8076 "LazyFree: 0 kB\n"
8077 "AnonHugePages: 0 kB\n"
8078 "ShmemPmdMapped: 0 kB\n"
8079 "FilePmdMapped: 0 kB\n"
8080 "Shared_Hugetlb: 0 kB\n"
8081 "Private_Hugetlb: 0 kB\n"
8082 "Swap: 0 kB\n"
8083 "SwapPss: 0 kB\n"
8084 "Locked: 0 kB\n"
8085 "THPeligible: 0\n", size_kb, page_size_kb, page_size_kb);
8086 }
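/*
 * Note (illustrative): every accounting field above is hard-coded to zero
 * because QEMU does not track per-mapping RSS/PSS for the guest; only the
 * mapping size and the (target) page size are reported truthfully.
 */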
8087
8088 static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
8089 {
8090 CPUState *cpu = env_cpu(cpu_env);
8091 TaskState *ts = cpu->opaque;
8092 GSList *map_info = read_self_maps();
8093 GSList *s;
8094 int count;
8095
8096 for (s = map_info; s; s = g_slist_next(s)) {
8097 MapInfo *e = (MapInfo *) s->data;
8098
8099 if (h2g_valid(e->start)) {
8100 unsigned long min = e->start;
8101 unsigned long max = e->end;
8102 int flags = page_get_flags(h2g(min));
8103 const char *path;
8104
8105 max = h2g_valid(max - 1) ?
8106 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8107
8108 if (page_check_range(h2g(min), max - min, flags) == -1) {
8109 continue;
8110 }
8111
8112 #ifdef TARGET_HPPA
8113 if (h2g(max) == ts->info->stack_limit) {
8114 #else
8115 if (h2g(min) == ts->info->stack_limit) {
8116 #endif
8117 path = "[stack]";
8118 } else {
8119 path = e->path;
8120 }
8121
8122 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8123 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8124 h2g(min), h2g(max - 1) + 1,
8125 (flags & PAGE_READ) ? 'r' : '-',
8126 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8127 (flags & PAGE_EXEC) ? 'x' : '-',
8128 e->is_priv ? 'p' : 's',
8129 (uint64_t) e->offset, e->dev, e->inode);
8130 if (path) {
8131 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8132 } else {
8133 dprintf(fd, "\n");
8134 }
8135 if (smaps) {
8136 show_smaps(fd, max - min);
8137 dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
8138 (flags & PAGE_READ) ? " rd" : "",
8139 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8140 (flags & PAGE_EXEC) ? " ex" : "",
8141 e->is_priv ? "" : " sh",
8142 (flags & PAGE_READ) ? " mr" : "",
8143 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8144 (flags & PAGE_EXEC) ? " me" : "",
8145 e->is_priv ? "" : " ms");
8146 }
8147 }
8148 }
8149
8150 free_self_maps(map_info);
8151
8152 #ifdef TARGET_VSYSCALL_PAGE
8153 /*
8154  * We only support execution from the vsyscall page, behaving as if
8155  * CONFIG_LEGACY_VSYSCALL_XONLY=y (introduced in kernel v5.3).
8156  */
8157 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8158 " --xp 00000000 00:00 0",
8159 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8160 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8161 if (smaps) {
8162 show_smaps(fd, TARGET_PAGE_SIZE);
8163 dprintf(fd, "VmFlags: ex\n");
8164 }
8165 #endif
8166
8167 return 0;
8168 }
8169
8170 static int open_self_maps(CPUArchState *cpu_env, int fd)
8171 {
8172 return open_self_maps_1(cpu_env, fd, false);
8173 }
8174
8175 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8176 {
8177 return open_self_maps_1(cpu_env, fd, true);
8178 }
8179
8180 static int open_self_stat(CPUArchState *cpu_env, int fd)
8181 {
8182 CPUState *cpu = env_cpu(cpu_env);
8183 TaskState *ts = cpu->opaque;
8184 g_autoptr(GString) buf = g_string_new(NULL);
8185 int i;
8186
8187 for (i = 0; i < 44; i++) {
8188 if (i == 0) {
8189 /* pid */
8190 g_string_printf(buf, FMT_pid " ", getpid());
8191 } else if (i == 1) {
8192 /* app name */
8193 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8194 bin = bin ? bin + 1 : ts->bprm->argv[0];
8195 g_string_printf(buf, "(%.15s) ", bin);
8196 } else if (i == 2) {
8197 /* task state */
8198 g_string_assign(buf, "R "); /* we are running right now */
8199 } else if (i == 3) {
8200 /* ppid */
8201 g_string_printf(buf, FMT_pid " ", getppid());
8202 } else if (i == 21) {
8203 /* starttime */
8204 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8205 } else if (i == 27) {
8206 /* stack bottom */
8207 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8208 } else {
8209 /* for the rest, there is MasterCard */
8210 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8211 }
8212
8213 if (write(fd, buf->str, buf->len) != buf->len) {
8214 return -1;
8215 }
8216 }
8217
8218 return 0;
8219 }
8220
8221 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8222 {
8223 CPUState *cpu = env_cpu(cpu_env);
8224 TaskState *ts = cpu->opaque;
8225 abi_ulong auxv = ts->info->saved_auxv;
8226 abi_ulong len = ts->info->auxv_len;
8227 char *ptr;
8228
8229 /*
8230  * The auxiliary vector is stored on the target process's stack;
8231  * read in the whole vector and copy it to the file.
8232  */
8233 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8234 if (ptr != NULL) {
8235 while (len > 0) {
8236 ssize_t r;
8237 r = write(fd, ptr, len);
8238 if (r <= 0) {
8239 break;
8240 }
8241 len -= r;
8242 ptr += r;
8243 }
8244 lseek(fd, 0, SEEK_SET);
8245 unlock_user(ptr, auxv, len);
8246 }
8247
8248 return 0;
8249 }
8250
8251 static int is_proc_myself(const char *filename, const char *entry)
8252 {
8253 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8254 filename += strlen("/proc/");
8255 if (!strncmp(filename, "self/", strlen("self/"))) {
8256 filename += strlen("self/");
8257 } else if (*filename >= '1' && *filename <= '9') {
8258 char myself[80];
8259 snprintf(myself, sizeof(myself), "%d/", getpid());
8260 if (!strncmp(filename, myself, strlen(myself))) {
8261 filename += strlen(myself);
8262 } else {
8263 return 0;
8264 }
8265 } else {
8266 return 0;
8267 }
8268 if (!strcmp(filename, entry)) {
8269 return 1;
8270 }
8271 }
8272 return 0;
8273 }
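/*
 * Worked example (illustrative): with getpid() == 1234,
 * is_proc_myself("/proc/self/exe", "exe") and
 * is_proc_myself("/proc/1234/exe", "exe") both return 1, while
 * "/proc/999/exe", or "/proc/self/maps" checked against entry "exe",
 * returns 0.
 */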
8274
8275 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8276 const char *fmt, int code)
8277 {
8278 if (logfile) {
8279 CPUState *cs = env_cpu(env);
8280
8281 fprintf(logfile, fmt, code);
8282 fprintf(logfile, "Failing executable: %s\n", exec_path);
8283 cpu_dump_state(cs, logfile, 0);
8284 open_self_maps(env, fileno(logfile));
8285 }
8286 }
8287
8288 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8289 {
8290 /* dump to console */
8291 excp_dump_file(stderr, env, fmt, code);
8292
8293 /* dump to log file */
8294 if (qemu_log_separate()) {
8295 FILE *logfile = qemu_log_trylock();
8296
8297 excp_dump_file(logfile, env, fmt, code);
8298 qemu_log_unlock(logfile);
8299 }
8300 }
8301
8302 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8303 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8304 defined(TARGET_RISCV) || defined(TARGET_S390X)
8305 static int is_proc(const char *filename, const char *entry)
8306 {
8307 return strcmp(filename, entry) == 0;
8308 }
8309 #endif
8310
8311 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8312 static int open_net_route(CPUArchState *cpu_env, int fd)
8313 {
8314 FILE *fp;
8315 char *line = NULL;
8316 size_t len = 0;
8317 ssize_t read;
8318
8319 fp = fopen("/proc/net/route", "r");
8320 if (fp == NULL) {
8321 return -1;
8322 }
8323
8324 /* read header */
8325
8326 read = getline(&line, &len, fp);
8327 dprintf(fd, "%s", line);
8328
8329 /* read routes */
8330
8331 while ((read = getline(&line, &len, fp)) != -1) {
8332 char iface[16];
8333 uint32_t dest, gw, mask;
8334 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8335 int fields;
8336
8337 fields = sscanf(line,
8338 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8339 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8340 &mask, &mtu, &window, &irtt);
8341 if (fields != 11) {
8342 continue;
8343 }
8344 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8345 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8346 metric, tswap32(mask), mtu, window, irtt);
8347 }
8348
8349 free(line);
8350 fclose(fp);
8351
8352 return 0;
8353 }
8354 #endif
8355
8356 #if defined(TARGET_SPARC)
8357 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8358 {
8359 dprintf(fd, "type\t\t: sun4u\n");
8360 return 0;
8361 }
8362 #endif
8363
8364 #if defined(TARGET_HPPA)
8365 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8366 {
8367 int i, num_cpus;
8368
8369 num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8370 for (i = 0; i < num_cpus; i++) {
8371 dprintf(fd, "processor\t: %d\n", i);
8372 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8373 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8374 dprintf(fd, "capabilities\t: os32\n");
8375 dprintf(fd, "model\t\t: 9000/778/B160L - "
8376 "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8377 }
8378 return 0;
8379 }
8380 #endif
8381
8382 #if defined(TARGET_RISCV)
8383 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8384 {
8385 int i;
8386 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8387 RISCVCPU *cpu = env_archcpu(cpu_env);
8388 const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
8389 char *isa_string = riscv_isa_string(cpu);
8390 const char *mmu;
8391
8392 if (cfg->mmu) {
8393 mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48";
8394 } else {
8395 mmu = "none";
8396 }
8397
8398 for (i = 0; i < num_cpus; i++) {
8399 dprintf(fd, "processor\t: %d\n", i);
8400 dprintf(fd, "hart\t\t: %d\n", i);
8401 dprintf(fd, "isa\t\t: %s\n", isa_string);
8402 dprintf(fd, "mmu\t\t: %s\n", mmu);
8403 dprintf(fd, "uarch\t\t: qemu\n\n");
8404 }
8405
8406 g_free(isa_string);
8407 return 0;
8408 }
8409 #endif
8410
8411 #if defined(TARGET_S390X)
8412 /*
8413 * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8414 * show in /proc/cpuinfo.
8415 *
8416 * Skip the following in order to match the missing support in op_ecag():
8417 * - show_cacheinfo().
8418 * - show_cpu_topology().
8419 * - show_cpu_mhz().
8420 *
8421 * Use fixed values for certain fields:
8422 * - bogomips per cpu - from a qemu-system-s390x run.
8423 * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8424 *
8425 * Keep the code structure close to arch/s390/kernel/processor.c.
8426 */
8427
8428 static void show_facilities(int fd)
8429 {
8430 size_t sizeof_stfl_bytes = 2048;
8431 g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
8432 unsigned int bit;
8433
8434 dprintf(fd, "facilities :");
8435 s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
8436 for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
8437 if (test_be_bit(bit, stfl_bytes)) {
8438 dprintf(fd, " %d", bit);
8439 }
8440 }
8441 dprintf(fd, "\n");
8442 }
8443
8444 static int cpu_ident(unsigned long n)
8445 {
8446 return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
8447 n);
8448 }
8449
8450 static void show_cpu_summary(CPUArchState *cpu_env, int fd)
8451 {
8452 S390CPUModel *model = env_archcpu(cpu_env)->model;
8453 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8454 uint32_t elf_hwcap = get_elf_hwcap();
8455 const char *hwcap_str;
8456 int i;
8457
8458 dprintf(fd, "vendor_id : IBM/S390\n"
8459 "# processors : %i\n"
8460 "bogomips per cpu: 13370.00\n",
8461 num_cpus);
8462 dprintf(fd, "max thread id : 0\n");
8463 dprintf(fd, "features\t: ");
8464 for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
8465 if (!(elf_hwcap & (1 << i))) {
8466 continue;
8467 }
8468 hwcap_str = elf_hwcap_str(i);
8469 if (hwcap_str) {
8470 dprintf(fd, "%s ", hwcap_str);
8471 }
8472 }
8473 dprintf(fd, "\n");
8474 show_facilities(fd);
8475 for (i = 0; i < num_cpus; i++) {
8476 dprintf(fd, "processor %d: "
8477 "version = %02X, "
8478 "identification = %06X, "
8479 "machine = %04X\n",
8480 i, model->cpu_ver, cpu_ident(i), model->def->type);
8481 }
8482 }
8483
8484 static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
8485 {
8486 S390CPUModel *model = env_archcpu(cpu_env)->model;
8487
8488 dprintf(fd, "version : %02X\n", model->cpu_ver);
8489 dprintf(fd, "identification : %06X\n", cpu_ident(n));
8490 dprintf(fd, "machine : %04X\n", model->def->type);
8491 }
8492
8493 static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
8494 {
8495 dprintf(fd, "\ncpu number : %ld\n", n);
8496 show_cpu_ids(cpu_env, fd, n);
8497 }
8498
8499 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8500 {
8501 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8502 int i;
8503
8504 show_cpu_summary(cpu_env, fd);
8505 for (i = 0; i < num_cpus; i++) {
8506 show_cpuinfo(cpu_env, fd, i);
8507 }
8508 return 0;
8509 }
8510 #endif
8511
8512 #if defined(TARGET_M68K)
8513 static int open_hardware(CPUArchState *cpu_env, int fd)
8514 {
8515 dprintf(fd, "Model:\t\tqemu-m68k\n");
8516 return 0;
8517 }
8518 #endif
8519
8520 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8521 int flags, mode_t mode, bool safe)
8522 {
8523 struct fake_open {
8524 const char *filename;
8525 int (*fill)(CPUArchState *cpu_env, int fd);
8526 int (*cmp)(const char *s1, const char *s2);
8527 };
8528 const struct fake_open *fake_open;
8529 static const struct fake_open fakes[] = {
8530 { "maps", open_self_maps, is_proc_myself },
8531 { "smaps", open_self_smaps, is_proc_myself },
8532 { "stat", open_self_stat, is_proc_myself },
8533 { "auxv", open_self_auxv, is_proc_myself },
8534 { "cmdline", open_self_cmdline, is_proc_myself },
8535 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8536 { "/proc/net/route", open_net_route, is_proc },
8537 #endif
8538 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
8539 defined(TARGET_RISCV) || defined(TARGET_S390X)
8540 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8541 #endif
8542 #if defined(TARGET_M68K)
8543 { "/proc/hardware", open_hardware, is_proc },
8544 #endif
8545 { NULL, NULL, NULL }
8546 };
8547
8548 if (is_proc_myself(pathname, "exe")) {
8549 if (safe) {
8550 return safe_openat(dirfd, exec_path, flags, mode);
8551 } else {
8552 return openat(dirfd, exec_path, flags, mode);
8553 }
8554 }
8555
8556 for (fake_open = fakes; fake_open->filename; fake_open++) {
8557 if (fake_open->cmp(pathname, fake_open->filename)) {
8558 break;
8559 }
8560 }
8561
8562 if (fake_open->filename) {
8563 const char *tmpdir;
8564 char filename[PATH_MAX];
8565 int fd, r;
8566
8567 fd = memfd_create("qemu-open", 0);
8568 if (fd < 0) {
8569 if (errno != ENOSYS) {
8570 return fd;
8571 }
8572 /* create temporary file to map stat to */
8573 tmpdir = getenv("TMPDIR");
8574 if (!tmpdir)
8575 tmpdir = "/tmp";
8576 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8577 fd = mkstemp(filename);
8578 if (fd < 0) {
8579 return fd;
8580 }
8581 unlink(filename);
8582 }
8583
8584 if ((r = fake_open->fill(cpu_env, fd))) {
8585 int e = errno;
8586 close(fd);
8587 errno = e;
8588 return r;
8589 }
8590 lseek(fd, 0, SEEK_SET);
8591
8592 return fd;
8593 }
8594
8595 if (safe) {
8596 return safe_openat(dirfd, path(pathname), flags, mode);
8597 } else {
8598 return openat(dirfd, path(pathname), flags, mode);
8599 }
8600 }
8601
8602 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8603 {
8604 ssize_t ret;
8605
8606 if (!pathname || !buf) {
8607 errno = EFAULT;
8608 return -1;
8609 }
8610
8611 if (!bufsiz) {
8612 /* Short circuit this for the magic exe check. */
8613 errno = EINVAL;
8614 return -1;
8615 }
8616
8617 if (is_proc_myself((const char *)pathname, "exe")) {
8618 /*
8619 * Don't worry about sign mismatch as earlier mapping
8620 * logic would have thrown a bad address error.
8621 */
8622 ret = MIN(strlen(exec_path), bufsiz);
8623 /* We cannot NUL terminate the string. */
8624 memcpy(buf, exec_path, ret);
8625 } else {
8626 ret = readlink(path(pathname), buf, bufsiz);
8627 }
8628
8629 return ret;
8630 }
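/*
 * Sketch (illustrative): the special case above means a guest running
 *
 *     char buf[PATH_MAX];
 *     ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf));
 *
 * sees QEMU's idea of its executable (exec_path) rather than the path of
 * the qemu binary itself, with the usual readlink(2) no-NUL semantics.
 */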
8631
8632 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8633 abi_long pathname, abi_long guest_argp,
8634 abi_long guest_envp, int flags)
8635 {
8636 int ret;
8637 char **argp, **envp;
8638 int argc, envc;
8639 abi_ulong gp;
8640 abi_ulong addr;
8641 char **q;
8642 void *p;
8643
8644 argc = 0;
8645
8646 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8647 if (get_user_ual(addr, gp)) {
8648 return -TARGET_EFAULT;
8649 }
8650 if (!addr) {
8651 break;
8652 }
8653 argc++;
8654 }
8655 envc = 0;
8656 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8657 if (get_user_ual(addr, gp)) {
8658 return -TARGET_EFAULT;
8659 }
8660 if (!addr) {
8661 break;
8662 }
8663 envc++;
8664 }
8665
8666 argp = g_new0(char *, argc + 1);
8667 envp = g_new0(char *, envc + 1);
8668
8669 for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8670 if (get_user_ual(addr, gp)) {
8671 goto execve_efault;
8672 }
8673 if (!addr) {
8674 break;
8675 }
8676 *q = lock_user_string(addr);
8677 if (!*q) {
8678 goto execve_efault;
8679 }
8680 }
8681 *q = NULL;
8682
8683 for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8684 if (get_user_ual(addr, gp)) {
8685 goto execve_efault;
8686 }
8687 if (!addr) {
8688 break;
8689 }
8690 *q = lock_user_string(addr);
8691 if (!*q) {
8692 goto execve_efault;
8693 }
8694 }
8695 *q = NULL;
8696
8697 /*
8698 * Although execve() is not an interruptible syscall it is
8699 * a special case where we must use the safe_syscall wrapper:
8700 * if we allow a signal to happen before we make the host
8701 * syscall then we will 'lose' it, because at the point of
8702 * execve the process leaves QEMU's control. So we use the
8703 * safe syscall wrapper to ensure that we either take the
8704 * signal as a guest signal, or else it does not happen
8705 * before the execve completes and makes it the other
8706 * program's problem.
8707 */
8708 p = lock_user_string(pathname);
8709 if (!p) {
8710 goto execve_efault;
8711 }
8712
8713 if (is_proc_myself(p, "exe")) {
8714 ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8715 } else {
8716 ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8717 }
8718
8719 unlock_user(p, pathname, 0);
8720
8721 goto execve_end;
8722
8723 execve_efault:
8724 ret = -TARGET_EFAULT;
8725
8726 execve_end:
8727 for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8728 if (get_user_ual(addr, gp) || !addr) {
8729 break;
8730 }
8731 unlock_user(*q, addr, 0);
8732 }
8733 for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8734 if (get_user_ual(addr, gp) || !addr) {
8735 break;
8736 }
8737 unlock_user(*q, addr, 0);
8738 }
8739
8740 g_free(argp);
8741 g_free(envp);
8742 return ret;
8743 }
8744
8745 #define TIMER_MAGIC 0x0caf0000
8746 #define TIMER_MAGIC_MASK 0xffff0000
8747
8748 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8749 static target_timer_t get_timer_id(abi_long arg)
8750 {
8751 target_timer_t timerid = arg;
8752
8753 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8754 return -TARGET_EINVAL;
8755 }
8756
8757 timerid &= 0xffff;
8758
8759 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8760 return -TARGET_EINVAL;
8761 }
8762
8763 return timerid;
8764 }
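/*
 * Worked example (illustrative): a guest-visible timer ID of 0x0caf0003
 * passes the magic check and maps to index 3 in g_posix_timers, while
 * 0x12340003 (wrong upper half) or an index past the end of
 * g_posix_timers yields -TARGET_EINVAL.
 */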
8765
8766 static int target_to_host_cpu_mask(unsigned long *host_mask,
8767 size_t host_size,
8768 abi_ulong target_addr,
8769 size_t target_size)
8770 {
8771 unsigned target_bits = sizeof(abi_ulong) * 8;
8772 unsigned host_bits = sizeof(*host_mask) * 8;
8773 abi_ulong *target_mask;
8774 unsigned i, j;
8775
8776 assert(host_size >= target_size);
8777
8778 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8779 if (!target_mask) {
8780 return -TARGET_EFAULT;
8781 }
8782 memset(host_mask, 0, host_size);
8783
8784 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8785 unsigned bit = i * target_bits;
8786 abi_ulong val;
8787
8788 __get_user(val, &target_mask[i]);
8789 for (j = 0; j < target_bits; j++, bit++) {
8790 if (val & (1UL << j)) {
8791 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8792 }
8793 }
8794 }
8795
8796 unlock_user(target_mask, target_addr, 0);
8797 return 0;
8798 }
8799
8800 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8801 size_t host_size,
8802 abi_ulong target_addr,
8803 size_t target_size)
8804 {
8805 unsigned target_bits = sizeof(abi_ulong) * 8;
8806 unsigned host_bits = sizeof(*host_mask) * 8;
8807 abi_ulong *target_mask;
8808 unsigned i, j;
8809
8810 assert(host_size >= target_size);
8811
8812 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8813 if (!target_mask) {
8814 return -TARGET_EFAULT;
8815 }
8816
8817 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8818 unsigned bit = i * target_bits;
8819 abi_ulong val = 0;
8820
8821 for (j = 0; j < target_bits; j++, bit++) {
8822 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8823 val |= 1UL << j;
8824 }
8825 }
8826 __put_user(val, &target_mask[i]);
8827 }
8828
8829 unlock_user(target_mask, target_addr, target_size);
8830 return 0;
8831 }
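/*
 * Worked example (illustrative): with a 32-bit guest on a 64-bit host,
 * bit j of guest word i lands at absolute CPU number i*32 + j, i.e. in
 * host word (i*32 + j) / 64 at bit position (i*32 + j) % 64; the reverse
 * conversion above repacks host bits into guest-sized words the same way.
 */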
8832
8833 #ifdef TARGET_NR_getdents
8834 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8835 {
8836 g_autofree void *hdirp = NULL;
8837 void *tdirp;
8838 int hlen, hoff, toff;
8839 int hreclen, treclen;
8840 off64_t prev_diroff = 0;
8841
8842 hdirp = g_try_malloc(count);
8843 if (!hdirp) {
8844 return -TARGET_ENOMEM;
8845 }
8846
8847 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8848 hlen = sys_getdents(dirfd, hdirp, count);
8849 #else
8850 hlen = sys_getdents64(dirfd, hdirp, count);
8851 #endif
8852
8853 hlen = get_errno(hlen);
8854 if (is_error(hlen)) {
8855 return hlen;
8856 }
8857
8858 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8859 if (!tdirp) {
8860 return -TARGET_EFAULT;
8861 }
8862
8863 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8864 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8865 struct linux_dirent *hde = hdirp + hoff;
8866 #else
8867 struct linux_dirent64 *hde = hdirp + hoff;
8868 #endif
8869 struct target_dirent *tde = tdirp + toff;
8870 int namelen;
8871 uint8_t type;
8872
8873 namelen = strlen(hde->d_name);
8874 hreclen = hde->d_reclen;
8875 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8876 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8877
8878 if (toff + treclen > count) {
8879 /*
8880 * If the host struct is smaller than the target struct, or
8881 * requires less alignment and thus packs into less space,
8882 * then the host can return more entries than we can pass
8883 * on to the guest.
8884 */
8885 if (toff == 0) {
8886 toff = -TARGET_EINVAL; /* result buffer is too small */
8887 break;
8888 }
8889 /*
8890 * Return what we have, resetting the file pointer to the
8891 * location of the first record not returned.
8892 */
8893 lseek64(dirfd, prev_diroff, SEEK_SET);
8894 break;
8895 }
8896
8897 prev_diroff = hde->d_off;
8898 tde->d_ino = tswapal(hde->d_ino);
8899 tde->d_off = tswapal(hde->d_off);
8900 tde->d_reclen = tswap16(treclen);
8901 memcpy(tde->d_name, hde->d_name, namelen + 1);
8902
8903 /*
8904 * The getdents type is in what was formerly a padding byte at the
8905 * end of the structure.
8906 */
8907 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8908 type = *((uint8_t *)hde + hreclen - 1);
8909 #else
8910 type = hde->d_type;
8911 #endif
8912 *((uint8_t *)tde + treclen - 1) = type;
8913 }
8914
8915 unlock_user(tdirp, arg2, toff);
8916 return toff;
8917 }
8918 #endif /* TARGET_NR_getdents */
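/*
 * Worked example (illustrative): for a 5-character name the target record
 * above is sized as offsetof(struct target_dirent, d_name) + 5 + 2 (one
 * byte for the NUL terminator, one trailing byte for d_type), rounded up
 * to the alignment of struct target_dirent -- mirroring how the kernel
 * lays out legacy getdents(2) records.
 */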
8919
8920 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8921 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8922 {
8923 g_autofree void *hdirp = NULL;
8924 void *tdirp;
8925 int hlen, hoff, toff;
8926 int hreclen, treclen;
8927 off64_t prev_diroff = 0;
8928
8929 hdirp = g_try_malloc(count);
8930 if (!hdirp) {
8931 return -TARGET_ENOMEM;
8932 }
8933
8934 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8935 if (is_error(hlen)) {
8936 return hlen;
8937 }
8938
8939 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8940 if (!tdirp) {
8941 return -TARGET_EFAULT;
8942 }
8943
8944 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8945 struct linux_dirent64 *hde = hdirp + hoff;
8946 struct target_dirent64 *tde = tdirp + toff;
8947 int namelen;
8948
8949 namelen = strlen(hde->d_name) + 1;
8950 hreclen = hde->d_reclen;
8951 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8952 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8953
8954 if (toff + treclen > count) {
8955 /*
8956 * If the host struct is smaller than the target struct, or
8957 * requires less alignment and thus packs into less space,
8958 * then the host can return more entries than we can pass
8959 * on to the guest.
8960 */
8961 if (toff == 0) {
8962 toff = -TARGET_EINVAL; /* result buffer is too small */
8963 break;
8964 }
8965 /*
8966 * Return what we have, resetting the file pointer to the
8967 * location of the first record not returned.
8968 */
8969 lseek64(dirfd, prev_diroff, SEEK_SET);
8970 break;
8971 }
8972
8973 prev_diroff = hde->d_off;
8974 tde->d_ino = tswap64(hde->d_ino);
8975 tde->d_off = tswap64(hde->d_off);
8976 tde->d_reclen = tswap16(treclen);
8977 tde->d_type = hde->d_type;
8978 memcpy(tde->d_name, hde->d_name, namelen);
8979 }
8980
8981 unlock_user(tdirp, arg2, toff);
8982 return toff;
8983 }
8984 #endif /* TARGET_NR_getdents64 */
8985
8986 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8987 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8988 #endif
8989
8990 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
8991 #define __NR_sys_open_tree __NR_open_tree
8992 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
8993 unsigned int, __flags)
8994 #endif
8995
8996 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
8997 #define __NR_sys_move_mount __NR_move_mount
8998 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
8999 int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9000 #endif
9001
9002 /* This is an internal helper for do_syscall; having a single return
9003  * point makes it easier to perform actions, such as logging of
9004  * syscall results, in one place.
9005  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9006  */
9007 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9008 abi_long arg2, abi_long arg3, abi_long arg4,
9009 abi_long arg5, abi_long arg6, abi_long arg7,
9010 abi_long arg8)
9011 {
9012 CPUState *cpu = env_cpu(cpu_env);
9013 abi_long ret;
9014 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9015 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9016 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9017 || defined(TARGET_NR_statx)
9018 struct stat st;
9019 #endif
9020 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9021 || defined(TARGET_NR_fstatfs)
9022 struct statfs stfs;
9023 #endif
9024 void *p;
9025
9026 switch(num) {
9027 case TARGET_NR_exit:
9028 /* In old applications this may be used to implement _exit(2).
9029    However, in threaded applications it is used for thread termination,
9030    and _exit_group is used for application termination.
9031    Do thread termination if we have more than one thread. */
9032
9033 if (block_signals()) {
9034 return -QEMU_ERESTARTSYS;
9035 }
9036
9037 pthread_mutex_lock(&clone_lock);
9038
9039 if (CPU_NEXT(first_cpu)) {
9040 TaskState *ts = cpu->opaque;
9041
9042 if (ts->child_tidptr) {
9043 put_user_u32(0, ts->child_tidptr);
9044 do_sys_futex(g2h(cpu, ts->child_tidptr),
9045 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9046 }
9047
9048 object_unparent(OBJECT(cpu));
9049 object_unref(OBJECT(cpu));
9050 /*
9051 * At this point the CPU should be unrealized and removed
9052 * from cpu lists. We can clean-up the rest of the thread
9053 * data without the lock held.
9054 */
9055
9056 pthread_mutex_unlock(&clone_lock);
9057
9058 thread_cpu = NULL;
9059 g_free(ts);
9060 rcu_unregister_thread();
9061 pthread_exit(NULL);
9062 }
9063
9064 pthread_mutex_unlock(&clone_lock);
9065 preexit_cleanup(cpu_env, arg1);
9066 _exit(arg1);
9067 return 0; /* avoid warning */
9068 case TARGET_NR_read:
9069 if (arg2 == 0 && arg3 == 0) {
9070 return get_errno(safe_read(arg1, 0, 0));
9071 } else {
9072 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9073 return -TARGET_EFAULT;
9074 ret = get_errno(safe_read(arg1, p, arg3));
9075 if (ret >= 0 &&
9076 fd_trans_host_to_target_data(arg1)) {
9077 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9078 }
9079 unlock_user(p, arg2, ret);
9080 }
9081 return ret;
9082 case TARGET_NR_write:
9083 if (arg2 == 0 && arg3 == 0) {
9084 return get_errno(safe_write(arg1, 0, 0));
9085 }
9086 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9087 return -TARGET_EFAULT;
9088 if (fd_trans_target_to_host_data(arg1)) {
9089 void *copy = g_malloc(arg3);
9090 memcpy(copy, p, arg3);
9091 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9092 if (ret >= 0) {
9093 ret = get_errno(safe_write(arg1, copy, ret));
9094 }
9095 g_free(copy);
9096 } else {
9097 ret = get_errno(safe_write(arg1, p, arg3));
9098 }
9099 unlock_user(p, arg2, 0);
9100 return ret;
9101
9102 #ifdef TARGET_NR_open
9103 case TARGET_NR_open:
9104 if (!(p = lock_user_string(arg1)))
9105 return -TARGET_EFAULT;
9106 ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9107 target_to_host_bitmask(arg2, fcntl_flags_tbl),
9108 arg3, true));
9109 fd_trans_unregister(ret);
9110 unlock_user(p, arg1, 0);
9111 return ret;
9112 #endif
9113 case TARGET_NR_openat:
9114 if (!(p = lock_user_string(arg2)))
9115 return -TARGET_EFAULT;
9116 ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9117 target_to_host_bitmask(arg3, fcntl_flags_tbl),
9118 arg4, true));
9119 fd_trans_unregister(ret);
9120 unlock_user(p, arg2, 0);
9121 return ret;
9122 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9123 case TARGET_NR_name_to_handle_at:
9124 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9125 return ret;
9126 #endif
9127 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9128 case TARGET_NR_open_by_handle_at:
9129 ret = do_open_by_handle_at(arg1, arg2, arg3);
9130 fd_trans_unregister(ret);
9131 return ret;
9132 #endif
9133 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9134 case TARGET_NR_pidfd_open:
9135 return get_errno(pidfd_open(arg1, arg2));
9136 #endif
9137 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9138 case TARGET_NR_pidfd_send_signal:
9139 {
9140 siginfo_t uinfo, *puinfo;
9141
9142 if (arg3) {
9143 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9144 if (!p) {
9145 return -TARGET_EFAULT;
9146 }
9147 target_to_host_siginfo(&uinfo, p);
9148 unlock_user(p, arg3, 0);
9149 puinfo = &uinfo;
9150 } else {
9151 puinfo = NULL;
9152 }
9153 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9154 puinfo, arg4));
9155 }
9156 return ret;
9157 #endif
9158 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9159 case TARGET_NR_pidfd_getfd:
9160 return get_errno(pidfd_getfd(arg1, arg2, arg3));
9161 #endif
9162 case TARGET_NR_close:
9163 fd_trans_unregister(arg1);
9164 return get_errno(close(arg1));
9165 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9166 case TARGET_NR_close_range:
9167 ret = get_errno(sys_close_range(arg1, arg2, arg3));
9168 if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9169 abi_long fd, maxfd;
9170 maxfd = MIN(arg2, target_fd_max);
9171 for (fd = arg1; fd < maxfd; fd++) {
9172 fd_trans_unregister(fd);
9173 }
9174 }
9175 return ret;
9176 #endif
9177
9178 case TARGET_NR_brk:
9179 return do_brk(arg1);
9180 #ifdef TARGET_NR_fork
9181 case TARGET_NR_fork:
9182 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9183 #endif
9184 #ifdef TARGET_NR_waitpid
9185 case TARGET_NR_waitpid:
9186 {
9187 int status;
9188 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9189 if (!is_error(ret) && arg2 && ret
9190 && put_user_s32(host_to_target_waitstatus(status), arg2))
9191 return -TARGET_EFAULT;
9192 }
9193 return ret;
9194 #endif
9195 #ifdef TARGET_NR_waitid
9196 case TARGET_NR_waitid:
9197 {
9198 siginfo_t info;
9199 info.si_pid = 0;
9200 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9201 if (!is_error(ret) && arg3 && info.si_pid != 0) {
9202 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9203 return -TARGET_EFAULT;
9204 host_to_target_siginfo(p, &info);
9205 unlock_user(p, arg3, sizeof(target_siginfo_t));
9206 }
9207 }
9208 return ret;
9209 #endif
9210 #ifdef TARGET_NR_creat /* not on alpha */
9211 case TARGET_NR_creat:
9212 if (!(p = lock_user_string(arg1)))
9213 return -TARGET_EFAULT;
9214 ret = get_errno(creat(p, arg2));
9215 fd_trans_unregister(ret);
9216 unlock_user(p, arg1, 0);
9217 return ret;
9218 #endif
9219 #ifdef TARGET_NR_link
9220 case TARGET_NR_link:
9221 {
9222 void * p2;
9223 p = lock_user_string(arg1);
9224 p2 = lock_user_string(arg2);
9225 if (!p || !p2)
9226 ret = -TARGET_EFAULT;
9227 else
9228 ret = get_errno(link(p, p2));
9229 unlock_user(p2, arg2, 0);
9230 unlock_user(p, arg1, 0);
9231 }
9232 return ret;
9233 #endif
9234 #if defined(TARGET_NR_linkat)
9235 case TARGET_NR_linkat:
9236 {
9237 void * p2 = NULL;
9238 if (!arg2 || !arg4)
9239 return -TARGET_EFAULT;
9240 p = lock_user_string(arg2);
9241 p2 = lock_user_string(arg4);
9242 if (!p || !p2)
9243 ret = -TARGET_EFAULT;
9244 else
9245 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9246 unlock_user(p, arg2, 0);
9247 unlock_user(p2, arg4, 0);
9248 }
9249 return ret;
9250 #endif
9251 #ifdef TARGET_NR_unlink
9252 case TARGET_NR_unlink:
9253 if (!(p = lock_user_string(arg1)))
9254 return -TARGET_EFAULT;
9255 ret = get_errno(unlink(p));
9256 unlock_user(p, arg1, 0);
9257 return ret;
9258 #endif
9259 #if defined(TARGET_NR_unlinkat)
9260 case TARGET_NR_unlinkat:
9261 if (!(p = lock_user_string(arg2)))
9262 return -TARGET_EFAULT;
9263 ret = get_errno(unlinkat(arg1, p, arg3));
9264 unlock_user(p, arg2, 0);
9265 return ret;
9266 #endif
9267 case TARGET_NR_execveat:
9268 return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
9269 case TARGET_NR_execve:
9270 return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
9271 case TARGET_NR_chdir:
9272 if (!(p = lock_user_string(arg1)))
9273 return -TARGET_EFAULT;
9274 ret = get_errno(chdir(p));
9275 unlock_user(p, arg1, 0);
9276 return ret;
9277 #ifdef TARGET_NR_time
9278 case TARGET_NR_time:
9279 {
9280 time_t host_time;
9281 ret = get_errno(time(&host_time));
9282 if (!is_error(ret)
9283 && arg1
9284 && put_user_sal(host_time, arg1))
9285 return -TARGET_EFAULT;
9286 }
9287 return ret;
9288 #endif
9289 #ifdef TARGET_NR_mknod
9290 case TARGET_NR_mknod:
9291 if (!(p = lock_user_string(arg1)))
9292 return -TARGET_EFAULT;
9293 ret = get_errno(mknod(p, arg2, arg3));
9294 unlock_user(p, arg1, 0);
9295 return ret;
9296 #endif
9297 #if defined(TARGET_NR_mknodat)
9298 case TARGET_NR_mknodat:
9299 if (!(p = lock_user_string(arg2)))
9300 return -TARGET_EFAULT;
9301 ret = get_errno(mknodat(arg1, p, arg3, arg4));
9302 unlock_user(p, arg2, 0);
9303 return ret;
9304 #endif
9305 #ifdef TARGET_NR_chmod
9306 case TARGET_NR_chmod:
9307 if (!(p = lock_user_string(arg1)))
9308 return -TARGET_EFAULT;
9309 ret = get_errno(chmod(p, arg2));
9310 unlock_user(p, arg1, 0);
9311 return ret;
9312 #endif
9313 #ifdef TARGET_NR_lseek
9314 case TARGET_NR_lseek:
9315 return get_errno(lseek(arg1, arg2, arg3));
9316 #endif
9317 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9318 /* Alpha specific */
9319 case TARGET_NR_getxpid:
9320 cpu_env->ir[IR_A4] = getppid();
9321 return get_errno(getpid());
9322 #endif
9323 #ifdef TARGET_NR_getpid
9324 case TARGET_NR_getpid:
9325 return get_errno(getpid());
9326 #endif
9327 case TARGET_NR_mount:
9328 {
9329 /* need to look at the data field */
9330 void *p2, *p3;
9331
9332 if (arg1) {
9333 p = lock_user_string(arg1);
9334 if (!p) {
9335 return -TARGET_EFAULT;
9336 }
9337 } else {
9338 p = NULL;
9339 }
9340
9341 p2 = lock_user_string(arg2);
9342 if (!p2) {
9343 if (arg1) {
9344 unlock_user(p, arg1, 0);
9345 }
9346 return -TARGET_EFAULT;
9347 }
9348
9349 if (arg3) {
9350 p3 = lock_user_string(arg3);
9351 if (!p3) {
9352 if (arg1) {
9353 unlock_user(p, arg1, 0);
9354 }
9355 unlock_user(p2, arg2, 0);
9356 return -TARGET_EFAULT;
9357 }
9358 } else {
9359 p3 = NULL;
9360 }
9361
9362 /* FIXME - arg5 should be locked, but it isn't clear how to
9363 * do that since it's not guaranteed to be a NUL-terminated
9364 * string.
9365 */
9366 if (!arg5) {
9367 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9368 } else {
9369 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9370 }
9371 ret = get_errno(ret);
9372
9373 if (arg1) {
9374 unlock_user(p, arg1, 0);
9375 }
9376 unlock_user(p2, arg2, 0);
9377 if (arg3) {
9378 unlock_user(p3, arg3, 0);
9379 }
9380 }
9381 return ret;
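/*
 * The filesystem data argument (arg5) is passed through g2h()
 * untranslated because, as the FIXME above notes, its format is
 * filesystem-specific and not necessarily a NUL-terminated string,
 * so there is no general way to lock a bounded guest region for it.
 */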
9382 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9383 #if defined(TARGET_NR_umount)
9384 case TARGET_NR_umount:
9385 #endif
9386 #if defined(TARGET_NR_oldumount)
9387 case TARGET_NR_oldumount:
9388 #endif
9389 if (!(p = lock_user_string(arg1)))
9390 return -TARGET_EFAULT;
9391 ret = get_errno(umount(p));
9392 unlock_user(p, arg1, 0);
9393 return ret;
9394 #endif
9395 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9396 case TARGET_NR_move_mount:
9397 {
9398 void *p2, *p4;
9399
9400 if (!arg2 || !arg4) {
9401 return -TARGET_EFAULT;
9402 }
9403
9404 p2 = lock_user_string(arg2);
9405 if (!p2) {
9406 return -TARGET_EFAULT;
9407 }
9408
9409 p4 = lock_user_string(arg4);
9410 if (!p4) {
9411 unlock_user(p2, arg2, 0);
9412 return -TARGET_EFAULT;
9413 }
9414 ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9415
9416 unlock_user(p2, arg2, 0);
9417 unlock_user(p4, arg4, 0);
9418
9419 return ret;
9420 }
9421 #endif
9422 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9423 case TARGET_NR_open_tree:
9424 {
9425 void *p2;
9426 int host_flags;
9427
9428 if (!arg2) {
9429 return -TARGET_EFAULT;
9430 }
9431
9432 p2 = lock_user_string(arg2);
9433 if (!p2) {
9434 return -TARGET_EFAULT;
9435 }
9436
9437 host_flags = arg3 & ~TARGET_O_CLOEXEC;
9438 if (arg3 & TARGET_O_CLOEXEC) {
9439 host_flags |= O_CLOEXEC;
9440 }
9441
9442 ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9443
9444 unlock_user(p2, arg2, 0);
9445
9446 return ret;
9447 }
9448 #endif
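/*
 * Only O_CLOEXEC is translated by hand here, presumably because the
 * remaining OPEN_TREE_CLONE/AT_* flag values are identical across
 * Linux ABIs and can be passed straight through to the host.
 */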
9449 #ifdef TARGET_NR_stime /* not on alpha */
9450 case TARGET_NR_stime:
9451 {
9452 struct timespec ts;
9453 ts.tv_nsec = 0;
9454 if (get_user_sal(ts.tv_sec, arg1)) {
9455 return -TARGET_EFAULT;
9456 }
9457 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9458 }
9459 #endif
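/*
 * stime is emulated via clock_settime(CLOCK_REALTIME, ...) rather
 * than a host stime() call, which modern glibc no longer provides;
 * only the seconds are read from the guest, with tv_nsec held at 0.
 */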
9460 #ifdef TARGET_NR_alarm /* not on alpha */
9461 case TARGET_NR_alarm:
9462 return alarm(arg1);
9463 #endif
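/*
 * alarm() cannot fail: it returns the seconds remaining on any
 * previously scheduled alarm, which is why the result is returned
 * directly without the usual get_errno() wrapping.
 */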
9464 #ifdef TARGET_NR_pause /* not on alpha */
9465 case TARGET_NR_pause:
9466 if (!block_signals()) {
9467 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9468 }
9469 return -TARGET_EINTR;
9470 #endif
9471 #ifdef TARGET_NR_utime
9472 case TARGET_NR_utime:
9473 {
9474 struct utimbuf tbuf, *host_tbuf;
9475 struct target_utimbuf *target_tbuf;
9476 if (arg2) {
9477 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9478 return -TARGET_EFAULT;
9479 tbuf.actime = tswapal(target_tbuf->actime);
9480 tbuf.modtime = tswapal(target_tbuf->modtime);
9481 unlock_user_struct(target_tbuf, arg2, 0);
9482 host_tbuf = &tbuf;
9483 } else {
9484 host_tbuf = NULL;
9485 }
9486 if (!(p = lock_user_string(arg1)))
9487 return -TARGET_EFAULT;
9488 ret = get_errno(utime(p, host_tbuf));
9489 unlock_user(p, arg1, 0);
9490 }
9491 return ret;
9492 #endif
9493 #ifdef TARGET_NR_utimes
9494 case TARGET_NR_utimes:
9495 {
9496 struct timeval *tvp, tv[2];
9497 if (arg2) {
9498 if (copy_from_user_timeval(&tv[0], arg2)
9499 || copy_from_user_timeval(&tv[1],
9500 arg2 + sizeof(struct target_timeval)))
9501 return -TARGET_EFAULT;
9502 tvp = tv;
9503 } else {
9504 tvp = NULL;
9505 }
9506 if (!(p = lock_user_string(arg1)))
9507 return -TARGET_EFAULT;
9508 ret = get_errno(utimes(p, tvp));
9509 unlock_user(p, arg1, 0);
9510 }
9511 return ret;
9512 #endif
9513 #if defined(TARGET_NR_futimesat)
9514 case TARGET_NR_futimesat:
9515 {
9516 struct timeval *tvp, tv[2];
9517 if (arg3) {
9518 if (copy_from_user_timeval(&tv[0], arg3)
9519 || copy_from_user_timeval(&tv[1],
9520 arg3 + sizeof(struct target_timeval)))
9521 return -TARGET_EFAULT;
9522 tvp = tv;
9523 } else {
9524 tvp = NULL;
9525 }
9526 if (!(p = lock_user_string(arg2))) {
9527 return -TARGET_EFAULT;
9528 }
9529 ret = get_errno(futimesat(arg1, path(p), tvp));
9530 unlock_user(p, arg2, 0);
9531 }
9532 return ret;
9533 #endif
9534 #ifdef TARGET_NR_access
9535 case TARGET_NR_access:
9536 if (!(p = lock_user_string(arg1))) {
9537 return -TARGET_EFAULT;
9538 }
9539 ret = get_errno(access(path(p), arg2));
9540 unlock_user(p, arg1, 0);
9541 return ret;
9542 #endif
9543 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9544 case TARGET_NR_faccessat:
9545 if (!(p = lock_user_string(arg2))) {
9546 return -TARGET_EFAULT;
9547 }
9548 ret = get_errno(faccessat(arg1, p, arg3, 0));
9549 unlock_user(p, arg2, 0);
9550 return ret;
9551 #endif
9552 #if defined(TARGET_NR_faccessat2)
9553 case TARGET_NR_faccessat2:
9554 if (!(p = lock_user_string(arg2))) {
9555 return -TARGET_EFAULT;
9556 }
9557 ret = get_errno(faccessat(arg1, p, arg3, arg4));
9558 unlock_user(p, arg2, 0);
9559 return ret;
9560 #endif
9561 #ifdef TARGET_NR_nice /* not on alpha */
9562 case TARGET_NR_nice:
9563 return get_errno(nice(arg1));
9564 #endif
9565 case TARGET_NR_sync:
9566 sync();
9567 return 0;
9568 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9569 case TARGET_NR_syncfs:
9570 return get_errno(syncfs(arg1));
9571 #endif
9572 case TARGET_NR_kill:
9573 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9574 #ifdef TARGET_NR_rename
9575 case TARGET_NR_rename:
9576 {
9577 void *p2;
9578 p = lock_user_string(arg1);
9579 p2 = lock_user_string(arg2);
9580 if (!p || !p2)
9581 ret = -TARGET_EFAULT;
9582 else
9583 ret = get_errno(rename(p, p2));
9584 unlock_user(p2, arg2, 0);
9585 unlock_user(p, arg1, 0);
9586 }
9587 return ret;
9588 #endif
9589 #if defined(TARGET_NR_renameat)
9590 case TARGET_NR_renameat:
9591 {
9592 void *p2;
9593 p = lock_user_string(arg2);
9594 p2 = lock_user_string(arg4);
9595 if (!p || !p2)
9596 ret = -TARGET_EFAULT;
9597 else
9598 ret = get_errno(renameat(arg1, p, arg3, p2));
9599 unlock_user(p2, arg4, 0);
9600 unlock_user(p, arg2, 0);
9601 }
9602 return ret;
9603 #endif
9604 #if defined(TARGET_NR_renameat2)
9605 case TARGET_NR_renameat2:
9606 {
9607 void *p2;
9608 p = lock_user_string(arg2);
9609 p2 = lock_user_string(arg4);
9610 if (!p || !p2) {
9611 ret = -TARGET_EFAULT;
9612 } else {
9613 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9614 }
9615 unlock_user(p2, arg4, 0);
9616 unlock_user(p, arg2, 0);
9617 }
9618 return ret;
9619 #endif
9620 #ifdef TARGET_NR_mkdir
9621 case TARGET_NR_mkdir:
9622 if (!(p = lock_user_string(arg1)))
9623 return -TARGET_EFAULT;
9624 ret = get_errno(mkdir(p, arg2));
9625 unlock_user(p, arg1, 0);
9626 return ret;
9627 #endif
9628 #if defined(TARGET_NR_mkdirat)
9629 case TARGET_NR_mkdirat:
9630 if (!(p = lock_user_string(arg2)))
9631 return -TARGET_EFAULT;
9632 ret = get_errno(mkdirat(arg1, p, arg3));
9633 unlock_user(p, arg2, 0);
9634 return ret;
9635 #endif
9636 #ifdef TARGET_NR_rmdir
9637 case TARGET_NR_rmdir:
9638 if (!(p = lock_user_string(arg1)))
9639 return -TARGET_EFAULT;
9640 ret = get_errno(rmdir(p));
9641 unlock_user(p, arg1, 0);
9642 return ret;
9643 #endif
9644 case TARGET_NR_dup:
9645 ret = get_errno(dup(arg1));
9646 if (ret >= 0) {
9647 fd_trans_dup(arg1, ret);
9648 }
9649 return ret;
9650 #ifdef TARGET_NR_pipe
9651 case TARGET_NR_pipe:
9652 return do_pipe(cpu_env, arg1, 0, 0);
9653 #endif
9654 #ifdef TARGET_NR_pipe2
9655 case TARGET_NR_pipe2:
9656 return do_pipe(cpu_env, arg1,
9657 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9658 #endif
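/*
 * pipe and pipe2 share do_pipe(); for pipe2 the guest flag bits are
 * converted through fcntl_flags_tbl because values such as O_NONBLOCK
 * differ between target ABIs. An illustrative guest-side call (not
 * emulator code):
 *
 *   int fds[2];
 *   pipe2(fds, O_CLOEXEC | O_NONBLOCK);
 */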
9659 case TARGET_NR_times:
9660 {
9661 struct target_tms *tmsp;
9662 struct tms tms;
9663 ret = get_errno(times(&tms));
9664 if (arg1) {
9665 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9666 if (!tmsp)
9667 return -TARGET_EFAULT;
9668 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9669 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9670 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9671 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9672 }
9673 if (!is_error(ret))
9674 ret = host_to_target_clock_t(ret);
9675 }
9676 return ret;
9677 case TARGET_NR_acct:
9678 if (arg1 == 0) {
9679 ret = get_errno(acct(NULL));
9680 } else {
9681 if (!(p = lock_user_string(arg1))) {
9682 return -TARGET_EFAULT;
9683 }
9684 ret = get_errno(acct(path(p)));
9685 unlock_user(p, arg1, 0);
9686 }
9687 return ret;
9688 #ifdef TARGET_NR_umount2
9689 case TARGET_NR_umount2:
9690 if (!(p = lock_user_string(arg1)))
9691 return -TARGET_EFAULT;
9692 ret = get_errno(umount2(p, arg2));
9693 unlock_user(p, arg1, 0);
9694 return ret;
9695 #endif
9696 case TARGET_NR_ioctl:
9697 return do_ioctl(arg1, arg2, arg3);
9698 #ifdef TARGET_NR_fcntl
9699 case TARGET_NR_fcntl:
9700 return do_fcntl(arg1, arg2, arg3);
9701 #endif
9702 case TARGET_NR_setpgid:
9703 return get_errno(setpgid(arg1, arg2));
9704 case TARGET_NR_umask:
9705 return get_errno(umask(arg1));
9706 case TARGET_NR_chroot:
9707 if (!(p = lock_user_string(arg1)))
9708 return -TARGET_EFAULT;
9709 ret = get_errno(chroot(p));
9710 unlock_user(p, arg1, 0);
9711 return ret;
9712 #ifdef TARGET_NR_dup2
9713 case TARGET_NR_dup2:
9714 ret = get_errno(dup2(arg1, arg2));
9715 if (ret >= 0) {
9716 fd_trans_dup(arg1, arg2);
9717 }
9718 return ret;
9719 #endif
9720 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9721 case TARGET_NR_dup3:
9722 {
9723 int host_flags;
9724
9725 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9726 return -EINVAL;
9727 }
9728 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9729 ret = get_errno(dup3(arg1, arg2, host_flags));
9730 if (ret >= 0) {
9731 fd_trans_dup(arg1, arg2);
9732 }
9733 return ret;
9734 }
9735 #endif
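/*
 * All of the dup variants above call fd_trans_dup() on success so
 * that any per-fd data translator (used for descriptors such as
 * signalfd or netlink sockets) follows the descriptor to its new
 * number.
 */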
9736 #ifdef TARGET_NR_getppid /* not on alpha */
9737 case TARGET_NR_getppid:
9738 return get_errno(getppid());
9739 #endif
9740 #ifdef TARGET_NR_getpgrp
9741 case TARGET_NR_getpgrp:
9742 return get_errno(getpgrp());
9743 #endif
9744 case TARGET_NR_setsid:
9745 return get_errno(setsid());
9746 #ifdef TARGET_NR_sigaction
9747 case TARGET_NR_sigaction:
9748 {
9749 #if defined(TARGET_MIPS)
9750 struct target_sigaction act, oact, *pact, *old_act;
9751
9752 if (arg2) {
9753 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9754 return -TARGET_EFAULT;
9755 act._sa_handler = old_act->_sa_handler;
9756 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9757 act.sa_flags = old_act->sa_flags;
9758 unlock_user_struct(old_act, arg2, 0);
9759 pact = &act;
9760 } else {
9761 pact = NULL;
9762 }
9763
9764 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9765
9766 if (!is_error(ret) && arg3) {
9767 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9768 return -TARGET_EFAULT;
9769 old_act->_sa_handler = oact._sa_handler;
9770 old_act->sa_flags = oact.sa_flags;
9771 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9772 old_act->sa_mask.sig[1] = 0;
9773 old_act->sa_mask.sig[2] = 0;
9774 old_act->sa_mask.sig[3] = 0;
9775 unlock_user_struct(old_act, arg3, 1);
9776 }
9777 #else
9778 struct target_old_sigaction *old_act;
9779 struct target_sigaction act, oact, *pact;
9780 if (arg2) {
9781 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9782 return -TARGET_EFAULT;
9783 act._sa_handler = old_act->_sa_handler;
9784 target_siginitset(&act.sa_mask, old_act->sa_mask);
9785 act.sa_flags = old_act->sa_flags;
9786 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9787 act.sa_restorer = old_act->sa_restorer;
9788 #endif
9789 unlock_user_struct(old_act, arg2, 0);
9790 pact = &act;
9791 } else {
9792 pact = NULL;
9793 }
9794 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9795 if (!is_error(ret) && arg3) {
9796 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9797 return -TARGET_EFAULT;
9798 old_act->_sa_handler = oact._sa_handler;
9799 old_act->sa_mask = oact.sa_mask.sig[0];
9800 old_act->sa_flags = oact.sa_flags;
9801 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9802 old_act->sa_restorer = oact.sa_restorer;
9803 #endif
9804 unlock_user_struct(old_act, arg3, 1);
9805 }
9806 #endif
9807 }
9808 return ret;
9809 #endif
9810 case TARGET_NR_rt_sigaction:
9811 {
9812 /*
9813 * For Alpha and SPARC this is a 5 argument syscall, with
9814 * a 'restorer' parameter which must be copied into the
9815 * sa_restorer field of the sigaction struct.
9816 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9817 * and arg5 is the sigsetsize.
9818 */
9819 #if defined(TARGET_ALPHA)
9820 target_ulong sigsetsize = arg4;
9821 target_ulong restorer = arg5;
9822 #elif defined(TARGET_SPARC)
9823 target_ulong restorer = arg4;
9824 target_ulong sigsetsize = arg5;
9825 #else
9826 target_ulong sigsetsize = arg4;
9827 target_ulong restorer = 0;
9828 #endif
9829 struct target_sigaction *act = NULL;
9830 struct target_sigaction *oact = NULL;
9831
9832 if (sigsetsize != sizeof(target_sigset_t)) {
9833 return -TARGET_EINVAL;
9834 }
9835 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9836 return -TARGET_EFAULT;
9837 }
9838 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9839 ret = -TARGET_EFAULT;
9840 } else {
9841 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9842 if (oact) {
9843 unlock_user_struct(oact, arg3, 1);
9844 }
9845 }
9846 if (act) {
9847 unlock_user_struct(act, arg2, 0);
9848 }
9849 }
9850 return ret;
9851 #ifdef TARGET_NR_sgetmask /* not on alpha */
9852 case TARGET_NR_sgetmask:
9853 {
9854 sigset_t cur_set;
9855 abi_ulong target_set;
9856 ret = do_sigprocmask(0, NULL, &cur_set);
9857 if (!ret) {
9858 host_to_target_old_sigset(&target_set, &cur_set);
9859 ret = target_set;
9860 }
9861 }
9862 return ret;
9863 #endif
9864 #ifdef TARGET_NR_ssetmask /* not on alpha */
9865 case TARGET_NR_ssetmask:
9866 {
9867 sigset_t set, oset;
9868 abi_ulong target_set = arg1;
9869 target_to_host_old_sigset(&set, &target_set);
9870 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9871 if (!ret) {
9872 host_to_target_old_sigset(&target_set, &oset);
9873 ret = target_set;
9874 }
9875 }
9876 return ret;
9877 #endif
9878 #ifdef TARGET_NR_sigprocmask
9879 case TARGET_NR_sigprocmask:
9880 {
9881 #if defined(TARGET_ALPHA)
9882 sigset_t set, oldset;
9883 abi_ulong mask;
9884 int how;
9885
9886 switch (arg1) {
9887 case TARGET_SIG_BLOCK:
9888 how = SIG_BLOCK;
9889 break;
9890 case TARGET_SIG_UNBLOCK:
9891 how = SIG_UNBLOCK;
9892 break;
9893 case TARGET_SIG_SETMASK:
9894 how = SIG_SETMASK;
9895 break;
9896 default:
9897 return -TARGET_EINVAL;
9898 }
9899 mask = arg2;
9900 target_to_host_old_sigset(&set, &mask);
9901
9902 ret = do_sigprocmask(how, &set, &oldset);
9903 if (!is_error(ret)) {
9904 host_to_target_old_sigset(&mask, &oldset);
9905 ret = mask;
9906 cpu_env->ir[IR_V0] = 0; /* force no error */
9907 }
9908 #else
9909 sigset_t set, oldset, *set_ptr;
9910 int how;
9911
9912 if (arg2) {
9913 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9914 if (!p) {
9915 return -TARGET_EFAULT;
9916 }
9917 target_to_host_old_sigset(&set, p);
9918 unlock_user(p, arg2, 0);
9919 set_ptr = &set;
9920 switch (arg1) {
9921 case TARGET_SIG_BLOCK:
9922 how = SIG_BLOCK;
9923 break;
9924 case TARGET_SIG_UNBLOCK:
9925 how = SIG_UNBLOCK;
9926 break;
9927 case TARGET_SIG_SETMASK:
9928 how = SIG_SETMASK;
9929 break;
9930 default:
9931 return -TARGET_EINVAL;
9932 }
9933 } else {
9934 how = 0;
9935 set_ptr = NULL;
9936 }
9937 ret = do_sigprocmask(how, set_ptr, &oldset);
9938 if (!is_error(ret) && arg3) {
9939 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9940 return -TARGET_EFAULT;
9941 host_to_target_old_sigset(p, &oldset);
9942 unlock_user(p, arg3, sizeof(target_sigset_t));
9943 }
9944 #endif
9945 }
9946 return ret;
9947 #endif
9948 case TARGET_NR_rt_sigprocmask:
9949 {
9950 int how = arg1;
9951 sigset_t set, oldset, *set_ptr;
9952
9953 if (arg4 != sizeof(target_sigset_t)) {
9954 return -TARGET_EINVAL;
9955 }
9956
9957 if (arg2) {
9958 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9959 if (!p) {
9960 return -TARGET_EFAULT;
9961 }
9962 target_to_host_sigset(&set, p);
9963 unlock_user(p, arg2, 0);
9964 set_ptr = &set;
9965 switch(how) {
9966 case TARGET_SIG_BLOCK:
9967 how = SIG_BLOCK;
9968 break;
9969 case TARGET_SIG_UNBLOCK:
9970 how = SIG_UNBLOCK;
9971 break;
9972 case TARGET_SIG_SETMASK:
9973 how = SIG_SETMASK;
9974 break;
9975 default:
9976 return -TARGET_EINVAL;
9977 }
9978 } else {
9979 how = 0;
9980 set_ptr = NULL;
9981 }
9982 ret = do_sigprocmask(how, set_ptr, &oldset);
9983 if (!is_error(ret) && arg3) {
9984 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9985 return -TARGET_EFAULT;
9986 host_to_target_sigset(p, &oldset);
9987 unlock_user(p, arg3, sizeof(target_sigset_t));
9988 }
9989 }
9990 return ret;
9991 #ifdef TARGET_NR_sigpending
9992 case TARGET_NR_sigpending:
9993 {
9994 sigset_t set;
9995 ret = get_errno(sigpending(&set));
9996 if (!is_error(ret)) {
9997 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9998 return -TARGET_EFAULT;
9999 host_to_target_old_sigset(p, &set);
10000 unlock_user(p, arg1, sizeof(target_sigset_t));
10001 }
10002 }
10003 return ret;
10004 #endif
10005 case TARGET_NR_rt_sigpending:
10006 {
10007 sigset_t set;
10008
10009 /* Yes, this check is >, not != like most. We follow the kernel's
10010 * logic here: the kernel implements NR_sigpending via the same
10011 * code path, and in that case the old_sigset_t it copies out is
10012 * smaller in size.
10013 */
10014 if (arg2 > sizeof(target_sigset_t)) {
10015 return -TARGET_EINVAL;
10016 }
10017
10018 ret = get_errno(sigpending(&set));
10019 if (!is_error(ret)) {
10020 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10021 return -TARGET_EFAULT;
10022 host_to_target_sigset(p, &set);
10023 unlock_user(p, arg1, sizeof(target_sigset_t));
10024 }
10025 }
10026 return ret;
10027 #ifdef TARGET_NR_sigsuspend
10028 case TARGET_NR_sigsuspend:
10029 {
10030 sigset_t *set;
10031
10032 #if defined(TARGET_ALPHA)
10033 TaskState *ts = cpu->opaque;
10034 /* target_to_host_old_sigset will bswap back */
10035 abi_ulong mask = tswapal(arg1);
10036 set = &ts->sigsuspend_mask;
10037 target_to_host_old_sigset(set, &mask);
10038 #else
10039 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10040 if (ret != 0) {
10041 return ret;
10042 }
10043 #endif
10044 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10045 finish_sigsuspend_mask(ret);
10046 }
10047 return ret;
10048 #endif
10049 case TARGET_NR_rt_sigsuspend:
10050 {
10051 sigset_t *set;
10052
10053 ret = process_sigsuspend_mask(&set, arg1, arg2);
10054 if (ret != 0) {
10055 return ret;
10056 }
10057 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10058 finish_sigsuspend_mask(ret);
10059 }
10060 return ret;
10061 #ifdef TARGET_NR_rt_sigtimedwait
10062 case TARGET_NR_rt_sigtimedwait:
10063 {
10064 sigset_t set;
10065 struct timespec uts, *puts;
10066 siginfo_t uinfo;
10067
10068 if (arg4 != sizeof(target_sigset_t)) {
10069 return -TARGET_EINVAL;
10070 }
10071
10072 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10073 return -TARGET_EFAULT;
10074 target_to_host_sigset(&set, p);
10075 unlock_user(p, arg1, 0);
10076 if (arg3) {
10077 puts = &uts;
10078 if (target_to_host_timespec(puts, arg3)) {
10079 return -TARGET_EFAULT;
10080 }
10081 } else {
10082 puts = NULL;
10083 }
10084 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10085 SIGSET_T_SIZE));
10086 if (!is_error(ret)) {
10087 if (arg2) {
10088 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10089 0);
10090 if (!p) {
10091 return -TARGET_EFAULT;
10092 }
10093 host_to_target_siginfo(p, &uinfo);
10094 unlock_user(p, arg2, sizeof(target_siginfo_t));
10095 }
10096 ret = host_to_target_signal(ret);
10097 }
10098 }
10099 return ret;
10100 #endif
10101 #ifdef TARGET_NR_rt_sigtimedwait_time64
10102 case TARGET_NR_rt_sigtimedwait_time64:
10103 {
10104 sigset_t set;
10105 struct timespec uts, *puts;
10106 siginfo_t uinfo;
10107
10108 if (arg4 != sizeof(target_sigset_t)) {
10109 return -TARGET_EINVAL;
10110 }
10111
10112 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10113 if (!p) {
10114 return -TARGET_EFAULT;
10115 }
10116 target_to_host_sigset(&set, p);
10117 unlock_user(p, arg1, 0);
10118 if (arg3) {
10119 puts = &uts;
10120 if (target_to_host_timespec64(puts, arg3)) {
10121 return -TARGET_EFAULT;
10122 }
10123 } else {
10124 puts = NULL;
10125 }
10126 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10127 SIGSET_T_SIZE));
10128 if (!is_error(ret)) {
10129 if (arg2) {
10130 p = lock_user(VERIFY_WRITE, arg2,
10131 sizeof(target_siginfo_t), 0);
10132 if (!p) {
10133 return -TARGET_EFAULT;
10134 }
10135 host_to_target_siginfo(p, &uinfo);
10136 unlock_user(p, arg2, sizeof(target_siginfo_t));
10137 }
10138 ret = host_to_target_signal(ret);
10139 }
10140 }
10141 return ret;
10142 #endif
10143 case TARGET_NR_rt_sigqueueinfo:
10144 {
10145 siginfo_t uinfo;
10146
10147 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10148 if (!p) {
10149 return -TARGET_EFAULT;
10150 }
10151 target_to_host_siginfo(&uinfo, p);
10152 unlock_user(p, arg3, 0);
10153 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10154 }
10155 return ret;
10156 case TARGET_NR_rt_tgsigqueueinfo:
10157 {
10158 siginfo_t uinfo;
10159
10160 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10161 if (!p) {
10162 return -TARGET_EFAULT;
10163 }
10164 target_to_host_siginfo(&uinfo, p);
10165 unlock_user(p, arg4, 0);
10166 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10167 }
10168 return ret;
10169 #ifdef TARGET_NR_sigreturn
10170 case TARGET_NR_sigreturn:
10171 if (block_signals()) {
10172 return -QEMU_ERESTARTSYS;
10173 }
10174 return do_sigreturn(cpu_env);
10175 #endif
10176 case TARGET_NR_rt_sigreturn:
10177 if (block_signals()) {
10178 return -QEMU_ERESTARTSYS;
10179 }
10180 return do_rt_sigreturn(cpu_env);
10181 case TARGET_NR_sethostname:
10182 if (!(p = lock_user_string(arg1)))
10183 return -TARGET_EFAULT;
10184 ret = get_errno(sethostname(p, arg2));
10185 unlock_user(p, arg1, 0);
10186 return ret;
10187 #ifdef TARGET_NR_setrlimit
10188 case TARGET_NR_setrlimit:
10189 {
10190 int resource = target_to_host_resource(arg1);
10191 struct target_rlimit *target_rlim;
10192 struct rlimit rlim;
10193 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10194 return -TARGET_EFAULT;
10195 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10196 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10197 unlock_user_struct(target_rlim, arg2, 0);
10198 /*
10199 * If we just passed through resource limit settings for memory then
10200 * they would also apply to QEMU's own allocations, and QEMU will
10201 * crash or hang or die if its allocations fail. Ideally we would
10202 * track the guest allocations in QEMU and apply the limits ourselves.
10203 * For now, just tell the guest the call succeeded but don't actually
10204 * limit anything.
10205 */
10206 if (resource != RLIMIT_AS &&
10207 resource != RLIMIT_DATA &&
10208 resource != RLIMIT_STACK) {
10209 return get_errno(setrlimit(resource, &rlim));
10210 } else {
10211 return 0;
10212 }
10213 }
10214 #endif
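/*
 * A consequence of the above: a guest issuing, say,
 *
 *   struct rlimit rl = { 0, 0 };
 *   setrlimit(RLIMIT_AS, &rl);
 *
 * sees the call succeed while QEMU's own address space stays
 * unrestricted; only non-memory resources are forwarded to the host.
 */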
10215 #ifdef TARGET_NR_getrlimit
10216 case TARGET_NR_getrlimit:
10217 {
10218 int resource = target_to_host_resource(arg1);
10219 struct target_rlimit *target_rlim;
10220 struct rlimit rlim;
10221
10222 ret = get_errno(getrlimit(resource, &rlim));
10223 if (!is_error(ret)) {
10224 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10225 return -TARGET_EFAULT;
10226 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10227 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10228 unlock_user_struct(target_rlim, arg2, 1);
10229 }
10230 }
10231 return ret;
10232 #endif
10233 case TARGET_NR_getrusage:
10234 {
10235 struct rusage rusage;
10236 ret = get_errno(getrusage(arg1, &rusage));
10237 if (!is_error(ret)) {
10238 ret = host_to_target_rusage(arg2, &rusage);
10239 }
10240 }
10241 return ret;
10242 #if defined(TARGET_NR_gettimeofday)
10243 case TARGET_NR_gettimeofday:
10244 {
10245 struct timeval tv;
10246 struct timezone tz;
10247
10248 ret = get_errno(gettimeofday(&tv, &tz));
10249 if (!is_error(ret)) {
10250 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10251 return -TARGET_EFAULT;
10252 }
10253 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10254 return -TARGET_EFAULT;
10255 }
10256 }
10257 }
10258 return ret;
10259 #endif
10260 #if defined(TARGET_NR_settimeofday)
10261 case TARGET_NR_settimeofday:
10262 {
10263 struct timeval tv, *ptv = NULL;
10264 struct timezone tz, *ptz = NULL;
10265
10266 if (arg1) {
10267 if (copy_from_user_timeval(&tv, arg1)) {
10268 return -TARGET_EFAULT;
10269 }
10270 ptv = &tv;
10271 }
10272
10273 if (arg2) {
10274 if (copy_from_user_timezone(&tz, arg2)) {
10275 return -TARGET_EFAULT;
10276 }
10277 ptz = &tz;
10278 }
10279
10280 return get_errno(settimeofday(ptv, ptz));
10281 }
10282 #endif
10283 #if defined(TARGET_NR_select)
10284 case TARGET_NR_select:
10285 #if defined(TARGET_WANT_NI_OLD_SELECT)
10286 /* Some architectures used to have old_select here
10287 * but now return -ENOSYS for it.
10288 */
10289 ret = -TARGET_ENOSYS;
10290 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10291 ret = do_old_select(arg1);
10292 #else
10293 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10294 #endif
10295 return ret;
10296 #endif
10297 #ifdef TARGET_NR_pselect6
10298 case TARGET_NR_pselect6:
10299 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10300 #endif
10301 #ifdef TARGET_NR_pselect6_time64
10302 case TARGET_NR_pselect6_time64:
10303 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10304 #endif
10305 #ifdef TARGET_NR_symlink
10306 case TARGET_NR_symlink:
10307 {
10308 void *p2;
10309 p = lock_user_string(arg1);
10310 p2 = lock_user_string(arg2);
10311 if (!p || !p2)
10312 ret = -TARGET_EFAULT;
10313 else
10314 ret = get_errno(symlink(p, p2));
10315 unlock_user(p2, arg2, 0);
10316 unlock_user(p, arg1, 0);
10317 }
10318 return ret;
10319 #endif
10320 #if defined(TARGET_NR_symlinkat)
10321 case TARGET_NR_symlinkat:
10322 {
10323 void *p2;
10324 p = lock_user_string(arg1);
10325 p2 = lock_user_string(arg3);
10326 if (!p || !p2)
10327 ret = -TARGET_EFAULT;
10328 else
10329 ret = get_errno(symlinkat(p, arg2, p2));
10330 unlock_user(p2, arg3, 0);
10331 unlock_user(p, arg1, 0);
10332 }
10333 return ret;
10334 #endif
10335 #ifdef TARGET_NR_readlink
10336 case TARGET_NR_readlink:
10337 {
10338 void *p2;
10339 p = lock_user_string(arg1);
10340 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10341 ret = get_errno(do_guest_readlink(p, p2, arg3));
10342 unlock_user(p2, arg2, ret);
10343 unlock_user(p, arg1, 0);
10344 }
10345 return ret;
10346 #endif
10347 #if defined(TARGET_NR_readlinkat)
10348 case TARGET_NR_readlinkat:
10349 {
10350 void *p2;
10351 p = lock_user_string(arg2);
10352 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10353 if (!p || !p2) {
10354 ret = -TARGET_EFAULT;
10355 } else if (!arg4) {
10356 /* Short-circuit this for the magic exe check. */
10357 ret = -TARGET_EINVAL;
10358 } else if (is_proc_myself((const char *)p, "exe")) {
10359 /*
10360 * Don't worry about sign mismatch as earlier mapping
10361 * logic would have thrown a bad address error.
10362 */
10363 ret = MIN(strlen(exec_path), arg4);
10364 /* We cannot NUL terminate the string. */
10365 memcpy(p2, exec_path, ret);
10366 } else {
10367 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10368 }
10369 unlock_user(p2, arg3, ret);
10370 unlock_user(p, arg2, 0);
10371 }
10372 return ret;
10373 #endif
10374 #ifdef TARGET_NR_swapon
10375 case TARGET_NR_swapon:
10376 if (!(p = lock_user_string(arg1)))
10377 return -TARGET_EFAULT;
10378 ret = get_errno(swapon(p, arg2));
10379 unlock_user(p, arg1, 0);
10380 return ret;
10381 #endif
10382 case TARGET_NR_reboot:
10383 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10384 /* arg4 is only valid here; it must be ignored in all other cases */
10385 p = lock_user_string(arg4);
10386 if (!p) {
10387 return -TARGET_EFAULT;
10388 }
10389 ret = get_errno(reboot(arg1, arg2, arg3, p));
10390 unlock_user(p, arg4, 0);
10391 } else {
10392 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10393 }
10394 return ret;
10395 #ifdef TARGET_NR_mmap
10396 case TARGET_NR_mmap:
10397 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10398 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10399 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10400 || defined(TARGET_S390X)
10401 {
10402 abi_ulong *v;
10403 abi_ulong v1, v2, v3, v4, v5, v6;
10404 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10405 return -TARGET_EFAULT;
10406 v1 = tswapal(v[0]);
10407 v2 = tswapal(v[1]);
10408 v3 = tswapal(v[2]);
10409 v4 = tswapal(v[3]);
10410 v5 = tswapal(v[4]);
10411 v6 = tswapal(v[5]);
10412 unlock_user(v, arg1, 0);
10413 ret = get_errno(target_mmap(v1, v2, v3,
10414 target_to_host_bitmask(v4, mmap_flags_tbl),
10415 v5, v6));
10416 }
10417 #else
10418 /* mmap pointers are always untagged */
10419 ret = get_errno(target_mmap(arg1, arg2, arg3,
10420 target_to_host_bitmask(arg4, mmap_flags_tbl),
10421 arg5,
10422 arg6));
10423 #endif
10424 return ret;
10425 #endif
10426 #ifdef TARGET_NR_mmap2
10427 case TARGET_NR_mmap2:
10428 #ifndef MMAP_SHIFT
10429 #define MMAP_SHIFT 12
10430 #endif
10431 ret = target_mmap(arg1, arg2, arg3,
10432 target_to_host_bitmask(arg4, mmap_flags_tbl),
10433 arg5, arg6 << MMAP_SHIFT);
10434 return get_errno(ret);
10435 #endif
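/*
 * For mmap2 the last argument is a page-shifted offset: arg6 counts
 * 4096-byte units, so a guest value of 3 means byte offset
 * 3 << 12 = 12288. The #ifndef allows target headers to override
 * MMAP_SHIFT if an ABI uses a different unit.
 */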
10436 case TARGET_NR_munmap:
10437 arg1 = cpu_untagged_addr(cpu, arg1);
10438 return get_errno(target_munmap(arg1, arg2));
10439 case TARGET_NR_mprotect:
10440 arg1 = cpu_untagged_addr(cpu, arg1);
10441 {
10442 TaskState *ts = cpu->opaque;
10443 /* Special hack to detect libc making the stack executable. */
10444 if ((arg3 & PROT_GROWSDOWN)
10445 && arg1 >= ts->info->stack_limit
10446 && arg1 <= ts->info->start_stack) {
10447 arg3 &= ~PROT_GROWSDOWN;
10448 arg2 = arg2 + arg1 - ts->info->stack_limit;
10449 arg1 = ts->info->stack_limit;
10450 }
10451 }
10452 return get_errno(target_mprotect(arg1, arg2, arg3));
10453 #ifdef TARGET_NR_mremap
10454 case TARGET_NR_mremap:
10455 arg1 = cpu_untagged_addr(cpu, arg1);
10456 /* mremap new_addr (arg5) is always untagged */
10457 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10458 #endif
10459 /* ??? msync/mlock/munlock are broken for softmmu. */
10460 #ifdef TARGET_NR_msync
10461 case TARGET_NR_msync:
10462 return get_errno(msync(g2h(cpu, arg1), arg2,
10463 target_to_host_msync_arg(arg3)));
10464 #endif
10465 #ifdef TARGET_NR_mlock
10466 case TARGET_NR_mlock:
10467 return get_errno(mlock(g2h(cpu, arg1), arg2));
10468 #endif
10469 #ifdef TARGET_NR_munlock
10470 case TARGET_NR_munlock:
10471 return get_errno(munlock(g2h(cpu, arg1), arg2));
10472 #endif
10473 #ifdef TARGET_NR_mlockall
10474 case TARGET_NR_mlockall:
10475 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10476 #endif
10477 #ifdef TARGET_NR_munlockall
10478 case TARGET_NR_munlockall:
10479 return get_errno(munlockall());
10480 #endif
10481 #ifdef TARGET_NR_truncate
10482 case TARGET_NR_truncate:
10483 if (!(p = lock_user_string(arg1)))
10484 return -TARGET_EFAULT;
10485 ret = get_errno(truncate(p, arg2));
10486 unlock_user(p, arg1, 0);
10487 return ret;
10488 #endif
10489 #ifdef TARGET_NR_ftruncate
10490 case TARGET_NR_ftruncate:
10491 return get_errno(ftruncate(arg1, arg2));
10492 #endif
10493 case TARGET_NR_fchmod:
10494 return get_errno(fchmod(arg1, arg2));
10495 #if defined(TARGET_NR_fchmodat)
10496 case TARGET_NR_fchmodat:
10497 if (!(p = lock_user_string(arg2)))
10498 return -TARGET_EFAULT;
10499 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10500 unlock_user(p, arg2, 0);
10501 return ret;
10502 #endif
10503 case TARGET_NR_getpriority:
10504 /* Note that negative values are valid for getpriority, so we must
10505 differentiate based on errno settings. */
10506 errno = 0;
10507 ret = getpriority(arg1, arg2);
10508 if (ret == -1 && errno != 0) {
10509 return -host_to_target_errno(errno);
10510 }
10511 #ifdef TARGET_ALPHA
10512 /* Return value is the unbiased priority. Signal no error. */
10513 cpu_env->ir[IR_V0] = 0;
10514 #else
10515 /* Return value is a biased priority to avoid negative numbers. */
10516 ret = 20 - ret;
10517 #endif
10518 return ret;
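/*
 * The kernel's getpriority() returns 20 - nice so that the result is
 * always positive and distinguishable from an error; the generic path
 * above recreates that bias from the host library value, while Alpha
 * returns the unbiased priority and signals success via IR_V0 instead.
 */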
10519 case TARGET_NR_setpriority:
10520 return get_errno(setpriority(arg1, arg2, arg3));
10521 #ifdef TARGET_NR_statfs
10522 case TARGET_NR_statfs:
10523 if (!(p = lock_user_string(arg1))) {
10524 return -TARGET_EFAULT;
10525 }
10526 ret = get_errno(statfs(path(p), &stfs));
10527 unlock_user(p, arg1, 0);
10528 convert_statfs:
10529 if (!is_error(ret)) {
10530 struct target_statfs *target_stfs;
10531
10532 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10533 return -TARGET_EFAULT;
10534 __put_user(stfs.f_type, &target_stfs->f_type);
10535 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10536 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10537 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10538 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10539 __put_user(stfs.f_files, &target_stfs->f_files);
10540 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10541 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10542 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10543 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10544 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10545 #ifdef _STATFS_F_FLAGS
10546 __put_user(stfs.f_flags, &target_stfs->f_flags);
10547 #else
10548 __put_user(0, &target_stfs->f_flags);
10549 #endif
10550 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10551 unlock_user_struct(target_stfs, arg2, 1);
10552 }
10553 return ret;
10554 #endif
10555 #ifdef TARGET_NR_fstatfs
10556 case TARGET_NR_fstatfs:
10557 ret = get_errno(fstatfs(arg1, &stfs));
10558 goto convert_statfs;
10559 #endif
10560 #ifdef TARGET_NR_statfs64
10561 case TARGET_NR_statfs64:
10562 if (!(p = lock_user_string(arg1))) {
10563 return -TARGET_EFAULT;
10564 }
10565 ret = get_errno(statfs(path(p), &stfs));
10566 unlock_user(p, arg1, 0);
10567 convert_statfs64:
10568 if (!is_error(ret)) {
10569 struct target_statfs64 *target_stfs;
10570
10571 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10572 return -TARGET_EFAULT;
10573 __put_user(stfs.f_type, &target_stfs->f_type);
10574 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10575 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10576 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10577 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10578 __put_user(stfs.f_files, &target_stfs->f_files);
10579 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10580 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10581 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10582 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10583 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10584 #ifdef _STATFS_F_FLAGS
10585 __put_user(stfs.f_flags, &target_stfs->f_flags);
10586 #else
10587 __put_user(0, &target_stfs->f_flags);
10588 #endif
10589 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10590 unlock_user_struct(target_stfs, arg3, 1);
10591 }
10592 return ret;
10593 case TARGET_NR_fstatfs64:
10594 ret = get_errno(fstatfs(arg1, &stfs));
10595 goto convert_statfs64;
10596 #endif
10597 #ifdef TARGET_NR_socketcall
10598 case TARGET_NR_socketcall:
10599 return do_socketcall(arg1, arg2);
10600 #endif
10601 #ifdef TARGET_NR_accept
10602 case TARGET_NR_accept:
10603 return do_accept4(arg1, arg2, arg3, 0);
10604 #endif
10605 #ifdef TARGET_NR_accept4
10606 case TARGET_NR_accept4:
10607 return do_accept4(arg1, arg2, arg3, arg4);
10608 #endif
10609 #ifdef TARGET_NR_bind
10610 case TARGET_NR_bind:
10611 return do_bind(arg1, arg2, arg3);
10612 #endif
10613 #ifdef TARGET_NR_connect
10614 case TARGET_NR_connect:
10615 return do_connect(arg1, arg2, arg3);
10616 #endif
10617 #ifdef TARGET_NR_getpeername
10618 case TARGET_NR_getpeername:
10619 return do_getpeername(arg1, arg2, arg3);
10620 #endif
10621 #ifdef TARGET_NR_getsockname
10622 case TARGET_NR_getsockname:
10623 return do_getsockname(arg1, arg2, arg3);
10624 #endif
10625 #ifdef TARGET_NR_getsockopt
10626 case TARGET_NR_getsockopt:
10627 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10628 #endif
10629 #ifdef TARGET_NR_listen
10630 case TARGET_NR_listen:
10631 return get_errno(listen(arg1, arg2));
10632 #endif
10633 #ifdef TARGET_NR_recv
10634 case TARGET_NR_recv:
10635 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10636 #endif
10637 #ifdef TARGET_NR_recvfrom
10638 case TARGET_NR_recvfrom:
10639 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10640 #endif
10641 #ifdef TARGET_NR_recvmsg
10642 case TARGET_NR_recvmsg:
10643 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10644 #endif
10645 #ifdef TARGET_NR_send
10646 case TARGET_NR_send:
10647 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10648 #endif
10649 #ifdef TARGET_NR_sendmsg
10650 case TARGET_NR_sendmsg:
10651 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10652 #endif
10653 #ifdef TARGET_NR_sendmmsg
10654 case TARGET_NR_sendmmsg:
10655 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10656 #endif
10657 #ifdef TARGET_NR_recvmmsg
10658 case TARGET_NR_recvmmsg:
10659 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10660 #endif
10661 #ifdef TARGET_NR_sendto
10662 case TARGET_NR_sendto:
10663 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10664 #endif
10665 #ifdef TARGET_NR_shutdown
10666 case TARGET_NR_shutdown:
10667 return get_errno(shutdown(arg1, arg2));
10668 #endif
10669 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10670 case TARGET_NR_getrandom:
10671 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10672 if (!p) {
10673 return -TARGET_EFAULT;
10674 }
10675 ret = get_errno(getrandom(p, arg2, arg3));
10676 unlock_user(p, arg1, ret);
10677 return ret;
10678 #endif
10679 #ifdef TARGET_NR_socket
10680 case TARGET_NR_socket:
10681 return do_socket(arg1, arg2, arg3);
10682 #endif
10683 #ifdef TARGET_NR_socketpair
10684 case TARGET_NR_socketpair:
10685 return do_socketpair(arg1, arg2, arg3, arg4);
10686 #endif
10687 #ifdef TARGET_NR_setsockopt
10688 case TARGET_NR_setsockopt:
10689 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10690 #endif
10691 #if defined(TARGET_NR_syslog)
10692 case TARGET_NR_syslog:
10693 {
10694 int len = arg3;
10695
10696 switch (arg1) {
10697 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10698 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10699 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10700 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10701 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10702 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10703 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10704 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10705 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10706 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10707 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10708 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10709 {
10710 if (len < 0) {
10711 return -TARGET_EINVAL;
10712 }
10713 if (len == 0) {
10714 return 0;
10715 }
10716 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10717 if (!p) {
10718 return -TARGET_EFAULT;
10719 }
10720 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10721 unlock_user(p, arg2, arg3);
10722 }
10723 return ret;
10724 default:
10725 return -TARGET_EINVAL;
10726 }
10727 }
10728 break;
10729 #endif
10730 case TARGET_NR_setitimer:
10731 {
10732 struct itimerval value, ovalue, *pvalue;
10733
10734 if (arg2) {
10735 pvalue = &value;
10736 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10737 || copy_from_user_timeval(&pvalue->it_value,
10738 arg2 + sizeof(struct target_timeval)))
10739 return -TARGET_EFAULT;
10740 } else {
10741 pvalue = NULL;
10742 }
10743 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10744 if (!is_error(ret) && arg3) {
10745 if (copy_to_user_timeval(arg3,
10746 &ovalue.it_interval)
10747 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10748 &ovalue.it_value))
10749 return -TARGET_EFAULT;
10750 }
10751 }
10752 return ret;
10753 case TARGET_NR_getitimer:
10754 {
10755 struct itimerval value;
10756
10757 ret = get_errno(getitimer(arg1, &value));
10758 if (!is_error(ret) && arg2) {
10759 if (copy_to_user_timeval(arg2,
10760 &value.it_interval)
10761 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10762 &value.it_value))
10763 return -TARGET_EFAULT;
10764 }
10765 }
10766 return ret;
10767 #ifdef TARGET_NR_stat
10768 case TARGET_NR_stat:
10769 if (!(p = lock_user_string(arg1))) {
10770 return -TARGET_EFAULT;
10771 }
10772 ret = get_errno(stat(path(p), &st));
10773 unlock_user(p, arg1, 0);
10774 goto do_stat;
10775 #endif
10776 #ifdef TARGET_NR_lstat
10777 case TARGET_NR_lstat:
10778 if (!(p = lock_user_string(arg1))) {
10779 return -TARGET_EFAULT;
10780 }
10781 ret = get_errno(lstat(path(p), &st));
10782 unlock_user(p, arg1, 0);
10783 goto do_stat;
10784 #endif
10785 #ifdef TARGET_NR_fstat
10786 case TARGET_NR_fstat:
10787 {
10788 ret = get_errno(fstat(arg1, &st));
10789 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10790 do_stat:
10791 #endif
10792 if (!is_error(ret)) {
10793 struct target_stat *target_st;
10794
10795 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10796 return -TARGET_EFAULT;
10797 memset(target_st, 0, sizeof(*target_st));
10798 __put_user(st.st_dev, &target_st->st_dev);
10799 __put_user(st.st_ino, &target_st->st_ino);
10800 __put_user(st.st_mode, &target_st->st_mode);
10801 __put_user(st.st_uid, &target_st->st_uid);
10802 __put_user(st.st_gid, &target_st->st_gid);
10803 __put_user(st.st_nlink, &target_st->st_nlink);
10804 __put_user(st.st_rdev, &target_st->st_rdev);
10805 __put_user(st.st_size, &target_st->st_size);
10806 __put_user(st.st_blksize, &target_st->st_blksize);
10807 __put_user(st.st_blocks, &target_st->st_blocks);
10808 __put_user(st.st_atime, &target_st->target_st_atime);
10809 __put_user(st.st_mtime, &target_st->target_st_mtime);
10810 __put_user(st.st_ctime, &target_st->target_st_ctime);
10811 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10812 __put_user(st.st_atim.tv_nsec,
10813 &target_st->target_st_atime_nsec);
10814 __put_user(st.st_mtim.tv_nsec,
10815 &target_st->target_st_mtime_nsec);
10816 __put_user(st.st_ctim.tv_nsec,
10817 &target_st->target_st_ctime_nsec);
10818 #endif
10819 unlock_user_struct(target_st, arg2, 1);
10820 }
10821 }
10822 return ret;
10823 #endif
10824 case TARGET_NR_vhangup:
10825 return get_errno(vhangup());
10826 #ifdef TARGET_NR_syscall
10827 case TARGET_NR_syscall:
10828 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10829 arg6, arg7, arg8, 0);
10830 #endif
10831 #if defined(TARGET_NR_wait4)
10832 case TARGET_NR_wait4:
10833 {
10834 int status;
10835 abi_long status_ptr = arg2;
10836 struct rusage rusage, *rusage_ptr;
10837 abi_ulong target_rusage = arg4;
10838 abi_long rusage_err;
10839 if (target_rusage)
10840 rusage_ptr = &rusage;
10841 else
10842 rusage_ptr = NULL;
10843 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10844 if (!is_error(ret)) {
10845 if (status_ptr && ret) {
10846 status = host_to_target_waitstatus(status);
10847 if (put_user_s32(status, status_ptr))
10848 return -TARGET_EFAULT;
10849 }
10850 if (target_rusage) {
10851 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10852 if (rusage_err) {
10853 ret = rusage_err;
10854 }
10855 }
10856 }
10857 }
10858 return ret;
10859 #endif
10860 #ifdef TARGET_NR_swapoff
10861 case TARGET_NR_swapoff:
10862 if (!(p = lock_user_string(arg1)))
10863 return -TARGET_EFAULT;
10864 ret = get_errno(swapoff(p));
10865 unlock_user(p, arg1, 0);
10866 return ret;
10867 #endif
10868 case TARGET_NR_sysinfo:
10869 {
10870 struct target_sysinfo *target_value;
10871 struct sysinfo value;
10872 ret = get_errno(sysinfo(&value));
10873 if (!is_error(ret) && arg1)
10874 {
10875 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10876 return -TARGET_EFAULT;
10877 __put_user(value.uptime, &target_value->uptime);
10878 __put_user(value.loads[0], &target_value->loads[0]);
10879 __put_user(value.loads[1], &target_value->loads[1]);
10880 __put_user(value.loads[2], &target_value->loads[2]);
10881 __put_user(value.totalram, &target_value->totalram);
10882 __put_user(value.freeram, &target_value->freeram);
10883 __put_user(value.sharedram, &target_value->sharedram);
10884 __put_user(value.bufferram, &target_value->bufferram);
10885 __put_user(value.totalswap, &target_value->totalswap);
10886 __put_user(value.freeswap, &target_value->freeswap);
10887 __put_user(value.procs, &target_value->procs);
10888 __put_user(value.totalhigh, &target_value->totalhigh);
10889 __put_user(value.freehigh, &target_value->freehigh);
10890 __put_user(value.mem_unit, &target_value->mem_unit);
10891 unlock_user_struct(target_value, arg1, 1);
10892 }
10893 }
10894 return ret;
10895 #ifdef TARGET_NR_ipc
10896 case TARGET_NR_ipc:
10897 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10898 #endif
10899 #ifdef TARGET_NR_semget
10900 case TARGET_NR_semget:
10901 return get_errno(semget(arg1, arg2, arg3));
10902 #endif
10903 #ifdef TARGET_NR_semop
10904 case TARGET_NR_semop:
10905 return do_semtimedop(arg1, arg2, arg3, 0, false);
10906 #endif
10907 #ifdef TARGET_NR_semtimedop
10908 case TARGET_NR_semtimedop:
10909 return do_semtimedop(arg1, arg2, arg3, arg4, false);
10910 #endif
10911 #ifdef TARGET_NR_semtimedop_time64
10912 case TARGET_NR_semtimedop_time64:
10913 return do_semtimedop(arg1, arg2, arg3, arg4, true);
10914 #endif
10915 #ifdef TARGET_NR_semctl
10916 case TARGET_NR_semctl:
10917 return do_semctl(arg1, arg2, arg3, arg4);
10918 #endif
10919 #ifdef TARGET_NR_msgctl
10920 case TARGET_NR_msgctl:
10921 return do_msgctl(arg1, arg2, arg3);
10922 #endif
10923 #ifdef TARGET_NR_msgget
10924 case TARGET_NR_msgget:
10925 return get_errno(msgget(arg1, arg2));
10926 #endif
10927 #ifdef TARGET_NR_msgrcv
10928 case TARGET_NR_msgrcv:
10929 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10930 #endif
10931 #ifdef TARGET_NR_msgsnd
10932 case TARGET_NR_msgsnd:
10933 return do_msgsnd(arg1, arg2, arg3, arg4);
10934 #endif
10935 #ifdef TARGET_NR_shmget
10936 case TARGET_NR_shmget:
10937 return get_errno(shmget(arg1, arg2, arg3));
10938 #endif
10939 #ifdef TARGET_NR_shmctl
10940 case TARGET_NR_shmctl:
10941 return do_shmctl(arg1, arg2, arg3);
10942 #endif
10943 #ifdef TARGET_NR_shmat
10944 case TARGET_NR_shmat:
10945 return do_shmat(cpu_env, arg1, arg2, arg3);
10946 #endif
10947 #ifdef TARGET_NR_shmdt
10948 case TARGET_NR_shmdt:
10949 return do_shmdt(arg1);
10950 #endif
10951 case TARGET_NR_fsync:
10952 return get_errno(fsync(arg1));
10953 case TARGET_NR_clone:
10954 /* Linux manages to have three different orderings for its
10955 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10956 * match the kernel's CONFIG_CLONE_* settings.
10957 * Microblaze is further special in that it uses a sixth
10958 * implicit argument to clone for the TLS pointer.
10959 */
10960 #if defined(TARGET_MICROBLAZE)
10961 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10962 #elif defined(TARGET_CLONE_BACKWARDS)
10963 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10964 #elif defined(TARGET_CLONE_BACKWARDS2)
10965 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10966 #else
10967 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10968 #endif
10969 return ret;
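/*
 * Concretely, the default kernel ordering is
 *   clone(flags, newsp, parent_tidptr, child_tidptr, tls)
 * CLONE_BACKWARDS swaps the last two arguments (tls before
 * child_tidptr) and CLONE_BACKWARDS2 swaps the first two (sp before
 * flags), which is what the do_fork() argument shuffling above
 * implements.
 */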
10970 #ifdef __NR_exit_group
10971 /* new thread calls */
10972 case TARGET_NR_exit_group:
10973 preexit_cleanup(cpu_env, arg1);
10974 return get_errno(exit_group(arg1));
10975 #endif
10976 case TARGET_NR_setdomainname:
10977 if (!(p = lock_user_string(arg1)))
10978 return -TARGET_EFAULT;
10979 ret = get_errno(setdomainname(p, arg2));
10980 unlock_user(p, arg1, 0);
10981 return ret;
10982 case TARGET_NR_uname:
10983 /* no need to transcode because we use the Linux syscall */
10984 {
10985 struct new_utsname *buf;
10986
10987 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10988 return -TARGET_EFAULT;
10989 ret = get_errno(sys_uname(buf));
10990 if (!is_error(ret)) {
10991 /* Overwrite the native machine name with whatever is being
10992 emulated. */
10993 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10994 sizeof(buf->machine));
10995 /* Allow the user to override the reported release. */
10996 if (qemu_uname_release && *qemu_uname_release) {
10997 g_strlcpy(buf->release, qemu_uname_release,
10998 sizeof(buf->release));
10999 }
11000 }
11001 unlock_user_struct(buf, arg1, 1);
11002 }
11003 return ret;
11004 #ifdef TARGET_I386
11005 case TARGET_NR_modify_ldt:
11006 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11007 #if !defined(TARGET_X86_64)
11008 case TARGET_NR_vm86:
11009 return do_vm86(cpu_env, arg1, arg2);
11010 #endif
11011 #endif
11012 #if defined(TARGET_NR_adjtimex)
11013 case TARGET_NR_adjtimex:
11014 {
11015 struct timex host_buf;
11016
11017 if (target_to_host_timex(&host_buf, arg1) != 0) {
11018 return -TARGET_EFAULT;
11019 }
11020 ret = get_errno(adjtimex(&host_buf));
11021 if (!is_error(ret)) {
11022 if (host_to_target_timex(arg1, &host_buf) != 0) {
11023 return -TARGET_EFAULT;
11024 }
11025 }
11026 }
11027 return ret;
11028 #endif
11029 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11030 case TARGET_NR_clock_adjtime:
11031 {
11032 struct timex htx, *phtx = &htx;
11033
11034 if (target_to_host_timex(phtx, arg2) != 0) {
11035 return -TARGET_EFAULT;
11036 }
11037 ret = get_errno(clock_adjtime(arg1, phtx));
11038 if (!is_error(ret) && phtx) {
11039 if (host_to_target_timex(arg2, phtx) != 0) {
11040 return -TARGET_EFAULT;
11041 }
11042 }
11043 }
11044 return ret;
11045 #endif
11046 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11047 case TARGET_NR_clock_adjtime64:
11048 {
11049 struct timex htx;
11050
11051 if (target_to_host_timex64(&htx, arg2) != 0) {
11052 return -TARGET_EFAULT;
11053 }
11054 ret = get_errno(clock_adjtime(arg1, &htx));
11055 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11056 return -TARGET_EFAULT;
11057 }
11058 }
11059 return ret;
11060 #endif
11061 case TARGET_NR_getpgid:
11062 return get_errno(getpgid(arg1));
11063 case TARGET_NR_fchdir:
11064 return get_errno(fchdir(arg1));
11065 case TARGET_NR_personality:
11066 return get_errno(personality(arg1));
11067 #ifdef TARGET_NR__llseek /* Not on alpha */
11068 case TARGET_NR__llseek:
11069 {
11070 int64_t res;
11071 #if !defined(__NR_llseek)
11072 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11073 if (res == -1) {
11074 ret = get_errno(res);
11075 } else {
11076 ret = 0;
11077 }
11078 #else
11079 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11080 #endif
11081 if ((ret == 0) && put_user_s64(res, arg4)) {
11082 return -TARGET_EFAULT;
11083 }
11084 }
11085 return ret;
11086 #endif
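/*
 * _llseek splits the 64-bit offset across two registers: arg2 holds
 * the high 32 bits and arg3 the low 32, so arg2 = 1, arg3 = 0 seeks
 * to byte 1 << 32 = 4294967296. The result is stored back through
 * the loff_t pointer in arg4.
 */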
11087 #ifdef TARGET_NR_getdents
11088 case TARGET_NR_getdents:
11089 return do_getdents(arg1, arg2, arg3);
11090 #endif /* TARGET_NR_getdents */
11091 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11092 case TARGET_NR_getdents64:
11093 return do_getdents64(arg1, arg2, arg3);
11094 #endif /* TARGET_NR_getdents64 */
11095 #if defined(TARGET_NR__newselect)
11096 case TARGET_NR__newselect:
11097 return do_select(arg1, arg2, arg3, arg4, arg5);
11098 #endif
11099 #ifdef TARGET_NR_poll
11100 case TARGET_NR_poll:
11101 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11102 #endif
11103 #ifdef TARGET_NR_ppoll
11104 case TARGET_NR_ppoll:
11105 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11106 #endif
11107 #ifdef TARGET_NR_ppoll_time64
11108 case TARGET_NR_ppoll_time64:
11109 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11110 #endif
11111 case TARGET_NR_flock:
11112 /* NOTE: the flock constants seem to be the same on every
11113 Linux platform */
11114 return get_errno(safe_flock(arg1, arg2));
11115 case TARGET_NR_readv:
11116 {
11117 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11118 if (vec != NULL) {
11119 ret = get_errno(safe_readv(arg1, vec, arg3));
11120 unlock_iovec(vec, arg2, arg3, 1);
11121 } else {
11122 ret = -host_to_target_errno(errno);
11123 }
11124 }
11125 return ret;
11126 case TARGET_NR_writev:
11127 {
11128 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11129 if (vec != NULL) {
11130 ret = get_errno(safe_writev(arg1, vec, arg3));
11131 unlock_iovec(vec, arg2, arg3, 0);
11132 } else {
11133 ret = -host_to_target_errno(errno);
11134 }
11135 }
11136 return ret;
11137 #if defined(TARGET_NR_preadv)
11138 case TARGET_NR_preadv:
11139 {
11140 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11141 if (vec != NULL) {
11142 unsigned long low, high;
11143
11144 target_to_host_low_high(arg4, arg5, &low, &high);
11145 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11146 unlock_iovec(vec, arg2, arg3, 1);
11147 } else {
11148 ret = -host_to_target_errno(errno);
11149 }
11150 }
11151 return ret;
11152 #endif
11153 #if defined(TARGET_NR_pwritev)
11154 case TARGET_NR_pwritev:
11155 {
11156 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11157 if (vec != NULL) {
11158 unsigned long low, high;
11159
11160 target_to_host_low_high(arg4, arg5, &low, &high);
11161 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11162 unlock_iovec(vec, arg2, arg3, 0);
11163 } else {
11164 ret = -host_to_target_errno(errno);
11165 }
11166 }
11167 return ret;
11168 #endif
11169 case TARGET_NR_getsid:
11170 return get_errno(getsid(arg1));
11171 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11172 case TARGET_NR_fdatasync:
11173 return get_errno(fdatasync(arg1));
11174 #endif
11175 case TARGET_NR_sched_getaffinity:
11176 {
11177 unsigned int mask_size;
11178 unsigned long *mask;
11179
11180 /*
11181 * sched_getaffinity needs multiples of ulong, so we need to take
11182 * care of mismatches between target ulong and host ulong sizes.
11183 */
11184 if (arg2 & (sizeof(abi_ulong) - 1)) {
11185 return -TARGET_EINVAL;
11186 }
11187 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11188
11189 mask = alloca(mask_size);
11190 memset(mask, 0, mask_size);
11191 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11192
11193 if (!is_error(ret)) {
11194 if (ret > arg2) {
11195 /* More data was returned than the caller's buffer can hold.
11196 * This only happens if sizeof(abi_long) < sizeof(long)
11197 * and the caller passed us a buffer holding an odd number
11198 * of abi_longs. If the host kernel is actually using the
11199 * extra 4 bytes then fail EINVAL; otherwise we can just
11200 * ignore them and only copy the interesting part.
11201 */
11202 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11203 if (numcpus > arg2 * 8) {
11204 return -TARGET_EINVAL;
11205 }
11206 ret = arg2;
11207 }
11208
11209 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11210 return -TARGET_EFAULT;
11211 }
11212 }
11213 }
11214 return ret;
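/*
 * The rounding above widens the guest buffer size to a whole number
 * of host longs: for example arg2 = 4 (a single 32-bit abi_ulong) on
 * a 64-bit host yields mask_size = (4 + 7) & ~7 = 8, so the host
 * kernel always sees a properly sized cpumask buffer.
 */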
11215 case TARGET_NR_sched_setaffinity:
11216 {
11217 unsigned int mask_size;
11218 unsigned long *mask;
11219
11220 /*
11221 * sched_setaffinity needs multiples of ulong, so we need to take
11222 * care of mismatches between target ulong and host ulong sizes.
11223 */
11224 if (arg2 & (sizeof(abi_ulong) - 1)) {
11225 return -TARGET_EINVAL;
11226 }
11227 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11228 mask = alloca(mask_size);
11229
11230 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11231 if (ret) {
11232 return ret;
11233 }
11234
11235 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11236 }
11237 case TARGET_NR_getcpu:
11238 {
11239 unsigned cpu, node;
11240 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
11241 arg2 ? &node : NULL,
11242 NULL));
11243 if (is_error(ret)) {
11244 return ret;
11245 }
11246 if (arg1 && put_user_u32(cpu, arg1)) {
11247 return -TARGET_EFAULT;
11248 }
11249 if (arg2 && put_user_u32(node, arg2)) {
11250 return -TARGET_EFAULT;
11251 }
11252 }
11253 return ret;
11254 case TARGET_NR_sched_setparam:
11255 {
11256 struct target_sched_param *target_schp;
11257 struct sched_param schp;
11258
11259 if (arg2 == 0) {
11260 return -TARGET_EINVAL;
11261 }
11262 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11263 return -TARGET_EFAULT;
11264 }
11265 schp.sched_priority = tswap32(target_schp->sched_priority);
11266 unlock_user_struct(target_schp, arg2, 0);
11267 return get_errno(sys_sched_setparam(arg1, &schp));
11268 }
11269 case TARGET_NR_sched_getparam:
11270 {
11271 struct target_sched_param *target_schp;
11272 struct sched_param schp;
11273
11274 if (arg2 == 0) {
11275 return -TARGET_EINVAL;
11276 }
11277 ret = get_errno(sys_sched_getparam(arg1, &schp));
11278 if (!is_error(ret)) {
11279 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11280 return -TARGET_EFAULT;
11281 }
11282 target_schp->sched_priority = tswap32(schp.sched_priority);
11283 unlock_user_struct(target_schp, arg2, 1);
11284 }
11285 }
11286 return ret;
11287 case TARGET_NR_sched_setscheduler:
11288 {
11289 struct target_sched_param *target_schp;
11290 struct sched_param schp;
11291 if (arg3 == 0) {
11292 return -TARGET_EINVAL;
11293 }
11294 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11295 return -TARGET_EFAULT;
11296 }
11297 schp.sched_priority = tswap32(target_schp->sched_priority);
11298 unlock_user_struct(target_schp, arg3, 0);
11299 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11300 }
11301 case TARGET_NR_sched_getscheduler:
11302 return get_errno(sys_sched_getscheduler(arg1));
11303 case TARGET_NR_sched_getattr:
11304 {
11305 struct target_sched_attr *target_scha;
11306 struct sched_attr scha;
11307 if (arg2 == 0) {
11308 return -TARGET_EINVAL;
11309 }
11310 if (arg3 > sizeof(scha)) {
11311 arg3 = sizeof(scha);
11312 }
11313 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11314 if (!is_error(ret)) {
11315 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11316 if (!target_scha) {
11317 return -TARGET_EFAULT;
11318 }
11319 target_scha->size = tswap32(scha.size);
11320 target_scha->sched_policy = tswap32(scha.sched_policy);
11321 target_scha->sched_flags = tswap64(scha.sched_flags);
11322 target_scha->sched_nice = tswap32(scha.sched_nice);
11323 target_scha->sched_priority = tswap32(scha.sched_priority);
11324 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11325 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11326 target_scha->sched_period = tswap64(scha.sched_period);
11327 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11328 target_scha->sched_util_min = tswap32(scha.sched_util_min);
11329 target_scha->sched_util_max = tswap32(scha.sched_util_max);
11330 }
11331 unlock_user(target_scha, arg2, arg3);
11332 }
11333 return ret;
11334 }
11335 case TARGET_NR_sched_setattr:
11336 {
11337 struct target_sched_attr *target_scha;
11338 struct sched_attr scha;
11339 uint32_t size;
11340 int zeroed;
11341 if (arg2 == 0) {
11342 return -TARGET_EINVAL;
11343 }
11344 if (get_user_u32(size, arg2)) {
11345 return -TARGET_EFAULT;
11346 }
11347 if (!size) {
11348 size = offsetof(struct target_sched_attr, sched_util_min);
11349 }
11350 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11351 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11352 return -TARGET_EFAULT;
11353 }
11354 return -TARGET_E2BIG;
11355 }
11356
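/* As the kernel does for sched_setattr, require any bytes the guest
 * supplies beyond the structure we understand to be zero; otherwise
 * fail with E2BIG below. */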
11357 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11358 if (zeroed < 0) {
11359 return zeroed;
11360 } else if (zeroed == 0) {
11361 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11362 return -TARGET_EFAULT;
11363 }
11364 return -TARGET_E2BIG;
11365 }
11366 if (size > sizeof(struct target_sched_attr)) {
11367 size = sizeof(struct target_sched_attr);
11368 }
11369
11370 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11371 if (!target_scha) {
11372 return -TARGET_EFAULT;
11373 }
11374 scha.size = size;
11375 scha.sched_policy = tswap32(target_scha->sched_policy);
11376 scha.sched_flags = tswap64(target_scha->sched_flags);
11377 scha.sched_nice = tswap32(target_scha->sched_nice);
11378 scha.sched_priority = tswap32(target_scha->sched_priority);
11379 scha.sched_runtime = tswap64(target_scha->sched_runtime);
11380 scha.sched_deadline = tswap64(target_scha->sched_deadline);
11381 scha.sched_period = tswap64(target_scha->sched_period);
11382 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11383 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11384 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11385 }
11386 unlock_user(target_scha, arg2, 0);
11387 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11388 }
11389 case TARGET_NR_sched_yield:
11390 return get_errno(sched_yield());
11391 case TARGET_NR_sched_get_priority_max:
11392 return get_errno(sched_get_priority_max(arg1));
11393 case TARGET_NR_sched_get_priority_min:
11394 return get_errno(sched_get_priority_min(arg1));
11395 #ifdef TARGET_NR_sched_rr_get_interval
11396 case TARGET_NR_sched_rr_get_interval:
11397 {
11398 struct timespec ts;
11399 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11400 if (!is_error(ret)) {
11401 ret = host_to_target_timespec(arg2, &ts);
11402 }
11403 }
11404 return ret;
11405 #endif
11406 #ifdef TARGET_NR_sched_rr_get_interval_time64
11407 case TARGET_NR_sched_rr_get_interval_time64:
11408 {
11409 struct timespec ts;
11410 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11411 if (!is_error(ret)) {
11412 ret = host_to_target_timespec64(arg2, &ts);
11413 }
11414 }
11415 return ret;
11416 #endif
11417 #if defined(TARGET_NR_nanosleep)
11418 case TARGET_NR_nanosleep:
11419 {
11420 struct timespec req, rem;
11421 if (target_to_host_timespec(&req, arg1)) {
return -TARGET_EFAULT;
}
11422 ret = get_errno(safe_nanosleep(&req, &rem));
11423 if (is_error(ret) && arg2) {
11424 host_to_target_timespec(arg2, &rem);
11425 }
11426 }
11427 return ret;
11428 #endif
11429 case TARGET_NR_prctl:
11430 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11432 #ifdef TARGET_NR_arch_prctl
11433 case TARGET_NR_arch_prctl:
11434 return do_arch_prctl(cpu_env, arg1, arg2);
11435 #endif
11436 #ifdef TARGET_NR_pread64
11437 case TARGET_NR_pread64:
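/* Some 32-bit ABIs pass 64-bit arguments in aligned register pairs,
 * inserting a pad word before the offset; shift the arguments down
 * to skip over it. */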
11438 if (regpairs_aligned(cpu_env, num)) {
11439 arg4 = arg5;
11440 arg5 = arg6;
11441 }
11442 if (arg2 == 0 && arg3 == 0) {
11443 /* Special-case NULL buffer and zero length, which should succeed */
11444 p = 0;
11445 } else {
11446 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11447 if (!p) {
11448 return -TARGET_EFAULT;
11449 }
11450 }
11451 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11452 unlock_user(p, arg2, ret);
11453 return ret;
11454 case TARGET_NR_pwrite64:
11455 if (regpairs_aligned(cpu_env, num)) {
11456 arg4 = arg5;
11457 arg5 = arg6;
11458 }
11459 if (arg2 == 0 && arg3 == 0) {
11460 /* Special-case NULL buffer and zero length, which should succeed */
11461 p = 0;
11462 } else {
11463 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11464 if (!p) {
11465 return -TARGET_EFAULT;
11466 }
11467 }
11468 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11469 unlock_user(p, arg2, 0);
11470 return ret;
11471 #endif
11472 case TARGET_NR_getcwd:
11473 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) {
11474 return -TARGET_EFAULT;
}
11475 ret = get_errno(sys_getcwd1(p, arg2));
11476 unlock_user(p, arg1, ret);
11477 return ret;
11478 case TARGET_NR_capget:
11479 case TARGET_NR_capset:
11480 {
11481 struct target_user_cap_header *target_header;
11482 struct target_user_cap_data *target_data = NULL;
11483 struct __user_cap_header_struct header;
11484 struct __user_cap_data_struct data[2];
11485 struct __user_cap_data_struct *dataptr = NULL;
11486 int i, target_datalen;
11487 int data_items = 1;
11488
11489 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11490 return -TARGET_EFAULT;
11491 }
11492 header.version = tswap32(target_header->version);
11493 header.pid = tswap32(target_header->pid);
11494
11495 if (header.version != _LINUX_CAPABILITY_VERSION) {
11496 /* Version 2 and up takes pointer to two user_data structs */
11497 data_items = 2;
11498 }
11499
11500 target_datalen = sizeof(*target_data) * data_items;
11501
11502 if (arg2) {
11503 if (num == TARGET_NR_capget) {
11504 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11505 } else {
11506 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11507 }
11508 if (!target_data) {
11509 unlock_user_struct(target_header, arg1, 0);
11510 return -TARGET_EFAULT;
11511 }
11512
11513 if (num == TARGET_NR_capset) {
11514 for (i = 0; i < data_items; i++) {
11515 data[i].effective = tswap32(target_data[i].effective);
11516 data[i].permitted = tswap32(target_data[i].permitted);
11517 data[i].inheritable = tswap32(target_data[i].inheritable);
11518 }
11519 }
11520
11521 dataptr = data;
11522 }
11523
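/* With a NULL data pointer the kernel only checks (and rewrites) the
 * header, which is how guests probe for the preferred version. */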
11524 if (num == TARGET_NR_capget) {
11525 ret = get_errno(capget(&header, dataptr));
11526 } else {
11527 ret = get_errno(capset(&header, dataptr));
11528 }
11529
11530 /* The kernel always updates version for both capget and capset */
11531 target_header->version = tswap32(header.version);
11532 unlock_user_struct(target_header, arg1, 1);
11533
11534 if (arg2) {
11535 if (num == TARGET_NR_capget) {
11536 for (i = 0; i < data_items; i++) {
11537 target_data[i].effective = tswap32(data[i].effective);
11538 target_data[i].permitted = tswap32(data[i].permitted);
11539 target_data[i].inheritable = tswap32(data[i].inheritable);
11540 }
11541 unlock_user(target_data, arg2, target_datalen);
11542 } else {
11543 unlock_user(target_data, arg2, 0);
11544 }
11545 }
11546 return ret;
11547 }
11548 case TARGET_NR_sigaltstack:
11549 return do_sigaltstack(arg1, arg2, cpu_env);
11550
11551 #ifdef CONFIG_SENDFILE
11552 #ifdef TARGET_NR_sendfile
11553 case TARGET_NR_sendfile:
11554 {
11555 off_t *offp = NULL;
11556 off_t off;
11557 if (arg3) {
11558 ret = get_user_sal(off, arg3);
11559 if (is_error(ret)) {
11560 return ret;
11561 }
11562 offp = &off;
11563 }
11564 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11565 if (!is_error(ret) && arg3) {
11566 abi_long ret2 = put_user_sal(off, arg3);
11567 if (is_error(ret2)) {
11568 ret = ret2;
11569 }
11570 }
11571 return ret;
11572 }
11573 #endif
11574 #ifdef TARGET_NR_sendfile64
11575 case TARGET_NR_sendfile64:
11576 {
11577 off_t *offp = NULL;
11578 off_t off;
11579 if (arg3) {
11580 ret = get_user_s64(off, arg3);
11581 if (is_error(ret)) {
11582 return ret;
11583 }
11584 offp = &off;
11585 }
11586 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11587 if (!is_error(ret) && arg3) {
11588 abi_long ret2 = put_user_s64(off, arg3);
11589 if (is_error(ret2)) {
11590 ret = ret2;
11591 }
11592 }
11593 return ret;
11594 }
11595 #endif
11596 #endif
11597 #ifdef TARGET_NR_vfork
11598 case TARGET_NR_vfork:
11599 return get_errno(do_fork(cpu_env,
11600 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11601 0, 0, 0, 0));
11602 #endif
11603 #ifdef TARGET_NR_ugetrlimit
11604 case TARGET_NR_ugetrlimit:
11605 {
11606 struct rlimit rlim;
11607 int resource = target_to_host_resource(arg1);
11608 ret = get_errno(getrlimit(resource, &rlim));
11609 if (!is_error(ret)) {
11610 struct target_rlimit *target_rlim;
11611 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
11612 return -TARGET_EFAULT;
}
11613 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11614 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11615 unlock_user_struct(target_rlim, arg2, 1);
11616 }
11617 return ret;
11618 }
11619 #endif
11620 #ifdef TARGET_NR_truncate64
11621 case TARGET_NR_truncate64:
11622 if (!(p = lock_user_string(arg1))) {
11623 return -TARGET_EFAULT;
}
11624 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11625 unlock_user(p, arg1, 0);
11626 return ret;
11627 #endif
11628 #ifdef TARGET_NR_ftruncate64
11629 case TARGET_NR_ftruncate64:
11630 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11631 #endif
11632 #ifdef TARGET_NR_stat64
11633 case TARGET_NR_stat64:
11634 if (!(p = lock_user_string(arg1))) {
11635 return -TARGET_EFAULT;
11636 }
11637 ret = get_errno(stat(path(p), &st));
11638 unlock_user(p, arg1, 0);
11639 if (!is_error(ret)) {
11640 ret = host_to_target_stat64(cpu_env, arg2, &st);
}
11641 return ret;
11642 #endif
11643 #ifdef TARGET_NR_lstat64
11644 case TARGET_NR_lstat64:
11645 if (!(p = lock_user_string(arg1))) {
11646 return -TARGET_EFAULT;
11647 }
11648 ret = get_errno(lstat(path(p), &st));
11649 unlock_user(p, arg1, 0);
11650 if (!is_error(ret)) {
11651 ret = host_to_target_stat64(cpu_env, arg2, &st);
}
11652 return ret;
11653 #endif
11654 #ifdef TARGET_NR_fstat64
11655 case TARGET_NR_fstat64:
11656 ret = get_errno(fstat(arg1, &st));
11657 if (!is_error(ret)) {
11658 ret = host_to_target_stat64(cpu_env, arg2, &st);
}
11659 return ret;
11660 #endif
11661 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11662 #ifdef TARGET_NR_fstatat64
11663 case TARGET_NR_fstatat64:
11664 #endif
11665 #ifdef TARGET_NR_newfstatat
11666 case TARGET_NR_newfstatat:
11667 #endif
11668 if (!(p = lock_user_string(arg2))) {
11669 return -TARGET_EFAULT;
11670 }
11671 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11672 unlock_user(p, arg2, 0);
11673 if (!is_error(ret)) {
11674 ret = host_to_target_stat64(cpu_env, arg3, &st);
}
11675 return ret;
11676 #endif
11677 #if defined(TARGET_NR_statx)
11678 case TARGET_NR_statx:
11679 {
11680 struct target_statx *target_stx;
11681 int dirfd = arg1;
11682 int flags = arg3;
11683
11684 p = lock_user_string(arg2);
11685 if (p == NULL) {
11686 return -TARGET_EFAULT;
11687 }
11688 #if defined(__NR_statx)
11689 {
11690 /*
11691 * It is assumed that struct statx is architecture independent.
11692 */
11693 struct target_statx host_stx;
11694 int mask = arg4;
11695
11696 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11697 if (!is_error(ret)) {
11698 if (host_to_target_statx(&host_stx, arg5) != 0) {
11699 unlock_user(p, arg2, 0);
11700 return -TARGET_EFAULT;
11701 }
11702 }
11703
11704 if (ret != -TARGET_ENOSYS) {
11705 unlock_user(p, arg2, 0);
11706 return ret;
11707 }
11708 }
11709 #endif
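/* The host lacks statx (or it returned ENOSYS): emulate via fstatat
 * and fill in only the statx fields an ordinary struct stat provides. */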
11710 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11711 unlock_user(p, arg2, 0);
11712
11713 if (!is_error(ret)) {
11714 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11715 return -TARGET_EFAULT;
11716 }
11717 memset(target_stx, 0, sizeof(*target_stx));
11718 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11719 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11720 __put_user(st.st_ino, &target_stx->stx_ino);
11721 __put_user(st.st_mode, &target_stx->stx_mode);
11722 __put_user(st.st_uid, &target_stx->stx_uid);
11723 __put_user(st.st_gid, &target_stx->stx_gid);
11724 __put_user(st.st_nlink, &target_stx->stx_nlink);
11725 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11726 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11727 __put_user(st.st_size, &target_stx->stx_size);
11728 __put_user(st.st_blksize, &target_stx->stx_blksize);
11729 __put_user(st.st_blocks, &target_stx->stx_blocks);
11730 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11731 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11732 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11733 unlock_user_struct(target_stx, arg5, 1);
11734 }
11735 }
11736 return ret;
11737 #endif
11738 #ifdef TARGET_NR_lchown
11739 case TARGET_NR_lchown:
11740 if (!(p = lock_user_string(arg1))) {
11741 return -TARGET_EFAULT;
}
11742 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11743 unlock_user(p, arg1, 0);
11744 return ret;
11745 #endif
11746 #ifdef TARGET_NR_getuid
11747 case TARGET_NR_getuid:
11748 return get_errno(high2lowuid(getuid()));
11749 #endif
11750 #ifdef TARGET_NR_getgid
11751 case TARGET_NR_getgid:
11752 return get_errno(high2lowgid(getgid()));
11753 #endif
11754 #ifdef TARGET_NR_geteuid
11755 case TARGET_NR_geteuid:
11756 return get_errno(high2lowuid(geteuid()));
11757 #endif
11758 #ifdef TARGET_NR_getegid
11759 case TARGET_NR_getegid:
11760 return get_errno(high2lowgid(getegid()));
11761 #endif
11762 case TARGET_NR_setreuid:
11763 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11764 case TARGET_NR_setregid:
11765 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11766 case TARGET_NR_getgroups:
11767 { /* the same code as for TARGET_NR_getgroups32 */
11768 int gidsetsize = arg1;
11769 target_id *target_grouplist;
11770 g_autofree gid_t *grouplist = NULL;
11771 int i;
11772
11773 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11774 return -TARGET_EINVAL;
11775 }
11776 if (gidsetsize > 0) {
11777 grouplist = g_try_new(gid_t, gidsetsize);
11778 if (!grouplist) {
11779 return -TARGET_ENOMEM;
11780 }
11781 }
11782 ret = get_errno(getgroups(gidsetsize, grouplist));
11783 if (!is_error(ret) && gidsetsize > 0) {
11784 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11785 gidsetsize * sizeof(target_id), 0);
11786 if (!target_grouplist) {
11787 return -TARGET_EFAULT;
11788 }
11789 for (i = 0; i < ret; i++) {
11790 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11791 }
11792 unlock_user(target_grouplist, arg2,
11793 gidsetsize * sizeof(target_id));
11794 }
11795 return ret;
11796 }
11797 case TARGET_NR_setgroups:
11798 { /* the same code as for TARGET_NR_setgroups32 */
11799 int gidsetsize = arg1;
11800 target_id *target_grouplist;
11801 g_autofree gid_t *grouplist = NULL;
11802 int i;
11803
11804 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11805 return -TARGET_EINVAL;
11806 }
11807 if (gidsetsize > 0) {
11808 grouplist = g_try_new(gid_t, gidsetsize);
11809 if (!grouplist) {
11810 return -TARGET_ENOMEM;
11811 }
11812 target_grouplist = lock_user(VERIFY_READ, arg2,
11813 gidsetsize * sizeof(target_id), 1);
11814 if (!target_grouplist) {
11815 return -TARGET_EFAULT;
11816 }
11817 for (i = 0; i < gidsetsize; i++) {
11818 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11819 }
11820 unlock_user(target_grouplist, arg2,
11821 gidsetsize * sizeof(target_id));
11822 }
11823 return get_errno(setgroups(gidsetsize, grouplist));
11824 }
11825 case TARGET_NR_fchown:
11826 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11827 #if defined(TARGET_NR_fchownat)
11828 case TARGET_NR_fchownat:
11829 if (!(p = lock_user_string(arg2))) {
11830 return -TARGET_EFAULT;
}
11831 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11832 low2highgid(arg4), arg5));
11833 unlock_user(p, arg2, 0);
11834 return ret;
11835 #endif
11836 #ifdef TARGET_NR_setresuid
11837 case TARGET_NR_setresuid:
11838 return get_errno(sys_setresuid(low2highuid(arg1),
11839 low2highuid(arg2),
11840 low2highuid(arg3)));
11841 #endif
11842 #ifdef TARGET_NR_getresuid
11843 case TARGET_NR_getresuid:
11844 {
11845 uid_t ruid, euid, suid;
11846 ret = get_errno(getresuid(&ruid, &euid, &suid));
11847 if (!is_error(ret)) {
11848 if (put_user_id(high2lowuid(ruid), arg1)
11849 || put_user_id(high2lowuid(euid), arg2)
11850 || put_user_id(high2lowuid(suid), arg3)) {
11851 return -TARGET_EFAULT;
}
11852 }
11853 }
11854 return ret;
11855 #endif
11856 #ifdef TARGET_NR_setresgid
11857 case TARGET_NR_setresgid:
11858 return get_errno(sys_setresgid(low2highgid(arg1),
11859 low2highgid(arg2),
11860 low2highgid(arg3)));
11861 #endif
11862 #ifdef TARGET_NR_getresgid
11863 case TARGET_NR_getresgid:
11864 {
11865 gid_t rgid, egid, sgid;
11866 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11867 if (!is_error(ret)) {
11868 if (put_user_id(high2lowgid(rgid), arg1)
11869 || put_user_id(high2lowgid(egid), arg2)
11870 || put_user_id(high2lowgid(sgid), arg3)) {
11871 return -TARGET_EFAULT;
}
11872 }
11873 }
11874 return ret;
11875 #endif
11876 #ifdef TARGET_NR_chown
11877 case TARGET_NR_chown:
11878 if (!(p = lock_user_string(arg1))) {
11879 return -TARGET_EFAULT;
}
11880 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11881 unlock_user(p, arg1, 0);
11882 return ret;
11883 #endif
11884 case TARGET_NR_setuid:
11885 return get_errno(sys_setuid(low2highuid(arg1)));
11886 case TARGET_NR_setgid:
11887 return get_errno(sys_setgid(low2highgid(arg1)));
11888 case TARGET_NR_setfsuid:
11889 return get_errno(setfsuid(arg1));
11890 case TARGET_NR_setfsgid:
11891 return get_errno(setfsgid(arg1));
11892
11893 #ifdef TARGET_NR_lchown32
11894 case TARGET_NR_lchown32:
11895 if (!(p = lock_user_string(arg1))) {
11896 return -TARGET_EFAULT;
}
11897 ret = get_errno(lchown(p, arg2, arg3));
11898 unlock_user(p, arg1, 0);
11899 return ret;
11900 #endif
11901 #ifdef TARGET_NR_getuid32
11902 case TARGET_NR_getuid32:
11903 return get_errno(getuid());
11904 #endif
11905
11906 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11907 /* Alpha specific */
11908 case TARGET_NR_getxuid:
11909 {
11910 uid_t euid;
11911 euid = geteuid();
11912 cpu_env->ir[IR_A4] = euid;
11913 }
11914 return get_errno(getuid());
11915 #endif
11916 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11917 /* Alpha specific */
11918 case TARGET_NR_getxgid:
11919 {
11920 gid_t egid;
11921 egid = getegid();
11922 cpu_env->ir[IR_A4] = egid;
11923 }
11924 return get_errno(getgid());
11925 #endif
11926 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11927 /* Alpha specific */
11928 case TARGET_NR_osf_getsysinfo:
11929 ret = -TARGET_EOPNOTSUPP;
11930 switch (arg1) {
11931 case TARGET_GSI_IEEE_FP_CONTROL:
11932 {
11933 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11934 uint64_t swcr = cpu_env->swcr;
11935
11936 swcr &= ~SWCR_STATUS_MASK;
11937 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11938
11939 if (put_user_u64(swcr, arg2)) {
11940 return -TARGET_EFAULT;
}
11941 ret = 0;
11942 }
11943 break;
11944
11945 /* case GSI_IEEE_STATE_AT_SIGNAL:
11946 -- Not implemented in linux kernel.
11947 case GSI_UACPROC:
11948 -- Retrieves current unaligned access state; not much used.
11949 case GSI_PROC_TYPE:
11950 -- Retrieves implver information; surely not used.
11951 case GSI_GET_HWRPB:
11952 -- Grabs a copy of the HWRPB; surely not used.
11953 */
11954 }
11955 return ret;
11956 #endif
11957 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11958 /* Alpha specific */
11959 case TARGET_NR_osf_setsysinfo:
11960 ret = -TARGET_EOPNOTSUPP;
11961 switch (arg1) {
11962 case TARGET_SSI_IEEE_FP_CONTROL:
11963 {
11964 uint64_t swcr, fpcr;
11965
11966 if (get_user_u64(swcr, arg2)) {
11967 return -TARGET_EFAULT;
11968 }
11969
11970 /*
11971 * The kernel calls swcr_update_status to update the
11972 * status bits from the fpcr at every point that it
11973 * could be queried. Therefore, we store the status
11974 * bits only in FPCR.
11975 */
11976 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11977
11978 fpcr = cpu_alpha_load_fpcr(cpu_env);
11979 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11980 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11981 cpu_alpha_store_fpcr(cpu_env, fpcr);
11982 ret = 0;
11983 }
11984 break;
11985
11986 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11987 {
11988 uint64_t exc, fpcr, fex;
11989
11990 if (get_user_u64(exc, arg2)) {
11991 return -TARGET_EFAULT;
11992 }
11993 exc &= SWCR_STATUS_MASK;
11994 fpcr = cpu_alpha_load_fpcr(cpu_env);
11995
11996 /* Old exceptions are not signaled. */
11997 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11998 fex = exc & ~fex;
11999 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12000 fex &= (cpu_env)->swcr;
12001
12002 /* Update the hardware fpcr. */
12003 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12004 cpu_alpha_store_fpcr(cpu_env, fpcr);
12005
12006 if (fex) {
12007 int si_code = TARGET_FPE_FLTUNK;
12008 target_siginfo_t info;
12009
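/* Choose si_code for the enabled exception; later tests
 * deliberately override earlier, lower-priority ones. */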
12010 if (fex & SWCR_TRAP_ENABLE_DNO) {
12011 si_code = TARGET_FPE_FLTUND;
12012 }
12013 if (fex & SWCR_TRAP_ENABLE_INE) {
12014 si_code = TARGET_FPE_FLTRES;
12015 }
12016 if (fex & SWCR_TRAP_ENABLE_UNF) {
12017 si_code = TARGET_FPE_FLTUND;
12018 }
12019 if (fex & SWCR_TRAP_ENABLE_OVF) {
12020 si_code = TARGET_FPE_FLTOVF;
12021 }
12022 if (fex & SWCR_TRAP_ENABLE_DZE) {
12023 si_code = TARGET_FPE_FLTDIV;
12024 }
12025 if (fex & SWCR_TRAP_ENABLE_INV) {
12026 si_code = TARGET_FPE_FLTINV;
12027 }
12028
12029 info.si_signo = SIGFPE;
12030 info.si_errno = 0;
12031 info.si_code = si_code;
12032 info._sifields._sigfault._addr = (cpu_env)->pc;
12033 queue_signal(cpu_env, info.si_signo,
12034 QEMU_SI_FAULT, &info);
12035 }
12036 ret = 0;
12037 }
12038 break;
12039
12040 /* case SSI_NVPAIRS:
12041 -- Used with SSIN_UACPROC to enable unaligned accesses.
12042 case SSI_IEEE_STATE_AT_SIGNAL:
12043 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12044 -- Not implemented in linux kernel
12045 */
12046 }
12047 return ret;
12048 #endif
12049 #ifdef TARGET_NR_osf_sigprocmask
12050 /* Alpha specific. */
12051 case TARGET_NR_osf_sigprocmask:
12052 {
12053 abi_ulong mask;
12054 int how;
12055 sigset_t set, oldset;
12056
12057 switch (arg1) {
12058 case TARGET_SIG_BLOCK:
12059 how = SIG_BLOCK;
12060 break;
12061 case TARGET_SIG_UNBLOCK:
12062 how = SIG_UNBLOCK;
12063 break;
12064 case TARGET_SIG_SETMASK:
12065 how = SIG_SETMASK;
12066 break;
12067 default:
12068 return -TARGET_EINVAL;
12069 }
12070 mask = arg2;
12071 target_to_host_old_sigset(&set, &mask);
12072 ret = do_sigprocmask(how, &set, &oldset);
12073 if (!ret) {
12074 host_to_target_old_sigset(&mask, &oldset);
12075 ret = mask;
12076 }
12077 }
12078 return ret;
12079 #endif
12080
12081 #ifdef TARGET_NR_getgid32
12082 case TARGET_NR_getgid32:
12083 return get_errno(getgid());
12084 #endif
12085 #ifdef TARGET_NR_geteuid32
12086 case TARGET_NR_geteuid32:
12087 return get_errno(geteuid());
12088 #endif
12089 #ifdef TARGET_NR_getegid32
12090 case TARGET_NR_getegid32:
12091 return get_errno(getegid());
12092 #endif
12093 #ifdef TARGET_NR_setreuid32
12094 case TARGET_NR_setreuid32:
12095 return get_errno(setreuid(arg1, arg2));
12096 #endif
12097 #ifdef TARGET_NR_setregid32
12098 case TARGET_NR_setregid32:
12099 return get_errno(setregid(arg1, arg2));
12100 #endif
12101 #ifdef TARGET_NR_getgroups32
12102 case TARGET_NR_getgroups32:
12103 { /* the same code as for TARGET_NR_getgroups */
12104 int gidsetsize = arg1;
12105 uint32_t *target_grouplist;
12106 g_autofree gid_t *grouplist = NULL;
12107 int i;
12108
12109 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12110 return -TARGET_EINVAL;
12111 }
12112 if (gidsetsize > 0) {
12113 grouplist = g_try_new(gid_t, gidsetsize);
12114 if (!grouplist) {
12115 return -TARGET_ENOMEM;
12116 }
12117 }
12118 ret = get_errno(getgroups(gidsetsize, grouplist));
12119 if (!is_error(ret) && gidsetsize > 0) {
12120 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12121 gidsetsize * 4, 0);
12122 if (!target_grouplist) {
12123 return -TARGET_EFAULT;
12124 }
12125 for (i = 0; i < ret; i++) {
12126 target_grouplist[i] = tswap32(grouplist[i]);
12127 }
12128 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12129 }
12130 return ret;
12131 }
12132 #endif
12133 #ifdef TARGET_NR_setgroups32
12134 case TARGET_NR_setgroups32:
12135 { /* the same code as for TARGET_NR_setgroups */
12136 int gidsetsize = arg1;
12137 uint32_t *target_grouplist;
12138 g_autofree gid_t *grouplist = NULL;
12139 int i;
12140
12141 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12142 return -TARGET_EINVAL;
12143 }
12144 if (gidsetsize > 0) {
12145 grouplist = g_try_new(gid_t, gidsetsize);
12146 if (!grouplist) {
12147 return -TARGET_ENOMEM;
12148 }
12149 target_grouplist = lock_user(VERIFY_READ, arg2,
12150 gidsetsize * 4, 1);
12151 if (!target_grouplist) {
12152 return -TARGET_EFAULT;
12153 }
12154 for (i = 0; i < gidsetsize; i++) {
12155 grouplist[i] = tswap32(target_grouplist[i]);
12156 }
12157 unlock_user(target_grouplist, arg2, 0);
12158 }
12159 return get_errno(setgroups(gidsetsize, grouplist));
12160 }
12161 #endif
12162 #ifdef TARGET_NR_fchown32
12163 case TARGET_NR_fchown32:
12164 return get_errno(fchown(arg1, arg2, arg3));
12165 #endif
12166 #ifdef TARGET_NR_setresuid32
12167 case TARGET_NR_setresuid32:
12168 return get_errno(sys_setresuid(arg1, arg2, arg3));
12169 #endif
12170 #ifdef TARGET_NR_getresuid32
12171 case TARGET_NR_getresuid32:
12172 {
12173 uid_t ruid, euid, suid;
12174 ret = get_errno(getresuid(&ruid, &euid, &suid));
12175 if (!is_error(ret)) {
12176 if (put_user_u32(ruid, arg1)
12177 || put_user_u32(euid, arg2)
12178 || put_user_u32(suid, arg3)) {
12179 return -TARGET_EFAULT;
}
12180 }
12181 }
12182 return ret;
12183 #endif
12184 #ifdef TARGET_NR_setresgid32
12185 case TARGET_NR_setresgid32:
12186 return get_errno(sys_setresgid(arg1, arg2, arg3));
12187 #endif
12188 #ifdef TARGET_NR_getresgid32
12189 case TARGET_NR_getresgid32:
12190 {
12191 gid_t rgid, egid, sgid;
12192 ret = get_errno(getresgid(&rgid, &egid, &sgid));
12193 if (!is_error(ret)) {
12194 if (put_user_u32(rgid, arg1)
12195 || put_user_u32(egid, arg2)
12196 || put_user_u32(sgid, arg3)) {
12197 return -TARGET_EFAULT;
}
12198 }
12199 }
12200 return ret;
12201 #endif
12202 #ifdef TARGET_NR_chown32
12203 case TARGET_NR_chown32:
12204 if (!(p = lock_user_string(arg1))) {
12205 return -TARGET_EFAULT;
}
12206 ret = get_errno(chown(p, arg2, arg3));
12207 unlock_user(p, arg1, 0);
12208 return ret;
12209 #endif
12210 #ifdef TARGET_NR_setuid32
12211 case TARGET_NR_setuid32:
12212 return get_errno(sys_setuid(arg1));
12213 #endif
12214 #ifdef TARGET_NR_setgid32
12215 case TARGET_NR_setgid32:
12216 return get_errno(sys_setgid(arg1));
12217 #endif
12218 #ifdef TARGET_NR_setfsuid32
12219 case TARGET_NR_setfsuid32:
12220 return get_errno(setfsuid(arg1));
12221 #endif
12222 #ifdef TARGET_NR_setfsgid32
12223 case TARGET_NR_setfsgid32:
12224 return get_errno(setfsgid(arg1));
12225 #endif
12226 #ifdef TARGET_NR_mincore
12227 case TARGET_NR_mincore:
12228 {
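/* Note that mincore reports an unmapped range with ENOMEM, not EFAULT. */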
12229 void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12230 if (!a) {
12231 return -TARGET_ENOMEM;
12232 }
12233 p = lock_user_string(arg3);
12234 if (!p) {
12235 ret = -TARGET_EFAULT;
12236 } else {
12237 ret = get_errno(mincore(a, arg2, p));
12238 unlock_user(p, arg3, ret);
12239 }
12240 unlock_user(a, arg1, 0);
12241 }
12242 return ret;
12243 #endif
12244 #ifdef TARGET_NR_arm_fadvise64_64
12245 case TARGET_NR_arm_fadvise64_64:
12246 /* arm_fadvise64_64 looks like fadvise64_64 but
12247 * with different argument order: fd, advice, offset, len
12248 * rather than the usual fd, offset, len, advice.
12249 * Note that offset and len are both 64-bit so appear as
12250 * pairs of 32-bit registers.
12251 */
12252 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12253 target_offset64(arg5, arg6), arg2);
12254 return -host_to_target_errno(ret);
12255 #endif
12256
12257 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12258
12259 #ifdef TARGET_NR_fadvise64_64
12260 case TARGET_NR_fadvise64_64:
12261 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12262 /* 6 args: fd, advice, offset (high, low), len (high, low) */
12263 ret = arg2;
12264 arg2 = arg3;
12265 arg3 = arg4;
12266 arg4 = arg5;
12267 arg5 = arg6;
12268 arg6 = ret;
12269 #else
12270 /* 6 args: fd, offset (high, low), len (high, low), advice */
12271 if (regpairs_aligned(cpu_env, num)) {
12272 /* offset is in (3,4), len in (5,6) and advice in 7 */
12273 arg2 = arg3;
12274 arg3 = arg4;
12275 arg4 = arg5;
12276 arg5 = arg6;
12277 arg6 = arg7;
12278 }
12279 #endif
12280 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12281 target_offset64(arg4, arg5), arg6);
12282 return -host_to_target_errno(ret);
12283 #endif
12284
12285 #ifdef TARGET_NR_fadvise64
12286 case TARGET_NR_fadvise64:
12287 /* 5 args: fd, offset (high, low), len, advice */
12288 if (regpairs_aligned(cpu_env, num)) {
12289 /* offset is in (3,4), len in 5 and advice in 6 */
12290 arg2 = arg3;
12291 arg3 = arg4;
12292 arg4 = arg5;
12293 arg5 = arg6;
12294 }
12295 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12296 return -host_to_target_errno(ret);
12297 #endif
12298
12299 #else /* not a 32-bit ABI */
12300 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12301 #ifdef TARGET_NR_fadvise64_64
12302 case TARGET_NR_fadvise64_64:
12303 #endif
12304 #ifdef TARGET_NR_fadvise64
12305 case TARGET_NR_fadvise64:
12306 #endif
12307 #ifdef TARGET_S390X
12308 switch (arg4) {
12309 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12310 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12311 case 6: arg4 = POSIX_FADV_DONTNEED; break;
12312 case 7: arg4 = POSIX_FADV_NOREUSE; break;
12313 default: break;
12314 }
12315 #endif
12316 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12317 #endif
12318 #endif /* end of 64-bit ABI fadvise handling */
12319
12320 #ifdef TARGET_NR_madvise
12321 case TARGET_NR_madvise:
12322 return target_madvise(arg1, arg2, arg3);
12323 #endif
12324 #ifdef TARGET_NR_fcntl64
12325 case TARGET_NR_fcntl64:
12326 {
12327 int cmd;
12328 struct flock64 fl;
12329 from_flock64_fn *copyfrom = copy_from_user_flock64;
12330 to_flock64_fn *copyto = copy_to_user_flock64;
12331
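/* The ARM OABI lays out struct flock64 without the 64-bit alignment
 * padding of the EABI layout, so it needs its own copy helpers. */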
12332 #ifdef TARGET_ARM
12333 if (!cpu_env->eabi) {
12334 copyfrom = copy_from_user_oabi_flock64;
12335 copyto = copy_to_user_oabi_flock64;
12336 }
12337 #endif
12338
12339 cmd = target_to_host_fcntl_cmd(arg2);
12340 if (cmd == -TARGET_EINVAL) {
12341 return cmd;
12342 }
12343
12344 switch (arg2) {
12345 case TARGET_F_GETLK64:
12346 ret = copyfrom(&fl, arg3);
12347 if (ret) {
12348 break;
12349 }
12350 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12351 if (ret == 0) {
12352 ret = copyto(arg3, &fl);
12353 }
12354 break;
12355
12356 case TARGET_F_SETLK64:
12357 case TARGET_F_SETLKW64:
12358 ret = copyfrom(&fl, arg3);
12359 if (ret) {
12360 break;
12361 }
12362 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12363 break;
12364 default:
12365 ret = do_fcntl(arg1, arg2, arg3);
12366 break;
12367 }
12368 return ret;
12369 }
12370 #endif
12371 #ifdef TARGET_NR_cacheflush
12372 case TARGET_NR_cacheflush:
12373 /* self-modifying code is handled automatically, so nothing needed */
12374 return 0;
12375 #endif
12376 #ifdef TARGET_NR_getpagesize
12377 case TARGET_NR_getpagesize:
12378 return TARGET_PAGE_SIZE;
12379 #endif
12380 case TARGET_NR_gettid:
12381 return get_errno(sys_gettid());
12382 #ifdef TARGET_NR_readahead
12383 case TARGET_NR_readahead:
12384 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12385 if (regpairs_aligned(cpu_env, num)) {
12386 arg2 = arg3;
12387 arg3 = arg4;
12388 arg4 = arg5;
12389 }
12390 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12391 #else
12392 ret = get_errno(readahead(arg1, arg2, arg3));
12393 #endif
12394 return ret;
12395 #endif
12396 #ifdef CONFIG_ATTR
12397 #ifdef TARGET_NR_setxattr
12398 case TARGET_NR_listxattr:
12399 case TARGET_NR_llistxattr:
12400 {
12401 void *p, *b = 0;
12402 if (arg2) {
12403 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12404 if (!b) {
12405 return -TARGET_EFAULT;
12406 }
12407 }
12408 p = lock_user_string(arg1);
12409 if (p) {
12410 if (num == TARGET_NR_listxattr) {
12411 ret = get_errno(listxattr(p, b, arg3));
12412 } else {
12413 ret = get_errno(llistxattr(p, b, arg3));
12414 }
12415 } else {
12416 ret = -TARGET_EFAULT;
12417 }
12418 unlock_user(p, arg1, 0);
12419 unlock_user(b, arg2, arg3);
12420 return ret;
12421 }
12422 case TARGET_NR_flistxattr:
12423 {
12424 void *b = 0;
12425 if (arg2) {
12426 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12427 if (!b) {
12428 return -TARGET_EFAULT;
12429 }
12430 }
12431 ret = get_errno(flistxattr(arg1, b, arg3));
12432 unlock_user(b, arg2, arg3);
12433 return ret;
12434 }
12435 case TARGET_NR_setxattr:
12436 case TARGET_NR_lsetxattr:
12437 {
12438 void *p, *n, *v = 0;
12439 if (arg3) {
12440 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12441 if (!v) {
12442 return -TARGET_EFAULT;
12443 }
12444 }
12445 p = lock_user_string(arg1);
12446 n = lock_user_string(arg2);
12447 if (p && n) {
12448 if (num == TARGET_NR_setxattr) {
12449 ret = get_errno(setxattr(p, n, v, arg4, arg5));
12450 } else {
12451 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12452 }
12453 } else {
12454 ret = -TARGET_EFAULT;
12455 }
12456 unlock_user(p, arg1, 0);
12457 unlock_user(n, arg2, 0);
12458 unlock_user(v, arg3, 0);
12459 }
12460 return ret;
12461 case TARGET_NR_fsetxattr:
12462 {
12463 void *n, *v = 0;
12464 if (arg3) {
12465 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12466 if (!v) {
12467 return -TARGET_EFAULT;
12468 }
12469 }
12470 n = lock_user_string(arg2);
12471 if (n) {
12472 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12473 } else {
12474 ret = -TARGET_EFAULT;
12475 }
12476 unlock_user(n, arg2, 0);
12477 unlock_user(v, arg3, 0);
12478 }
12479 return ret;
12480 case TARGET_NR_getxattr:
12481 case TARGET_NR_lgetxattr:
12482 {
12483 void *p, *n, *v = 0;
12484 if (arg3) {
12485 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12486 if (!v) {
12487 return -TARGET_EFAULT;
12488 }
12489 }
12490 p = lock_user_string(arg1);
12491 n = lock_user_string(arg2);
12492 if (p && n) {
12493 if (num == TARGET_NR_getxattr) {
12494 ret = get_errno(getxattr(p, n, v, arg4));
12495 } else {
12496 ret = get_errno(lgetxattr(p, n, v, arg4));
12497 }
12498 } else {
12499 ret = -TARGET_EFAULT;
12500 }
12501 unlock_user(p, arg1, 0);
12502 unlock_user(n, arg2, 0);
12503 unlock_user(v, arg3, arg4);
12504 }
12505 return ret;
12506 case TARGET_NR_fgetxattr:
12507 {
12508 void *n, *v = 0;
12509 if (arg3) {
12510 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12511 if (!v) {
12512 return -TARGET_EFAULT;
12513 }
12514 }
12515 n = lock_user_string(arg2);
12516 if (n) {
12517 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12518 } else {
12519 ret = -TARGET_EFAULT;
12520 }
12521 unlock_user(n, arg2, 0);
12522 unlock_user(v, arg3, arg4);
12523 }
12524 return ret;
12525 case TARGET_NR_removexattr:
12526 case TARGET_NR_lremovexattr:
12527 {
12528 void *p, *n;
12529 p = lock_user_string(arg1);
12530 n = lock_user_string(arg2);
12531 if (p && n) {
12532 if (num == TARGET_NR_removexattr) {
12533 ret = get_errno(removexattr(p, n));
12534 } else {
12535 ret = get_errno(lremovexattr(p, n));
12536 }
12537 } else {
12538 ret = -TARGET_EFAULT;
12539 }
12540 unlock_user(p, arg1, 0);
12541 unlock_user(n, arg2, 0);
12542 }
12543 return ret;
12544 case TARGET_NR_fremovexattr:
12545 {
12546 void *n;
12547 n = lock_user_string(arg2);
12548 if (n) {
12549 ret = get_errno(fremovexattr(arg1, n));
12550 } else {
12551 ret = -TARGET_EFAULT;
12552 }
12553 unlock_user(n, arg2, 0);
12554 }
12555 return ret;
12556 #endif
12557 #endif /* CONFIG_ATTR */
12558 #ifdef TARGET_NR_set_thread_area
12559 case TARGET_NR_set_thread_area:
12560 #if defined(TARGET_MIPS)
12561 cpu_env->active_tc.CP0_UserLocal = arg1;
12562 return 0;
12563 #elif defined(TARGET_CRIS)
12564 if (arg1 & 0xff) {
12565 ret = -TARGET_EINVAL;
12566 } else {
12567 cpu_env->pregs[PR_PID] = arg1;
12568 ret = 0;
12569 }
12570 return ret;
12571 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12572 return do_set_thread_area(cpu_env, arg1);
12573 #elif defined(TARGET_M68K)
12574 {
12575 TaskState *ts = cpu->opaque;
12576 ts->tp_value = arg1;
12577 return 0;
12578 }
12579 #else
12580 return -TARGET_ENOSYS;
12581 #endif
12582 #endif
12583 #ifdef TARGET_NR_get_thread_area
12584 case TARGET_NR_get_thread_area:
12585 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12586 return do_get_thread_area(cpu_env, arg1);
12587 #elif defined(TARGET_M68K)
12588 {
12589 TaskState *ts = cpu->opaque;
12590 return ts->tp_value;
12591 }
12592 #else
12593 return -TARGET_ENOSYS;
12594 #endif
12595 #endif
12596 #ifdef TARGET_NR_getdomainname
12597 case TARGET_NR_getdomainname:
12598 return -TARGET_ENOSYS;
12599 #endif
12600
12601 #ifdef TARGET_NR_clock_settime
12602 case TARGET_NR_clock_settime:
12603 {
12604 struct timespec ts;
12605
12606 ret = target_to_host_timespec(&ts, arg2);
12607 if (!is_error(ret)) {
12608 ret = get_errno(clock_settime(arg1, &ts));
12609 }
12610 return ret;
12611 }
12612 #endif
12613 #ifdef TARGET_NR_clock_settime64
12614 case TARGET_NR_clock_settime64:
12615 {
12616 struct timespec ts;
12617
12618 ret = target_to_host_timespec64(&ts, arg2);
12619 if (!is_error(ret)) {
12620 ret = get_errno(clock_settime(arg1, &ts));
12621 }
12622 return ret;
12623 }
12624 #endif
12625 #ifdef TARGET_NR_clock_gettime
12626 case TARGET_NR_clock_gettime:
12627 {
12628 struct timespec ts;
12629 ret = get_errno(clock_gettime(arg1, &ts));
12630 if (!is_error(ret)) {
12631 ret = host_to_target_timespec(arg2, &ts);
12632 }
12633 return ret;
12634 }
12635 #endif
12636 #ifdef TARGET_NR_clock_gettime64
12637 case TARGET_NR_clock_gettime64:
12638 {
12639 struct timespec ts;
12640 ret = get_errno(clock_gettime(arg1, &ts));
12641 if (!is_error(ret)) {
12642 ret = host_to_target_timespec64(arg2, &ts);
12643 }
12644 return ret;
12645 }
12646 #endif
12647 #ifdef TARGET_NR_clock_getres
12648 case TARGET_NR_clock_getres:
12649 {
12650 struct timespec ts;
12651 ret = get_errno(clock_getres(arg1, &ts));
12652 if (!is_error(ret)) {
12653 ret = host_to_target_timespec(arg2, &ts);
12654 }
12655 return ret;
12656 }
12657 #endif
12658 #ifdef TARGET_NR_clock_getres_time64
12659 case TARGET_NR_clock_getres_time64:
12660 {
12661 struct timespec ts;
12662 ret = get_errno(clock_getres(arg1, &ts));
12663 if (!is_error(ret)) {
12664 ret = host_to_target_timespec64(arg2, &ts);
12665 }
12666 return ret;
12667 }
12668 #endif
12669 #ifdef TARGET_NR_clock_nanosleep
12670 case TARGET_NR_clock_nanosleep:
12671 {
12672 struct timespec ts;
12673 if (target_to_host_timespec(&ts, arg3)) {
12674 return -TARGET_EFAULT;
12675 }
12676 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12677 &ts, arg4 ? &ts : NULL));
12678 /*
12679 * If the call is interrupted by a signal handler, it fails
12680 * with -TARGET_EINTR. If arg4 is not NULL and arg2 is not
12681 * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12682 */
12683 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12684 host_to_target_timespec(arg4, &ts)) {
12685 return -TARGET_EFAULT;
12686 }
12687
12688 return ret;
12689 }
12690 #endif
12691 #ifdef TARGET_NR_clock_nanosleep_time64
12692 case TARGET_NR_clock_nanosleep_time64:
12693 {
12694 struct timespec ts;
12695
12696 if (target_to_host_timespec64(&ts, arg3)) {
12697 return -TARGET_EFAULT;
12698 }
12699
12700 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12701 &ts, arg4 ? &ts : NULL));
12702
12703 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12704 host_to_target_timespec64(arg4, &ts)) {
12705 return -TARGET_EFAULT;
12706 }
12707 return ret;
12708 }
12709 #endif
12710
12711 #if defined(TARGET_NR_set_tid_address)
12712 case TARGET_NR_set_tid_address:
12713 {
12714 TaskState *ts = cpu->opaque;
12715 ts->child_tidptr = arg1;
12716 /* do not call host set_tid_address() syscall, instead return tid() */
12717 return get_errno(sys_gettid());
12718 }
12719 #endif
12720
12721 case TARGET_NR_tkill:
12722 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12723
12724 case TARGET_NR_tgkill:
12725 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12726 target_to_host_signal(arg3)));
12727
12728 #ifdef TARGET_NR_set_robust_list
12729 case TARGET_NR_set_robust_list:
12730 case TARGET_NR_get_robust_list:
12731 /* The ABI for supporting robust futexes has userspace pass
12732 * the kernel a pointer to a linked list which is updated by
12733 * userspace after the syscall; the list is walked by the kernel
12734 * when the thread exits. Since the linked list in QEMU guest
12735 * memory isn't a valid linked list for the host and we have
12736 * no way to reliably intercept the thread-death event, we can't
12737 * support these. Silently return ENOSYS so that guest userspace
12738 * falls back to a non-robust futex implementation (which should
12739 * be OK except in the corner case of the guest crashing while
12740 * holding a mutex that is shared with another process via
12741 * shared memory).
12742 */
12743 return -TARGET_ENOSYS;
12744 #endif
12745
12746 #if defined(TARGET_NR_utimensat)
12747 case TARGET_NR_utimensat:
12748 {
12749 struct timespec *tsp, ts[2];
12750 if (!arg3) {
12751 tsp = NULL;
12752 } else {
12753 if (target_to_host_timespec(ts, arg3)) {
12754 return -TARGET_EFAULT;
12755 }
12756 if (target_to_host_timespec(ts + 1, arg3 +
12757 sizeof(struct target_timespec))) {
12758 return -TARGET_EFAULT;
12759 }
12760 tsp = ts;
12761 }
12762 if (!arg2) {
12763 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12764 } else {
12765 if (!(p = lock_user_string(arg2))) {
12766 return -TARGET_EFAULT;
12767 }
12768 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12769 unlock_user(p, arg2, 0);
12770 }
12771 }
12772 return ret;
12773 #endif
12774 #ifdef TARGET_NR_utimensat_time64
12775 case TARGET_NR_utimensat_time64:
12776 {
12777 struct timespec *tsp, ts[2];
12778 if (!arg3) {
12779 tsp = NULL;
12780 } else {
12781 if (target_to_host_timespec64(ts, arg3)) {
12782 return -TARGET_EFAULT;
12783 }
12784 if (target_to_host_timespec64(ts + 1, arg3 +
12785 sizeof(struct target__kernel_timespec))) {
12786 return -TARGET_EFAULT;
12787 }
12788 tsp = ts;
12789 }
12790 if (!arg2) {
12791 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12792 } else {
12793 p = lock_user_string(arg2);
12794 if (!p) {
12795 return -TARGET_EFAULT;
12796 }
12797 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12798 unlock_user(p, arg2, 0);
12799 }
12800 }
12801 return ret;
12802 #endif
12803 #ifdef TARGET_NR_futex
12804 case TARGET_NR_futex:
12805 return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12806 #endif
12807 #ifdef TARGET_NR_futex_time64
12808 case TARGET_NR_futex_time64:
12809 return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12810 #endif
12811 #ifdef CONFIG_INOTIFY
12812 #if defined(TARGET_NR_inotify_init)
12813 case TARGET_NR_inotify_init:
12814 ret = get_errno(inotify_init());
12815 if (ret >= 0) {
12816 fd_trans_register(ret, &target_inotify_trans);
12817 }
12818 return ret;
12819 #endif
12820 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12821 case TARGET_NR_inotify_init1:
12822 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12823 fcntl_flags_tbl)));
12824 if (ret >= 0) {
12825 fd_trans_register(ret, &target_inotify_trans);
12826 }
12827 return ret;
12828 #endif
12829 #if defined(TARGET_NR_inotify_add_watch)
12830 case TARGET_NR_inotify_add_watch:
12831 p = lock_user_string(arg2);
if (!p) {
return -TARGET_EFAULT;
}
12832 ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12833 unlock_user(p, arg2, 0);
12834 return ret;
12835 #endif
12836 #if defined(TARGET_NR_inotify_rm_watch)
12837 case TARGET_NR_inotify_rm_watch:
12838 return get_errno(inotify_rm_watch(arg1, arg2));
12839 #endif
12840 #endif
12841
12842 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12843 case TARGET_NR_mq_open:
12844 {
12845 struct mq_attr posix_mq_attr;
12846 struct mq_attr *pposix_mq_attr;
12847 int host_flags;
12848
12849 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12850 pposix_mq_attr = NULL;
12851 if (arg4) {
12852 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12853 return -TARGET_EFAULT;
12854 }
12855 pposix_mq_attr = &posix_mq_attr;
12856 }
12857 p = lock_user_string(arg1 - 1);
12858 if (!p) {
12859 return -TARGET_EFAULT;
12860 }
12861 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12862 unlock_user(p, arg1, 0);
12863 }
12864 return ret;
12865
12866 case TARGET_NR_mq_unlink:
12867 p = lock_user_string(arg1 - 1);
12868 if (!p) {
12869 return -TARGET_EFAULT;
12870 }
12871 ret = get_errno(mq_unlink(p));
12872 unlock_user(p, arg1, 0);
12873 return ret;
12874
12875 #ifdef TARGET_NR_mq_timedsend
12876 case TARGET_NR_mq_timedsend:
12877 {
12878 struct timespec ts;
12879
12880 p = lock_user(VERIFY_READ, arg2, arg3, 1);
if (!p) {
return -TARGET_EFAULT;
}
12881 if (arg5 != 0) {
12882 if (target_to_host_timespec(&ts, arg5)) {
12883 return -TARGET_EFAULT;
12884 }
12885 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12886 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12887 return -TARGET_EFAULT;
12888 }
12889 } else {
12890 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12891 }
12892 unlock_user(p, arg2, arg3);
12893 }
12894 return ret;
12895 #endif
12896 #ifdef TARGET_NR_mq_timedsend_time64
12897 case TARGET_NR_mq_timedsend_time64:
12898 {
12899 struct timespec ts;
12900
12901 p = lock_user(VERIFY_READ, arg2, arg3, 1);
if (!p) {
return -TARGET_EFAULT;
}
12902 if (arg5 != 0) {
12903 if (target_to_host_timespec64(&ts, arg5)) {
12904 return -TARGET_EFAULT;
12905 }
12906 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12907 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12908 return -TARGET_EFAULT;
12909 }
12910 } else {
12911 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12912 }
12913 unlock_user(p, arg2, arg3);
12914 }
12915 return ret;
12916 #endif
12917
12918 #ifdef TARGET_NR_mq_timedreceive
12919 case TARGET_NR_mq_timedreceive:
12920 {
12921 struct timespec ts;
12922 unsigned int prio;
12923
12924 p = lock_user(VERIFY_READ, arg2, arg3, 1);
if (!p) {
return -TARGET_EFAULT;
}
12925 if (arg5 != 0) {
12926 if (target_to_host_timespec(&ts, arg5)) {
12927 return -TARGET_EFAULT;
12928 }
12929 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12930 &prio, &ts));
12931 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12932 return -TARGET_EFAULT;
12933 }
12934 } else {
12935 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12936 &prio, NULL));
12937 }
12938 unlock_user(p, arg2, arg3);
12939 if (arg4 != 0) {
12940 put_user_u32(prio, arg4);
}
12941 }
12942 return ret;
12943 #endif
12944 #ifdef TARGET_NR_mq_timedreceive_time64
12945 case TARGET_NR_mq_timedreceive_time64:
12946 {
12947 struct timespec ts;
12948 unsigned int prio;
12949
12950 p = lock_user(VERIFY_READ, arg2, arg3, 1);
if (!p) {
return -TARGET_EFAULT;
}
12951 if (arg5 != 0) {
12952 if (target_to_host_timespec64(&ts, arg5)) {
12953 return -TARGET_EFAULT;
12954 }
12955 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12956 &prio, &ts));
12957 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12958 return -TARGET_EFAULT;
12959 }
12960 } else {
12961 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12962 &prio, NULL));
12963 }
12964 unlock_user(p, arg2, arg3);
12965 if (arg4 != 0) {
12966 put_user_u32(prio, arg4);
12967 }
12968 }
12969 return ret;
12970 #endif
12971
12972 /* Not implemented for now... */
12973 /* case TARGET_NR_mq_notify: */
12974 /* break; */
12975
12976 case TARGET_NR_mq_getsetattr:
12977 {
12978 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12979 ret = 0;
12980 if (arg2 != 0) {
12981 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
return -TARGET_EFAULT;
}
12982 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12983 &posix_mq_attr_out));
12984 } else if (arg3 != 0) {
12985 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12986 }
12987 if (ret == 0 && arg3 != 0) {
12988 if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
return -TARGET_EFAULT;
}
12989 }
12990 }
12991 return ret;
12992 #endif
12993
12994 #ifdef CONFIG_SPLICE
12995 #ifdef TARGET_NR_tee
12996 case TARGET_NR_tee:
12997 {
12998 ret = get_errno(tee(arg1, arg2, arg3, arg4));
12999 }
13000 return ret;
13001 #endif
13002 #ifdef TARGET_NR_splice
13003 case TARGET_NR_splice:
13004 {
13005 loff_t loff_in, loff_out;
13006 loff_t *ploff_in = NULL, *ploff_out = NULL;
13007 if (arg2) {
13008 if (get_user_u64(loff_in, arg2)) {
13009 return -TARGET_EFAULT;
13010 }
13011 ploff_in = &loff_in;
13012 }
13013 if (arg4) {
13014 if (get_user_u64(loff_out, arg4)) {
13015 return -TARGET_EFAULT;
13016 }
13017 ploff_out = &loff_out;
13018 }
13019 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13020 if (arg2) {
13021 if (put_user_u64(loff_in, arg2)) {
13022 return -TARGET_EFAULT;
13023 }
13024 }
13025 if (arg4) {
13026 if (put_user_u64(loff_out, arg4)) {
13027 return -TARGET_EFAULT;
13028 }
13029 }
13030 }
13031 return ret;
13032 #endif
13033 #ifdef TARGET_NR_vmsplice
13034 case TARGET_NR_vmsplice:
13035 {
13036 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13037 if (vec != NULL) {
13038 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13039 unlock_iovec(vec, arg2, arg3, 0);
13040 } else {
13041 ret = -host_to_target_errno(errno);
13042 }
13043 }
13044 return ret;
13045 #endif
13046 #endif /* CONFIG_SPLICE */
13047 #ifdef CONFIG_EVENTFD
13048 #if defined(TARGET_NR_eventfd)
13049 case TARGET_NR_eventfd:
13050 ret = get_errno(eventfd(arg1, 0));
13051 if (ret >= 0) {
13052 fd_trans_register(ret, &target_eventfd_trans);
13053 }
13054 return ret;
13055 #endif
13056 #if defined(TARGET_NR_eventfd2)
13057 case TARGET_NR_eventfd2:
13058 {
13059 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13060 if (arg2 & TARGET_O_NONBLOCK) {
13061 host_flags |= O_NONBLOCK;
13062 }
13063 if (arg2 & TARGET_O_CLOEXEC) {
13064 host_flags |= O_CLOEXEC;
13065 }
13066 ret = get_errno(eventfd(arg1, host_flags));
13067 if (ret >= 0) {
13068 fd_trans_register(ret, &target_eventfd_trans);
13069 }
13070 return ret;
13071 }
13072 #endif
13073 #endif /* CONFIG_EVENTFD */
13074 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13075 case TARGET_NR_fallocate:
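/* On 32-bit ABIs the 64-bit offset and length arrive as register
 * pairs and are reassembled with target_offset64(). */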
13076 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13077 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13078 target_offset64(arg5, arg6)));
13079 #else
13080 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13081 #endif
13082 return ret;
13083 #endif
13084 #if defined(CONFIG_SYNC_FILE_RANGE)
13085 #if defined(TARGET_NR_sync_file_range)
13086 case TARGET_NR_sync_file_range:
13087 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13088 #if defined(TARGET_MIPS)
13089 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13090 target_offset64(arg5, arg6), arg7));
13091 #else
13092 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13093 target_offset64(arg4, arg5), arg6));
13094 #endif /* !TARGET_MIPS */
13095 #else
13096 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13097 #endif
13098 return ret;
13099 #endif
13100 #if defined(TARGET_NR_sync_file_range2) || \
13101 defined(TARGET_NR_arm_sync_file_range)
13102 #if defined(TARGET_NR_sync_file_range2)
13103 case TARGET_NR_sync_file_range2:
13104 #endif
13105 #if defined(TARGET_NR_arm_sync_file_range)
13106 case TARGET_NR_arm_sync_file_range:
13107 #endif
13108 /* This is like sync_file_range but the arguments are reordered */
13109 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13110 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13111 target_offset64(arg5, arg6), arg2));
13112 #else
13113 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13114 #endif
13115 return ret;
13116 #endif
13117 #endif
13118 #if defined(TARGET_NR_signalfd4)
13119 case TARGET_NR_signalfd4:
13120 return do_signalfd4(arg1, arg2, arg4);
13121 #endif
13122 #if defined(TARGET_NR_signalfd)
13123 case TARGET_NR_signalfd:
13124 return do_signalfd4(arg1, arg2, 0);
13125 #endif
13126 #if defined(CONFIG_EPOLL)
13127 #if defined(TARGET_NR_epoll_create)
13128 case TARGET_NR_epoll_create:
13129 return get_errno(epoll_create(arg1));
13130 #endif
13131 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13132 case TARGET_NR_epoll_create1:
13133 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13134 #endif
13135 #if defined(TARGET_NR_epoll_ctl)
13136 case TARGET_NR_epoll_ctl:
13137 {
13138 struct epoll_event ep;
13139 struct epoll_event *epp = 0;
13140 if (arg4) {
13141 if (arg2 != EPOLL_CTL_DEL) {
13142 struct target_epoll_event *target_ep;
13143 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13144 return -TARGET_EFAULT;
13145 }
13146 ep.events = tswap32(target_ep->events);
13147 /*
13148 * The epoll_data_t union is just opaque data to the kernel,
13149 * so we transfer all 64 bits across and need not worry what
13150 * actual data type it is.
13151 */
13152 ep.data.u64 = tswap64(target_ep->data.u64);
13153 unlock_user_struct(target_ep, arg4, 0);
13154 }
13155 /*
13156 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13157 * non-null pointer, even though this argument is ignored.
13158 */
13160 epp = &ep;
13161 }
13162 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13163 }
13164 #endif
13165
13166 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13167 #if defined(TARGET_NR_epoll_wait)
13168 case TARGET_NR_epoll_wait:
13169 #endif
13170 #if defined(TARGET_NR_epoll_pwait)
13171 case TARGET_NR_epoll_pwait:
13172 #endif
13173 {
13174 struct target_epoll_event *target_ep;
13175 struct epoll_event *ep;
13176 int epfd = arg1;
13177 int maxevents = arg3;
13178 int timeout = arg4;
13179
13180 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13181 return -TARGET_EINVAL;
13182 }
13183
13184 target_ep = lock_user(VERIFY_WRITE, arg2,
13185 maxevents * sizeof(struct target_epoll_event), 1);
13186 if (!target_ep) {
13187 return -TARGET_EFAULT;
13188 }
13189
13190 ep = g_try_new(struct epoll_event, maxevents);
13191 if (!ep) {
13192 unlock_user(target_ep, arg2, 0);
13193 return -TARGET_ENOMEM;
13194 }
13195
13196 switch (num) {
13197 #if defined(TARGET_NR_epoll_pwait)
13198 case TARGET_NR_epoll_pwait:
13199 {
13200 sigset_t *set = NULL;
13201
13202 if (arg5) {
13203 ret = process_sigsuspend_mask(&set, arg5, arg6);
13204 if (ret != 0) {
13205 break;
13206 }
13207 }
13208
13209 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13210 set, SIGSET_T_SIZE));
13211
13212 if (set) {
13213 finish_sigsuspend_mask(ret);
13214 }
13215 break;
13216 }
13217 #endif
13218 #if defined(TARGET_NR_epoll_wait)
13219 case TARGET_NR_epoll_wait:
13220 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13221 NULL, 0));
13222 break;
13223 #endif
13224 default:
13225 ret = -TARGET_ENOSYS;
13226 }
13227 if (!is_error(ret)) {
13228 int i;
13229 for (i = 0; i < ret; i++) {
13230 target_ep[i].events = tswap32(ep[i].events);
13231 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13232 }
13233 unlock_user(target_ep, arg2,
13234 ret * sizeof(struct target_epoll_event));
13235 } else {
13236 unlock_user(target_ep, arg2, 0);
13237 }
13238 g_free(ep);
13239 return ret;
13240 }
13241 #endif
13242 #endif
13243 #ifdef TARGET_NR_prlimit64
13244 case TARGET_NR_prlimit64:
13245 {
13246 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13247 struct target_rlimit64 *target_rnew, *target_rold;
13248 struct host_rlimit64 rnew, rold, *rnewp = NULL;
13249 int resource = target_to_host_resource(arg2);
13250
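/*
 * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
 * deliberately not passed through: applying the guest's memory
 * limits to the host process could starve QEMU itself.
 */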
13251 if (arg3 && (resource != RLIMIT_AS &&
13252 resource != RLIMIT_DATA &&
13253 resource != RLIMIT_STACK)) {
13254 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13255 return -TARGET_EFAULT;
13256 }
13257 __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13258 __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13259 unlock_user_struct(target_rnew, arg3, 0);
13260 rnewp = &rnew;
13261 }
13262
13263 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : NULL));
13264 if (!is_error(ret) && arg4) {
13265 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13266 return -TARGET_EFAULT;
13267 }
13268 __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13269 __put_user(rold.rlim_max, &target_rold->rlim_max);
13270 unlock_user_struct(target_rold, arg4, 1);
13271 }
13272 return ret;
13273 }
13274 #endif
13275 #ifdef TARGET_NR_gethostname
13276 case TARGET_NR_gethostname:
13277 {
13278 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13279 if (name) {
13280 ret = get_errno(gethostname(name, arg2));
13281 unlock_user(name, arg1, arg2);
13282 } else {
13283 ret = -TARGET_EFAULT;
13284 }
13285 return ret;
13286 }
13287 #endif
13288 #ifdef TARGET_NR_atomic_cmpxchg_32
13289 case TARGET_NR_atomic_cmpxchg_32:
13290 {
13291 /* should use start_exclusive from main.c */
13292 abi_ulong mem_value;
13293 if (get_user_u32(mem_value, arg6)) {
13294 target_siginfo_t info;
13295 info.si_signo = SIGSEGV;
13296 info.si_errno = 0;
13297 info.si_code = TARGET_SEGV_MAPERR;
13298 info._sifields._sigfault._addr = arg6;
13299 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13300 return 0xdeadbeef;
13301 }
13302 if (mem_value == arg2) {
13303 put_user_u32(arg1, arg6);
13304 }
13305 return mem_value;
13306 }
13307 #endif
13308 #ifdef TARGET_NR_atomic_barrier
13309 case TARGET_NR_atomic_barrier:
13310 /* Like the kernel implementation and the QEMU Arm
13311 barrier, treat this as a no-op. */
13312 return 0;
13313 #endif
13314
13315 #ifdef TARGET_NR_timer_create
13316 case TARGET_NR_timer_create:
13317 {
13318 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13319
13320 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13321
13322 int clkid = arg1;
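/*
 * next_free_host_timer() claims a slot in the static g_posix_timers[]
 * table; the slot index is later folded into the guest-visible id.
 */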
13323 int timer_index = next_free_host_timer();
13324
13325 if (timer_index < 0) {
13326 ret = -TARGET_EAGAIN;
13327 } else {
13328 timer_t *phtimer = g_posix_timers + timer_index;
13329
13330 if (arg2) {
13331 phost_sevp = &host_sevp;
13332 ret = target_to_host_sigevent(phost_sevp, arg2);
13333 if (ret != 0) {
13334 free_host_timer_slot(timer_index);
13335 return ret;
13336 }
13337 }
13338
13339 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13340 if (ret) {
13341 free_host_timer_slot(timer_index);
13342 } else {
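/*
 * The id handed to the guest combines TIMER_MAGIC with the slot
 * index; get_timer_id() validates and strips the magic on later calls.
 */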
13343 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13344 timer_delete(*phtimer);
13345 free_host_timer_slot(timer_index);
13346 return -TARGET_EFAULT;
13347 }
13348 }
13349 }
13350 return ret;
13351 }
13352 #endif
13353
13354 #ifdef TARGET_NR_timer_settime
13355 case TARGET_NR_timer_settime:
13356 {
13357 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13358  * struct itimerspec *old_value */
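/* get_timer_id() maps the guest id back to a host slot index,
 * returning a negative error value for a malformed id. */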
13359 target_timer_t timerid = get_timer_id(arg1);
13360
13361 if (timerid < 0) {
13362 ret = timerid;
13363 } else if (arg3 == 0) {
13364 ret = -TARGET_EINVAL;
13365 } else {
13366 timer_t htimer = g_posix_timers[timerid];
13367 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13368
13369 if (target_to_host_itimerspec(&hspec_new, arg3)) {
13370 return -TARGET_EFAULT;
13371 }
13372 ret = get_errno(
13373 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13374 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13375 return -TARGET_EFAULT;
13376 }
13377 }
13378 return ret;
13379 }
13380 #endif
13381
13382 #ifdef TARGET_NR_timer_settime64
13383 case TARGET_NR_timer_settime64:
13384 {
13385 target_timer_t timerid = get_timer_id(arg1);
13386
13387 if (timerid < 0) {
13388 ret = timerid;
13389 } else if (arg3 == 0) {
13390 ret = -TARGET_EINVAL;
13391 } else {
13392 timer_t htimer = g_posix_timers[timerid];
13393 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13394
13395 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13396 return -TARGET_EFAULT;
13397 }
13398 ret = get_errno(
13399 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13400 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13401 return -TARGET_EFAULT;
13402 }
13403 }
13404 return ret;
13405 }
13406 #endif
13407
13408 #ifdef TARGET_NR_timer_gettime
13409 case TARGET_NR_timer_gettime:
13410 {
13411 /* args: timer_t timerid, struct itimerspec *curr_value */
13412 target_timer_t timerid = get_timer_id(arg1);
13413
13414 if (timerid < 0) {
13415 ret = timerid;
13416 } else if (!arg2) {
13417 ret = -TARGET_EFAULT;
13418 } else {
13419 timer_t htimer = g_posix_timers[timerid];
13420 struct itimerspec hspec;
13421 ret = get_errno(timer_gettime(htimer, &hspec));
13422
13423 if (!is_error(ret) && host_to_target_itimerspec(arg2, &hspec)) {
13424 ret = -TARGET_EFAULT;
13425 }
13426 }
13427 return ret;
13428 }
13429 #endif
13430
13431 #ifdef TARGET_NR_timer_gettime64
13432 case TARGET_NR_timer_gettime64:
13433 {
13434 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13435 target_timer_t timerid = get_timer_id(arg1);
13436
13437 if (timerid < 0) {
13438 ret = timerid;
13439 } else if (!arg2) {
13440 ret = -TARGET_EFAULT;
13441 } else {
13442 timer_t htimer = g_posix_timers[timerid];
13443 struct itimerspec hspec;
13444 ret = get_errno(timer_gettime(htimer, &hspec));
13445
13446 if (!is_error(ret) && host_to_target_itimerspec64(arg2, &hspec)) {
13447 ret = -TARGET_EFAULT;
13448 }
13449 }
13450 return ret;
13451 }
13452 #endif
13453
13454 #ifdef TARGET_NR_timer_getoverrun
13455 case TARGET_NR_timer_getoverrun:
13456 {
13457 /* args: timer_t timerid */
13458 target_timer_t timerid = get_timer_id(arg1);
13459
13460 if (timerid < 0) {
13461 ret = timerid;
13462 } else {
13463 timer_t htimer = g_posix_timers[timerid];
13464 ret = get_errno(timer_getoverrun(htimer));
13465 }
13466 return ret;
13467 }
13468 #endif
13469
13470 #ifdef TARGET_NR_timer_delete
13471 case TARGET_NR_timer_delete:
13472 {
13473 /* args: timer_t timerid */
13474 target_timer_t timerid = get_timer_id(arg1);
13475
13476 if (timerid < 0) {
13477 ret = timerid;
13478 } else {
13479 timer_t htimer = g_posix_timers[timerid];
13480 ret = get_errno(timer_delete(htimer));
13481 free_host_timer_slot(timerid);
13482 }
13483 return ret;
13484 }
13485 #endif
13486
13487 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13488 case TARGET_NR_timerfd_create:
13489 ret = get_errno(timerfd_create(arg1,
13490 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13491 if (ret >= 0) {
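/* Reads of the timerfd's 8-byte expiration counter must be
 * byte-swapped for the guest, hence the fd translator. */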
13492 fd_trans_register(ret, &target_timerfd_trans);
13493 }
13494 return ret;
13495 #endif
13496
13497 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13498 case TARGET_NR_timerfd_gettime:
13499 {
13500 struct itimerspec its_curr;
13501
13502 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13503
13504 if (!is_error(ret) && arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13505 return -TARGET_EFAULT;
13506 }
13507 }
13508 return ret;
13509 #endif
13510
13511 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13512 case TARGET_NR_timerfd_gettime64:
13513 {
13514 struct itimerspec its_curr;
13515
13516 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13517
13518 if (!is_error(ret) && arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13519 return -TARGET_EFAULT;
13520 }
13521 }
13522 return ret;
13523 #endif
13524
13525 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13526 case TARGET_NR_timerfd_settime:
13527 {
13528 struct itimerspec its_new, its_old, *p_new;
13529
13530 if (arg3) {
13531 if (target_to_host_itimerspec(&its_new, arg3)) {
13532 return -TARGET_EFAULT;
13533 }
13534 p_new = &its_new;
13535 } else {
13536 p_new = NULL;
13537 }
13538
13539 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13540
13541 if (!is_error(ret) && arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13542 return -TARGET_EFAULT;
13543 }
13544 }
13545 return ret;
13546 #endif
13547
13548 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13549 case TARGET_NR_timerfd_settime64:
13550 {
13551 struct itimerspec its_new, its_old, *p_new;
13552
13553 if (arg3) {
13554 if (target_to_host_itimerspec64(&its_new, arg3)) {
13555 return -TARGET_EFAULT;
13556 }
13557 p_new = &its_new;
13558 } else {
13559 p_new = NULL;
13560 }
13561
13562 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13563
13564 if (!is_error(ret) && arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13565 return -TARGET_EFAULT;
13566 }
13567 }
13568 return ret;
13569 #endif
13570
13571 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13572 case TARGET_NR_ioprio_get:
13573 return get_errno(ioprio_get(arg1, arg2));
13574 #endif
13575
13576 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13577 case TARGET_NR_ioprio_set:
13578 return get_errno(ioprio_set(arg1, arg2, arg3));
13579 #endif
13580
13581 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13582 case TARGET_NR_setns:
13583 return get_errno(setns(arg1, arg2));
13584 #endif
13585 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13586 case TARGET_NR_unshare:
13587 return get_errno(unshare(arg1));
13588 #endif
13589 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13590 case TARGET_NR_kcmp:
13591 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13592 #endif
13593 #ifdef TARGET_NR_swapcontext
13594 case TARGET_NR_swapcontext:
13595 /* PowerPC specific. */
13596 return do_swapcontext(cpu_env, arg1, arg2, arg3);
13597 #endif
13598 #ifdef TARGET_NR_memfd_create
13599 case TARGET_NR_memfd_create:
13600 p = lock_user_string(arg1);
13601 if (!p) {
13602 return -TARGET_EFAULT;
13603 }
13604 ret = get_errno(memfd_create(p, arg2));
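/* The new fd may reuse a number that still has a stale translator
 * registered from an earlier descriptor, so drop any such entry. */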
13605 fd_trans_unregister(ret);
13606 unlock_user(p, arg1, 0);
13607 return ret;
13608 #endif
13609 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13610 case TARGET_NR_membarrier:
13611 return get_errno(membarrier(arg1, arg2));
13612 #endif
13613
13614 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13615 case TARGET_NR_copy_file_range:
13616 {
13617 loff_t inoff, outoff;
13618 loff_t *pinoff = NULL, *poutoff = NULL;
13619
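/*
 * Mirror the host semantics: a NULL offset pointer means "use and
 * update the fd's own file position"; otherwise the value is read
 * here and written back after a successful copy.
 */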
13620 if (arg2) {
13621 if (get_user_u64(inoff, arg2)) {
13622 return -TARGET_EFAULT;
13623 }
13624 pinoff = &inoff;
13625 }
13626 if (arg4) {
13627 if (get_user_u64(outoff, arg4)) {
13628 return -TARGET_EFAULT;
13629 }
13630 poutoff = &outoff;
13631 }
13632 /* Do not sign-extend the count parameter. */
13633 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13634 (abi_ulong)arg5, arg6));
13635 if (!is_error(ret) && ret > 0) {
13636 if (arg2) {
13637 if (put_user_u64(inoff, arg2)) {
13638 return -TARGET_EFAULT;
13639 }
13640 }
13641 if (arg4) {
13642 if (put_user_u64(outoff, arg4)) {
13643 return -TARGET_EFAULT;
13644 }
13645 }
13646 }
13647 }
13648 return ret;
13649 #endif
13650
13651 #if defined(TARGET_NR_pivot_root)
13652 case TARGET_NR_pivot_root:
13653 {
13654 void *p2;
13655 p = lock_user_string(arg1); /* new_root */
13656 p2 = lock_user_string(arg2); /* put_old */
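/* unlock_user() is safe on a NULL host pointer, so a single error
 * path can fall through to both unlocks. */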
13657 if (!p || !p2) {
13658 ret = -TARGET_EFAULT;
13659 } else {
13660 ret = get_errno(pivot_root(p, p2));
13661 }
13662 unlock_user(p2, arg2, 0);
13663 unlock_user(p, arg1, 0);
13664 }
13665 return ret;
13666 #endif
13667
13668 default:
13669 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13670 return -TARGET_ENOSYS;
13671 }
13672 return ret;
13673 }
13674
13675 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13676 abi_long arg2, abi_long arg3, abi_long arg4,
13677 abi_long arg5, abi_long arg6, abi_long arg7,
13678 abi_long arg8)
13679 {
13680 CPUState *cpu = env_cpu(cpu_env);
13681 abi_long ret;
13682
13683 #ifdef DEBUG_ERESTARTSYS
13684 /* Debug-only code for exercising the syscall-restart code paths
13685 * in the per-architecture cpu main loops: restart every syscall
13686 * the guest makes once before letting it through.
13687 */
13688 {
13689 static bool flag;
13690 flag = !flag;
13691 if (flag) {
13692 return -QEMU_ERESTARTSYS;
13693 }
13694 }
13695 #endif
13696
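/* Tracing/plugin hooks bracket the syscall so instrumentation can
 * observe the arguments here and the return value below. */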
13697 record_syscall_start(cpu, num, arg1,
13698 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13699
13700 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13701 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13702 }
13703
13704 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13705 arg5, arg6, arg7, arg8);
13706
13707 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13708 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13709 arg3, arg4, arg5, arg6);
13710 }
13711
13712 record_syscall_return(cpu, num, ret);
13713 return ret;
13714 }