]> git.proxmox.com Git - mirror_qemu.git/blob - linux-user/syscall.c
util/selfmap: Rewrite using qemu/interval-tree.h
[mirror_qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146
147 #ifndef CLONE_IO
148 #define CLONE_IO 0x80000000 /* Clone io context */
149 #endif
150
151 /* We can't directly call the host clone syscall, because this will
152 * badly confuse libc (breaking mutexes, for example). So we must
153 * divide clone flags into:
154 * * flag combinations that look like pthread_create()
155 * * flag combinations that look like fork()
156 * * flags we can implement within QEMU itself
157 * * flags we can't support and will return an error for
158 */
159 /* For thread creation, all these flags must be present; for
160 * fork, none must be present.
161 */
162 #define CLONE_THREAD_FLAGS \
163 (CLONE_VM | CLONE_FS | CLONE_FILES | \
164 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165
166 /* These flags are ignored:
167 * CLONE_DETACHED is now ignored by the kernel;
168 * CLONE_IO is just an optimisation hint to the I/O scheduler
169 */
170 #define CLONE_IGNORED_FLAGS \
171 (CLONE_DETACHED | CLONE_IO)
172
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS \
179 (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS \
184 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
185 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186
187 #define CLONE_INVALID_FORK_FLAGS \
188 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189
190 #define CLONE_INVALID_THREAD_FLAGS \
191 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
192 CLONE_IGNORED_FLAGS))
193
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195 * have almost all been allocated. We cannot support any of
196 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198 * The checks against the invalid thread masks above will catch these.
199 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200 */
201
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203 * once. This exercises the codepaths for restart.
204 */
205 //#define DEBUG_ERESTARTSYS
206
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220
221 #define _syscall0(type,name) \
222 static type name (void) \
223 { \
224 return syscall(__NR_##name); \
225 }
226
227 #define _syscall1(type,name,type1,arg1) \
228 static type name (type1 arg1) \
229 { \
230 return syscall(__NR_##name, arg1); \
231 }
232
233 #define _syscall2(type,name,type1,arg1,type2,arg2) \
234 static type name (type1 arg1,type2 arg2) \
235 { \
236 return syscall(__NR_##name, arg1, arg2); \
237 }
238
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
240 static type name (type1 arg1,type2 arg2,type3 arg3) \
241 { \
242 return syscall(__NR_##name, arg1, arg2, arg3); \
243 }
244
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
247 { \
248 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
249 }
250
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
252 type5,arg5) \
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
254 { \
255 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
256 }
257
258
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
260 type5,arg5,type6,arg6) \
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
262 type6 arg6) \
263 { \
264 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
265 }
266
267
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300
301 /* For the 64-bit guest on 32-bit host case we must emulate
302 * getdents using getdents64, because otherwise the host
303 * might hand us back more dirent records than we can fit
304 * into the guest buffer after structure format conversion.
305 * Otherwise we emulate getdents with getdents if the host has it.
306 */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek, unsigned int, fd, unsigned long, hi, unsigned long, lo,
321 loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325 siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339 const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343 const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350 unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357 unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360 unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363 uint32_t size;
364 uint32_t sched_policy;
365 uint64_t sched_flags;
366 int32_t sched_nice;
367 uint32_t sched_priority;
368 uint64_t sched_runtime;
369 uint64_t sched_deadline;
370 uint64_t sched_period;
371 uint32_t sched_util_min;
372 uint32_t sched_util_max;
373 };
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376 unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379 unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384 const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387 struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390 const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394 void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396 struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398 struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411 unsigned long, idx1, unsigned long, idx2)
412 #endif
413
414 /*
415 * It is assumed that struct statx is architecture independent.
416 */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419 unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424
425 static const bitmask_transtbl fcntl_flags_tbl[] = {
426 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
427 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
428 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
429 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
430 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
431 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
432 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
433 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
434 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
435 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
436 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
437 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
439 #if defined(O_DIRECT)
440 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
441 #endif
442 #if defined(O_NOATIME)
443 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
444 #endif
445 #if defined(O_CLOEXEC)
446 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
447 #endif
448 #if defined(O_PATH)
449 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
450 #endif
451 #if defined(O_TMPFILE)
452 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
453 #endif
454 /* Don't terminate the list prematurely on 64-bit host+guest. */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457 #endif
458 { 0, 0, 0, 0 }
459 };
460
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467 const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470 const struct timespec times[2], int flags)
471 {
472 errno = ENOSYS;
473 return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
477
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482 const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485 int newfd, const char *new, int flags)
486 {
487 if (flags == 0) {
488 return renameat(oldfd, old, newfd, new);
489 }
490 errno = ENOSYS;
491 return -1;
492 }
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY */
505
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
512 struct host_rlimit64 {
513 uint64_t rlim_cur;
514 uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517 const struct host_rlimit64 *, new_limit,
518 struct host_rlimit64 *, old_limit)
519 #endif
520
521
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527
528 static inline int next_free_host_timer(void)
529 {
530 int k;
531 for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532 if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533 return k;
534 }
535 }
536 return -1;
537 }
538
539 static inline void free_host_timer_slot(int id)
540 {
541 qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
543 #endif
544
545 static inline int host_to_target_errno(int host_errno)
546 {
547 switch (host_errno) {
548 #define E(X) case X: return TARGET_##X;
549 #include "errnos.c.inc"
550 #undef E
551 default:
552 return host_errno;
553 }
554 }
555
556 static inline int target_to_host_errno(int target_errno)
557 {
558 switch (target_errno) {
559 #define E(X) case TARGET_##X: return X;
560 #include "errnos.c.inc"
561 #undef E
562 default:
563 return target_errno;
564 }
565 }
566
567 abi_long get_errno(abi_long ret)
568 {
569 if (ret == -1)
570 return -host_to_target_errno(errno);
571 else
572 return ret;
573 }
574
575 const char *target_strerror(int err)
576 {
577 if (err == QEMU_ERESTARTSYS) {
578 return "To be restarted";
579 }
580 if (err == QEMU_ESIGRETURN) {
581 return "Successful exit from sigreturn";
582 }
583
584 return strerror(target_to_host_errno(err));
585 }
586
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589 int i;
590 uint8_t b;
591 if (usize <= ksize) {
592 return 1;
593 }
594 for (i = ksize; i < usize; i++) {
595 if (get_user_u8(b, addr + i)) {
596 return -TARGET_EFAULT;
597 }
598 if (b != 0) {
599 return 0;
600 }
601 }
602 return 1;
603 }
604
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608 return safe_syscall(__NR_##name); \
609 }
610
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614 return safe_syscall(__NR_##name, arg1); \
615 }
616
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620 return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630 type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637 type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639 type5 arg5) \
640 { \
641 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645 type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647 type5 arg5, type6 arg6) \
648 { \
649 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
651
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655 int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658 struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661 int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664 char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672 struct timespec *, tsp, const sigset_t *, sigmask,
673 size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676 int, maxevents, int, timeout, const sigset_t *, sigmask,
677 size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680 const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684 const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693 unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695 unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697 socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707 const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710 int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713 struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716 defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718 const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723 void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726 void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731 int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735 long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739 unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742 defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744 size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747 defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749 size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753 int, outfd, loff_t *, poutoff, size_t, length,
754 unsigned int, flags)
755 #endif
756
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758 * "third argument might be integer or pointer or not present" behaviour of
759 * the libc function.
760 */
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
764 * use the flock64 struct rather than unsuffixed flock
765 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
766 */
767 #ifdef __NR_fcntl64
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
769 #else
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
771 #endif
772
773 static inline int host_to_target_sock_type(int host_type)
774 {
775 int target_type;
776
777 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
778 case SOCK_DGRAM:
779 target_type = TARGET_SOCK_DGRAM;
780 break;
781 case SOCK_STREAM:
782 target_type = TARGET_SOCK_STREAM;
783 break;
784 default:
785 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
786 break;
787 }
788
789 #if defined(SOCK_CLOEXEC)
790 if (host_type & SOCK_CLOEXEC) {
791 target_type |= TARGET_SOCK_CLOEXEC;
792 }
793 #endif
794
795 #if defined(SOCK_NONBLOCK)
796 if (host_type & SOCK_NONBLOCK) {
797 target_type |= TARGET_SOCK_NONBLOCK;
798 }
799 #endif
800
801 return target_type;
802 }
803
804 static abi_ulong target_brk, initial_target_brk;
805
806 void target_set_brk(abi_ulong new_brk)
807 {
808 target_brk = TARGET_PAGE_ALIGN(new_brk);
809 initial_target_brk = target_brk;
810 }
811
812 /* do_brk() must return target values and target errnos. */
813 abi_long do_brk(abi_ulong brk_val)
814 {
815 abi_long mapped_addr;
816 abi_ulong new_brk;
817 abi_ulong old_brk;
818
819 /* brk pointers are always untagged */
820
821 /* do not allow to shrink below initial brk value */
822 if (brk_val < initial_target_brk) {
823 return target_brk;
824 }
825
826 new_brk = TARGET_PAGE_ALIGN(brk_val);
827 old_brk = TARGET_PAGE_ALIGN(target_brk);
828
829 /* new and old target_brk might be on the same page */
830 if (new_brk == old_brk) {
831 target_brk = brk_val;
832 return target_brk;
833 }
834
835 /* Release heap if necesary */
836 if (new_brk < old_brk) {
837 target_munmap(new_brk, old_brk - new_brk);
838
839 target_brk = brk_val;
840 return target_brk;
841 }
842
843 mapped_addr = target_mmap(old_brk, new_brk - old_brk,
844 PROT_READ | PROT_WRITE,
845 MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
846 -1, 0);
847
848 if (mapped_addr == old_brk) {
849 target_brk = brk_val;
850 return target_brk;
851 }
852
853 #if defined(TARGET_ALPHA)
854 /* We (partially) emulate OSF/1 on Alpha, which requires we
855 return a proper errno, not an unchanged brk value. */
856 return -TARGET_ENOMEM;
857 #endif
858 /* For everything else, return the previous break. */
859 return target_brk;
860 }
861
862 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
863 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
864 static inline abi_long copy_from_user_fdset(fd_set *fds,
865 abi_ulong target_fds_addr,
866 int n)
867 {
868 int i, nw, j, k;
869 abi_ulong b, *target_fds;
870
871 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
872 if (!(target_fds = lock_user(VERIFY_READ,
873 target_fds_addr,
874 sizeof(abi_ulong) * nw,
875 1)))
876 return -TARGET_EFAULT;
877
878 FD_ZERO(fds);
879 k = 0;
880 for (i = 0; i < nw; i++) {
881 /* grab the abi_ulong */
882 __get_user(b, &target_fds[i]);
883 for (j = 0; j < TARGET_ABI_BITS; j++) {
884 /* check the bit inside the abi_ulong */
885 if ((b >> j) & 1)
886 FD_SET(k, fds);
887 k++;
888 }
889 }
890
891 unlock_user(target_fds, target_fds_addr, 0);
892
893 return 0;
894 }
895
896 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
897 abi_ulong target_fds_addr,
898 int n)
899 {
900 if (target_fds_addr) {
901 if (copy_from_user_fdset(fds, target_fds_addr, n))
902 return -TARGET_EFAULT;
903 *fds_ptr = fds;
904 } else {
905 *fds_ptr = NULL;
906 }
907 return 0;
908 }
909
910 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
911 const fd_set *fds,
912 int n)
913 {
914 int i, nw, j, k;
915 abi_long v;
916 abi_ulong *target_fds;
917
918 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
919 if (!(target_fds = lock_user(VERIFY_WRITE,
920 target_fds_addr,
921 sizeof(abi_ulong) * nw,
922 0)))
923 return -TARGET_EFAULT;
924
925 k = 0;
926 for (i = 0; i < nw; i++) {
927 v = 0;
928 for (j = 0; j < TARGET_ABI_BITS; j++) {
929 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
930 k++;
931 }
932 __put_user(v, &target_fds[i]);
933 }
934
935 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
936
937 return 0;
938 }
939 #endif
940
941 #if defined(__alpha__)
942 #define HOST_HZ 1024
943 #else
944 #define HOST_HZ 100
945 #endif
946
947 static inline abi_long host_to_target_clock_t(long ticks)
948 {
949 #if HOST_HZ == TARGET_HZ
950 return ticks;
951 #else
952 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
953 #endif
954 }
955
956 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
957 const struct rusage *rusage)
958 {
959 struct target_rusage *target_rusage;
960
961 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
962 return -TARGET_EFAULT;
963 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
964 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
965 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
966 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
967 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
968 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
969 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
970 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
971 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
972 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
973 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
974 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
975 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
976 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
977 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
978 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
979 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
980 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
981 unlock_user_struct(target_rusage, target_addr, 1);
982
983 return 0;
984 }
985
986 #ifdef TARGET_NR_setrlimit
987 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
988 {
989 abi_ulong target_rlim_swap;
990 rlim_t result;
991
992 target_rlim_swap = tswapal(target_rlim);
993 if (target_rlim_swap == TARGET_RLIM_INFINITY)
994 return RLIM_INFINITY;
995
996 result = target_rlim_swap;
997 if (target_rlim_swap != (rlim_t)result)
998 return RLIM_INFINITY;
999
1000 return result;
1001 }
1002 #endif
1003
1004 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1005 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1006 {
1007 abi_ulong target_rlim_swap;
1008 abi_ulong result;
1009
1010 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1011 target_rlim_swap = TARGET_RLIM_INFINITY;
1012 else
1013 target_rlim_swap = rlim;
1014 result = tswapal(target_rlim_swap);
1015
1016 return result;
1017 }
1018 #endif
1019
1020 static inline int target_to_host_resource(int code)
1021 {
1022 switch (code) {
1023 case TARGET_RLIMIT_AS:
1024 return RLIMIT_AS;
1025 case TARGET_RLIMIT_CORE:
1026 return RLIMIT_CORE;
1027 case TARGET_RLIMIT_CPU:
1028 return RLIMIT_CPU;
1029 case TARGET_RLIMIT_DATA:
1030 return RLIMIT_DATA;
1031 case TARGET_RLIMIT_FSIZE:
1032 return RLIMIT_FSIZE;
1033 case TARGET_RLIMIT_LOCKS:
1034 return RLIMIT_LOCKS;
1035 case TARGET_RLIMIT_MEMLOCK:
1036 return RLIMIT_MEMLOCK;
1037 case TARGET_RLIMIT_MSGQUEUE:
1038 return RLIMIT_MSGQUEUE;
1039 case TARGET_RLIMIT_NICE:
1040 return RLIMIT_NICE;
1041 case TARGET_RLIMIT_NOFILE:
1042 return RLIMIT_NOFILE;
1043 case TARGET_RLIMIT_NPROC:
1044 return RLIMIT_NPROC;
1045 case TARGET_RLIMIT_RSS:
1046 return RLIMIT_RSS;
1047 case TARGET_RLIMIT_RTPRIO:
1048 return RLIMIT_RTPRIO;
1049 #ifdef RLIMIT_RTTIME
1050 case TARGET_RLIMIT_RTTIME:
1051 return RLIMIT_RTTIME;
1052 #endif
1053 case TARGET_RLIMIT_SIGPENDING:
1054 return RLIMIT_SIGPENDING;
1055 case TARGET_RLIMIT_STACK:
1056 return RLIMIT_STACK;
1057 default:
1058 return code;
1059 }
1060 }
1061
1062 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1063 abi_ulong target_tv_addr)
1064 {
1065 struct target_timeval *target_tv;
1066
1067 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1068 return -TARGET_EFAULT;
1069 }
1070
1071 __get_user(tv->tv_sec, &target_tv->tv_sec);
1072 __get_user(tv->tv_usec, &target_tv->tv_usec);
1073
1074 unlock_user_struct(target_tv, target_tv_addr, 0);
1075
1076 return 0;
1077 }
1078
1079 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1080 const struct timeval *tv)
1081 {
1082 struct target_timeval *target_tv;
1083
1084 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1085 return -TARGET_EFAULT;
1086 }
1087
1088 __put_user(tv->tv_sec, &target_tv->tv_sec);
1089 __put_user(tv->tv_usec, &target_tv->tv_usec);
1090
1091 unlock_user_struct(target_tv, target_tv_addr, 1);
1092
1093 return 0;
1094 }
1095
1096 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1097 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1098 abi_ulong target_tv_addr)
1099 {
1100 struct target__kernel_sock_timeval *target_tv;
1101
1102 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1103 return -TARGET_EFAULT;
1104 }
1105
1106 __get_user(tv->tv_sec, &target_tv->tv_sec);
1107 __get_user(tv->tv_usec, &target_tv->tv_usec);
1108
1109 unlock_user_struct(target_tv, target_tv_addr, 0);
1110
1111 return 0;
1112 }
1113 #endif
1114
1115 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1116 const struct timeval *tv)
1117 {
1118 struct target__kernel_sock_timeval *target_tv;
1119
1120 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1121 return -TARGET_EFAULT;
1122 }
1123
1124 __put_user(tv->tv_sec, &target_tv->tv_sec);
1125 __put_user(tv->tv_usec, &target_tv->tv_usec);
1126
1127 unlock_user_struct(target_tv, target_tv_addr, 1);
1128
1129 return 0;
1130 }
1131
1132 #if defined(TARGET_NR_futex) || \
1133 defined(TARGET_NR_rt_sigtimedwait) || \
1134 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1135 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1136 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1137 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1138 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1139 defined(TARGET_NR_timer_settime) || \
1140 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1141 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1142 abi_ulong target_addr)
1143 {
1144 struct target_timespec *target_ts;
1145
1146 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1147 return -TARGET_EFAULT;
1148 }
1149 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1150 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1151 unlock_user_struct(target_ts, target_addr, 0);
1152 return 0;
1153 }
1154 #endif
1155
1156 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1157 defined(TARGET_NR_timer_settime64) || \
1158 defined(TARGET_NR_mq_timedsend_time64) || \
1159 defined(TARGET_NR_mq_timedreceive_time64) || \
1160 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1161 defined(TARGET_NR_clock_nanosleep_time64) || \
1162 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1163 defined(TARGET_NR_utimensat) || \
1164 defined(TARGET_NR_utimensat_time64) || \
1165 defined(TARGET_NR_semtimedop_time64) || \
1166 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1167 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1168 abi_ulong target_addr)
1169 {
1170 struct target__kernel_timespec *target_ts;
1171
1172 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1173 return -TARGET_EFAULT;
1174 }
1175 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1176 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1177 /* in 32bit mode, this drops the padding */
1178 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1179 unlock_user_struct(target_ts, target_addr, 0);
1180 return 0;
1181 }
1182 #endif
1183
1184 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1185 struct timespec *host_ts)
1186 {
1187 struct target_timespec *target_ts;
1188
1189 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1190 return -TARGET_EFAULT;
1191 }
1192 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1193 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1194 unlock_user_struct(target_ts, target_addr, 1);
1195 return 0;
1196 }
1197
1198 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1199 struct timespec *host_ts)
1200 {
1201 struct target__kernel_timespec *target_ts;
1202
1203 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1204 return -TARGET_EFAULT;
1205 }
1206 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1207 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1208 unlock_user_struct(target_ts, target_addr, 1);
1209 return 0;
1210 }
1211
1212 #if defined(TARGET_NR_gettimeofday)
1213 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1214 struct timezone *tz)
1215 {
1216 struct target_timezone *target_tz;
1217
1218 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1219 return -TARGET_EFAULT;
1220 }
1221
1222 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1223 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1224
1225 unlock_user_struct(target_tz, target_tz_addr, 1);
1226
1227 return 0;
1228 }
1229 #endif
1230
1231 #if defined(TARGET_NR_settimeofday)
1232 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1233 abi_ulong target_tz_addr)
1234 {
1235 struct target_timezone *target_tz;
1236
1237 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1238 return -TARGET_EFAULT;
1239 }
1240
1241 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1242 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1243
1244 unlock_user_struct(target_tz, target_tz_addr, 0);
1245
1246 return 0;
1247 }
1248 #endif
1249
1250 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1251 #include <mqueue.h>
1252
1253 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1254 abi_ulong target_mq_attr_addr)
1255 {
1256 struct target_mq_attr *target_mq_attr;
1257
1258 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1259 target_mq_attr_addr, 1))
1260 return -TARGET_EFAULT;
1261
1262 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1263 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1264 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1265 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1266
1267 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1268
1269 return 0;
1270 }
1271
1272 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1273 const struct mq_attr *attr)
1274 {
1275 struct target_mq_attr *target_mq_attr;
1276
1277 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1278 target_mq_attr_addr, 0))
1279 return -TARGET_EFAULT;
1280
1281 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1282 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1283 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1284 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1285
1286 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1287
1288 return 0;
1289 }
1290 #endif
1291
1292 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1293 /* do_select() must return target values and target errnos. */
1294 static abi_long do_select(int n,
1295 abi_ulong rfd_addr, abi_ulong wfd_addr,
1296 abi_ulong efd_addr, abi_ulong target_tv_addr)
1297 {
1298 fd_set rfds, wfds, efds;
1299 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1300 struct timeval tv;
1301 struct timespec ts, *ts_ptr;
1302 abi_long ret;
1303
1304 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1305 if (ret) {
1306 return ret;
1307 }
1308 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1309 if (ret) {
1310 return ret;
1311 }
1312 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1313 if (ret) {
1314 return ret;
1315 }
1316
1317 if (target_tv_addr) {
1318 if (copy_from_user_timeval(&tv, target_tv_addr))
1319 return -TARGET_EFAULT;
1320 ts.tv_sec = tv.tv_sec;
1321 ts.tv_nsec = tv.tv_usec * 1000;
1322 ts_ptr = &ts;
1323 } else {
1324 ts_ptr = NULL;
1325 }
1326
1327 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1328 ts_ptr, NULL));
1329
1330 if (!is_error(ret)) {
1331 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1332 return -TARGET_EFAULT;
1333 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1334 return -TARGET_EFAULT;
1335 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1336 return -TARGET_EFAULT;
1337
1338 if (target_tv_addr) {
1339 tv.tv_sec = ts.tv_sec;
1340 tv.tv_usec = ts.tv_nsec / 1000;
1341 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1342 return -TARGET_EFAULT;
1343 }
1344 }
1345 }
1346
1347 return ret;
1348 }
1349
1350 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1351 static abi_long do_old_select(abi_ulong arg1)
1352 {
1353 struct target_sel_arg_struct *sel;
1354 abi_ulong inp, outp, exp, tvp;
1355 long nsel;
1356
1357 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1358 return -TARGET_EFAULT;
1359 }
1360
1361 nsel = tswapal(sel->n);
1362 inp = tswapal(sel->inp);
1363 outp = tswapal(sel->outp);
1364 exp = tswapal(sel->exp);
1365 tvp = tswapal(sel->tvp);
1366
1367 unlock_user_struct(sel, arg1, 0);
1368
1369 return do_select(nsel, inp, outp, exp, tvp);
1370 }
1371 #endif
1372 #endif
1373
1374 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1375 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1376 abi_long arg4, abi_long arg5, abi_long arg6,
1377 bool time64)
1378 {
1379 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1380 fd_set rfds, wfds, efds;
1381 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1382 struct timespec ts, *ts_ptr;
1383 abi_long ret;
1384
1385 /*
1386 * The 6th arg is actually two args smashed together,
1387 * so we cannot use the C library.
1388 */
1389 struct {
1390 sigset_t *set;
1391 size_t size;
1392 } sig, *sig_ptr;
1393
1394 abi_ulong arg_sigset, arg_sigsize, *arg7;
1395
1396 n = arg1;
1397 rfd_addr = arg2;
1398 wfd_addr = arg3;
1399 efd_addr = arg4;
1400 ts_addr = arg5;
1401
1402 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1403 if (ret) {
1404 return ret;
1405 }
1406 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1407 if (ret) {
1408 return ret;
1409 }
1410 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1411 if (ret) {
1412 return ret;
1413 }
1414
1415 /*
1416 * This takes a timespec, and not a timeval, so we cannot
1417 * use the do_select() helper ...
1418 */
1419 if (ts_addr) {
1420 if (time64) {
1421 if (target_to_host_timespec64(&ts, ts_addr)) {
1422 return -TARGET_EFAULT;
1423 }
1424 } else {
1425 if (target_to_host_timespec(&ts, ts_addr)) {
1426 return -TARGET_EFAULT;
1427 }
1428 }
1429 ts_ptr = &ts;
1430 } else {
1431 ts_ptr = NULL;
1432 }
1433
1434 /* Extract the two packed args for the sigset */
1435 sig_ptr = NULL;
1436 if (arg6) {
1437 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1438 if (!arg7) {
1439 return -TARGET_EFAULT;
1440 }
1441 arg_sigset = tswapal(arg7[0]);
1442 arg_sigsize = tswapal(arg7[1]);
1443 unlock_user(arg7, arg6, 0);
1444
1445 if (arg_sigset) {
1446 ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1447 if (ret != 0) {
1448 return ret;
1449 }
1450 sig_ptr = &sig;
1451 sig.size = SIGSET_T_SIZE;
1452 }
1453 }
1454
1455 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1456 ts_ptr, sig_ptr));
1457
1458 if (sig_ptr) {
1459 finish_sigsuspend_mask(ret);
1460 }
1461
1462 if (!is_error(ret)) {
1463 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1464 return -TARGET_EFAULT;
1465 }
1466 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1467 return -TARGET_EFAULT;
1468 }
1469 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1470 return -TARGET_EFAULT;
1471 }
1472 if (time64) {
1473 if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1474 return -TARGET_EFAULT;
1475 }
1476 } else {
1477 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1478 return -TARGET_EFAULT;
1479 }
1480 }
1481 }
1482 return ret;
1483 }
1484 #endif
1485
1486 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1487 defined(TARGET_NR_ppoll_time64)
1488 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1489 abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1490 {
1491 struct target_pollfd *target_pfd;
1492 unsigned int nfds = arg2;
1493 struct pollfd *pfd;
1494 unsigned int i;
1495 abi_long ret;
1496
1497 pfd = NULL;
1498 target_pfd = NULL;
1499 if (nfds) {
1500 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1501 return -TARGET_EINVAL;
1502 }
1503 target_pfd = lock_user(VERIFY_WRITE, arg1,
1504 sizeof(struct target_pollfd) * nfds, 1);
1505 if (!target_pfd) {
1506 return -TARGET_EFAULT;
1507 }
1508
1509 pfd = alloca(sizeof(struct pollfd) * nfds);
1510 for (i = 0; i < nfds; i++) {
1511 pfd[i].fd = tswap32(target_pfd[i].fd);
1512 pfd[i].events = tswap16(target_pfd[i].events);
1513 }
1514 }
1515 if (ppoll) {
1516 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1517 sigset_t *set = NULL;
1518
1519 if (arg3) {
1520 if (time64) {
1521 if (target_to_host_timespec64(timeout_ts, arg3)) {
1522 unlock_user(target_pfd, arg1, 0);
1523 return -TARGET_EFAULT;
1524 }
1525 } else {
1526 if (target_to_host_timespec(timeout_ts, arg3)) {
1527 unlock_user(target_pfd, arg1, 0);
1528 return -TARGET_EFAULT;
1529 }
1530 }
1531 } else {
1532 timeout_ts = NULL;
1533 }
1534
1535 if (arg4) {
1536 ret = process_sigsuspend_mask(&set, arg4, arg5);
1537 if (ret != 0) {
1538 unlock_user(target_pfd, arg1, 0);
1539 return ret;
1540 }
1541 }
1542
1543 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1544 set, SIGSET_T_SIZE));
1545
1546 if (set) {
1547 finish_sigsuspend_mask(ret);
1548 }
1549 if (!is_error(ret) && arg3) {
1550 if (time64) {
1551 if (host_to_target_timespec64(arg3, timeout_ts)) {
1552 return -TARGET_EFAULT;
1553 }
1554 } else {
1555 if (host_to_target_timespec(arg3, timeout_ts)) {
1556 return -TARGET_EFAULT;
1557 }
1558 }
1559 }
1560 } else {
1561 struct timespec ts, *pts;
1562
1563 if (arg3 >= 0) {
1564 /* Convert ms to secs, ns */
1565 ts.tv_sec = arg3 / 1000;
1566 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1567 pts = &ts;
1568 } else {
1569 /* -ve poll() timeout means "infinite" */
1570 pts = NULL;
1571 }
1572 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1573 }
1574
1575 if (!is_error(ret)) {
1576 for (i = 0; i < nfds; i++) {
1577 target_pfd[i].revents = tswap16(pfd[i].revents);
1578 }
1579 }
1580 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1581 return ret;
1582 }
1583 #endif
1584
1585 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1586 int flags, int is_pipe2)
1587 {
1588 int host_pipe[2];
1589 abi_long ret;
1590 ret = pipe2(host_pipe, flags);
1591
1592 if (is_error(ret))
1593 return get_errno(ret);
1594
1595 /* Several targets have special calling conventions for the original
1596 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1597 if (!is_pipe2) {
1598 #if defined(TARGET_ALPHA)
1599 cpu_env->ir[IR_A4] = host_pipe[1];
1600 return host_pipe[0];
1601 #elif defined(TARGET_MIPS)
1602 cpu_env->active_tc.gpr[3] = host_pipe[1];
1603 return host_pipe[0];
1604 #elif defined(TARGET_SH4)
1605 cpu_env->gregs[1] = host_pipe[1];
1606 return host_pipe[0];
1607 #elif defined(TARGET_SPARC)
1608 cpu_env->regwptr[1] = host_pipe[1];
1609 return host_pipe[0];
1610 #endif
1611 }
1612
1613 if (put_user_s32(host_pipe[0], pipedes)
1614 || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1615 return -TARGET_EFAULT;
1616 return get_errno(ret);
1617 }
1618
1619 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1620 abi_ulong target_addr,
1621 socklen_t len)
1622 {
1623 struct target_ip_mreqn *target_smreqn;
1624
1625 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1626 if (!target_smreqn)
1627 return -TARGET_EFAULT;
1628 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1629 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1630 if (len == sizeof(struct target_ip_mreqn))
1631 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1632 unlock_user(target_smreqn, target_addr, 0);
1633
1634 return 0;
1635 }
1636
1637 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1638 abi_ulong target_addr,
1639 socklen_t len)
1640 {
1641 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1642 sa_family_t sa_family;
1643 struct target_sockaddr *target_saddr;
1644
1645 if (fd_trans_target_to_host_addr(fd)) {
1646 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1647 }
1648
1649 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1650 if (!target_saddr)
1651 return -TARGET_EFAULT;
1652
1653 sa_family = tswap16(target_saddr->sa_family);
1654
1655 /* Oops. The caller might send a incomplete sun_path; sun_path
1656 * must be terminated by \0 (see the manual page), but
1657 * unfortunately it is quite common to specify sockaddr_un
1658 * length as "strlen(x->sun_path)" while it should be
1659 * "strlen(...) + 1". We'll fix that here if needed.
1660 * Linux kernel has a similar feature.
1661 */
1662
1663 if (sa_family == AF_UNIX) {
1664 if (len < unix_maxlen && len > 0) {
1665 char *cp = (char*)target_saddr;
1666
1667 if ( cp[len-1] && !cp[len] )
1668 len++;
1669 }
1670 if (len > unix_maxlen)
1671 len = unix_maxlen;
1672 }
1673
1674 memcpy(addr, target_saddr, len);
1675 addr->sa_family = sa_family;
1676 if (sa_family == AF_NETLINK) {
1677 struct sockaddr_nl *nladdr;
1678
1679 nladdr = (struct sockaddr_nl *)addr;
1680 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1681 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1682 } else if (sa_family == AF_PACKET) {
1683 struct target_sockaddr_ll *lladdr;
1684
1685 lladdr = (struct target_sockaddr_ll *)addr;
1686 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1687 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1688 } else if (sa_family == AF_INET6) {
1689 struct sockaddr_in6 *in6addr;
1690
1691 in6addr = (struct sockaddr_in6 *)addr;
1692 in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1693 }
1694 unlock_user(target_saddr, target_addr, 0);
1695
1696 return 0;
1697 }
1698
1699 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1700 struct sockaddr *addr,
1701 socklen_t len)
1702 {
1703 struct target_sockaddr *target_saddr;
1704
1705 if (len == 0) {
1706 return 0;
1707 }
1708 assert(addr);
1709
1710 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1711 if (!target_saddr)
1712 return -TARGET_EFAULT;
1713 memcpy(target_saddr, addr, len);
1714 if (len >= offsetof(struct target_sockaddr, sa_family) +
1715 sizeof(target_saddr->sa_family)) {
1716 target_saddr->sa_family = tswap16(addr->sa_family);
1717 }
1718 if (addr->sa_family == AF_NETLINK &&
1719 len >= sizeof(struct target_sockaddr_nl)) {
1720 struct target_sockaddr_nl *target_nl =
1721 (struct target_sockaddr_nl *)target_saddr;
1722 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1723 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1724 } else if (addr->sa_family == AF_PACKET) {
1725 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1726 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1727 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1728 } else if (addr->sa_family == AF_INET6 &&
1729 len >= sizeof(struct target_sockaddr_in6)) {
1730 struct target_sockaddr_in6 *target_in6 =
1731 (struct target_sockaddr_in6 *)target_saddr;
1732 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1733 }
1734 unlock_user(target_saddr, target_addr, len);
1735
1736 return 0;
1737 }
1738
1739 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1740 struct target_msghdr *target_msgh)
1741 {
1742 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1743 abi_long msg_controllen;
1744 abi_ulong target_cmsg_addr;
1745 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1746 socklen_t space = 0;
1747
1748 msg_controllen = tswapal(target_msgh->msg_controllen);
1749 if (msg_controllen < sizeof (struct target_cmsghdr))
1750 goto the_end;
1751 target_cmsg_addr = tswapal(target_msgh->msg_control);
1752 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1753 target_cmsg_start = target_cmsg;
1754 if (!target_cmsg)
1755 return -TARGET_EFAULT;
1756
1757 while (cmsg && target_cmsg) {
1758 void *data = CMSG_DATA(cmsg);
1759 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1760
1761 int len = tswapal(target_cmsg->cmsg_len)
1762 - sizeof(struct target_cmsghdr);
1763
1764 space += CMSG_SPACE(len);
1765 if (space > msgh->msg_controllen) {
1766 space -= CMSG_SPACE(len);
1767 /* This is a QEMU bug, since we allocated the payload
1768 * area ourselves (unlike overflow in host-to-target
1769 * conversion, which is just the guest giving us a buffer
1770 * that's too small). It can't happen for the payload types
1771 * we currently support; if it becomes an issue in future
1772 * we would need to improve our allocation strategy to
1773 * something more intelligent than "twice the size of the
1774 * target buffer we're reading from".
1775 */
1776 qemu_log_mask(LOG_UNIMP,
1777 ("Unsupported ancillary data %d/%d: "
1778 "unhandled msg size\n"),
1779 tswap32(target_cmsg->cmsg_level),
1780 tswap32(target_cmsg->cmsg_type));
1781 break;
1782 }
1783
1784 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1785 cmsg->cmsg_level = SOL_SOCKET;
1786 } else {
1787 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1788 }
1789 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1790 cmsg->cmsg_len = CMSG_LEN(len);
1791
1792 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1793 int *fd = (int *)data;
1794 int *target_fd = (int *)target_data;
1795 int i, numfds = len / sizeof(int);
1796
1797 for (i = 0; i < numfds; i++) {
1798 __get_user(fd[i], target_fd + i);
1799 }
1800 } else if (cmsg->cmsg_level == SOL_SOCKET
1801 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1802 struct ucred *cred = (struct ucred *)data;
1803 struct target_ucred *target_cred =
1804 (struct target_ucred *)target_data;
1805
1806 __get_user(cred->pid, &target_cred->pid);
1807 __get_user(cred->uid, &target_cred->uid);
1808 __get_user(cred->gid, &target_cred->gid);
1809 } else if (cmsg->cmsg_level == SOL_ALG) {
1810 uint32_t *dst = (uint32_t *)data;
1811
1812 memcpy(dst, target_data, len);
1813 /* fix endianess of first 32-bit word */
1814 if (len >= sizeof(uint32_t)) {
1815 *dst = tswap32(*dst);
1816 }
1817 } else {
1818 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1819 cmsg->cmsg_level, cmsg->cmsg_type);
1820 memcpy(data, target_data, len);
1821 }
1822
1823 cmsg = CMSG_NXTHDR(msgh, cmsg);
1824 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1825 target_cmsg_start);
1826 }
1827 unlock_user(target_cmsg, target_cmsg_addr, 0);
1828 the_end:
1829 msgh->msg_controllen = space;
1830 return 0;
1831 }
1832
1833 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1834 struct msghdr *msgh)
1835 {
1836 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1837 abi_long msg_controllen;
1838 abi_ulong target_cmsg_addr;
1839 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1840 socklen_t space = 0;
1841
1842 msg_controllen = tswapal(target_msgh->msg_controllen);
1843 if (msg_controllen < sizeof (struct target_cmsghdr))
1844 goto the_end;
1845 target_cmsg_addr = tswapal(target_msgh->msg_control);
1846 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1847 target_cmsg_start = target_cmsg;
1848 if (!target_cmsg)
1849 return -TARGET_EFAULT;
1850
1851 while (cmsg && target_cmsg) {
1852 void *data = CMSG_DATA(cmsg);
1853 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1854
1855 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1856 int tgt_len, tgt_space;
1857
1858 /* We never copy a half-header but may copy half-data;
1859 * this is Linux's behaviour in put_cmsg(). Note that
1860 * truncation here is a guest problem (which we report
1861 * to the guest via the CTRUNC bit), unlike truncation
1862 * in target_to_host_cmsg, which is a QEMU bug.
1863 */
1864 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1865 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1866 break;
1867 }
1868
1869 if (cmsg->cmsg_level == SOL_SOCKET) {
1870 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1871 } else {
1872 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1873 }
1874 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1875
1876 /* Payload types which need a different size of payload on
1877 * the target must adjust tgt_len here.
1878 */
1879 tgt_len = len;
1880 switch (cmsg->cmsg_level) {
1881 case SOL_SOCKET:
1882 switch (cmsg->cmsg_type) {
1883 case SO_TIMESTAMP:
1884 tgt_len = sizeof(struct target_timeval);
1885 break;
1886 default:
1887 break;
1888 }
1889 break;
1890 default:
1891 break;
1892 }
1893
1894 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1895 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1896 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1897 }
1898
1899 /* We must now copy-and-convert len bytes of payload
1900 * into tgt_len bytes of destination space. Bear in mind
1901 * that in both source and destination we may be dealing
1902 * with a truncated value!
1903 */
1904 switch (cmsg->cmsg_level) {
1905 case SOL_SOCKET:
1906 switch (cmsg->cmsg_type) {
1907 case SCM_RIGHTS:
1908 {
1909 int *fd = (int *)data;
1910 int *target_fd = (int *)target_data;
1911 int i, numfds = tgt_len / sizeof(int);
1912
1913 for (i = 0; i < numfds; i++) {
1914 __put_user(fd[i], target_fd + i);
1915 }
1916 break;
1917 }
1918 case SO_TIMESTAMP:
1919 {
1920 struct timeval *tv = (struct timeval *)data;
1921 struct target_timeval *target_tv =
1922 (struct target_timeval *)target_data;
1923
1924 if (len != sizeof(struct timeval) ||
1925 tgt_len != sizeof(struct target_timeval)) {
1926 goto unimplemented;
1927 }
1928
1929 /* copy struct timeval to target */
1930 __put_user(tv->tv_sec, &target_tv->tv_sec);
1931 __put_user(tv->tv_usec, &target_tv->tv_usec);
1932 break;
1933 }
1934 case SCM_CREDENTIALS:
1935 {
1936 struct ucred *cred = (struct ucred *)data;
1937 struct target_ucred *target_cred =
1938 (struct target_ucred *)target_data;
1939
1940 __put_user(cred->pid, &target_cred->pid);
1941 __put_user(cred->uid, &target_cred->uid);
1942 __put_user(cred->gid, &target_cred->gid);
1943 break;
1944 }
1945 default:
1946 goto unimplemented;
1947 }
1948 break;
1949
1950 case SOL_IP:
1951 switch (cmsg->cmsg_type) {
1952 case IP_TTL:
1953 {
1954 uint32_t *v = (uint32_t *)data;
1955 uint32_t *t_int = (uint32_t *)target_data;
1956
1957 if (len != sizeof(uint32_t) ||
1958 tgt_len != sizeof(uint32_t)) {
1959 goto unimplemented;
1960 }
1961 __put_user(*v, t_int);
1962 break;
1963 }
1964 case IP_RECVERR:
1965 {
1966 struct errhdr_t {
1967 struct sock_extended_err ee;
1968 struct sockaddr_in offender;
1969 };
1970 struct errhdr_t *errh = (struct errhdr_t *)data;
1971 struct errhdr_t *target_errh =
1972 (struct errhdr_t *)target_data;
1973
1974 if (len != sizeof(struct errhdr_t) ||
1975 tgt_len != sizeof(struct errhdr_t)) {
1976 goto unimplemented;
1977 }
1978 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1979 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1980 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1981 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1982 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1983 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1984 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1985 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1986 (void *) &errh->offender, sizeof(errh->offender));
1987 break;
1988 }
1989 default:
1990 goto unimplemented;
1991 }
1992 break;
1993
1994 case SOL_IPV6:
1995 switch (cmsg->cmsg_type) {
1996 case IPV6_HOPLIMIT:
1997 {
1998 uint32_t *v = (uint32_t *)data;
1999 uint32_t *t_int = (uint32_t *)target_data;
2000
2001 if (len != sizeof(uint32_t) ||
2002 tgt_len != sizeof(uint32_t)) {
2003 goto unimplemented;
2004 }
2005 __put_user(*v, t_int);
2006 break;
2007 }
2008 case IPV6_RECVERR:
2009 {
2010 struct errhdr6_t {
2011 struct sock_extended_err ee;
2012 struct sockaddr_in6 offender;
2013 };
2014 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2015 struct errhdr6_t *target_errh =
2016 (struct errhdr6_t *)target_data;
2017
2018 if (len != sizeof(struct errhdr6_t) ||
2019 tgt_len != sizeof(struct errhdr6_t)) {
2020 goto unimplemented;
2021 }
2022 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2023 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2024 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2025 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2026 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2027 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2028 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2029 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2030 (void *) &errh->offender, sizeof(errh->offender));
2031 break;
2032 }
2033 default:
2034 goto unimplemented;
2035 }
2036 break;
2037
2038 default:
2039 unimplemented:
2040 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2041 cmsg->cmsg_level, cmsg->cmsg_type);
2042 memcpy(target_data, data, MIN(len, tgt_len));
2043 if (tgt_len > len) {
2044 memset(target_data + len, 0, tgt_len - len);
2045 }
2046 }
2047
2048 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2049 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2050 if (msg_controllen < tgt_space) {
2051 tgt_space = msg_controllen;
2052 }
2053 msg_controllen -= tgt_space;
2054 space += tgt_space;
2055 cmsg = CMSG_NXTHDR(msgh, cmsg);
2056 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2057 target_cmsg_start);
2058 }
2059 unlock_user(target_cmsg, target_cmsg_addr, space);
2060 the_end:
2061 target_msgh->msg_controllen = tswapal(space);
2062 return 0;
2063 }
2064
2065 /* do_setsockopt() Must return target values and target errnos. */
2066 static abi_long do_setsockopt(int sockfd, int level, int optname,
2067 abi_ulong optval_addr, socklen_t optlen)
2068 {
2069 abi_long ret;
2070 int val;
2071 struct ip_mreqn *ip_mreq;
2072 struct ip_mreq_source *ip_mreq_source;
2073
2074 switch(level) {
2075 case SOL_TCP:
2076 case SOL_UDP:
2077 /* TCP and UDP options all take an 'int' value. */
2078 if (optlen < sizeof(uint32_t))
2079 return -TARGET_EINVAL;
2080
2081 if (get_user_u32(val, optval_addr))
2082 return -TARGET_EFAULT;
2083 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2084 break;
2085 case SOL_IP:
2086 switch(optname) {
2087 case IP_TOS:
2088 case IP_TTL:
2089 case IP_HDRINCL:
2090 case IP_ROUTER_ALERT:
2091 case IP_RECVOPTS:
2092 case IP_RETOPTS:
2093 case IP_PKTINFO:
2094 case IP_MTU_DISCOVER:
2095 case IP_RECVERR:
2096 case IP_RECVTTL:
2097 case IP_RECVTOS:
2098 #ifdef IP_FREEBIND
2099 case IP_FREEBIND:
2100 #endif
2101 case IP_MULTICAST_TTL:
2102 case IP_MULTICAST_LOOP:
2103 val = 0;
2104 if (optlen >= sizeof(uint32_t)) {
2105 if (get_user_u32(val, optval_addr))
2106 return -TARGET_EFAULT;
2107 } else if (optlen >= 1) {
2108 if (get_user_u8(val, optval_addr))
2109 return -TARGET_EFAULT;
2110 }
2111 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2112 break;
2113 case IP_ADD_MEMBERSHIP:
2114 case IP_DROP_MEMBERSHIP:
2115 if (optlen < sizeof (struct target_ip_mreq) ||
2116 optlen > sizeof (struct target_ip_mreqn))
2117 return -TARGET_EINVAL;
2118
2119 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2120 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2121 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2122 break;
2123
2124 case IP_BLOCK_SOURCE:
2125 case IP_UNBLOCK_SOURCE:
2126 case IP_ADD_SOURCE_MEMBERSHIP:
2127 case IP_DROP_SOURCE_MEMBERSHIP:
2128 if (optlen != sizeof (struct target_ip_mreq_source))
2129 return -TARGET_EINVAL;
2130
2131 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2132 if (!ip_mreq_source) {
2133 return -TARGET_EFAULT;
2134 }
2135 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2136 unlock_user (ip_mreq_source, optval_addr, 0);
2137 break;
2138
2139 default:
2140 goto unimplemented;
2141 }
2142 break;
2143 case SOL_IPV6:
2144 switch (optname) {
2145 case IPV6_MTU_DISCOVER:
2146 case IPV6_MTU:
2147 case IPV6_V6ONLY:
2148 case IPV6_RECVPKTINFO:
2149 case IPV6_UNICAST_HOPS:
2150 case IPV6_MULTICAST_HOPS:
2151 case IPV6_MULTICAST_LOOP:
2152 case IPV6_RECVERR:
2153 case IPV6_RECVHOPLIMIT:
2154 case IPV6_2292HOPLIMIT:
2155 case IPV6_CHECKSUM:
2156 case IPV6_ADDRFORM:
2157 case IPV6_2292PKTINFO:
2158 case IPV6_RECVTCLASS:
2159 case IPV6_RECVRTHDR:
2160 case IPV6_2292RTHDR:
2161 case IPV6_RECVHOPOPTS:
2162 case IPV6_2292HOPOPTS:
2163 case IPV6_RECVDSTOPTS:
2164 case IPV6_2292DSTOPTS:
2165 case IPV6_TCLASS:
2166 case IPV6_ADDR_PREFERENCES:
2167 #ifdef IPV6_RECVPATHMTU
2168 case IPV6_RECVPATHMTU:
2169 #endif
2170 #ifdef IPV6_TRANSPARENT
2171 case IPV6_TRANSPARENT:
2172 #endif
2173 #ifdef IPV6_FREEBIND
2174 case IPV6_FREEBIND:
2175 #endif
2176 #ifdef IPV6_RECVORIGDSTADDR
2177 case IPV6_RECVORIGDSTADDR:
2178 #endif
2179 val = 0;
2180 if (optlen < sizeof(uint32_t)) {
2181 return -TARGET_EINVAL;
2182 }
2183 if (get_user_u32(val, optval_addr)) {
2184 return -TARGET_EFAULT;
2185 }
2186 ret = get_errno(setsockopt(sockfd, level, optname,
2187 &val, sizeof(val)));
2188 break;
2189 case IPV6_PKTINFO:
2190 {
2191 struct in6_pktinfo pki;
2192
2193 if (optlen < sizeof(pki)) {
2194 return -TARGET_EINVAL;
2195 }
2196
2197 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2198 return -TARGET_EFAULT;
2199 }
2200
2201 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2202
2203 ret = get_errno(setsockopt(sockfd, level, optname,
2204 &pki, sizeof(pki)));
2205 break;
2206 }
2207 case IPV6_ADD_MEMBERSHIP:
2208 case IPV6_DROP_MEMBERSHIP:
2209 {
2210 struct ipv6_mreq ipv6mreq;
2211
2212 if (optlen < sizeof(ipv6mreq)) {
2213 return -TARGET_EINVAL;
2214 }
2215
2216 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2217 return -TARGET_EFAULT;
2218 }
2219
2220 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2221
2222 ret = get_errno(setsockopt(sockfd, level, optname,
2223 &ipv6mreq, sizeof(ipv6mreq)));
2224 break;
2225 }
2226 default:
2227 goto unimplemented;
2228 }
2229 break;
2230 case SOL_ICMPV6:
2231 switch (optname) {
2232 case ICMPV6_FILTER:
2233 {
2234 struct icmp6_filter icmp6f;
2235
2236 if (optlen > sizeof(icmp6f)) {
2237 optlen = sizeof(icmp6f);
2238 }
2239
2240 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2241 return -TARGET_EFAULT;
2242 }
2243
2244 for (val = 0; val < 8; val++) {
2245 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2246 }
2247
2248 ret = get_errno(setsockopt(sockfd, level, optname,
2249 &icmp6f, optlen));
2250 break;
2251 }
2252 default:
2253 goto unimplemented;
2254 }
2255 break;
2256 case SOL_RAW:
2257 switch (optname) {
2258 case ICMP_FILTER:
2259 case IPV6_CHECKSUM:
2260 /* those take an u32 value */
2261 if (optlen < sizeof(uint32_t)) {
2262 return -TARGET_EINVAL;
2263 }
2264
2265 if (get_user_u32(val, optval_addr)) {
2266 return -TARGET_EFAULT;
2267 }
2268 ret = get_errno(setsockopt(sockfd, level, optname,
2269 &val, sizeof(val)));
2270 break;
2271
2272 default:
2273 goto unimplemented;
2274 }
2275 break;
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2277 case SOL_ALG:
2278 switch (optname) {
2279 case ALG_SET_KEY:
2280 {
2281 char *alg_key = g_malloc(optlen);
2282
2283 if (!alg_key) {
2284 return -TARGET_ENOMEM;
2285 }
2286 if (copy_from_user(alg_key, optval_addr, optlen)) {
2287 g_free(alg_key);
2288 return -TARGET_EFAULT;
2289 }
2290 ret = get_errno(setsockopt(sockfd, level, optname,
2291 alg_key, optlen));
2292 g_free(alg_key);
2293 break;
2294 }
2295 case ALG_SET_AEAD_AUTHSIZE:
2296 {
2297 ret = get_errno(setsockopt(sockfd, level, optname,
2298 NULL, optlen));
2299 break;
2300 }
2301 default:
2302 goto unimplemented;
2303 }
2304 break;
2305 #endif
2306 case TARGET_SOL_SOCKET:
2307 switch (optname) {
2308 case TARGET_SO_RCVTIMEO:
2309 {
2310 struct timeval tv;
2311
2312 optname = SO_RCVTIMEO;
2313
2314 set_timeout:
2315 if (optlen != sizeof(struct target_timeval)) {
2316 return -TARGET_EINVAL;
2317 }
2318
2319 if (copy_from_user_timeval(&tv, optval_addr)) {
2320 return -TARGET_EFAULT;
2321 }
2322
2323 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2324 &tv, sizeof(tv)));
2325 return ret;
2326 }
2327 case TARGET_SO_SNDTIMEO:
2328 optname = SO_SNDTIMEO;
2329 goto set_timeout;
2330 case TARGET_SO_ATTACH_FILTER:
2331 {
2332 struct target_sock_fprog *tfprog;
2333 struct target_sock_filter *tfilter;
2334 struct sock_fprog fprog;
2335 struct sock_filter *filter;
2336 int i;
2337
2338 if (optlen != sizeof(*tfprog)) {
2339 return -TARGET_EINVAL;
2340 }
2341 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2342 return -TARGET_EFAULT;
2343 }
2344 if (!lock_user_struct(VERIFY_READ, tfilter,
2345 tswapal(tfprog->filter), 0)) {
2346 unlock_user_struct(tfprog, optval_addr, 1);
2347 return -TARGET_EFAULT;
2348 }
2349
2350 fprog.len = tswap16(tfprog->len);
2351 filter = g_try_new(struct sock_filter, fprog.len);
2352 if (filter == NULL) {
2353 unlock_user_struct(tfilter, tfprog->filter, 1);
2354 unlock_user_struct(tfprog, optval_addr, 1);
2355 return -TARGET_ENOMEM;
2356 }
2357 for (i = 0; i < fprog.len; i++) {
2358 filter[i].code = tswap16(tfilter[i].code);
2359 filter[i].jt = tfilter[i].jt;
2360 filter[i].jf = tfilter[i].jf;
2361 filter[i].k = tswap32(tfilter[i].k);
2362 }
2363 fprog.filter = filter;
2364
2365 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2366 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2367 g_free(filter);
2368
2369 unlock_user_struct(tfilter, tfprog->filter, 1);
2370 unlock_user_struct(tfprog, optval_addr, 1);
2371 return ret;
2372 }
2373 case TARGET_SO_BINDTODEVICE:
2374 {
2375 char *dev_ifname, *addr_ifname;
2376
2377 if (optlen > IFNAMSIZ - 1) {
2378 optlen = IFNAMSIZ - 1;
2379 }
2380 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2381 if (!dev_ifname) {
2382 return -TARGET_EFAULT;
2383 }
2384 optname = SO_BINDTODEVICE;
2385 addr_ifname = alloca(IFNAMSIZ);
2386 memcpy(addr_ifname, dev_ifname, optlen);
2387 addr_ifname[optlen] = 0;
2388 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2389 addr_ifname, optlen));
2390 unlock_user (dev_ifname, optval_addr, 0);
2391 return ret;
2392 }
2393 case TARGET_SO_LINGER:
2394 {
2395 struct linger lg;
2396 struct target_linger *tlg;
2397
2398 if (optlen != sizeof(struct target_linger)) {
2399 return -TARGET_EINVAL;
2400 }
2401 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2402 return -TARGET_EFAULT;
2403 }
2404 __get_user(lg.l_onoff, &tlg->l_onoff);
2405 __get_user(lg.l_linger, &tlg->l_linger);
2406 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2407 &lg, sizeof(lg)));
2408 unlock_user_struct(tlg, optval_addr, 0);
2409 return ret;
2410 }
2411 /* Options with 'int' argument. */
2412 case TARGET_SO_DEBUG:
2413 optname = SO_DEBUG;
2414 break;
2415 case TARGET_SO_REUSEADDR:
2416 optname = SO_REUSEADDR;
2417 break;
2418 #ifdef SO_REUSEPORT
2419 case TARGET_SO_REUSEPORT:
2420 optname = SO_REUSEPORT;
2421 break;
2422 #endif
2423 case TARGET_SO_TYPE:
2424 optname = SO_TYPE;
2425 break;
2426 case TARGET_SO_ERROR:
2427 optname = SO_ERROR;
2428 break;
2429 case TARGET_SO_DONTROUTE:
2430 optname = SO_DONTROUTE;
2431 break;
2432 case TARGET_SO_BROADCAST:
2433 optname = SO_BROADCAST;
2434 break;
2435 case TARGET_SO_SNDBUF:
2436 optname = SO_SNDBUF;
2437 break;
2438 case TARGET_SO_SNDBUFFORCE:
2439 optname = SO_SNDBUFFORCE;
2440 break;
2441 case TARGET_SO_RCVBUF:
2442 optname = SO_RCVBUF;
2443 break;
2444 case TARGET_SO_RCVBUFFORCE:
2445 optname = SO_RCVBUFFORCE;
2446 break;
2447 case TARGET_SO_KEEPALIVE:
2448 optname = SO_KEEPALIVE;
2449 break;
2450 case TARGET_SO_OOBINLINE:
2451 optname = SO_OOBINLINE;
2452 break;
2453 case TARGET_SO_NO_CHECK:
2454 optname = SO_NO_CHECK;
2455 break;
2456 case TARGET_SO_PRIORITY:
2457 optname = SO_PRIORITY;
2458 break;
2459 #ifdef SO_BSDCOMPAT
2460 case TARGET_SO_BSDCOMPAT:
2461 optname = SO_BSDCOMPAT;
2462 break;
2463 #endif
2464 case TARGET_SO_PASSCRED:
2465 optname = SO_PASSCRED;
2466 break;
2467 case TARGET_SO_PASSSEC:
2468 optname = SO_PASSSEC;
2469 break;
2470 case TARGET_SO_TIMESTAMP:
2471 optname = SO_TIMESTAMP;
2472 break;
2473 case TARGET_SO_RCVLOWAT:
2474 optname = SO_RCVLOWAT;
2475 break;
2476 default:
2477 goto unimplemented;
2478 }
2479 if (optlen < sizeof(uint32_t))
2480 return -TARGET_EINVAL;
2481
2482 if (get_user_u32(val, optval_addr))
2483 return -TARGET_EFAULT;
2484 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2485 break;
2486 #ifdef SOL_NETLINK
2487 case SOL_NETLINK:
2488 switch (optname) {
2489 case NETLINK_PKTINFO:
2490 case NETLINK_ADD_MEMBERSHIP:
2491 case NETLINK_DROP_MEMBERSHIP:
2492 case NETLINK_BROADCAST_ERROR:
2493 case NETLINK_NO_ENOBUFS:
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2495 case NETLINK_LISTEN_ALL_NSID:
2496 case NETLINK_CAP_ACK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2498 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2499 case NETLINK_EXT_ACK:
2500 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2501 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2502 case NETLINK_GET_STRICT_CHK:
2503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2504 break;
2505 default:
2506 goto unimplemented;
2507 }
2508 val = 0;
2509 if (optlen < sizeof(uint32_t)) {
2510 return -TARGET_EINVAL;
2511 }
2512 if (get_user_u32(val, optval_addr)) {
2513 return -TARGET_EFAULT;
2514 }
2515 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2516 sizeof(val)));
2517 break;
2518 #endif /* SOL_NETLINK */
2519 default:
2520 unimplemented:
2521 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2522 level, optname);
2523 ret = -TARGET_ENOPROTOOPT;
2524 }
2525 return ret;
2526 }
2527
2528 /* do_getsockopt() Must return target values and target errnos. */
2529 static abi_long do_getsockopt(int sockfd, int level, int optname,
2530 abi_ulong optval_addr, abi_ulong optlen)
2531 {
2532 abi_long ret;
2533 int len, val;
2534 socklen_t lv;
2535
2536 switch(level) {
2537 case TARGET_SOL_SOCKET:
2538 level = SOL_SOCKET;
2539 switch (optname) {
2540 /* These don't just return a single integer */
2541 case TARGET_SO_PEERNAME:
2542 goto unimplemented;
2543 case TARGET_SO_RCVTIMEO: {
2544 struct timeval tv;
2545 socklen_t tvlen;
2546
2547 optname = SO_RCVTIMEO;
2548
2549 get_timeout:
2550 if (get_user_u32(len, optlen)) {
2551 return -TARGET_EFAULT;
2552 }
2553 if (len < 0) {
2554 return -TARGET_EINVAL;
2555 }
2556
2557 tvlen = sizeof(tv);
2558 ret = get_errno(getsockopt(sockfd, level, optname,
2559 &tv, &tvlen));
2560 if (ret < 0) {
2561 return ret;
2562 }
2563 if (len > sizeof(struct target_timeval)) {
2564 len = sizeof(struct target_timeval);
2565 }
2566 if (copy_to_user_timeval(optval_addr, &tv)) {
2567 return -TARGET_EFAULT;
2568 }
2569 if (put_user_u32(len, optlen)) {
2570 return -TARGET_EFAULT;
2571 }
2572 break;
2573 }
2574 case TARGET_SO_SNDTIMEO:
2575 optname = SO_SNDTIMEO;
2576 goto get_timeout;
2577 case TARGET_SO_PEERCRED: {
2578 struct ucred cr;
2579 socklen_t crlen;
2580 struct target_ucred *tcr;
2581
2582 if (get_user_u32(len, optlen)) {
2583 return -TARGET_EFAULT;
2584 }
2585 if (len < 0) {
2586 return -TARGET_EINVAL;
2587 }
2588
2589 crlen = sizeof(cr);
2590 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2591 &cr, &crlen));
2592 if (ret < 0) {
2593 return ret;
2594 }
2595 if (len > crlen) {
2596 len = crlen;
2597 }
2598 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2599 return -TARGET_EFAULT;
2600 }
2601 __put_user(cr.pid, &tcr->pid);
2602 __put_user(cr.uid, &tcr->uid);
2603 __put_user(cr.gid, &tcr->gid);
2604 unlock_user_struct(tcr, optval_addr, 1);
2605 if (put_user_u32(len, optlen)) {
2606 return -TARGET_EFAULT;
2607 }
2608 break;
2609 }
2610 case TARGET_SO_PEERSEC: {
2611 char *name;
2612
2613 if (get_user_u32(len, optlen)) {
2614 return -TARGET_EFAULT;
2615 }
2616 if (len < 0) {
2617 return -TARGET_EINVAL;
2618 }
2619 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2620 if (!name) {
2621 return -TARGET_EFAULT;
2622 }
2623 lv = len;
2624 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2625 name, &lv));
2626 if (put_user_u32(lv, optlen)) {
2627 ret = -TARGET_EFAULT;
2628 }
2629 unlock_user(name, optval_addr, lv);
2630 break;
2631 }
2632 case TARGET_SO_LINGER:
2633 {
2634 struct linger lg;
2635 socklen_t lglen;
2636 struct target_linger *tlg;
2637
2638 if (get_user_u32(len, optlen)) {
2639 return -TARGET_EFAULT;
2640 }
2641 if (len < 0) {
2642 return -TARGET_EINVAL;
2643 }
2644
2645 lglen = sizeof(lg);
2646 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2647 &lg, &lglen));
2648 if (ret < 0) {
2649 return ret;
2650 }
2651 if (len > lglen) {
2652 len = lglen;
2653 }
2654 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2655 return -TARGET_EFAULT;
2656 }
2657 __put_user(lg.l_onoff, &tlg->l_onoff);
2658 __put_user(lg.l_linger, &tlg->l_linger);
2659 unlock_user_struct(tlg, optval_addr, 1);
2660 if (put_user_u32(len, optlen)) {
2661 return -TARGET_EFAULT;
2662 }
2663 break;
2664 }
2665 /* Options with 'int' argument. */
2666 case TARGET_SO_DEBUG:
2667 optname = SO_DEBUG;
2668 goto int_case;
2669 case TARGET_SO_REUSEADDR:
2670 optname = SO_REUSEADDR;
2671 goto int_case;
2672 #ifdef SO_REUSEPORT
2673 case TARGET_SO_REUSEPORT:
2674 optname = SO_REUSEPORT;
2675 goto int_case;
2676 #endif
2677 case TARGET_SO_TYPE:
2678 optname = SO_TYPE;
2679 goto int_case;
2680 case TARGET_SO_ERROR:
2681 optname = SO_ERROR;
2682 goto int_case;
2683 case TARGET_SO_DONTROUTE:
2684 optname = SO_DONTROUTE;
2685 goto int_case;
2686 case TARGET_SO_BROADCAST:
2687 optname = SO_BROADCAST;
2688 goto int_case;
2689 case TARGET_SO_SNDBUF:
2690 optname = SO_SNDBUF;
2691 goto int_case;
2692 case TARGET_SO_RCVBUF:
2693 optname = SO_RCVBUF;
2694 goto int_case;
2695 case TARGET_SO_KEEPALIVE:
2696 optname = SO_KEEPALIVE;
2697 goto int_case;
2698 case TARGET_SO_OOBINLINE:
2699 optname = SO_OOBINLINE;
2700 goto int_case;
2701 case TARGET_SO_NO_CHECK:
2702 optname = SO_NO_CHECK;
2703 goto int_case;
2704 case TARGET_SO_PRIORITY:
2705 optname = SO_PRIORITY;
2706 goto int_case;
2707 #ifdef SO_BSDCOMPAT
2708 case TARGET_SO_BSDCOMPAT:
2709 optname = SO_BSDCOMPAT;
2710 goto int_case;
2711 #endif
2712 case TARGET_SO_PASSCRED:
2713 optname = SO_PASSCRED;
2714 goto int_case;
2715 case TARGET_SO_TIMESTAMP:
2716 optname = SO_TIMESTAMP;
2717 goto int_case;
2718 case TARGET_SO_RCVLOWAT:
2719 optname = SO_RCVLOWAT;
2720 goto int_case;
2721 case TARGET_SO_ACCEPTCONN:
2722 optname = SO_ACCEPTCONN;
2723 goto int_case;
2724 case TARGET_SO_PROTOCOL:
2725 optname = SO_PROTOCOL;
2726 goto int_case;
2727 case TARGET_SO_DOMAIN:
2728 optname = SO_DOMAIN;
2729 goto int_case;
2730 default:
2731 goto int_case;
2732 }
2733 break;
2734 case SOL_TCP:
2735 case SOL_UDP:
2736 /* TCP and UDP options all take an 'int' value. */
2737 int_case:
2738 if (get_user_u32(len, optlen))
2739 return -TARGET_EFAULT;
2740 if (len < 0)
2741 return -TARGET_EINVAL;
2742 lv = sizeof(lv);
2743 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2744 if (ret < 0)
2745 return ret;
2746 switch (optname) {
2747 case SO_TYPE:
2748 val = host_to_target_sock_type(val);
2749 break;
2750 case SO_ERROR:
2751 val = host_to_target_errno(val);
2752 break;
2753 }
2754 if (len > lv)
2755 len = lv;
2756 if (len == 4) {
2757 if (put_user_u32(val, optval_addr))
2758 return -TARGET_EFAULT;
2759 } else {
2760 if (put_user_u8(val, optval_addr))
2761 return -TARGET_EFAULT;
2762 }
2763 if (put_user_u32(len, optlen))
2764 return -TARGET_EFAULT;
2765 break;
2766 case SOL_IP:
2767 switch(optname) {
2768 case IP_TOS:
2769 case IP_TTL:
2770 case IP_HDRINCL:
2771 case IP_ROUTER_ALERT:
2772 case IP_RECVOPTS:
2773 case IP_RETOPTS:
2774 case IP_PKTINFO:
2775 case IP_MTU_DISCOVER:
2776 case IP_RECVERR:
2777 case IP_RECVTOS:
2778 #ifdef IP_FREEBIND
2779 case IP_FREEBIND:
2780 #endif
2781 case IP_MULTICAST_TTL:
2782 case IP_MULTICAST_LOOP:
2783 if (get_user_u32(len, optlen))
2784 return -TARGET_EFAULT;
2785 if (len < 0)
2786 return -TARGET_EINVAL;
2787 lv = sizeof(lv);
2788 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2789 if (ret < 0)
2790 return ret;
2791 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2792 len = 1;
2793 if (put_user_u32(len, optlen)
2794 || put_user_u8(val, optval_addr))
2795 return -TARGET_EFAULT;
2796 } else {
2797 if (len > sizeof(int))
2798 len = sizeof(int);
2799 if (put_user_u32(len, optlen)
2800 || put_user_u32(val, optval_addr))
2801 return -TARGET_EFAULT;
2802 }
2803 break;
2804 default:
2805 ret = -TARGET_ENOPROTOOPT;
2806 break;
2807 }
2808 break;
2809 case SOL_IPV6:
2810 switch (optname) {
2811 case IPV6_MTU_DISCOVER:
2812 case IPV6_MTU:
2813 case IPV6_V6ONLY:
2814 case IPV6_RECVPKTINFO:
2815 case IPV6_UNICAST_HOPS:
2816 case IPV6_MULTICAST_HOPS:
2817 case IPV6_MULTICAST_LOOP:
2818 case IPV6_RECVERR:
2819 case IPV6_RECVHOPLIMIT:
2820 case IPV6_2292HOPLIMIT:
2821 case IPV6_CHECKSUM:
2822 case IPV6_ADDRFORM:
2823 case IPV6_2292PKTINFO:
2824 case IPV6_RECVTCLASS:
2825 case IPV6_RECVRTHDR:
2826 case IPV6_2292RTHDR:
2827 case IPV6_RECVHOPOPTS:
2828 case IPV6_2292HOPOPTS:
2829 case IPV6_RECVDSTOPTS:
2830 case IPV6_2292DSTOPTS:
2831 case IPV6_TCLASS:
2832 case IPV6_ADDR_PREFERENCES:
2833 #ifdef IPV6_RECVPATHMTU
2834 case IPV6_RECVPATHMTU:
2835 #endif
2836 #ifdef IPV6_TRANSPARENT
2837 case IPV6_TRANSPARENT:
2838 #endif
2839 #ifdef IPV6_FREEBIND
2840 case IPV6_FREEBIND:
2841 #endif
2842 #ifdef IPV6_RECVORIGDSTADDR
2843 case IPV6_RECVORIGDSTADDR:
2844 #endif
2845 if (get_user_u32(len, optlen))
2846 return -TARGET_EFAULT;
2847 if (len < 0)
2848 return -TARGET_EINVAL;
2849 lv = sizeof(lv);
2850 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2851 if (ret < 0)
2852 return ret;
2853 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2854 len = 1;
2855 if (put_user_u32(len, optlen)
2856 || put_user_u8(val, optval_addr))
2857 return -TARGET_EFAULT;
2858 } else {
2859 if (len > sizeof(int))
2860 len = sizeof(int);
2861 if (put_user_u32(len, optlen)
2862 || put_user_u32(val, optval_addr))
2863 return -TARGET_EFAULT;
2864 }
2865 break;
2866 default:
2867 ret = -TARGET_ENOPROTOOPT;
2868 break;
2869 }
2870 break;
2871 #ifdef SOL_NETLINK
2872 case SOL_NETLINK:
2873 switch (optname) {
2874 case NETLINK_PKTINFO:
2875 case NETLINK_BROADCAST_ERROR:
2876 case NETLINK_NO_ENOBUFS:
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2878 case NETLINK_LISTEN_ALL_NSID:
2879 case NETLINK_CAP_ACK:
2880 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2881 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2882 case NETLINK_EXT_ACK:
2883 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2884 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2885 case NETLINK_GET_STRICT_CHK:
2886 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2887 if (get_user_u32(len, optlen)) {
2888 return -TARGET_EFAULT;
2889 }
2890 if (len != sizeof(val)) {
2891 return -TARGET_EINVAL;
2892 }
2893 lv = len;
2894 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2895 if (ret < 0) {
2896 return ret;
2897 }
2898 if (put_user_u32(lv, optlen)
2899 || put_user_u32(val, optval_addr)) {
2900 return -TARGET_EFAULT;
2901 }
2902 break;
2903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2904 case NETLINK_LIST_MEMBERSHIPS:
2905 {
2906 uint32_t *results;
2907 int i;
2908 if (get_user_u32(len, optlen)) {
2909 return -TARGET_EFAULT;
2910 }
2911 if (len < 0) {
2912 return -TARGET_EINVAL;
2913 }
2914 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2915 if (!results && len > 0) {
2916 return -TARGET_EFAULT;
2917 }
2918 lv = len;
2919 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2920 if (ret < 0) {
2921 unlock_user(results, optval_addr, 0);
2922 return ret;
2923 }
2924 /* swap host endianess to target endianess. */
2925 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2926 results[i] = tswap32(results[i]);
2927 }
2928 if (put_user_u32(lv, optlen)) {
2929 return -TARGET_EFAULT;
2930 }
2931 unlock_user(results, optval_addr, 0);
2932 break;
2933 }
2934 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2935 default:
2936 goto unimplemented;
2937 }
2938 break;
2939 #endif /* SOL_NETLINK */
2940 default:
2941 unimplemented:
2942 qemu_log_mask(LOG_UNIMP,
2943 "getsockopt level=%d optname=%d not yet supported\n",
2944 level, optname);
2945 ret = -TARGET_EOPNOTSUPP;
2946 break;
2947 }
2948 return ret;
2949 }
2950
2951 /* Convert target low/high pair representing file offset into the host
2952 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2953 * as the kernel doesn't handle them either.
2954 */
2955 static void target_to_host_low_high(abi_ulong tlow,
2956 abi_ulong thigh,
2957 unsigned long *hlow,
2958 unsigned long *hhigh)
2959 {
2960 uint64_t off = tlow |
2961 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2962 TARGET_LONG_BITS / 2;
2963
2964 *hlow = off;
2965 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2966 }
2967
2968 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2969 abi_ulong count, int copy)
2970 {
2971 struct target_iovec *target_vec;
2972 struct iovec *vec;
2973 abi_ulong total_len, max_len;
2974 int i;
2975 int err = 0;
2976 bool bad_address = false;
2977
2978 if (count == 0) {
2979 errno = 0;
2980 return NULL;
2981 }
2982 if (count > IOV_MAX) {
2983 errno = EINVAL;
2984 return NULL;
2985 }
2986
2987 vec = g_try_new0(struct iovec, count);
2988 if (vec == NULL) {
2989 errno = ENOMEM;
2990 return NULL;
2991 }
2992
2993 target_vec = lock_user(VERIFY_READ, target_addr,
2994 count * sizeof(struct target_iovec), 1);
2995 if (target_vec == NULL) {
2996 err = EFAULT;
2997 goto fail2;
2998 }
2999
3000 /* ??? If host page size > target page size, this will result in a
3001 value larger than what we can actually support. */
3002 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3003 total_len = 0;
3004
3005 for (i = 0; i < count; i++) {
3006 abi_ulong base = tswapal(target_vec[i].iov_base);
3007 abi_long len = tswapal(target_vec[i].iov_len);
3008
3009 if (len < 0) {
3010 err = EINVAL;
3011 goto fail;
3012 } else if (len == 0) {
3013 /* Zero length pointer is ignored. */
3014 vec[i].iov_base = 0;
3015 } else {
3016 vec[i].iov_base = lock_user(type, base, len, copy);
3017 /* If the first buffer pointer is bad, this is a fault. But
3018 * subsequent bad buffers will result in a partial write; this
3019 * is realized by filling the vector with null pointers and
3020 * zero lengths. */
3021 if (!vec[i].iov_base) {
3022 if (i == 0) {
3023 err = EFAULT;
3024 goto fail;
3025 } else {
3026 bad_address = true;
3027 }
3028 }
3029 if (bad_address) {
3030 len = 0;
3031 }
3032 if (len > max_len - total_len) {
3033 len = max_len - total_len;
3034 }
3035 }
3036 vec[i].iov_len = len;
3037 total_len += len;
3038 }
3039
3040 unlock_user(target_vec, target_addr, 0);
3041 return vec;
3042
3043 fail:
3044 while (--i >= 0) {
3045 if (tswapal(target_vec[i].iov_len) > 0) {
3046 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3047 }
3048 }
3049 unlock_user(target_vec, target_addr, 0);
3050 fail2:
3051 g_free(vec);
3052 errno = err;
3053 return NULL;
3054 }
3055
3056 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3057 abi_ulong count, int copy)
3058 {
3059 struct target_iovec *target_vec;
3060 int i;
3061
3062 target_vec = lock_user(VERIFY_READ, target_addr,
3063 count * sizeof(struct target_iovec), 1);
3064 if (target_vec) {
3065 for (i = 0; i < count; i++) {
3066 abi_ulong base = tswapal(target_vec[i].iov_base);
3067 abi_long len = tswapal(target_vec[i].iov_len);
3068 if (len < 0) {
3069 break;
3070 }
3071 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3072 }
3073 unlock_user(target_vec, target_addr, 0);
3074 }
3075
3076 g_free(vec);
3077 }
3078
3079 static inline int target_to_host_sock_type(int *type)
3080 {
3081 int host_type = 0;
3082 int target_type = *type;
3083
3084 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3085 case TARGET_SOCK_DGRAM:
3086 host_type = SOCK_DGRAM;
3087 break;
3088 case TARGET_SOCK_STREAM:
3089 host_type = SOCK_STREAM;
3090 break;
3091 default:
3092 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3093 break;
3094 }
3095 if (target_type & TARGET_SOCK_CLOEXEC) {
3096 #if defined(SOCK_CLOEXEC)
3097 host_type |= SOCK_CLOEXEC;
3098 #else
3099 return -TARGET_EINVAL;
3100 #endif
3101 }
3102 if (target_type & TARGET_SOCK_NONBLOCK) {
3103 #if defined(SOCK_NONBLOCK)
3104 host_type |= SOCK_NONBLOCK;
3105 #elif !defined(O_NONBLOCK)
3106 return -TARGET_EINVAL;
3107 #endif
3108 }
3109 *type = host_type;
3110 return 0;
3111 }
3112
3113 /* Try to emulate socket type flags after socket creation. */
3114 static int sock_flags_fixup(int fd, int target_type)
3115 {
3116 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3117 if (target_type & TARGET_SOCK_NONBLOCK) {
3118 int flags = fcntl(fd, F_GETFL);
3119 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3120 close(fd);
3121 return -TARGET_EINVAL;
3122 }
3123 }
3124 #endif
3125 return fd;
3126 }
3127
3128 /* do_socket() Must return target values and target errnos. */
3129 static abi_long do_socket(int domain, int type, int protocol)
3130 {
3131 int target_type = type;
3132 int ret;
3133
3134 ret = target_to_host_sock_type(&type);
3135 if (ret) {
3136 return ret;
3137 }
3138
3139 if (domain == PF_NETLINK && !(
3140 #ifdef CONFIG_RTNETLINK
3141 protocol == NETLINK_ROUTE ||
3142 #endif
3143 protocol == NETLINK_KOBJECT_UEVENT ||
3144 protocol == NETLINK_AUDIT)) {
3145 return -TARGET_EPROTONOSUPPORT;
3146 }
3147
3148 if (domain == AF_PACKET ||
3149 (domain == AF_INET && type == SOCK_PACKET)) {
3150 protocol = tswap16(protocol);
3151 }
3152
3153 ret = get_errno(socket(domain, type, protocol));
3154 if (ret >= 0) {
3155 ret = sock_flags_fixup(ret, target_type);
3156 if (type == SOCK_PACKET) {
3157 /* Manage an obsolete case :
3158 * if socket type is SOCK_PACKET, bind by name
3159 */
3160 fd_trans_register(ret, &target_packet_trans);
3161 } else if (domain == PF_NETLINK) {
3162 switch (protocol) {
3163 #ifdef CONFIG_RTNETLINK
3164 case NETLINK_ROUTE:
3165 fd_trans_register(ret, &target_netlink_route_trans);
3166 break;
3167 #endif
3168 case NETLINK_KOBJECT_UEVENT:
3169 /* nothing to do: messages are strings */
3170 break;
3171 case NETLINK_AUDIT:
3172 fd_trans_register(ret, &target_netlink_audit_trans);
3173 break;
3174 default:
3175 g_assert_not_reached();
3176 }
3177 }
3178 }
3179 return ret;
3180 }
3181
3182 /* do_bind() Must return target values and target errnos. */
3183 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3184 socklen_t addrlen)
3185 {
3186 void *addr;
3187 abi_long ret;
3188
3189 if ((int)addrlen < 0) {
3190 return -TARGET_EINVAL;
3191 }
3192
3193 addr = alloca(addrlen+1);
3194
3195 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3196 if (ret)
3197 return ret;
3198
3199 return get_errno(bind(sockfd, addr, addrlen));
3200 }
3201
3202 /* do_connect() Must return target values and target errnos. */
3203 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3204 socklen_t addrlen)
3205 {
3206 void *addr;
3207 abi_long ret;
3208
3209 if ((int)addrlen < 0) {
3210 return -TARGET_EINVAL;
3211 }
3212
3213 addr = alloca(addrlen+1);
3214
3215 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3216 if (ret)
3217 return ret;
3218
3219 return get_errno(safe_connect(sockfd, addr, addrlen));
3220 }
3221
3222 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3223 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3224 int flags, int send)
3225 {
3226 abi_long ret, len;
3227 struct msghdr msg;
3228 abi_ulong count;
3229 struct iovec *vec;
3230 abi_ulong target_vec;
3231
3232 if (msgp->msg_name) {
3233 msg.msg_namelen = tswap32(msgp->msg_namelen);
3234 msg.msg_name = alloca(msg.msg_namelen+1);
3235 ret = target_to_host_sockaddr(fd, msg.msg_name,
3236 tswapal(msgp->msg_name),
3237 msg.msg_namelen);
3238 if (ret == -TARGET_EFAULT) {
3239 /* For connected sockets msg_name and msg_namelen must
3240 * be ignored, so returning EFAULT immediately is wrong.
3241 * Instead, pass a bad msg_name to the host kernel, and
3242 * let it decide whether to return EFAULT or not.
3243 */
3244 msg.msg_name = (void *)-1;
3245 } else if (ret) {
3246 goto out2;
3247 }
3248 } else {
3249 msg.msg_name = NULL;
3250 msg.msg_namelen = 0;
3251 }
3252 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3253 msg.msg_control = alloca(msg.msg_controllen);
3254 memset(msg.msg_control, 0, msg.msg_controllen);
3255
3256 msg.msg_flags = tswap32(msgp->msg_flags);
3257
3258 count = tswapal(msgp->msg_iovlen);
3259 target_vec = tswapal(msgp->msg_iov);
3260
3261 if (count > IOV_MAX) {
3262 /* sendrcvmsg returns a different errno for this condition than
3263 * readv/writev, so we must catch it here before lock_iovec() does.
3264 */
3265 ret = -TARGET_EMSGSIZE;
3266 goto out2;
3267 }
3268
3269 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3270 target_vec, count, send);
3271 if (vec == NULL) {
3272 ret = -host_to_target_errno(errno);
3273 /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3274 if (!send || ret) {
3275 goto out2;
3276 }
3277 }
3278 msg.msg_iovlen = count;
3279 msg.msg_iov = vec;
3280
3281 if (send) {
3282 if (fd_trans_target_to_host_data(fd)) {
3283 void *host_msg;
3284
3285 host_msg = g_malloc(msg.msg_iov->iov_len);
3286 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3287 ret = fd_trans_target_to_host_data(fd)(host_msg,
3288 msg.msg_iov->iov_len);
3289 if (ret >= 0) {
3290 msg.msg_iov->iov_base = host_msg;
3291 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3292 }
3293 g_free(host_msg);
3294 } else {
3295 ret = target_to_host_cmsg(&msg, msgp);
3296 if (ret == 0) {
3297 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3298 }
3299 }
3300 } else {
3301 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3302 if (!is_error(ret)) {
3303 len = ret;
3304 if (fd_trans_host_to_target_data(fd)) {
3305 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3306 MIN(msg.msg_iov->iov_len, len));
3307 }
3308 if (!is_error(ret)) {
3309 ret = host_to_target_cmsg(msgp, &msg);
3310 }
3311 if (!is_error(ret)) {
3312 msgp->msg_namelen = tswap32(msg.msg_namelen);
3313 msgp->msg_flags = tswap32(msg.msg_flags);
3314 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3315 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3316 msg.msg_name, msg.msg_namelen);
3317 if (ret) {
3318 goto out;
3319 }
3320 }
3321
3322 ret = len;
3323 }
3324 }
3325 }
3326
3327 out:
3328 if (vec) {
3329 unlock_iovec(vec, target_vec, count, !send);
3330 }
3331 out2:
3332 return ret;
3333 }
3334
3335 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3336 int flags, int send)
3337 {
3338 abi_long ret;
3339 struct target_msghdr *msgp;
3340
3341 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3342 msgp,
3343 target_msg,
3344 send ? 1 : 0)) {
3345 return -TARGET_EFAULT;
3346 }
3347 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3348 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3349 return ret;
3350 }
3351
3352 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3353 * so it might not have this *mmsg-specific flag either.
3354 */
3355 #ifndef MSG_WAITFORONE
3356 #define MSG_WAITFORONE 0x10000
3357 #endif
3358
3359 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3360 unsigned int vlen, unsigned int flags,
3361 int send)
3362 {
3363 struct target_mmsghdr *mmsgp;
3364 abi_long ret = 0;
3365 int i;
3366
3367 if (vlen > UIO_MAXIOV) {
3368 vlen = UIO_MAXIOV;
3369 }
3370
3371 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3372 if (!mmsgp) {
3373 return -TARGET_EFAULT;
3374 }
3375
3376 for (i = 0; i < vlen; i++) {
3377 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3378 if (is_error(ret)) {
3379 break;
3380 }
3381 mmsgp[i].msg_len = tswap32(ret);
3382 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3383 if (flags & MSG_WAITFORONE) {
3384 flags |= MSG_DONTWAIT;
3385 }
3386 }
3387
3388 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3389
3390 /* Return number of datagrams sent if we sent any at all;
3391 * otherwise return the error.
3392 */
3393 if (i) {
3394 return i;
3395 }
3396 return ret;
3397 }
3398
3399 /* do_accept4() Must return target values and target errnos. */
3400 static abi_long do_accept4(int fd, abi_ulong target_addr,
3401 abi_ulong target_addrlen_addr, int flags)
3402 {
3403 socklen_t addrlen, ret_addrlen;
3404 void *addr;
3405 abi_long ret;
3406 int host_flags;
3407
3408 if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3409 return -TARGET_EINVAL;
3410 }
3411
3412 host_flags = 0;
3413 if (flags & TARGET_SOCK_NONBLOCK) {
3414 host_flags |= SOCK_NONBLOCK;
3415 }
3416 if (flags & TARGET_SOCK_CLOEXEC) {
3417 host_flags |= SOCK_CLOEXEC;
3418 }
3419
3420 if (target_addr == 0) {
3421 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3422 }
3423
3424 /* linux returns EFAULT if addrlen pointer is invalid */
3425 if (get_user_u32(addrlen, target_addrlen_addr))
3426 return -TARGET_EFAULT;
3427
3428 if ((int)addrlen < 0) {
3429 return -TARGET_EINVAL;
3430 }
3431
3432 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3433 return -TARGET_EFAULT;
3434 }
3435
3436 addr = alloca(addrlen);
3437
3438 ret_addrlen = addrlen;
3439 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3440 if (!is_error(ret)) {
3441 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3442 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3443 ret = -TARGET_EFAULT;
3444 }
3445 }
3446 return ret;
3447 }
3448
3449 /* do_getpeername() Must return target values and target errnos. */
3450 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3451 abi_ulong target_addrlen_addr)
3452 {
3453 socklen_t addrlen, ret_addrlen;
3454 void *addr;
3455 abi_long ret;
3456
3457 if (get_user_u32(addrlen, target_addrlen_addr))
3458 return -TARGET_EFAULT;
3459
3460 if ((int)addrlen < 0) {
3461 return -TARGET_EINVAL;
3462 }
3463
3464 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3465 return -TARGET_EFAULT;
3466 }
3467
3468 addr = alloca(addrlen);
3469
3470 ret_addrlen = addrlen;
3471 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3472 if (!is_error(ret)) {
3473 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3474 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3475 ret = -TARGET_EFAULT;
3476 }
3477 }
3478 return ret;
3479 }
3480
3481 /* do_getsockname() Must return target values and target errnos. */
3482 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3483 abi_ulong target_addrlen_addr)
3484 {
3485 socklen_t addrlen, ret_addrlen;
3486 void *addr;
3487 abi_long ret;
3488
3489 if (get_user_u32(addrlen, target_addrlen_addr))
3490 return -TARGET_EFAULT;
3491
3492 if ((int)addrlen < 0) {
3493 return -TARGET_EINVAL;
3494 }
3495
3496 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3497 return -TARGET_EFAULT;
3498 }
3499
3500 addr = alloca(addrlen);
3501
3502 ret_addrlen = addrlen;
3503 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3504 if (!is_error(ret)) {
3505 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3506 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3507 ret = -TARGET_EFAULT;
3508 }
3509 }
3510 return ret;
3511 }
3512
3513 /* do_socketpair() Must return target values and target errnos. */
3514 static abi_long do_socketpair(int domain, int type, int protocol,
3515 abi_ulong target_tab_addr)
3516 {
3517 int tab[2];
3518 abi_long ret;
3519
3520 target_to_host_sock_type(&type);
3521
3522 ret = get_errno(socketpair(domain, type, protocol, tab));
3523 if (!is_error(ret)) {
3524 if (put_user_s32(tab[0], target_tab_addr)
3525 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3526 ret = -TARGET_EFAULT;
3527 }
3528 return ret;
3529 }
3530
3531 /* do_sendto() Must return target values and target errnos. */
3532 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3533 abi_ulong target_addr, socklen_t addrlen)
3534 {
3535 void *addr;
3536 void *host_msg;
3537 void *copy_msg = NULL;
3538 abi_long ret;
3539
3540 if ((int)addrlen < 0) {
3541 return -TARGET_EINVAL;
3542 }
3543
3544 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3545 if (!host_msg)
3546 return -TARGET_EFAULT;
3547 if (fd_trans_target_to_host_data(fd)) {
3548 copy_msg = host_msg;
3549 host_msg = g_malloc(len);
3550 memcpy(host_msg, copy_msg, len);
3551 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3552 if (ret < 0) {
3553 goto fail;
3554 }
3555 }
3556 if (target_addr) {
3557 addr = alloca(addrlen+1);
3558 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3559 if (ret) {
3560 goto fail;
3561 }
3562 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3563 } else {
3564 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3565 }
3566 fail:
3567 if (copy_msg) {
3568 g_free(host_msg);
3569 host_msg = copy_msg;
3570 }
3571 unlock_user(host_msg, msg, 0);
3572 return ret;
3573 }
3574
3575 /* do_recvfrom() Must return target values and target errnos. */
3576 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3577 abi_ulong target_addr,
3578 abi_ulong target_addrlen)
3579 {
3580 socklen_t addrlen, ret_addrlen;
3581 void *addr;
3582 void *host_msg;
3583 abi_long ret;
3584
3585 if (!msg) {
3586 host_msg = NULL;
3587 } else {
3588 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3589 if (!host_msg) {
3590 return -TARGET_EFAULT;
3591 }
3592 }
3593 if (target_addr) {
3594 if (get_user_u32(addrlen, target_addrlen)) {
3595 ret = -TARGET_EFAULT;
3596 goto fail;
3597 }
3598 if ((int)addrlen < 0) {
3599 ret = -TARGET_EINVAL;
3600 goto fail;
3601 }
3602 addr = alloca(addrlen);
3603 ret_addrlen = addrlen;
3604 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3605 addr, &ret_addrlen));
3606 } else {
3607 addr = NULL; /* To keep compiler quiet. */
3608 addrlen = 0; /* To keep compiler quiet. */
3609 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3610 }
3611 if (!is_error(ret)) {
3612 if (fd_trans_host_to_target_data(fd)) {
3613 abi_long trans;
3614 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3615 if (is_error(trans)) {
3616 ret = trans;
3617 goto fail;
3618 }
3619 }
3620 if (target_addr) {
3621 host_to_target_sockaddr(target_addr, addr,
3622 MIN(addrlen, ret_addrlen));
3623 if (put_user_u32(ret_addrlen, target_addrlen)) {
3624 ret = -TARGET_EFAULT;
3625 goto fail;
3626 }
3627 }
3628 unlock_user(host_msg, msg, len);
3629 } else {
3630 fail:
3631 unlock_user(host_msg, msg, 0);
3632 }
3633 return ret;
3634 }
3635
3636 #ifdef TARGET_NR_socketcall
3637 /* do_socketcall() must return target values and target errnos. */
3638 static abi_long do_socketcall(int num, abi_ulong vptr)
3639 {
3640 static const unsigned nargs[] = { /* number of arguments per operation */
3641 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3642 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3643 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3644 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3645 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3646 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3647 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3648 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3649 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3650 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3651 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3652 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3653 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3654 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3655 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3656 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3657 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3658 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3659 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3660 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3661 };
3662 abi_long a[6]; /* max 6 args */
3663 unsigned i;
3664
3665 /* check the range of the first argument num */
3666 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3667 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3668 return -TARGET_EINVAL;
3669 }
3670 /* ensure we have space for args */
3671 if (nargs[num] > ARRAY_SIZE(a)) {
3672 return -TARGET_EINVAL;
3673 }
3674 /* collect the arguments in a[] according to nargs[] */
3675 for (i = 0; i < nargs[num]; ++i) {
3676 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3677 return -TARGET_EFAULT;
3678 }
3679 }
3680 /* now when we have the args, invoke the appropriate underlying function */
3681 switch (num) {
3682 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3683 return do_socket(a[0], a[1], a[2]);
3684 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3685 return do_bind(a[0], a[1], a[2]);
3686 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3687 return do_connect(a[0], a[1], a[2]);
3688 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3689 return get_errno(listen(a[0], a[1]));
3690 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3691 return do_accept4(a[0], a[1], a[2], 0);
3692 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3693 return do_getsockname(a[0], a[1], a[2]);
3694 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3695 return do_getpeername(a[0], a[1], a[2]);
3696 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3697 return do_socketpair(a[0], a[1], a[2], a[3]);
3698 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3699 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3700 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3701 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3702 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3703 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3704 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3705 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3706 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3707 return get_errno(shutdown(a[0], a[1]));
3708 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3709 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3710 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3711 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3712 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3713 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3714 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3715 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3716 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3717 return do_accept4(a[0], a[1], a[2], a[3]);
3718 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3719 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3720 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3721 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3722 default:
3723 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3724 return -TARGET_EINVAL;
3725 }
3726 }
3727 #endif
3728
3729 #define N_SHM_REGIONS 32
3730
3731 static struct shm_region {
3732 abi_ulong start;
3733 abi_ulong size;
3734 bool in_use;
3735 } shm_regions[N_SHM_REGIONS];
3736
3737 #ifndef TARGET_SEMID64_DS
3738 /* asm-generic version of this struct */
3739 struct target_semid64_ds
3740 {
3741 struct target_ipc_perm sem_perm;
3742 abi_ulong sem_otime;
3743 #if TARGET_ABI_BITS == 32
3744 abi_ulong __unused1;
3745 #endif
3746 abi_ulong sem_ctime;
3747 #if TARGET_ABI_BITS == 32
3748 abi_ulong __unused2;
3749 #endif
3750 abi_ulong sem_nsems;
3751 abi_ulong __unused3;
3752 abi_ulong __unused4;
3753 };
3754 #endif
3755
3756 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3757 abi_ulong target_addr)
3758 {
3759 struct target_ipc_perm *target_ip;
3760 struct target_semid64_ds *target_sd;
3761
3762 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3763 return -TARGET_EFAULT;
3764 target_ip = &(target_sd->sem_perm);
3765 host_ip->__key = tswap32(target_ip->__key);
3766 host_ip->uid = tswap32(target_ip->uid);
3767 host_ip->gid = tswap32(target_ip->gid);
3768 host_ip->cuid = tswap32(target_ip->cuid);
3769 host_ip->cgid = tswap32(target_ip->cgid);
3770 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3771 host_ip->mode = tswap32(target_ip->mode);
3772 #else
3773 host_ip->mode = tswap16(target_ip->mode);
3774 #endif
3775 #if defined(TARGET_PPC)
3776 host_ip->__seq = tswap32(target_ip->__seq);
3777 #else
3778 host_ip->__seq = tswap16(target_ip->__seq);
3779 #endif
3780 unlock_user_struct(target_sd, target_addr, 0);
3781 return 0;
3782 }
3783
3784 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3785 struct ipc_perm *host_ip)
3786 {
3787 struct target_ipc_perm *target_ip;
3788 struct target_semid64_ds *target_sd;
3789
3790 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3791 return -TARGET_EFAULT;
3792 target_ip = &(target_sd->sem_perm);
3793 target_ip->__key = tswap32(host_ip->__key);
3794 target_ip->uid = tswap32(host_ip->uid);
3795 target_ip->gid = tswap32(host_ip->gid);
3796 target_ip->cuid = tswap32(host_ip->cuid);
3797 target_ip->cgid = tswap32(host_ip->cgid);
3798 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3799 target_ip->mode = tswap32(host_ip->mode);
3800 #else
3801 target_ip->mode = tswap16(host_ip->mode);
3802 #endif
3803 #if defined(TARGET_PPC)
3804 target_ip->__seq = tswap32(host_ip->__seq);
3805 #else
3806 target_ip->__seq = tswap16(host_ip->__seq);
3807 #endif
3808 unlock_user_struct(target_sd, target_addr, 1);
3809 return 0;
3810 }
3811
3812 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3813 abi_ulong target_addr)
3814 {
3815 struct target_semid64_ds *target_sd;
3816
3817 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3818 return -TARGET_EFAULT;
3819 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3820 return -TARGET_EFAULT;
3821 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3822 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3823 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3824 unlock_user_struct(target_sd, target_addr, 0);
3825 return 0;
3826 }
3827
3828 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3829 struct semid_ds *host_sd)
3830 {
3831 struct target_semid64_ds *target_sd;
3832
3833 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3834 return -TARGET_EFAULT;
3835 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3836 return -TARGET_EFAULT;
3837 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3838 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3839 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3840 unlock_user_struct(target_sd, target_addr, 1);
3841 return 0;
3842 }
3843
3844 struct target_seminfo {
3845 int semmap;
3846 int semmni;
3847 int semmns;
3848 int semmnu;
3849 int semmsl;
3850 int semopm;
3851 int semume;
3852 int semusz;
3853 int semvmx;
3854 int semaem;
3855 };
3856
3857 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3858 struct seminfo *host_seminfo)
3859 {
3860 struct target_seminfo *target_seminfo;
3861 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3862 return -TARGET_EFAULT;
3863 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3864 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3865 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3866 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3867 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3868 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3869 __put_user(host_seminfo->semume, &target_seminfo->semume);
3870 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3871 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3872 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3873 unlock_user_struct(target_seminfo, target_addr, 1);
3874 return 0;
3875 }
3876
3877 union semun {
3878 int val;
3879 struct semid_ds *buf;
3880 unsigned short *array;
3881 struct seminfo *__buf;
3882 };
3883
3884 union target_semun {
3885 int val;
3886 abi_ulong buf;
3887 abi_ulong array;
3888 abi_ulong __buf;
3889 };
3890
3891 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3892 abi_ulong target_addr)
3893 {
3894 int nsems;
3895 unsigned short *array;
3896 union semun semun;
3897 struct semid_ds semid_ds;
3898 int i, ret;
3899
3900 semun.buf = &semid_ds;
3901
3902 ret = semctl(semid, 0, IPC_STAT, semun);
3903 if (ret == -1)
3904 return get_errno(ret);
3905
3906 nsems = semid_ds.sem_nsems;
3907
3908 *host_array = g_try_new(unsigned short, nsems);
3909 if (!*host_array) {
3910 return -TARGET_ENOMEM;
3911 }
3912 array = lock_user(VERIFY_READ, target_addr,
3913 nsems*sizeof(unsigned short), 1);
3914 if (!array) {
3915 g_free(*host_array);
3916 return -TARGET_EFAULT;
3917 }
3918
3919 for(i=0; i<nsems; i++) {
3920 __get_user((*host_array)[i], &array[i]);
3921 }
3922 unlock_user(array, target_addr, 0);
3923
3924 return 0;
3925 }
3926
3927 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3928 unsigned short **host_array)
3929 {
3930 int nsems;
3931 unsigned short *array;
3932 union semun semun;
3933 struct semid_ds semid_ds;
3934 int i, ret;
3935
3936 semun.buf = &semid_ds;
3937
3938 ret = semctl(semid, 0, IPC_STAT, semun);
3939 if (ret == -1)
3940 return get_errno(ret);
3941
3942 nsems = semid_ds.sem_nsems;
3943
3944 array = lock_user(VERIFY_WRITE, target_addr,
3945 nsems*sizeof(unsigned short), 0);
3946 if (!array)
3947 return -TARGET_EFAULT;
3948
3949 for(i=0; i<nsems; i++) {
3950 __put_user((*host_array)[i], &array[i]);
3951 }
3952 g_free(*host_array);
3953 unlock_user(array, target_addr, 1);
3954
3955 return 0;
3956 }
3957
3958 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3959 abi_ulong target_arg)
3960 {
3961 union target_semun target_su = { .buf = target_arg };
3962 union semun arg;
3963 struct semid_ds dsarg;
3964 unsigned short *array = NULL;
3965 struct seminfo seminfo;
3966 abi_long ret = -TARGET_EINVAL;
3967 abi_long err;
3968 cmd &= 0xff;
3969
3970 switch( cmd ) {
3971 case GETVAL:
3972 case SETVAL:
3973 /* In 64 bit cross-endian situations, we will erroneously pick up
3974 * the wrong half of the union for the "val" element. To rectify
3975 * this, the entire 8-byte structure is byteswapped, followed by
3976 * a swap of the 4 byte val field. In other cases, the data is
3977 * already in proper host byte order. */
3978 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3979 target_su.buf = tswapal(target_su.buf);
3980 arg.val = tswap32(target_su.val);
3981 } else {
3982 arg.val = target_su.val;
3983 }
3984 ret = get_errno(semctl(semid, semnum, cmd, arg));
3985 break;
3986 case GETALL:
3987 case SETALL:
3988 err = target_to_host_semarray(semid, &array, target_su.array);
3989 if (err)
3990 return err;
3991 arg.array = array;
3992 ret = get_errno(semctl(semid, semnum, cmd, arg));
3993 err = host_to_target_semarray(semid, target_su.array, &array);
3994 if (err)
3995 return err;
3996 break;
3997 case IPC_STAT:
3998 case IPC_SET:
3999 case SEM_STAT:
4000 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4001 if (err)
4002 return err;
4003 arg.buf = &dsarg;
4004 ret = get_errno(semctl(semid, semnum, cmd, arg));
4005 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4006 if (err)
4007 return err;
4008 break;
4009 case IPC_INFO:
4010 case SEM_INFO:
4011 arg.__buf = &seminfo;
4012 ret = get_errno(semctl(semid, semnum, cmd, arg));
4013 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4014 if (err)
4015 return err;
4016 break;
4017 case IPC_RMID:
4018 case GETPID:
4019 case GETNCNT:
4020 case GETZCNT:
4021 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4022 break;
4023 }
4024
4025 return ret;
4026 }
4027
4028 struct target_sembuf {
4029 unsigned short sem_num;
4030 short sem_op;
4031 short sem_flg;
4032 };
4033
4034 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4035 abi_ulong target_addr,
4036 unsigned nsops)
4037 {
4038 struct target_sembuf *target_sembuf;
4039 int i;
4040
4041 target_sembuf = lock_user(VERIFY_READ, target_addr,
4042 nsops*sizeof(struct target_sembuf), 1);
4043 if (!target_sembuf)
4044 return -TARGET_EFAULT;
4045
4046 for(i=0; i<nsops; i++) {
4047 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4048 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4049 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4050 }
4051
4052 unlock_user(target_sembuf, target_addr, 0);
4053
4054 return 0;
4055 }
4056
4057 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4058 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4059
4060 /*
4061 * This macro is required to handle the s390 variants, which passes the
4062 * arguments in a different order than default.
4063 */
4064 #ifdef __s390x__
4065 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4066 (__nsops), (__timeout), (__sops)
4067 #else
4068 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4069 (__nsops), 0, (__sops), (__timeout)
4070 #endif
4071
4072 static inline abi_long do_semtimedop(int semid,
4073 abi_long ptr,
4074 unsigned nsops,
4075 abi_long timeout, bool time64)
4076 {
4077 struct sembuf *sops;
4078 struct timespec ts, *pts = NULL;
4079 abi_long ret;
4080
4081 if (timeout) {
4082 pts = &ts;
4083 if (time64) {
4084 if (target_to_host_timespec64(pts, timeout)) {
4085 return -TARGET_EFAULT;
4086 }
4087 } else {
4088 if (target_to_host_timespec(pts, timeout)) {
4089 return -TARGET_EFAULT;
4090 }
4091 }
4092 }
4093
4094 if (nsops > TARGET_SEMOPM) {
4095 return -TARGET_E2BIG;
4096 }
4097
4098 sops = g_new(struct sembuf, nsops);
4099
4100 if (target_to_host_sembuf(sops, ptr, nsops)) {
4101 g_free(sops);
4102 return -TARGET_EFAULT;
4103 }
4104
4105 ret = -TARGET_ENOSYS;
4106 #ifdef __NR_semtimedop
4107 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4108 #endif
4109 #ifdef __NR_ipc
4110 if (ret == -TARGET_ENOSYS) {
4111 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4112 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4113 }
4114 #endif
4115 g_free(sops);
4116 return ret;
4117 }
4118 #endif
4119
4120 struct target_msqid_ds
4121 {
4122 struct target_ipc_perm msg_perm;
4123 abi_ulong msg_stime;
4124 #if TARGET_ABI_BITS == 32
4125 abi_ulong __unused1;
4126 #endif
4127 abi_ulong msg_rtime;
4128 #if TARGET_ABI_BITS == 32
4129 abi_ulong __unused2;
4130 #endif
4131 abi_ulong msg_ctime;
4132 #if TARGET_ABI_BITS == 32
4133 abi_ulong __unused3;
4134 #endif
4135 abi_ulong __msg_cbytes;
4136 abi_ulong msg_qnum;
4137 abi_ulong msg_qbytes;
4138 abi_ulong msg_lspid;
4139 abi_ulong msg_lrpid;
4140 abi_ulong __unused4;
4141 abi_ulong __unused5;
4142 };
4143
4144 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4145 abi_ulong target_addr)
4146 {
4147 struct target_msqid_ds *target_md;
4148
4149 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4150 return -TARGET_EFAULT;
4151 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4152 return -TARGET_EFAULT;
4153 host_md->msg_stime = tswapal(target_md->msg_stime);
4154 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4155 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4156 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4157 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4158 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4159 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4160 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4161 unlock_user_struct(target_md, target_addr, 0);
4162 return 0;
4163 }
4164
4165 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4166 struct msqid_ds *host_md)
4167 {
4168 struct target_msqid_ds *target_md;
4169
4170 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4171 return -TARGET_EFAULT;
4172 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4173 return -TARGET_EFAULT;
4174 target_md->msg_stime = tswapal(host_md->msg_stime);
4175 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4176 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4177 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4178 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4179 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4180 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4181 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4182 unlock_user_struct(target_md, target_addr, 1);
4183 return 0;
4184 }
4185
4186 struct target_msginfo {
4187 int msgpool;
4188 int msgmap;
4189 int msgmax;
4190 int msgmnb;
4191 int msgmni;
4192 int msgssz;
4193 int msgtql;
4194 unsigned short int msgseg;
4195 };
4196
4197 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4198 struct msginfo *host_msginfo)
4199 {
4200 struct target_msginfo *target_msginfo;
4201 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4202 return -TARGET_EFAULT;
4203 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4204 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4205 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4206 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4207 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4208 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4209 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4210 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4211 unlock_user_struct(target_msginfo, target_addr, 1);
4212 return 0;
4213 }
4214
4215 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4216 {
4217 struct msqid_ds dsarg;
4218 struct msginfo msginfo;
4219 abi_long ret = -TARGET_EINVAL;
4220
4221 cmd &= 0xff;
4222
4223 switch (cmd) {
4224 case IPC_STAT:
4225 case IPC_SET:
4226 case MSG_STAT:
4227 if (target_to_host_msqid_ds(&dsarg,ptr))
4228 return -TARGET_EFAULT;
4229 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4230 if (host_to_target_msqid_ds(ptr,&dsarg))
4231 return -TARGET_EFAULT;
4232 break;
4233 case IPC_RMID:
4234 ret = get_errno(msgctl(msgid, cmd, NULL));
4235 break;
4236 case IPC_INFO:
4237 case MSG_INFO:
4238 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4239 if (host_to_target_msginfo(ptr, &msginfo))
4240 return -TARGET_EFAULT;
4241 break;
4242 }
4243
4244 return ret;
4245 }
4246
4247 struct target_msgbuf {
4248 abi_long mtype;
4249 char mtext[1];
4250 };
4251
4252 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4253 ssize_t msgsz, int msgflg)
4254 {
4255 struct target_msgbuf *target_mb;
4256 struct msgbuf *host_mb;
4257 abi_long ret = 0;
4258
4259 if (msgsz < 0) {
4260 return -TARGET_EINVAL;
4261 }
4262
4263 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4264 return -TARGET_EFAULT;
4265 host_mb = g_try_malloc(msgsz + sizeof(long));
4266 if (!host_mb) {
4267 unlock_user_struct(target_mb, msgp, 0);
4268 return -TARGET_ENOMEM;
4269 }
4270 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4271 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4272 ret = -TARGET_ENOSYS;
4273 #ifdef __NR_msgsnd
4274 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4275 #endif
4276 #ifdef __NR_ipc
4277 if (ret == -TARGET_ENOSYS) {
4278 #ifdef __s390x__
4279 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4280 host_mb));
4281 #else
4282 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4283 host_mb, 0));
4284 #endif
4285 }
4286 #endif
4287 g_free(host_mb);
4288 unlock_user_struct(target_mb, msgp, 0);
4289
4290 return ret;
4291 }
4292
4293 #ifdef __NR_ipc
4294 #if defined(__sparc__)
4295 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4296 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4297 #elif defined(__s390x__)
4298 /* The s390 sys_ipc variant has only five parameters. */
4299 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4300 ((long int[]){(long int)__msgp, __msgtyp})
4301 #else
4302 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4303 ((long int[]){(long int)__msgp, __msgtyp}), 0
4304 #endif
4305 #endif
4306
4307 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4308 ssize_t msgsz, abi_long msgtyp,
4309 int msgflg)
4310 {
4311 struct target_msgbuf *target_mb;
4312 char *target_mtext;
4313 struct msgbuf *host_mb;
4314 abi_long ret = 0;
4315
4316 if (msgsz < 0) {
4317 return -TARGET_EINVAL;
4318 }
4319
4320 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4321 return -TARGET_EFAULT;
4322
4323 host_mb = g_try_malloc(msgsz + sizeof(long));
4324 if (!host_mb) {
4325 ret = -TARGET_ENOMEM;
4326 goto end;
4327 }
4328 ret = -TARGET_ENOSYS;
4329 #ifdef __NR_msgrcv
4330 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4331 #endif
4332 #ifdef __NR_ipc
4333 if (ret == -TARGET_ENOSYS) {
4334 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4335 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4336 }
4337 #endif
4338
4339 if (ret > 0) {
4340 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4341 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4342 if (!target_mtext) {
4343 ret = -TARGET_EFAULT;
4344 goto end;
4345 }
4346 memcpy(target_mb->mtext, host_mb->mtext, ret);
4347 unlock_user(target_mtext, target_mtext_addr, ret);
4348 }
4349
4350 target_mb->mtype = tswapal(host_mb->mtype);
4351
4352 end:
4353 if (target_mb)
4354 unlock_user_struct(target_mb, msgp, 1);
4355 g_free(host_mb);
4356 return ret;
4357 }
4358
4359 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4360 abi_ulong target_addr)
4361 {
4362 struct target_shmid_ds *target_sd;
4363
4364 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4365 return -TARGET_EFAULT;
4366 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4367 return -TARGET_EFAULT;
4368 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4370 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375 unlock_user_struct(target_sd, target_addr, 0);
4376 return 0;
4377 }
4378
4379 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4380 struct shmid_ds *host_sd)
4381 {
4382 struct target_shmid_ds *target_sd;
4383
4384 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385 return -TARGET_EFAULT;
4386 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4387 return -TARGET_EFAULT;
4388 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4389 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4390 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4391 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4392 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4393 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4394 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4395 unlock_user_struct(target_sd, target_addr, 1);
4396 return 0;
4397 }
4398
4399 struct target_shminfo {
4400 abi_ulong shmmax;
4401 abi_ulong shmmin;
4402 abi_ulong shmmni;
4403 abi_ulong shmseg;
4404 abi_ulong shmall;
4405 };
4406
4407 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4408 struct shminfo *host_shminfo)
4409 {
4410 struct target_shminfo *target_shminfo;
4411 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4412 return -TARGET_EFAULT;
4413 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4414 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4415 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4416 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4417 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4418 unlock_user_struct(target_shminfo, target_addr, 1);
4419 return 0;
4420 }
4421
4422 struct target_shm_info {
4423 int used_ids;
4424 abi_ulong shm_tot;
4425 abi_ulong shm_rss;
4426 abi_ulong shm_swp;
4427 abi_ulong swap_attempts;
4428 abi_ulong swap_successes;
4429 };
4430
4431 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4432 struct shm_info *host_shm_info)
4433 {
4434 struct target_shm_info *target_shm_info;
4435 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4436 return -TARGET_EFAULT;
4437 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4438 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4439 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4440 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4441 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4442 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4443 unlock_user_struct(target_shm_info, target_addr, 1);
4444 return 0;
4445 }
4446
4447 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4448 {
4449 struct shmid_ds dsarg;
4450 struct shminfo shminfo;
4451 struct shm_info shm_info;
4452 abi_long ret = -TARGET_EINVAL;
4453
4454 cmd &= 0xff;
4455
4456 switch(cmd) {
4457 case IPC_STAT:
4458 case IPC_SET:
4459 case SHM_STAT:
4460 if (target_to_host_shmid_ds(&dsarg, buf))
4461 return -TARGET_EFAULT;
4462 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4463 if (host_to_target_shmid_ds(buf, &dsarg))
4464 return -TARGET_EFAULT;
4465 break;
4466 case IPC_INFO:
4467 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4468 if (host_to_target_shminfo(buf, &shminfo))
4469 return -TARGET_EFAULT;
4470 break;
4471 case SHM_INFO:
4472 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4473 if (host_to_target_shm_info(buf, &shm_info))
4474 return -TARGET_EFAULT;
4475 break;
4476 case IPC_RMID:
4477 case SHM_LOCK:
4478 case SHM_UNLOCK:
4479 ret = get_errno(shmctl(shmid, cmd, NULL));
4480 break;
4481 }
4482
4483 return ret;
4484 }
4485
4486 #ifndef TARGET_FORCE_SHMLBA
4487 /* For most architectures, SHMLBA is the same as the page size;
4488 * some architectures have larger values, in which case they should
4489 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4490 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4491 * and defining its own value for SHMLBA.
4492 *
4493 * The kernel also permits SHMLBA to be set by the architecture to a
4494 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4495 * this means that addresses are rounded to the large size if
4496 * SHM_RND is set but addresses not aligned to that size are not rejected
4497 * as long as they are at least page-aligned. Since the only architecture
4498 * which uses this is ia64 this code doesn't provide for that oddity.
4499 */
4500 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4501 {
4502 return TARGET_PAGE_SIZE;
4503 }
4504 #endif
4505
4506 static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
4507 abi_ulong shmaddr, int shmflg)
4508 {
4509 CPUState *cpu = env_cpu(cpu_env);
4510 abi_ulong raddr;
4511 void *host_raddr;
4512 struct shmid_ds shm_info;
4513 int i, ret;
4514 abi_ulong shmlba;
4515
4516 /* shmat pointers are always untagged */
4517
4518 /* find out the length of the shared memory segment */
4519 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4520 if (is_error(ret)) {
4521 /* can't get length, bail out */
4522 return ret;
4523 }
4524
4525 shmlba = target_shmlba(cpu_env);
4526
4527 if (shmaddr & (shmlba - 1)) {
4528 if (shmflg & SHM_RND) {
4529 shmaddr &= ~(shmlba - 1);
4530 } else {
4531 return -TARGET_EINVAL;
4532 }
4533 }
4534 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4535 return -TARGET_EINVAL;
4536 }
4537
4538 mmap_lock();
4539
4540 /*
4541 * We're mapping shared memory, so ensure we generate code for parallel
4542 * execution and flush old translations. This will work up to the level
4543 * supported by the host -- anything that requires EXCP_ATOMIC will not
4544 * be atomic with respect to an external process.
4545 */
4546 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4547 cpu->tcg_cflags |= CF_PARALLEL;
4548 tb_flush(cpu);
4549 }
4550
4551 if (shmaddr)
4552 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4553 else {
4554 abi_ulong mmap_start;
4555
4556 /* In order to use the host shmat, we need to honor host SHMLBA. */
4557 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4558
4559 if (mmap_start == -1) {
4560 errno = ENOMEM;
4561 host_raddr = (void *)-1;
4562 } else
4563 host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4564 shmflg | SHM_REMAP);
4565 }
4566
4567 if (host_raddr == (void *)-1) {
4568 mmap_unlock();
4569 return get_errno((intptr_t)host_raddr);
4570 }
4571 raddr = h2g((uintptr_t)host_raddr);
4572
4573 page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
4574 PAGE_VALID | PAGE_RESET | PAGE_READ |
4575 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4576
4577 for (i = 0; i < N_SHM_REGIONS; i++) {
4578 if (!shm_regions[i].in_use) {
4579 shm_regions[i].in_use = true;
4580 shm_regions[i].start = raddr;
4581 shm_regions[i].size = shm_info.shm_segsz;
4582 break;
4583 }
4584 }
4585
4586 mmap_unlock();
4587 return raddr;
4588 }
4589
4590 static inline abi_long do_shmdt(abi_ulong shmaddr)
4591 {
4592 int i;
4593 abi_long rv;
4594
4595 /* shmdt pointers are always untagged */
4596
4597 mmap_lock();
4598
4599 for (i = 0; i < N_SHM_REGIONS; ++i) {
4600 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4601 shm_regions[i].in_use = false;
4602 page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4603 break;
4604 }
4605 }
4606 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4607
4608 mmap_unlock();
4609
4610 return rv;
4611 }
4612
4613 #ifdef TARGET_NR_ipc
4614 /* ??? This only works with linear mappings. */
4615 /* do_ipc() must return target values and target errnos. */
4616 static abi_long do_ipc(CPUArchState *cpu_env,
4617 unsigned int call, abi_long first,
4618 abi_long second, abi_long third,
4619 abi_long ptr, abi_long fifth)
4620 {
4621 int version;
4622 abi_long ret = 0;
4623
4624 version = call >> 16;
4625 call &= 0xffff;
4626
4627 switch (call) {
4628 case IPCOP_semop:
4629 ret = do_semtimedop(first, ptr, second, 0, false);
4630 break;
4631 case IPCOP_semtimedop:
4632 /*
4633 * The s390 sys_ipc variant has only five parameters instead of six
4634 * (as for default variant) and the only difference is the handling of
4635 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4636 * to a struct timespec where the generic variant uses fifth parameter.
4637 */
4638 #if defined(TARGET_S390X)
4639 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4640 #else
4641 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4642 #endif
4643 break;
4644
4645 case IPCOP_semget:
4646 ret = get_errno(semget(first, second, third));
4647 break;
4648
4649 case IPCOP_semctl: {
4650 /* The semun argument to semctl is passed by value, so dereference the
4651 * ptr argument. */
4652 abi_ulong atptr;
4653 get_user_ual(atptr, ptr);
4654 ret = do_semctl(first, second, third, atptr);
4655 break;
4656 }
4657
4658 case IPCOP_msgget:
4659 ret = get_errno(msgget(first, second));
4660 break;
4661
4662 case IPCOP_msgsnd:
4663 ret = do_msgsnd(first, ptr, second, third);
4664 break;
4665
4666 case IPCOP_msgctl:
4667 ret = do_msgctl(first, second, ptr);
4668 break;
4669
4670 case IPCOP_msgrcv:
4671 switch (version) {
4672 case 0:
4673 {
4674 struct target_ipc_kludge {
4675 abi_long msgp;
4676 abi_long msgtyp;
4677 } *tmp;
4678
4679 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4680 ret = -TARGET_EFAULT;
4681 break;
4682 }
4683
4684 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4685
4686 unlock_user_struct(tmp, ptr, 0);
4687 break;
4688 }
4689 default:
4690 ret = do_msgrcv(first, ptr, second, fifth, third);
4691 }
4692 break;
4693
4694 case IPCOP_shmat:
4695 switch (version) {
4696 default:
4697 {
4698 abi_ulong raddr;
4699 raddr = do_shmat(cpu_env, first, ptr, second);
4700 if (is_error(raddr))
4701 return get_errno(raddr);
4702 if (put_user_ual(raddr, third))
4703 return -TARGET_EFAULT;
4704 break;
4705 }
4706 case 1:
4707 ret = -TARGET_EINVAL;
4708 break;
4709 }
4710 break;
4711 case IPCOP_shmdt:
4712 ret = do_shmdt(ptr);
4713 break;
4714
4715 case IPCOP_shmget:
4716 /* IPC_* flag values are the same on all linux platforms */
4717 ret = get_errno(shmget(first, second, third));
4718 break;
4719
4720 /* IPC_* and SHM_* command values are the same on all linux platforms */
4721 case IPCOP_shmctl:
4722 ret = do_shmctl(first, second, ptr);
4723 break;
4724 default:
4725 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4726 call, version);
4727 ret = -TARGET_ENOSYS;
4728 break;
4729 }
4730 return ret;
4731 }
4732 #endif
4733
4734 /* kernel structure types definitions */
4735
4736 #define STRUCT(name, ...) STRUCT_ ## name,
4737 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4738 enum {
4739 #include "syscall_types.h"
4740 STRUCT_MAX
4741 };
4742 #undef STRUCT
4743 #undef STRUCT_SPECIAL
4744
4745 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4746 #define STRUCT_SPECIAL(name)
4747 #include "syscall_types.h"
4748 #undef STRUCT
4749 #undef STRUCT_SPECIAL
4750
4751 #define MAX_STRUCT_SIZE 4096
4752
4753 #ifdef CONFIG_FIEMAP
4754 /* So fiemap access checks don't overflow on 32 bit systems.
4755 * This is very slightly smaller than the limit imposed by
4756 * the underlying kernel.
4757 */
4758 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4759 / sizeof(struct fiemap_extent))
4760
4761 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4762 int fd, int cmd, abi_long arg)
4763 {
4764 /* The parameter for this ioctl is a struct fiemap followed
4765 * by an array of struct fiemap_extent whose size is set
4766 * in fiemap->fm_extent_count. The array is filled in by the
4767 * ioctl.
4768 */
4769 int target_size_in, target_size_out;
4770 struct fiemap *fm;
4771 const argtype *arg_type = ie->arg_type;
4772 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4773 void *argptr, *p;
4774 abi_long ret;
4775 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4776 uint32_t outbufsz;
4777 int free_fm = 0;
4778
4779 assert(arg_type[0] == TYPE_PTR);
4780 assert(ie->access == IOC_RW);
4781 arg_type++;
4782 target_size_in = thunk_type_size(arg_type, 0);
4783 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4784 if (!argptr) {
4785 return -TARGET_EFAULT;
4786 }
4787 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4788 unlock_user(argptr, arg, 0);
4789 fm = (struct fiemap *)buf_temp;
4790 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4791 return -TARGET_EINVAL;
4792 }
4793
4794 outbufsz = sizeof (*fm) +
4795 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4796
4797 if (outbufsz > MAX_STRUCT_SIZE) {
4798 /* We can't fit all the extents into the fixed size buffer.
4799 * Allocate one that is large enough and use it instead.
4800 */
4801 fm = g_try_malloc(outbufsz);
4802 if (!fm) {
4803 return -TARGET_ENOMEM;
4804 }
4805 memcpy(fm, buf_temp, sizeof(struct fiemap));
4806 free_fm = 1;
4807 }
4808 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4809 if (!is_error(ret)) {
4810 target_size_out = target_size_in;
4811 /* An extent_count of 0 means we were only counting the extents
4812 * so there are no structs to copy
4813 */
4814 if (fm->fm_extent_count != 0) {
4815 target_size_out += fm->fm_mapped_extents * extent_size;
4816 }
4817 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4818 if (!argptr) {
4819 ret = -TARGET_EFAULT;
4820 } else {
4821 /* Convert the struct fiemap */
4822 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4823 if (fm->fm_extent_count != 0) {
4824 p = argptr + target_size_in;
4825 /* ...and then all the struct fiemap_extents */
4826 for (i = 0; i < fm->fm_mapped_extents; i++) {
4827 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4828 THUNK_TARGET);
4829 p += extent_size;
4830 }
4831 }
4832 unlock_user(argptr, arg, target_size_out);
4833 }
4834 }
4835 if (free_fm) {
4836 g_free(fm);
4837 }
4838 return ret;
4839 }
4840 #endif
4841
4842 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4843 int fd, int cmd, abi_long arg)
4844 {
4845 const argtype *arg_type = ie->arg_type;
4846 int target_size;
4847 void *argptr;
4848 int ret;
4849 struct ifconf *host_ifconf;
4850 uint32_t outbufsz;
4851 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4852 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4853 int target_ifreq_size;
4854 int nb_ifreq;
4855 int free_buf = 0;
4856 int i;
4857 int target_ifc_len;
4858 abi_long target_ifc_buf;
4859 int host_ifc_len;
4860 char *host_ifc_buf;
4861
4862 assert(arg_type[0] == TYPE_PTR);
4863 assert(ie->access == IOC_RW);
4864
4865 arg_type++;
4866 target_size = thunk_type_size(arg_type, 0);
4867
4868 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4869 if (!argptr)
4870 return -TARGET_EFAULT;
4871 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4872 unlock_user(argptr, arg, 0);
4873
4874 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4875 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4876 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4877
4878 if (target_ifc_buf != 0) {
4879 target_ifc_len = host_ifconf->ifc_len;
4880 nb_ifreq = target_ifc_len / target_ifreq_size;
4881 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4882
4883 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4884 if (outbufsz > MAX_STRUCT_SIZE) {
4885 /*
4886 * We can't fit all the extents into the fixed size buffer.
4887 * Allocate one that is large enough and use it instead.
4888 */
4889 host_ifconf = g_try_malloc(outbufsz);
4890 if (!host_ifconf) {
4891 return -TARGET_ENOMEM;
4892 }
4893 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4894 free_buf = 1;
4895 }
4896 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4897
4898 host_ifconf->ifc_len = host_ifc_len;
4899 } else {
4900 host_ifc_buf = NULL;
4901 }
4902 host_ifconf->ifc_buf = host_ifc_buf;
4903
4904 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4905 if (!is_error(ret)) {
4906 /* convert host ifc_len to target ifc_len */
4907
4908 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4909 target_ifc_len = nb_ifreq * target_ifreq_size;
4910 host_ifconf->ifc_len = target_ifc_len;
4911
4912 /* restore target ifc_buf */
4913
4914 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4915
4916 /* copy struct ifconf to target user */
4917
4918 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4919 if (!argptr)
4920 return -TARGET_EFAULT;
4921 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4922 unlock_user(argptr, arg, target_size);
4923
4924 if (target_ifc_buf != 0) {
4925 /* copy ifreq[] to target user */
4926 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4927 for (i = 0; i < nb_ifreq ; i++) {
4928 thunk_convert(argptr + i * target_ifreq_size,
4929 host_ifc_buf + i * sizeof(struct ifreq),
4930 ifreq_arg_type, THUNK_TARGET);
4931 }
4932 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4933 }
4934 }
4935
4936 if (free_buf) {
4937 g_free(host_ifconf);
4938 }
4939
4940 return ret;
4941 }
4942
4943 #if defined(CONFIG_USBFS)
4944 #if HOST_LONG_BITS > 64
4945 #error USBDEVFS thunks do not support >64 bit hosts yet.
4946 #endif
4947 struct live_urb {
4948 uint64_t target_urb_adr;
4949 uint64_t target_buf_adr;
4950 char *target_buf_ptr;
4951 struct usbdevfs_urb host_urb;
4952 };
4953
4954 static GHashTable *usbdevfs_urb_hashtable(void)
4955 {
4956 static GHashTable *urb_hashtable;
4957
4958 if (!urb_hashtable) {
4959 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4960 }
4961 return urb_hashtable;
4962 }
4963
4964 static void urb_hashtable_insert(struct live_urb *urb)
4965 {
4966 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4967 g_hash_table_insert(urb_hashtable, urb, urb);
4968 }
4969
4970 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4971 {
4972 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4973 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4974 }
4975
4976 static void urb_hashtable_remove(struct live_urb *urb)
4977 {
4978 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4979 g_hash_table_remove(urb_hashtable, urb);
4980 }
4981
4982 static abi_long
4983 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4984 int fd, int cmd, abi_long arg)
4985 {
4986 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4987 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4988 struct live_urb *lurb;
4989 void *argptr;
4990 uint64_t hurb;
4991 int target_size;
4992 uintptr_t target_urb_adr;
4993 abi_long ret;
4994
4995 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4996
4997 memset(buf_temp, 0, sizeof(uint64_t));
4998 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4999 if (is_error(ret)) {
5000 return ret;
5001 }
5002
5003 memcpy(&hurb, buf_temp, sizeof(uint64_t));
5004 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5005 if (!lurb->target_urb_adr) {
5006 return -TARGET_EFAULT;
5007 }
5008 urb_hashtable_remove(lurb);
5009 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5010 lurb->host_urb.buffer_length);
5011 lurb->target_buf_ptr = NULL;
5012
5013 /* restore the guest buffer pointer */
5014 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5015
5016 /* update the guest urb struct */
5017 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5018 if (!argptr) {
5019 g_free(lurb);
5020 return -TARGET_EFAULT;
5021 }
5022 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5023 unlock_user(argptr, lurb->target_urb_adr, target_size);
5024
5025 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5026 /* write back the urb handle */
5027 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5028 if (!argptr) {
5029 g_free(lurb);
5030 return -TARGET_EFAULT;
5031 }
5032
5033 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5034 target_urb_adr = lurb->target_urb_adr;
5035 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5036 unlock_user(argptr, arg, target_size);
5037
5038 g_free(lurb);
5039 return ret;
5040 }
5041
5042 static abi_long
5043 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5044 uint8_t *buf_temp __attribute__((unused)),
5045 int fd, int cmd, abi_long arg)
5046 {
5047 struct live_urb *lurb;
5048
5049 /* map target address back to host URB with metadata. */
5050 lurb = urb_hashtable_lookup(arg);
5051 if (!lurb) {
5052 return -TARGET_EFAULT;
5053 }
5054 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5055 }
5056
5057 static abi_long
5058 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5059 int fd, int cmd, abi_long arg)
5060 {
5061 const argtype *arg_type = ie->arg_type;
5062 int target_size;
5063 abi_long ret;
5064 void *argptr;
5065 int rw_dir;
5066 struct live_urb *lurb;
5067
5068 /*
5069 * each submitted URB needs to map to a unique ID for the
5070 * kernel, and that unique ID needs to be a pointer to
5071 * host memory. hence, we need to malloc for each URB.
5072 * isochronous transfers have a variable length struct.
5073 */
5074 arg_type++;
5075 target_size = thunk_type_size(arg_type, THUNK_TARGET);
5076
5077 /* construct host copy of urb and metadata */
5078 lurb = g_try_new0(struct live_urb, 1);
5079 if (!lurb) {
5080 return -TARGET_ENOMEM;
5081 }
5082
5083 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5084 if (!argptr) {
5085 g_free(lurb);
5086 return -TARGET_EFAULT;
5087 }
5088 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5089 unlock_user(argptr, arg, 0);
5090
5091 lurb->target_urb_adr = arg;
5092 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5093
5094 /* buffer space used depends on endpoint type so lock the entire buffer */
5095 /* control type urbs should check the buffer contents for true direction */
5096 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5097 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5098 lurb->host_urb.buffer_length, 1);
5099 if (lurb->target_buf_ptr == NULL) {
5100 g_free(lurb);
5101 return -TARGET_EFAULT;
5102 }
5103
5104 /* update buffer pointer in host copy */
5105 lurb->host_urb.buffer = lurb->target_buf_ptr;
5106
5107 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5108 if (is_error(ret)) {
5109 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5110 g_free(lurb);
5111 } else {
5112 urb_hashtable_insert(lurb);
5113 }
5114
5115 return ret;
5116 }
5117 #endif /* CONFIG_USBFS */
5118
5119 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5120 int cmd, abi_long arg)
5121 {
5122 void *argptr;
5123 struct dm_ioctl *host_dm;
5124 abi_long guest_data;
5125 uint32_t guest_data_size;
5126 int target_size;
5127 const argtype *arg_type = ie->arg_type;
5128 abi_long ret;
5129 void *big_buf = NULL;
5130 char *host_data;
5131
5132 arg_type++;
5133 target_size = thunk_type_size(arg_type, 0);
5134 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5135 if (!argptr) {
5136 ret = -TARGET_EFAULT;
5137 goto out;
5138 }
5139 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5140 unlock_user(argptr, arg, 0);
5141
5142 /* buf_temp is too small, so fetch things into a bigger buffer */
5143 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5144 memcpy(big_buf, buf_temp, target_size);
5145 buf_temp = big_buf;
5146 host_dm = big_buf;
5147
5148 guest_data = arg + host_dm->data_start;
5149 if ((guest_data - arg) < 0) {
5150 ret = -TARGET_EINVAL;
5151 goto out;
5152 }
5153 guest_data_size = host_dm->data_size - host_dm->data_start;
5154 host_data = (char*)host_dm + host_dm->data_start;
5155
5156 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5157 if (!argptr) {
5158 ret = -TARGET_EFAULT;
5159 goto out;
5160 }
5161
5162 switch (ie->host_cmd) {
5163 case DM_REMOVE_ALL:
5164 case DM_LIST_DEVICES:
5165 case DM_DEV_CREATE:
5166 case DM_DEV_REMOVE:
5167 case DM_DEV_SUSPEND:
5168 case DM_DEV_STATUS:
5169 case DM_DEV_WAIT:
5170 case DM_TABLE_STATUS:
5171 case DM_TABLE_CLEAR:
5172 case DM_TABLE_DEPS:
5173 case DM_LIST_VERSIONS:
5174 /* no input data */
5175 break;
5176 case DM_DEV_RENAME:
5177 case DM_DEV_SET_GEOMETRY:
5178 /* data contains only strings */
5179 memcpy(host_data, argptr, guest_data_size);
5180 break;
5181 case DM_TARGET_MSG:
5182 memcpy(host_data, argptr, guest_data_size);
5183 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5184 break;
5185 case DM_TABLE_LOAD:
5186 {
5187 void *gspec = argptr;
5188 void *cur_data = host_data;
5189 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5190 int spec_size = thunk_type_size(arg_type, 0);
5191 int i;
5192
5193 for (i = 0; i < host_dm->target_count; i++) {
5194 struct dm_target_spec *spec = cur_data;
5195 uint32_t next;
5196 int slen;
5197
5198 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5199 slen = strlen((char*)gspec + spec_size) + 1;
5200 next = spec->next;
5201 spec->next = sizeof(*spec) + slen;
5202 strcpy((char*)&spec[1], gspec + spec_size);
5203 gspec += next;
5204 cur_data += spec->next;
5205 }
5206 break;
5207 }
5208 default:
5209 ret = -TARGET_EINVAL;
5210 unlock_user(argptr, guest_data, 0);
5211 goto out;
5212 }
5213 unlock_user(argptr, guest_data, 0);
5214
5215 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5216 if (!is_error(ret)) {
5217 guest_data = arg + host_dm->data_start;
5218 guest_data_size = host_dm->data_size - host_dm->data_start;
5219 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5220 switch (ie->host_cmd) {
5221 case DM_REMOVE_ALL:
5222 case DM_DEV_CREATE:
5223 case DM_DEV_REMOVE:
5224 case DM_DEV_RENAME:
5225 case DM_DEV_SUSPEND:
5226 case DM_DEV_STATUS:
5227 case DM_TABLE_LOAD:
5228 case DM_TABLE_CLEAR:
5229 case DM_TARGET_MSG:
5230 case DM_DEV_SET_GEOMETRY:
5231 /* no return data */
5232 break;
5233 case DM_LIST_DEVICES:
5234 {
5235 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5236 uint32_t remaining_data = guest_data_size;
5237 void *cur_data = argptr;
5238 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5239 int nl_size = 12; /* can't use thunk_size due to alignment */
5240
5241 while (1) {
5242 uint32_t next = nl->next;
5243 if (next) {
5244 nl->next = nl_size + (strlen(nl->name) + 1);
5245 }
5246 if (remaining_data < nl->next) {
5247 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5248 break;
5249 }
5250 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5251 strcpy(cur_data + nl_size, nl->name);
5252 cur_data += nl->next;
5253 remaining_data -= nl->next;
5254 if (!next) {
5255 break;
5256 }
5257 nl = (void*)nl + next;
5258 }
5259 break;
5260 }
5261 case DM_DEV_WAIT:
5262 case DM_TABLE_STATUS:
5263 {
5264 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5265 void *cur_data = argptr;
5266 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5267 int spec_size = thunk_type_size(arg_type, 0);
5268 int i;
5269
5270 for (i = 0; i < host_dm->target_count; i++) {
5271 uint32_t next = spec->next;
5272 int slen = strlen((char*)&spec[1]) + 1;
5273 spec->next = (cur_data - argptr) + spec_size + slen;
5274 if (guest_data_size < spec->next) {
5275 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5276 break;
5277 }
5278 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5279 strcpy(cur_data + spec_size, (char*)&spec[1]);
5280 cur_data = argptr + spec->next;
5281 spec = (void*)host_dm + host_dm->data_start + next;
5282 }
5283 break;
5284 }
5285 case DM_TABLE_DEPS:
5286 {
5287 void *hdata = (void*)host_dm + host_dm->data_start;
5288 int count = *(uint32_t*)hdata;
5289 uint64_t *hdev = hdata + 8;
5290 uint64_t *gdev = argptr + 8;
5291 int i;
5292
5293 *(uint32_t*)argptr = tswap32(count);
5294 for (i = 0; i < count; i++) {
5295 *gdev = tswap64(*hdev);
5296 gdev++;
5297 hdev++;
5298 }
5299 break;
5300 }
5301 case DM_LIST_VERSIONS:
5302 {
5303 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5304 uint32_t remaining_data = guest_data_size;
5305 void *cur_data = argptr;
5306 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5307 int vers_size = thunk_type_size(arg_type, 0);
5308
5309 while (1) {
5310 uint32_t next = vers->next;
5311 if (next) {
5312 vers->next = vers_size + (strlen(vers->name) + 1);
5313 }
5314 if (remaining_data < vers->next) {
5315 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5316 break;
5317 }
5318 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5319 strcpy(cur_data + vers_size, vers->name);
5320 cur_data += vers->next;
5321 remaining_data -= vers->next;
5322 if (!next) {
5323 break;
5324 }
5325 vers = (void*)vers + next;
5326 }
5327 break;
5328 }
5329 default:
5330 unlock_user(argptr, guest_data, 0);
5331 ret = -TARGET_EINVAL;
5332 goto out;
5333 }
5334 unlock_user(argptr, guest_data, guest_data_size);
5335
5336 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5337 if (!argptr) {
5338 ret = -TARGET_EFAULT;
5339 goto out;
5340 }
5341 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5342 unlock_user(argptr, arg, target_size);
5343 }
5344 out:
5345 g_free(big_buf);
5346 return ret;
5347 }
5348
5349 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5350 int cmd, abi_long arg)
5351 {
5352 void *argptr;
5353 int target_size;
5354 const argtype *arg_type = ie->arg_type;
5355 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5356 abi_long ret;
5357
5358 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5359 struct blkpg_partition host_part;
5360
5361 /* Read and convert blkpg */
5362 arg_type++;
5363 target_size = thunk_type_size(arg_type, 0);
5364 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5365 if (!argptr) {
5366 ret = -TARGET_EFAULT;
5367 goto out;
5368 }
5369 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5370 unlock_user(argptr, arg, 0);
5371
5372 switch (host_blkpg->op) {
5373 case BLKPG_ADD_PARTITION:
5374 case BLKPG_DEL_PARTITION:
5375 /* payload is struct blkpg_partition */
5376 break;
5377 default:
5378 /* Unknown opcode */
5379 ret = -TARGET_EINVAL;
5380 goto out;
5381 }
5382
5383 /* Read and convert blkpg->data */
5384 arg = (abi_long)(uintptr_t)host_blkpg->data;
5385 target_size = thunk_type_size(part_arg_type, 0);
5386 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5387 if (!argptr) {
5388 ret = -TARGET_EFAULT;
5389 goto out;
5390 }
5391 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5392 unlock_user(argptr, arg, 0);
5393
5394 /* Swizzle the data pointer to our local copy and call! */
5395 host_blkpg->data = &host_part;
5396 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5397
5398 out:
5399 return ret;
5400 }
5401
5402 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5403 int fd, int cmd, abi_long arg)
5404 {
5405 const argtype *arg_type = ie->arg_type;
5406 const StructEntry *se;
5407 const argtype *field_types;
5408 const int *dst_offsets, *src_offsets;
5409 int target_size;
5410 void *argptr;
5411 abi_ulong *target_rt_dev_ptr = NULL;
5412 unsigned long *host_rt_dev_ptr = NULL;
5413 abi_long ret;
5414 int i;
5415
5416 assert(ie->access == IOC_W);
5417 assert(*arg_type == TYPE_PTR);
5418 arg_type++;
5419 assert(*arg_type == TYPE_STRUCT);
5420 target_size = thunk_type_size(arg_type, 0);
5421 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5422 if (!argptr) {
5423 return -TARGET_EFAULT;
5424 }
5425 arg_type++;
5426 assert(*arg_type == (int)STRUCT_rtentry);
5427 se = struct_entries + *arg_type++;
5428 assert(se->convert[0] == NULL);
5429 /* convert struct here to be able to catch rt_dev string */
5430 field_types = se->field_types;
5431 dst_offsets = se->field_offsets[THUNK_HOST];
5432 src_offsets = se->field_offsets[THUNK_TARGET];
5433 for (i = 0; i < se->nb_fields; i++) {
5434 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5435 assert(*field_types == TYPE_PTRVOID);
5436 target_rt_dev_ptr = argptr + src_offsets[i];
5437 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5438 if (*target_rt_dev_ptr != 0) {
5439 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5440 tswapal(*target_rt_dev_ptr));
5441 if (!*host_rt_dev_ptr) {
5442 unlock_user(argptr, arg, 0);
5443 return -TARGET_EFAULT;
5444 }
5445 } else {
5446 *host_rt_dev_ptr = 0;
5447 }
5448 field_types++;
5449 continue;
5450 }
5451 field_types = thunk_convert(buf_temp + dst_offsets[i],
5452 argptr + src_offsets[i],
5453 field_types, THUNK_HOST);
5454 }
5455 unlock_user(argptr, arg, 0);
5456
5457 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5458
5459 assert(host_rt_dev_ptr != NULL);
5460 assert(target_rt_dev_ptr != NULL);
5461 if (*host_rt_dev_ptr != 0) {
5462 unlock_user((void *)*host_rt_dev_ptr,
5463 *target_rt_dev_ptr, 0);
5464 }
5465 return ret;
5466 }
5467
5468 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5469 int fd, int cmd, abi_long arg)
5470 {
5471 int sig = target_to_host_signal(arg);
5472 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5473 }
5474
5475 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5476 int fd, int cmd, abi_long arg)
5477 {
5478 struct timeval tv;
5479 abi_long ret;
5480
5481 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5482 if (is_error(ret)) {
5483 return ret;
5484 }
5485
5486 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5487 if (copy_to_user_timeval(arg, &tv)) {
5488 return -TARGET_EFAULT;
5489 }
5490 } else {
5491 if (copy_to_user_timeval64(arg, &tv)) {
5492 return -TARGET_EFAULT;
5493 }
5494 }
5495
5496 return ret;
5497 }
5498
5499 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5500 int fd, int cmd, abi_long arg)
5501 {
5502 struct timespec ts;
5503 abi_long ret;
5504
5505 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5506 if (is_error(ret)) {
5507 return ret;
5508 }
5509
5510 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5511 if (host_to_target_timespec(arg, &ts)) {
5512 return -TARGET_EFAULT;
5513 }
5514 } else{
5515 if (host_to_target_timespec64(arg, &ts)) {
5516 return -TARGET_EFAULT;
5517 }
5518 }
5519
5520 return ret;
5521 }
5522
5523 #ifdef TIOCGPTPEER
5524 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5525 int fd, int cmd, abi_long arg)
5526 {
5527 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5528 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5529 }
5530 #endif
5531
5532 #ifdef HAVE_DRM_H
5533
5534 static void unlock_drm_version(struct drm_version *host_ver,
5535 struct target_drm_version *target_ver,
5536 bool copy)
5537 {
5538 unlock_user(host_ver->name, target_ver->name,
5539 copy ? host_ver->name_len : 0);
5540 unlock_user(host_ver->date, target_ver->date,
5541 copy ? host_ver->date_len : 0);
5542 unlock_user(host_ver->desc, target_ver->desc,
5543 copy ? host_ver->desc_len : 0);
5544 }
5545
5546 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5547 struct target_drm_version *target_ver)
5548 {
5549 memset(host_ver, 0, sizeof(*host_ver));
5550
5551 __get_user(host_ver->name_len, &target_ver->name_len);
5552 if (host_ver->name_len) {
5553 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5554 target_ver->name_len, 0);
5555 if (!host_ver->name) {
5556 return -EFAULT;
5557 }
5558 }
5559
5560 __get_user(host_ver->date_len, &target_ver->date_len);
5561 if (host_ver->date_len) {
5562 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5563 target_ver->date_len, 0);
5564 if (!host_ver->date) {
5565 goto err;
5566 }
5567 }
5568
5569 __get_user(host_ver->desc_len, &target_ver->desc_len);
5570 if (host_ver->desc_len) {
5571 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5572 target_ver->desc_len, 0);
5573 if (!host_ver->desc) {
5574 goto err;
5575 }
5576 }
5577
5578 return 0;
5579 err:
5580 unlock_drm_version(host_ver, target_ver, false);
5581 return -EFAULT;
5582 }
5583
5584 static inline void host_to_target_drmversion(
5585 struct target_drm_version *target_ver,
5586 struct drm_version *host_ver)
5587 {
5588 __put_user(host_ver->version_major, &target_ver->version_major);
5589 __put_user(host_ver->version_minor, &target_ver->version_minor);
5590 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5591 __put_user(host_ver->name_len, &target_ver->name_len);
5592 __put_user(host_ver->date_len, &target_ver->date_len);
5593 __put_user(host_ver->desc_len, &target_ver->desc_len);
5594 unlock_drm_version(host_ver, target_ver, true);
5595 }
5596
5597 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5598 int fd, int cmd, abi_long arg)
5599 {
5600 struct drm_version *ver;
5601 struct target_drm_version *target_ver;
5602 abi_long ret;
5603
5604 switch (ie->host_cmd) {
5605 case DRM_IOCTL_VERSION:
5606 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5607 return -TARGET_EFAULT;
5608 }
5609 ver = (struct drm_version *)buf_temp;
5610 ret = target_to_host_drmversion(ver, target_ver);
5611 if (!is_error(ret)) {
5612 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5613 if (is_error(ret)) {
5614 unlock_drm_version(ver, target_ver, false);
5615 } else {
5616 host_to_target_drmversion(target_ver, ver);
5617 }
5618 }
5619 unlock_user_struct(target_ver, arg, 0);
5620 return ret;
5621 }
5622 return -TARGET_ENOSYS;
5623 }
5624
5625 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5626 struct drm_i915_getparam *gparam,
5627 int fd, abi_long arg)
5628 {
5629 abi_long ret;
5630 int value;
5631 struct target_drm_i915_getparam *target_gparam;
5632
5633 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5634 return -TARGET_EFAULT;
5635 }
5636
5637 __get_user(gparam->param, &target_gparam->param);
5638 gparam->value = &value;
5639 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5640 put_user_s32(value, target_gparam->value);
5641
5642 unlock_user_struct(target_gparam, arg, 0);
5643 return ret;
5644 }
5645
5646 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5647 int fd, int cmd, abi_long arg)
5648 {
5649 switch (ie->host_cmd) {
5650 case DRM_IOCTL_I915_GETPARAM:
5651 return do_ioctl_drm_i915_getparam(ie,
5652 (struct drm_i915_getparam *)buf_temp,
5653 fd, arg);
5654 default:
5655 return -TARGET_ENOSYS;
5656 }
5657 }
5658
5659 #endif
5660
5661 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5662 int fd, int cmd, abi_long arg)
5663 {
5664 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5665 struct tun_filter *target_filter;
5666 char *target_addr;
5667
5668 assert(ie->access == IOC_W);
5669
5670 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5671 if (!target_filter) {
5672 return -TARGET_EFAULT;
5673 }
5674 filter->flags = tswap16(target_filter->flags);
5675 filter->count = tswap16(target_filter->count);
5676 unlock_user(target_filter, arg, 0);
5677
5678 if (filter->count) {
5679 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5680 MAX_STRUCT_SIZE) {
5681 return -TARGET_EFAULT;
5682 }
5683
5684 target_addr = lock_user(VERIFY_READ,
5685 arg + offsetof(struct tun_filter, addr),
5686 filter->count * ETH_ALEN, 1);
5687 if (!target_addr) {
5688 return -TARGET_EFAULT;
5689 }
5690 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5691 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5692 }
5693
5694 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5695 }
5696
5697 IOCTLEntry ioctl_entries[] = {
5698 #define IOCTL(cmd, access, ...) \
5699 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5700 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5701 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5702 #define IOCTL_IGNORE(cmd) \
5703 { TARGET_ ## cmd, 0, #cmd },
5704 #include "ioctls.h"
5705 { 0, 0, },
5706 };
5707
5708 /* ??? Implement proper locking for ioctls. */
5709 /* do_ioctl() Must return target values and target errnos. */
5710 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5711 {
5712 const IOCTLEntry *ie;
5713 const argtype *arg_type;
5714 abi_long ret;
5715 uint8_t buf_temp[MAX_STRUCT_SIZE];
5716 int target_size;
5717 void *argptr;
5718
5719 ie = ioctl_entries;
5720 for(;;) {
5721 if (ie->target_cmd == 0) {
5722 qemu_log_mask(
5723 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5724 return -TARGET_ENOTTY;
5725 }
5726 if (ie->target_cmd == cmd)
5727 break;
5728 ie++;
5729 }
5730 arg_type = ie->arg_type;
5731 if (ie->do_ioctl) {
5732 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5733 } else if (!ie->host_cmd) {
5734 /* Some architectures define BSD ioctls in their headers
5735 that are not implemented in Linux. */
5736 return -TARGET_ENOTTY;
5737 }
5738
5739 switch(arg_type[0]) {
5740 case TYPE_NULL:
5741 /* no argument */
5742 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5743 break;
5744 case TYPE_PTRVOID:
5745 case TYPE_INT:
5746 case TYPE_LONG:
5747 case TYPE_ULONG:
5748 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5749 break;
5750 case TYPE_PTR:
5751 arg_type++;
5752 target_size = thunk_type_size(arg_type, 0);
5753 switch(ie->access) {
5754 case IOC_R:
5755 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5756 if (!is_error(ret)) {
5757 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5758 if (!argptr)
5759 return -TARGET_EFAULT;
5760 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5761 unlock_user(argptr, arg, target_size);
5762 }
5763 break;
5764 case IOC_W:
5765 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5766 if (!argptr)
5767 return -TARGET_EFAULT;
5768 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5769 unlock_user(argptr, arg, 0);
5770 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5771 break;
5772 default:
5773 case IOC_RW:
5774 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5775 if (!argptr)
5776 return -TARGET_EFAULT;
5777 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5778 unlock_user(argptr, arg, 0);
5779 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5780 if (!is_error(ret)) {
5781 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5782 if (!argptr)
5783 return -TARGET_EFAULT;
5784 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5785 unlock_user(argptr, arg, target_size);
5786 }
5787 break;
5788 }
5789 break;
5790 default:
5791 qemu_log_mask(LOG_UNIMP,
5792 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5793 (long)cmd, arg_type[0]);
5794 ret = -TARGET_ENOTTY;
5795 break;
5796 }
5797 return ret;
5798 }
5799
5800 static const bitmask_transtbl iflag_tbl[] = {
5801 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5802 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5803 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5804 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5805 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5806 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5807 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5808 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5809 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5810 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5811 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5812 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5813 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5814 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5815 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5816 { 0, 0, 0, 0 }
5817 };
5818
5819 static const bitmask_transtbl oflag_tbl[] = {
5820 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5821 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5822 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5823 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5824 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5825 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5826 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5827 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5828 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5829 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5830 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5831 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5832 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5833 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5834 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5835 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5836 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5837 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5838 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5839 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5840 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5841 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5842 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5843 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5844 { 0, 0, 0, 0 }
5845 };
5846
5847 static const bitmask_transtbl cflag_tbl[] = {
5848 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5849 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5850 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5851 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5852 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5853 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5854 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5855 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5856 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5857 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5858 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5859 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5860 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5861 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5862 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5863 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5864 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5865 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5866 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5867 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5868 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5869 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5870 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5871 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5872 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5873 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5874 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5875 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5876 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5877 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5878 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5879 { 0, 0, 0, 0 }
5880 };
5881
5882 static const bitmask_transtbl lflag_tbl[] = {
5883 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5884 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5885 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5886 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5887 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5888 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5889 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5890 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5891 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5892 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5893 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5894 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5895 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5896 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5897 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5898 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5899 { 0, 0, 0, 0 }
5900 };
5901
5902 static void target_to_host_termios (void *dst, const void *src)
5903 {
5904 struct host_termios *host = dst;
5905 const struct target_termios *target = src;
5906
5907 host->c_iflag =
5908 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5909 host->c_oflag =
5910 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5911 host->c_cflag =
5912 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5913 host->c_lflag =
5914 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5915 host->c_line = target->c_line;
5916
5917 memset(host->c_cc, 0, sizeof(host->c_cc));
5918 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5919 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5920 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5921 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5922 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5923 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5924 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5925 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5926 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5927 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5928 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5929 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5930 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5931 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5932 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5933 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5934 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5935 }
5936
5937 static void host_to_target_termios (void *dst, const void *src)
5938 {
5939 struct target_termios *target = dst;
5940 const struct host_termios *host = src;
5941
5942 target->c_iflag =
5943 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5944 target->c_oflag =
5945 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5946 target->c_cflag =
5947 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5948 target->c_lflag =
5949 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5950 target->c_line = host->c_line;
5951
5952 memset(target->c_cc, 0, sizeof(target->c_cc));
5953 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5954 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5955 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5956 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5957 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5958 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5959 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5960 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5961 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5962 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5963 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5964 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5965 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5966 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5967 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5968 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5969 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5970 }
5971
5972 static const StructEntry struct_termios_def = {
5973 .convert = { host_to_target_termios, target_to_host_termios },
5974 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5975 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5976 .print = print_termios,
5977 };
5978
5979 /* If the host does not provide these bits, they may be safely discarded. */
5980 #ifndef MAP_SYNC
5981 #define MAP_SYNC 0
5982 #endif
5983 #ifndef MAP_UNINITIALIZED
5984 #define MAP_UNINITIALIZED 0
5985 #endif
5986
5987 static const bitmask_transtbl mmap_flags_tbl[] = {
5988 { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
5989 { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
5990 { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
5991 MAP_TYPE, MAP_SHARED_VALIDATE },
5992 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5993 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5994 MAP_ANONYMOUS, MAP_ANONYMOUS },
5995 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5996 MAP_GROWSDOWN, MAP_GROWSDOWN },
5997 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5998 MAP_DENYWRITE, MAP_DENYWRITE },
5999 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6000 MAP_EXECUTABLE, MAP_EXECUTABLE },
6001 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6002 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6003 MAP_NORESERVE, MAP_NORESERVE },
6004 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6005 /* MAP_STACK had been ignored by the kernel for quite some time.
6006 Recognize it for the target insofar as we do not want to pass
6007 it through to the host. */
6008 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6009 { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
6010 { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
6011 { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
6012 { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
6013 MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
6014 { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
6015 MAP_UNINITIALIZED, MAP_UNINITIALIZED },
6016 { 0, 0, 0, 0 }
6017 };
6018
6019 /*
6020 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6021 * TARGET_I386 is defined if TARGET_X86_64 is defined
6022 */
6023 #if defined(TARGET_I386)
6024
6025 /* NOTE: there is really one LDT for all the threads */
6026 static uint8_t *ldt_table;
6027
6028 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6029 {
6030 int size;
6031 void *p;
6032
6033 if (!ldt_table)
6034 return 0;
6035 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6036 if (size > bytecount)
6037 size = bytecount;
6038 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6039 if (!p)
6040 return -TARGET_EFAULT;
6041 /* ??? Should this by byteswapped? */
6042 memcpy(p, ldt_table, size);
6043 unlock_user(p, ptr, size);
6044 return size;
6045 }
6046
6047 /* XXX: add locking support */
6048 static abi_long write_ldt(CPUX86State *env,
6049 abi_ulong ptr, unsigned long bytecount, int oldmode)
6050 {
6051 struct target_modify_ldt_ldt_s ldt_info;
6052 struct target_modify_ldt_ldt_s *target_ldt_info;
6053 int seg_32bit, contents, read_exec_only, limit_in_pages;
6054 int seg_not_present, useable, lm;
6055 uint32_t *lp, entry_1, entry_2;
6056
6057 if (bytecount != sizeof(ldt_info))
6058 return -TARGET_EINVAL;
6059 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6060 return -TARGET_EFAULT;
6061 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6062 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6063 ldt_info.limit = tswap32(target_ldt_info->limit);
6064 ldt_info.flags = tswap32(target_ldt_info->flags);
6065 unlock_user_struct(target_ldt_info, ptr, 0);
6066
6067 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6068 return -TARGET_EINVAL;
6069 seg_32bit = ldt_info.flags & 1;
6070 contents = (ldt_info.flags >> 1) & 3;
6071 read_exec_only = (ldt_info.flags >> 3) & 1;
6072 limit_in_pages = (ldt_info.flags >> 4) & 1;
6073 seg_not_present = (ldt_info.flags >> 5) & 1;
6074 useable = (ldt_info.flags >> 6) & 1;
6075 #ifdef TARGET_ABI32
6076 lm = 0;
6077 #else
6078 lm = (ldt_info.flags >> 7) & 1;
6079 #endif
6080 if (contents == 3) {
6081 if (oldmode)
6082 return -TARGET_EINVAL;
6083 if (seg_not_present == 0)
6084 return -TARGET_EINVAL;
6085 }
6086 /* allocate the LDT */
6087 if (!ldt_table) {
6088 env->ldt.base = target_mmap(0,
6089 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6090 PROT_READ|PROT_WRITE,
6091 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6092 if (env->ldt.base == -1)
6093 return -TARGET_ENOMEM;
6094 memset(g2h_untagged(env->ldt.base), 0,
6095 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6096 env->ldt.limit = 0xffff;
6097 ldt_table = g2h_untagged(env->ldt.base);
6098 }
6099
6100 /* NOTE: same code as Linux kernel */
6101 /* Allow LDTs to be cleared by the user. */
6102 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6103 if (oldmode ||
6104 (contents == 0 &&
6105 read_exec_only == 1 &&
6106 seg_32bit == 0 &&
6107 limit_in_pages == 0 &&
6108 seg_not_present == 1 &&
6109 useable == 0 )) {
6110 entry_1 = 0;
6111 entry_2 = 0;
6112 goto install;
6113 }
6114 }
6115
6116 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6117 (ldt_info.limit & 0x0ffff);
6118 entry_2 = (ldt_info.base_addr & 0xff000000) |
6119 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6120 (ldt_info.limit & 0xf0000) |
6121 ((read_exec_only ^ 1) << 9) |
6122 (contents << 10) |
6123 ((seg_not_present ^ 1) << 15) |
6124 (seg_32bit << 22) |
6125 (limit_in_pages << 23) |
6126 (lm << 21) |
6127 0x7000;
6128 if (!oldmode)
6129 entry_2 |= (useable << 20);
6130
6131 /* Install the new entry ... */
6132 install:
6133 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6134 lp[0] = tswap32(entry_1);
6135 lp[1] = tswap32(entry_2);
6136 return 0;
6137 }
6138
6139 /* specific and weird i386 syscalls */
6140 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6141 unsigned long bytecount)
6142 {
6143 abi_long ret;
6144
6145 switch (func) {
6146 case 0:
6147 ret = read_ldt(ptr, bytecount);
6148 break;
6149 case 1:
6150 ret = write_ldt(env, ptr, bytecount, 1);
6151 break;
6152 case 0x11:
6153 ret = write_ldt(env, ptr, bytecount, 0);
6154 break;
6155 default:
6156 ret = -TARGET_ENOSYS;
6157 break;
6158 }
6159 return ret;
6160 }
6161
6162 #if defined(TARGET_ABI32)
6163 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6164 {
6165 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6166 struct target_modify_ldt_ldt_s ldt_info;
6167 struct target_modify_ldt_ldt_s *target_ldt_info;
6168 int seg_32bit, contents, read_exec_only, limit_in_pages;
6169 int seg_not_present, useable, lm;
6170 uint32_t *lp, entry_1, entry_2;
6171 int i;
6172
6173 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6174 if (!target_ldt_info)
6175 return -TARGET_EFAULT;
6176 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6177 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6178 ldt_info.limit = tswap32(target_ldt_info->limit);
6179 ldt_info.flags = tswap32(target_ldt_info->flags);
6180 if (ldt_info.entry_number == -1) {
6181 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6182 if (gdt_table[i] == 0) {
6183 ldt_info.entry_number = i;
6184 target_ldt_info->entry_number = tswap32(i);
6185 break;
6186 }
6187 }
6188 }
6189 unlock_user_struct(target_ldt_info, ptr, 1);
6190
6191 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6192 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6193 return -TARGET_EINVAL;
6194 seg_32bit = ldt_info.flags & 1;
6195 contents = (ldt_info.flags >> 1) & 3;
6196 read_exec_only = (ldt_info.flags >> 3) & 1;
6197 limit_in_pages = (ldt_info.flags >> 4) & 1;
6198 seg_not_present = (ldt_info.flags >> 5) & 1;
6199 useable = (ldt_info.flags >> 6) & 1;
6200 #ifdef TARGET_ABI32
6201 lm = 0;
6202 #else
6203 lm = (ldt_info.flags >> 7) & 1;
6204 #endif
6205
6206 if (contents == 3) {
6207 if (seg_not_present == 0)
6208 return -TARGET_EINVAL;
6209 }
6210
6211 /* NOTE: same code as Linux kernel */
6212 /* Allow LDTs to be cleared by the user. */
6213 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6214 if ((contents == 0 &&
6215 read_exec_only == 1 &&
6216 seg_32bit == 0 &&
6217 limit_in_pages == 0 &&
6218 seg_not_present == 1 &&
6219 useable == 0 )) {
6220 entry_1 = 0;
6221 entry_2 = 0;
6222 goto install;
6223 }
6224 }
6225
6226 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6227 (ldt_info.limit & 0x0ffff);
6228 entry_2 = (ldt_info.base_addr & 0xff000000) |
6229 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6230 (ldt_info.limit & 0xf0000) |
6231 ((read_exec_only ^ 1) << 9) |
6232 (contents << 10) |
6233 ((seg_not_present ^ 1) << 15) |
6234 (seg_32bit << 22) |
6235 (limit_in_pages << 23) |
6236 (useable << 20) |
6237 (lm << 21) |
6238 0x7000;
6239
6240 /* Install the new entry ... */
6241 install:
6242 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6243 lp[0] = tswap32(entry_1);
6244 lp[1] = tswap32(entry_2);
6245 return 0;
6246 }
6247
6248 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6249 {
6250 struct target_modify_ldt_ldt_s *target_ldt_info;
6251 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6252 uint32_t base_addr, limit, flags;
6253 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6254 int seg_not_present, useable, lm;
6255 uint32_t *lp, entry_1, entry_2;
6256
6257 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6258 if (!target_ldt_info)
6259 return -TARGET_EFAULT;
6260 idx = tswap32(target_ldt_info->entry_number);
6261 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6262 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6263 unlock_user_struct(target_ldt_info, ptr, 1);
6264 return -TARGET_EINVAL;
6265 }
6266 lp = (uint32_t *)(gdt_table + idx);
6267 entry_1 = tswap32(lp[0]);
6268 entry_2 = tswap32(lp[1]);
6269
6270 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6271 contents = (entry_2 >> 10) & 3;
6272 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6273 seg_32bit = (entry_2 >> 22) & 1;
6274 limit_in_pages = (entry_2 >> 23) & 1;
6275 useable = (entry_2 >> 20) & 1;
6276 #ifdef TARGET_ABI32
6277 lm = 0;
6278 #else
6279 lm = (entry_2 >> 21) & 1;
6280 #endif
6281 flags = (seg_32bit << 0) | (contents << 1) |
6282 (read_exec_only << 3) | (limit_in_pages << 4) |
6283 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6284 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6285 base_addr = (entry_1 >> 16) |
6286 (entry_2 & 0xff000000) |
6287 ((entry_2 & 0xff) << 16);
6288 target_ldt_info->base_addr = tswapal(base_addr);
6289 target_ldt_info->limit = tswap32(limit);
6290 target_ldt_info->flags = tswap32(flags);
6291 unlock_user_struct(target_ldt_info, ptr, 1);
6292 return 0;
6293 }
6294
6295 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6296 {
6297 return -TARGET_ENOSYS;
6298 }
6299 #else
6300 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6301 {
6302 abi_long ret = 0;
6303 abi_ulong val;
6304 int idx;
6305
6306 switch(code) {
6307 case TARGET_ARCH_SET_GS:
6308 case TARGET_ARCH_SET_FS:
6309 if (code == TARGET_ARCH_SET_GS)
6310 idx = R_GS;
6311 else
6312 idx = R_FS;
6313 cpu_x86_load_seg(env, idx, 0);
6314 env->segs[idx].base = addr;
6315 break;
6316 case TARGET_ARCH_GET_GS:
6317 case TARGET_ARCH_GET_FS:
6318 if (code == TARGET_ARCH_GET_GS)
6319 idx = R_GS;
6320 else
6321 idx = R_FS;
6322 val = env->segs[idx].base;
6323 if (put_user(val, addr, abi_ulong))
6324 ret = -TARGET_EFAULT;
6325 break;
6326 default:
6327 ret = -TARGET_EINVAL;
6328 break;
6329 }
6330 return ret;
6331 }
6332 #endif /* defined(TARGET_ABI32 */
6333 #endif /* defined(TARGET_I386) */
6334
6335 /*
6336 * These constants are generic. Supply any that are missing from the host.
6337 */
6338 #ifndef PR_SET_NAME
6339 # define PR_SET_NAME 15
6340 # define PR_GET_NAME 16
6341 #endif
6342 #ifndef PR_SET_FP_MODE
6343 # define PR_SET_FP_MODE 45
6344 # define PR_GET_FP_MODE 46
6345 # define PR_FP_MODE_FR (1 << 0)
6346 # define PR_FP_MODE_FRE (1 << 1)
6347 #endif
6348 #ifndef PR_SVE_SET_VL
6349 # define PR_SVE_SET_VL 50
6350 # define PR_SVE_GET_VL 51
6351 # define PR_SVE_VL_LEN_MASK 0xffff
6352 # define PR_SVE_VL_INHERIT (1 << 17)
6353 #endif
6354 #ifndef PR_PAC_RESET_KEYS
6355 # define PR_PAC_RESET_KEYS 54
6356 # define PR_PAC_APIAKEY (1 << 0)
6357 # define PR_PAC_APIBKEY (1 << 1)
6358 # define PR_PAC_APDAKEY (1 << 2)
6359 # define PR_PAC_APDBKEY (1 << 3)
6360 # define PR_PAC_APGAKEY (1 << 4)
6361 #endif
6362 #ifndef PR_SET_TAGGED_ADDR_CTRL
6363 # define PR_SET_TAGGED_ADDR_CTRL 55
6364 # define PR_GET_TAGGED_ADDR_CTRL 56
6365 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6366 #endif
6367 #ifndef PR_MTE_TCF_SHIFT
6368 # define PR_MTE_TCF_SHIFT 1
6369 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6370 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6371 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6372 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6373 # define PR_MTE_TAG_SHIFT 3
6374 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6375 #endif
6376 #ifndef PR_SET_IO_FLUSHER
6377 # define PR_SET_IO_FLUSHER 57
6378 # define PR_GET_IO_FLUSHER 58
6379 #endif
6380 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6381 # define PR_SET_SYSCALL_USER_DISPATCH 59
6382 #endif
6383 #ifndef PR_SME_SET_VL
6384 # define PR_SME_SET_VL 63
6385 # define PR_SME_GET_VL 64
6386 # define PR_SME_VL_LEN_MASK 0xffff
6387 # define PR_SME_VL_INHERIT (1 << 17)
6388 #endif
6389
6390 #include "target_prctl.h"
6391
6392 static abi_long do_prctl_inval0(CPUArchState *env)
6393 {
6394 return -TARGET_EINVAL;
6395 }
6396
6397 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6398 {
6399 return -TARGET_EINVAL;
6400 }
6401
6402 #ifndef do_prctl_get_fp_mode
6403 #define do_prctl_get_fp_mode do_prctl_inval0
6404 #endif
6405 #ifndef do_prctl_set_fp_mode
6406 #define do_prctl_set_fp_mode do_prctl_inval1
6407 #endif
6408 #ifndef do_prctl_sve_get_vl
6409 #define do_prctl_sve_get_vl do_prctl_inval0
6410 #endif
6411 #ifndef do_prctl_sve_set_vl
6412 #define do_prctl_sve_set_vl do_prctl_inval1
6413 #endif
6414 #ifndef do_prctl_reset_keys
6415 #define do_prctl_reset_keys do_prctl_inval1
6416 #endif
6417 #ifndef do_prctl_set_tagged_addr_ctrl
6418 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6419 #endif
6420 #ifndef do_prctl_get_tagged_addr_ctrl
6421 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6422 #endif
6423 #ifndef do_prctl_get_unalign
6424 #define do_prctl_get_unalign do_prctl_inval1
6425 #endif
6426 #ifndef do_prctl_set_unalign
6427 #define do_prctl_set_unalign do_prctl_inval1
6428 #endif
6429 #ifndef do_prctl_sme_get_vl
6430 #define do_prctl_sme_get_vl do_prctl_inval0
6431 #endif
6432 #ifndef do_prctl_sme_set_vl
6433 #define do_prctl_sme_set_vl do_prctl_inval1
6434 #endif
6435
6436 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6437 abi_long arg3, abi_long arg4, abi_long arg5)
6438 {
6439 abi_long ret;
6440
6441 switch (option) {
6442 case PR_GET_PDEATHSIG:
6443 {
6444 int deathsig;
6445 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6446 arg3, arg4, arg5));
6447 if (!is_error(ret) &&
6448 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6449 return -TARGET_EFAULT;
6450 }
6451 return ret;
6452 }
6453 case PR_SET_PDEATHSIG:
6454 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6455 arg3, arg4, arg5));
6456 case PR_GET_NAME:
6457 {
6458 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6459 if (!name) {
6460 return -TARGET_EFAULT;
6461 }
6462 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6463 arg3, arg4, arg5));
6464 unlock_user(name, arg2, 16);
6465 return ret;
6466 }
6467 case PR_SET_NAME:
6468 {
6469 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6470 if (!name) {
6471 return -TARGET_EFAULT;
6472 }
6473 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6474 arg3, arg4, arg5));
6475 unlock_user(name, arg2, 0);
6476 return ret;
6477 }
6478 case PR_GET_FP_MODE:
6479 return do_prctl_get_fp_mode(env);
6480 case PR_SET_FP_MODE:
6481 return do_prctl_set_fp_mode(env, arg2);
6482 case PR_SVE_GET_VL:
6483 return do_prctl_sve_get_vl(env);
6484 case PR_SVE_SET_VL:
6485 return do_prctl_sve_set_vl(env, arg2);
6486 case PR_SME_GET_VL:
6487 return do_prctl_sme_get_vl(env);
6488 case PR_SME_SET_VL:
6489 return do_prctl_sme_set_vl(env, arg2);
6490 case PR_PAC_RESET_KEYS:
6491 if (arg3 || arg4 || arg5) {
6492 return -TARGET_EINVAL;
6493 }
6494 return do_prctl_reset_keys(env, arg2);
6495 case PR_SET_TAGGED_ADDR_CTRL:
6496 if (arg3 || arg4 || arg5) {
6497 return -TARGET_EINVAL;
6498 }
6499 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6500 case PR_GET_TAGGED_ADDR_CTRL:
6501 if (arg2 || arg3 || arg4 || arg5) {
6502 return -TARGET_EINVAL;
6503 }
6504 return do_prctl_get_tagged_addr_ctrl(env);
6505
6506 case PR_GET_UNALIGN:
6507 return do_prctl_get_unalign(env, arg2);
6508 case PR_SET_UNALIGN:
6509 return do_prctl_set_unalign(env, arg2);
6510
6511 case PR_CAP_AMBIENT:
6512 case PR_CAPBSET_READ:
6513 case PR_CAPBSET_DROP:
6514 case PR_GET_DUMPABLE:
6515 case PR_SET_DUMPABLE:
6516 case PR_GET_KEEPCAPS:
6517 case PR_SET_KEEPCAPS:
6518 case PR_GET_SECUREBITS:
6519 case PR_SET_SECUREBITS:
6520 case PR_GET_TIMING:
6521 case PR_SET_TIMING:
6522 case PR_GET_TIMERSLACK:
6523 case PR_SET_TIMERSLACK:
6524 case PR_MCE_KILL:
6525 case PR_MCE_KILL_GET:
6526 case PR_GET_NO_NEW_PRIVS:
6527 case PR_SET_NO_NEW_PRIVS:
6528 case PR_GET_IO_FLUSHER:
6529 case PR_SET_IO_FLUSHER:
6530 /* Some prctl options have no pointer arguments and we can pass on. */
6531 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6532
6533 case PR_GET_CHILD_SUBREAPER:
6534 case PR_SET_CHILD_SUBREAPER:
6535 case PR_GET_SPECULATION_CTRL:
6536 case PR_SET_SPECULATION_CTRL:
6537 case PR_GET_TID_ADDRESS:
6538 /* TODO */
6539 return -TARGET_EINVAL;
6540
6541 case PR_GET_FPEXC:
6542 case PR_SET_FPEXC:
6543 /* Was used for SPE on PowerPC. */
6544 return -TARGET_EINVAL;
6545
6546 case PR_GET_ENDIAN:
6547 case PR_SET_ENDIAN:
6548 case PR_GET_FPEMU:
6549 case PR_SET_FPEMU:
6550 case PR_SET_MM:
6551 case PR_GET_SECCOMP:
6552 case PR_SET_SECCOMP:
6553 case PR_SET_SYSCALL_USER_DISPATCH:
6554 case PR_GET_THP_DISABLE:
6555 case PR_SET_THP_DISABLE:
6556 case PR_GET_TSC:
6557 case PR_SET_TSC:
6558 /* Disable to prevent the target disabling stuff we need. */
6559 return -TARGET_EINVAL;
6560
6561 default:
6562 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6563 option);
6564 return -TARGET_EINVAL;
6565 }
6566 }
6567
6568 #define NEW_STACK_SIZE 0x40000
6569
6570
6571 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6572 typedef struct {
6573 CPUArchState *env;
6574 pthread_mutex_t mutex;
6575 pthread_cond_t cond;
6576 pthread_t thread;
6577 uint32_t tid;
6578 abi_ulong child_tidptr;
6579 abi_ulong parent_tidptr;
6580 sigset_t sigmask;
6581 } new_thread_info;
6582
6583 static void *clone_func(void *arg)
6584 {
6585 new_thread_info *info = arg;
6586 CPUArchState *env;
6587 CPUState *cpu;
6588 TaskState *ts;
6589
6590 rcu_register_thread();
6591 tcg_register_thread();
6592 env = info->env;
6593 cpu = env_cpu(env);
6594 thread_cpu = cpu;
6595 ts = (TaskState *)cpu->opaque;
6596 info->tid = sys_gettid();
6597 task_settid(ts);
6598 if (info->child_tidptr)
6599 put_user_u32(info->tid, info->child_tidptr);
6600 if (info->parent_tidptr)
6601 put_user_u32(info->tid, info->parent_tidptr);
6602 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6603 /* Enable signals. */
6604 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6605 /* Signal to the parent that we're ready. */
6606 pthread_mutex_lock(&info->mutex);
6607 pthread_cond_broadcast(&info->cond);
6608 pthread_mutex_unlock(&info->mutex);
6609 /* Wait until the parent has finished initializing the tls state. */
6610 pthread_mutex_lock(&clone_lock);
6611 pthread_mutex_unlock(&clone_lock);
6612 cpu_loop(env);
6613 /* never exits */
6614 return NULL;
6615 }
6616
6617 /* do_fork() Must return host values and target errnos (unlike most
6618 do_*() functions). */
6619 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6620 abi_ulong parent_tidptr, target_ulong newtls,
6621 abi_ulong child_tidptr)
6622 {
6623 CPUState *cpu = env_cpu(env);
6624 int ret;
6625 TaskState *ts;
6626 CPUState *new_cpu;
6627 CPUArchState *new_env;
6628 sigset_t sigmask;
6629
6630 flags &= ~CLONE_IGNORED_FLAGS;
6631
6632 /* Emulate vfork() with fork() */
6633 if (flags & CLONE_VFORK)
6634 flags &= ~(CLONE_VFORK | CLONE_VM);
6635
6636 if (flags & CLONE_VM) {
6637 TaskState *parent_ts = (TaskState *)cpu->opaque;
6638 new_thread_info info;
6639 pthread_attr_t attr;
6640
6641 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6642 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6643 return -TARGET_EINVAL;
6644 }
6645
6646 ts = g_new0(TaskState, 1);
6647 init_task_state(ts);
6648
6649 /* Grab a mutex so that thread setup appears atomic. */
6650 pthread_mutex_lock(&clone_lock);
6651
6652 /*
6653 * If this is our first additional thread, we need to ensure we
6654 * generate code for parallel execution and flush old translations.
6655 * Do this now so that the copy gets CF_PARALLEL too.
6656 */
6657 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6658 cpu->tcg_cflags |= CF_PARALLEL;
6659 tb_flush(cpu);
6660 }
6661
6662 /* we create a new CPU instance. */
6663 new_env = cpu_copy(env);
6664 /* Init regs that differ from the parent. */
6665 cpu_clone_regs_child(new_env, newsp, flags);
6666 cpu_clone_regs_parent(env, flags);
6667 new_cpu = env_cpu(new_env);
6668 new_cpu->opaque = ts;
6669 ts->bprm = parent_ts->bprm;
6670 ts->info = parent_ts->info;
6671 ts->signal_mask = parent_ts->signal_mask;
6672
6673 if (flags & CLONE_CHILD_CLEARTID) {
6674 ts->child_tidptr = child_tidptr;
6675 }
6676
6677 if (flags & CLONE_SETTLS) {
6678 cpu_set_tls (new_env, newtls);
6679 }
6680
6681 memset(&info, 0, sizeof(info));
6682 pthread_mutex_init(&info.mutex, NULL);
6683 pthread_mutex_lock(&info.mutex);
6684 pthread_cond_init(&info.cond, NULL);
6685 info.env = new_env;
6686 if (flags & CLONE_CHILD_SETTID) {
6687 info.child_tidptr = child_tidptr;
6688 }
6689 if (flags & CLONE_PARENT_SETTID) {
6690 info.parent_tidptr = parent_tidptr;
6691 }
6692
6693 ret = pthread_attr_init(&attr);
6694 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6695 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6696 /* It is not safe to deliver signals until the child has finished
6697 initializing, so temporarily block all signals. */
6698 sigfillset(&sigmask);
6699 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6700 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6701
6702 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6703 /* TODO: Free new CPU state if thread creation failed. */
6704
6705 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6706 pthread_attr_destroy(&attr);
6707 if (ret == 0) {
6708 /* Wait for the child to initialize. */
6709 pthread_cond_wait(&info.cond, &info.mutex);
6710 ret = info.tid;
6711 } else {
6712 ret = -1;
6713 }
6714 pthread_mutex_unlock(&info.mutex);
6715 pthread_cond_destroy(&info.cond);
6716 pthread_mutex_destroy(&info.mutex);
6717 pthread_mutex_unlock(&clone_lock);
6718 } else {
6719 /* if no CLONE_VM, we consider it is a fork */
6720 if (flags & CLONE_INVALID_FORK_FLAGS) {
6721 return -TARGET_EINVAL;
6722 }
6723
6724 /* We can't support custom termination signals */
6725 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6726 return -TARGET_EINVAL;
6727 }
6728
6729 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6730 if (flags & CLONE_PIDFD) {
6731 return -TARGET_EINVAL;
6732 }
6733 #endif
6734
6735 /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6736 if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6737 return -TARGET_EINVAL;
6738 }
6739
6740 if (block_signals()) {
6741 return -QEMU_ERESTARTSYS;
6742 }
6743
6744 fork_start();
6745 ret = fork();
6746 if (ret == 0) {
6747 /* Child Process. */
6748 cpu_clone_regs_child(env, newsp, flags);
6749 fork_end(1);
6750 /* There is a race condition here. The parent process could
6751 theoretically read the TID in the child process before the child
6752 tid is set. This would require using either ptrace
6753 (not implemented) or having *_tidptr to point at a shared memory
6754 mapping. We can't repeat the spinlock hack used above because
6755 the child process gets its own copy of the lock. */
6756 if (flags & CLONE_CHILD_SETTID)
6757 put_user_u32(sys_gettid(), child_tidptr);
6758 if (flags & CLONE_PARENT_SETTID)
6759 put_user_u32(sys_gettid(), parent_tidptr);
6760 ts = (TaskState *)cpu->opaque;
6761 if (flags & CLONE_SETTLS)
6762 cpu_set_tls (env, newtls);
6763 if (flags & CLONE_CHILD_CLEARTID)
6764 ts->child_tidptr = child_tidptr;
6765 } else {
6766 cpu_clone_regs_parent(env, flags);
6767 if (flags & CLONE_PIDFD) {
6768 int pid_fd = 0;
6769 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6770 int pid_child = ret;
6771 pid_fd = pidfd_open(pid_child, 0);
6772 if (pid_fd >= 0) {
6773 fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6774 | FD_CLOEXEC);
6775 } else {
6776 pid_fd = 0;
6777 }
6778 #endif
6779 put_user_u32(pid_fd, parent_tidptr);
6780 }
6781 fork_end(0);
6782 }
6783 g_assert(!cpu_in_exclusive_context(cpu));
6784 }
6785 return ret;
6786 }
6787
6788 /* warning : doesn't handle linux specific flags... */
6789 static int target_to_host_fcntl_cmd(int cmd)
6790 {
6791 int ret;
6792
6793 switch(cmd) {
6794 case TARGET_F_DUPFD:
6795 case TARGET_F_GETFD:
6796 case TARGET_F_SETFD:
6797 case TARGET_F_GETFL:
6798 case TARGET_F_SETFL:
6799 case TARGET_F_OFD_GETLK:
6800 case TARGET_F_OFD_SETLK:
6801 case TARGET_F_OFD_SETLKW:
6802 ret = cmd;
6803 break;
6804 case TARGET_F_GETLK:
6805 ret = F_GETLK64;
6806 break;
6807 case TARGET_F_SETLK:
6808 ret = F_SETLK64;
6809 break;
6810 case TARGET_F_SETLKW:
6811 ret = F_SETLKW64;
6812 break;
6813 case TARGET_F_GETOWN:
6814 ret = F_GETOWN;
6815 break;
6816 case TARGET_F_SETOWN:
6817 ret = F_SETOWN;
6818 break;
6819 case TARGET_F_GETSIG:
6820 ret = F_GETSIG;
6821 break;
6822 case TARGET_F_SETSIG:
6823 ret = F_SETSIG;
6824 break;
6825 #if TARGET_ABI_BITS == 32
6826 case TARGET_F_GETLK64:
6827 ret = F_GETLK64;
6828 break;
6829 case TARGET_F_SETLK64:
6830 ret = F_SETLK64;
6831 break;
6832 case TARGET_F_SETLKW64:
6833 ret = F_SETLKW64;
6834 break;
6835 #endif
6836 case TARGET_F_SETLEASE:
6837 ret = F_SETLEASE;
6838 break;
6839 case TARGET_F_GETLEASE:
6840 ret = F_GETLEASE;
6841 break;
6842 #ifdef F_DUPFD_CLOEXEC
6843 case TARGET_F_DUPFD_CLOEXEC:
6844 ret = F_DUPFD_CLOEXEC;
6845 break;
6846 #endif
6847 case TARGET_F_NOTIFY:
6848 ret = F_NOTIFY;
6849 break;
6850 #ifdef F_GETOWN_EX
6851 case TARGET_F_GETOWN_EX:
6852 ret = F_GETOWN_EX;
6853 break;
6854 #endif
6855 #ifdef F_SETOWN_EX
6856 case TARGET_F_SETOWN_EX:
6857 ret = F_SETOWN_EX;
6858 break;
6859 #endif
6860 #ifdef F_SETPIPE_SZ
6861 case TARGET_F_SETPIPE_SZ:
6862 ret = F_SETPIPE_SZ;
6863 break;
6864 case TARGET_F_GETPIPE_SZ:
6865 ret = F_GETPIPE_SZ;
6866 break;
6867 #endif
6868 #ifdef F_ADD_SEALS
6869 case TARGET_F_ADD_SEALS:
6870 ret = F_ADD_SEALS;
6871 break;
6872 case TARGET_F_GET_SEALS:
6873 ret = F_GET_SEALS;
6874 break;
6875 #endif
6876 default:
6877 ret = -TARGET_EINVAL;
6878 break;
6879 }
6880
6881 #if defined(__powerpc64__)
6882 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6883 * is not supported by kernel. The glibc fcntl call actually adjusts
6884 * them to 5, 6 and 7 before making the syscall(). Since we make the
6885 * syscall directly, adjust to what is supported by the kernel.
6886 */
6887 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6888 ret -= F_GETLK64 - 5;
6889 }
6890 #endif
6891
6892 return ret;
6893 }
6894
6895 #define FLOCK_TRANSTBL \
6896 switch (type) { \
6897 TRANSTBL_CONVERT(F_RDLCK); \
6898 TRANSTBL_CONVERT(F_WRLCK); \
6899 TRANSTBL_CONVERT(F_UNLCK); \
6900 }
6901
6902 static int target_to_host_flock(int type)
6903 {
6904 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6905 FLOCK_TRANSTBL
6906 #undef TRANSTBL_CONVERT
6907 return -TARGET_EINVAL;
6908 }
6909
6910 static int host_to_target_flock(int type)
6911 {
6912 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6913 FLOCK_TRANSTBL
6914 #undef TRANSTBL_CONVERT
6915 /* if we don't know how to convert the value coming
6916 * from the host we copy to the target field as-is
6917 */
6918 return type;
6919 }
6920
6921 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6922 abi_ulong target_flock_addr)
6923 {
6924 struct target_flock *target_fl;
6925 int l_type;
6926
6927 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6928 return -TARGET_EFAULT;
6929 }
6930
6931 __get_user(l_type, &target_fl->l_type);
6932 l_type = target_to_host_flock(l_type);
6933 if (l_type < 0) {
6934 return l_type;
6935 }
6936 fl->l_type = l_type;
6937 __get_user(fl->l_whence, &target_fl->l_whence);
6938 __get_user(fl->l_start, &target_fl->l_start);
6939 __get_user(fl->l_len, &target_fl->l_len);
6940 __get_user(fl->l_pid, &target_fl->l_pid);
6941 unlock_user_struct(target_fl, target_flock_addr, 0);
6942 return 0;
6943 }
6944
6945 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6946 const struct flock64 *fl)
6947 {
6948 struct target_flock *target_fl;
6949 short l_type;
6950
6951 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6952 return -TARGET_EFAULT;
6953 }
6954
6955 l_type = host_to_target_flock(fl->l_type);
6956 __put_user(l_type, &target_fl->l_type);
6957 __put_user(fl->l_whence, &target_fl->l_whence);
6958 __put_user(fl->l_start, &target_fl->l_start);
6959 __put_user(fl->l_len, &target_fl->l_len);
6960 __put_user(fl->l_pid, &target_fl->l_pid);
6961 unlock_user_struct(target_fl, target_flock_addr, 1);
6962 return 0;
6963 }
6964
6965 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6966 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6967
6968 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6969 struct target_oabi_flock64 {
6970 abi_short l_type;
6971 abi_short l_whence;
6972 abi_llong l_start;
6973 abi_llong l_len;
6974 abi_int l_pid;
6975 } QEMU_PACKED;
6976
6977 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6978 abi_ulong target_flock_addr)
6979 {
6980 struct target_oabi_flock64 *target_fl;
6981 int l_type;
6982
6983 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6984 return -TARGET_EFAULT;
6985 }
6986
6987 __get_user(l_type, &target_fl->l_type);
6988 l_type = target_to_host_flock(l_type);
6989 if (l_type < 0) {
6990 return l_type;
6991 }
6992 fl->l_type = l_type;
6993 __get_user(fl->l_whence, &target_fl->l_whence);
6994 __get_user(fl->l_start, &target_fl->l_start);
6995 __get_user(fl->l_len, &target_fl->l_len);
6996 __get_user(fl->l_pid, &target_fl->l_pid);
6997 unlock_user_struct(target_fl, target_flock_addr, 0);
6998 return 0;
6999 }
7000
7001 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7002 const struct flock64 *fl)
7003 {
7004 struct target_oabi_flock64 *target_fl;
7005 short l_type;
7006
7007 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7008 return -TARGET_EFAULT;
7009 }
7010
7011 l_type = host_to_target_flock(fl->l_type);
7012 __put_user(l_type, &target_fl->l_type);
7013 __put_user(fl->l_whence, &target_fl->l_whence);
7014 __put_user(fl->l_start, &target_fl->l_start);
7015 __put_user(fl->l_len, &target_fl->l_len);
7016 __put_user(fl->l_pid, &target_fl->l_pid);
7017 unlock_user_struct(target_fl, target_flock_addr, 1);
7018 return 0;
7019 }
7020 #endif
7021
7022 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7023 abi_ulong target_flock_addr)
7024 {
7025 struct target_flock64 *target_fl;
7026 int l_type;
7027
7028 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7029 return -TARGET_EFAULT;
7030 }
7031
7032 __get_user(l_type, &target_fl->l_type);
7033 l_type = target_to_host_flock(l_type);
7034 if (l_type < 0) {
7035 return l_type;
7036 }
7037 fl->l_type = l_type;
7038 __get_user(fl->l_whence, &target_fl->l_whence);
7039 __get_user(fl->l_start, &target_fl->l_start);
7040 __get_user(fl->l_len, &target_fl->l_len);
7041 __get_user(fl->l_pid, &target_fl->l_pid);
7042 unlock_user_struct(target_fl, target_flock_addr, 0);
7043 return 0;
7044 }
7045
7046 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7047 const struct flock64 *fl)
7048 {
7049 struct target_flock64 *target_fl;
7050 short l_type;
7051
7052 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7053 return -TARGET_EFAULT;
7054 }
7055
7056 l_type = host_to_target_flock(fl->l_type);
7057 __put_user(l_type, &target_fl->l_type);
7058 __put_user(fl->l_whence, &target_fl->l_whence);
7059 __put_user(fl->l_start, &target_fl->l_start);
7060 __put_user(fl->l_len, &target_fl->l_len);
7061 __put_user(fl->l_pid, &target_fl->l_pid);
7062 unlock_user_struct(target_fl, target_flock_addr, 1);
7063 return 0;
7064 }
7065
7066 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7067 {
7068 struct flock64 fl64;
7069 #ifdef F_GETOWN_EX
7070 struct f_owner_ex fox;
7071 struct target_f_owner_ex *target_fox;
7072 #endif
7073 abi_long ret;
7074 int host_cmd = target_to_host_fcntl_cmd(cmd);
7075
7076 if (host_cmd == -TARGET_EINVAL)
7077 return host_cmd;
7078
7079 switch(cmd) {
7080 case TARGET_F_GETLK:
7081 ret = copy_from_user_flock(&fl64, arg);
7082 if (ret) {
7083 return ret;
7084 }
7085 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7086 if (ret == 0) {
7087 ret = copy_to_user_flock(arg, &fl64);
7088 }
7089 break;
7090
7091 case TARGET_F_SETLK:
7092 case TARGET_F_SETLKW:
7093 ret = copy_from_user_flock(&fl64, arg);
7094 if (ret) {
7095 return ret;
7096 }
7097 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7098 break;
7099
7100 case TARGET_F_GETLK64:
7101 case TARGET_F_OFD_GETLK:
7102 ret = copy_from_user_flock64(&fl64, arg);
7103 if (ret) {
7104 return ret;
7105 }
7106 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7107 if (ret == 0) {
7108 ret = copy_to_user_flock64(arg, &fl64);
7109 }
7110 break;
7111 case TARGET_F_SETLK64:
7112 case TARGET_F_SETLKW64:
7113 case TARGET_F_OFD_SETLK:
7114 case TARGET_F_OFD_SETLKW:
7115 ret = copy_from_user_flock64(&fl64, arg);
7116 if (ret) {
7117 return ret;
7118 }
7119 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7120 break;
7121
7122 case TARGET_F_GETFL:
7123 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7124 if (ret >= 0) {
7125 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7126 /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7127 if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7128 ret |= TARGET_O_LARGEFILE;
7129 }
7130 }
7131 break;
7132
7133 case TARGET_F_SETFL:
7134 ret = get_errno(safe_fcntl(fd, host_cmd,
7135 target_to_host_bitmask(arg,
7136 fcntl_flags_tbl)));
7137 break;
7138
7139 #ifdef F_GETOWN_EX
7140 case TARGET_F_GETOWN_EX:
7141 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7142 if (ret >= 0) {
7143 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7144 return -TARGET_EFAULT;
7145 target_fox->type = tswap32(fox.type);
7146 target_fox->pid = tswap32(fox.pid);
7147 unlock_user_struct(target_fox, arg, 1);
7148 }
7149 break;
7150 #endif
7151
7152 #ifdef F_SETOWN_EX
7153 case TARGET_F_SETOWN_EX:
7154 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7155 return -TARGET_EFAULT;
7156 fox.type = tswap32(target_fox->type);
7157 fox.pid = tswap32(target_fox->pid);
7158 unlock_user_struct(target_fox, arg, 0);
7159 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7160 break;
7161 #endif
7162
7163 case TARGET_F_SETSIG:
7164 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7165 break;
7166
7167 case TARGET_F_GETSIG:
7168 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7169 break;
7170
7171 case TARGET_F_SETOWN:
7172 case TARGET_F_GETOWN:
7173 case TARGET_F_SETLEASE:
7174 case TARGET_F_GETLEASE:
7175 case TARGET_F_SETPIPE_SZ:
7176 case TARGET_F_GETPIPE_SZ:
7177 case TARGET_F_ADD_SEALS:
7178 case TARGET_F_GET_SEALS:
7179 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7180 break;
7181
7182 default:
7183 ret = get_errno(safe_fcntl(fd, cmd, arg));
7184 break;
7185 }
7186 return ret;
7187 }
7188
7189 #ifdef USE_UID16
7190
7191 static inline int high2lowuid(int uid)
7192 {
7193 if (uid > 65535)
7194 return 65534;
7195 else
7196 return uid;
7197 }
7198
7199 static inline int high2lowgid(int gid)
7200 {
7201 if (gid > 65535)
7202 return 65534;
7203 else
7204 return gid;
7205 }
7206
7207 static inline int low2highuid(int uid)
7208 {
7209 if ((int16_t)uid == -1)
7210 return -1;
7211 else
7212 return uid;
7213 }
7214
7215 static inline int low2highgid(int gid)
7216 {
7217 if ((int16_t)gid == -1)
7218 return -1;
7219 else
7220 return gid;
7221 }
7222 static inline int tswapid(int id)
7223 {
7224 return tswap16(id);
7225 }
7226
7227 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7228
7229 #else /* !USE_UID16 */
7230 static inline int high2lowuid(int uid)
7231 {
7232 return uid;
7233 }
7234 static inline int high2lowgid(int gid)
7235 {
7236 return gid;
7237 }
7238 static inline int low2highuid(int uid)
7239 {
7240 return uid;
7241 }
7242 static inline int low2highgid(int gid)
7243 {
7244 return gid;
7245 }
7246 static inline int tswapid(int id)
7247 {
7248 return tswap32(id);
7249 }
7250
7251 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7252
7253 #endif /* USE_UID16 */
7254
7255 /* We must do direct syscalls for setting UID/GID, because we want to
7256 * implement the Linux system call semantics of "change only for this thread",
7257 * not the libc/POSIX semantics of "change for all threads in process".
7258 * (See http://ewontfix.com/17/ for more details.)
7259 * We use the 32-bit version of the syscalls if present; if it is not
7260 * then either the host architecture supports 32-bit UIDs natively with
7261 * the standard syscall, or the 16-bit UID is the best we can do.
7262 */
7263 #ifdef __NR_setuid32
7264 #define __NR_sys_setuid __NR_setuid32
7265 #else
7266 #define __NR_sys_setuid __NR_setuid
7267 #endif
7268 #ifdef __NR_setgid32
7269 #define __NR_sys_setgid __NR_setgid32
7270 #else
7271 #define __NR_sys_setgid __NR_setgid
7272 #endif
7273 #ifdef __NR_setresuid32
7274 #define __NR_sys_setresuid __NR_setresuid32
7275 #else
7276 #define __NR_sys_setresuid __NR_setresuid
7277 #endif
7278 #ifdef __NR_setresgid32
7279 #define __NR_sys_setresgid __NR_setresgid32
7280 #else
7281 #define __NR_sys_setresgid __NR_setresgid
7282 #endif
7283
7284 _syscall1(int, sys_setuid, uid_t, uid)
7285 _syscall1(int, sys_setgid, gid_t, gid)
7286 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7287 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7288
7289 void syscall_init(void)
7290 {
7291 IOCTLEntry *ie;
7292 const argtype *arg_type;
7293 int size;
7294
7295 thunk_init(STRUCT_MAX);
7296
7297 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7298 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7299 #include "syscall_types.h"
7300 #undef STRUCT
7301 #undef STRUCT_SPECIAL
7302
7303 /* we patch the ioctl size if necessary. We rely on the fact that
7304 no ioctl has all the bits at '1' in the size field */
7305 ie = ioctl_entries;
7306 while (ie->target_cmd != 0) {
7307 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7308 TARGET_IOC_SIZEMASK) {
7309 arg_type = ie->arg_type;
7310 if (arg_type[0] != TYPE_PTR) {
7311 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7312 ie->target_cmd);
7313 exit(1);
7314 }
7315 arg_type++;
7316 size = thunk_type_size(arg_type, 0);
7317 ie->target_cmd = (ie->target_cmd &
7318 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7319 (size << TARGET_IOC_SIZESHIFT);
7320 }
7321
7322 /* automatic consistency check if same arch */
7323 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7324 (defined(__x86_64__) && defined(TARGET_X86_64))
7325 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7326 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7327 ie->name, ie->target_cmd, ie->host_cmd);
7328 }
7329 #endif
7330 ie++;
7331 }
7332 }
7333
7334 #ifdef TARGET_NR_truncate64
7335 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7336 abi_long arg2,
7337 abi_long arg3,
7338 abi_long arg4)
7339 {
7340 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7341 arg2 = arg3;
7342 arg3 = arg4;
7343 }
7344 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7345 }
7346 #endif
7347
7348 #ifdef TARGET_NR_ftruncate64
7349 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7350 abi_long arg2,
7351 abi_long arg3,
7352 abi_long arg4)
7353 {
7354 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7355 arg2 = arg3;
7356 arg3 = arg4;
7357 }
7358 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7359 }
7360 #endif
7361
7362 #if defined(TARGET_NR_timer_settime) || \
7363 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7364 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7365 abi_ulong target_addr)
7366 {
7367 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7368 offsetof(struct target_itimerspec,
7369 it_interval)) ||
7370 target_to_host_timespec(&host_its->it_value, target_addr +
7371 offsetof(struct target_itimerspec,
7372 it_value))) {
7373 return -TARGET_EFAULT;
7374 }
7375
7376 return 0;
7377 }
7378 #endif
7379
7380 #if defined(TARGET_NR_timer_settime64) || \
7381 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7382 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7383 abi_ulong target_addr)
7384 {
7385 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7386 offsetof(struct target__kernel_itimerspec,
7387 it_interval)) ||
7388 target_to_host_timespec64(&host_its->it_value, target_addr +
7389 offsetof(struct target__kernel_itimerspec,
7390 it_value))) {
7391 return -TARGET_EFAULT;
7392 }
7393
7394 return 0;
7395 }
7396 #endif
7397
7398 #if ((defined(TARGET_NR_timerfd_gettime) || \
7399 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7400 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7401 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7402 struct itimerspec *host_its)
7403 {
7404 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7405 it_interval),
7406 &host_its->it_interval) ||
7407 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7408 it_value),
7409 &host_its->it_value)) {
7410 return -TARGET_EFAULT;
7411 }
7412 return 0;
7413 }
7414 #endif
7415
7416 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7417 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7418 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7419 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7420 struct itimerspec *host_its)
7421 {
7422 if (host_to_target_timespec64(target_addr +
7423 offsetof(struct target__kernel_itimerspec,
7424 it_interval),
7425 &host_its->it_interval) ||
7426 host_to_target_timespec64(target_addr +
7427 offsetof(struct target__kernel_itimerspec,
7428 it_value),
7429 &host_its->it_value)) {
7430 return -TARGET_EFAULT;
7431 }
7432 return 0;
7433 }
7434 #endif
7435
7436 #if defined(TARGET_NR_adjtimex) || \
7437 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7438 static inline abi_long target_to_host_timex(struct timex *host_tx,
7439 abi_long target_addr)
7440 {
7441 struct target_timex *target_tx;
7442
7443 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7444 return -TARGET_EFAULT;
7445 }
7446
7447 __get_user(host_tx->modes, &target_tx->modes);
7448 __get_user(host_tx->offset, &target_tx->offset);
7449 __get_user(host_tx->freq, &target_tx->freq);
7450 __get_user(host_tx->maxerror, &target_tx->maxerror);
7451 __get_user(host_tx->esterror, &target_tx->esterror);
7452 __get_user(host_tx->status, &target_tx->status);
7453 __get_user(host_tx->constant, &target_tx->constant);
7454 __get_user(host_tx->precision, &target_tx->precision);
7455 __get_user(host_tx->tolerance, &target_tx->tolerance);
7456 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7457 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7458 __get_user(host_tx->tick, &target_tx->tick);
7459 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7460 __get_user(host_tx->jitter, &target_tx->jitter);
7461 __get_user(host_tx->shift, &target_tx->shift);
7462 __get_user(host_tx->stabil, &target_tx->stabil);
7463 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7464 __get_user(host_tx->calcnt, &target_tx->calcnt);
7465 __get_user(host_tx->errcnt, &target_tx->errcnt);
7466 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7467 __get_user(host_tx->tai, &target_tx->tai);
7468
7469 unlock_user_struct(target_tx, target_addr, 0);
7470 return 0;
7471 }
7472
7473 static inline abi_long host_to_target_timex(abi_long target_addr,
7474 struct timex *host_tx)
7475 {
7476 struct target_timex *target_tx;
7477
7478 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7479 return -TARGET_EFAULT;
7480 }
7481
7482 __put_user(host_tx->modes, &target_tx->modes);
7483 __put_user(host_tx->offset, &target_tx->offset);
7484 __put_user(host_tx->freq, &target_tx->freq);
7485 __put_user(host_tx->maxerror, &target_tx->maxerror);
7486 __put_user(host_tx->esterror, &target_tx->esterror);
7487 __put_user(host_tx->status, &target_tx->status);
7488 __put_user(host_tx->constant, &target_tx->constant);
7489 __put_user(host_tx->precision, &target_tx->precision);
7490 __put_user(host_tx->tolerance, &target_tx->tolerance);
7491 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7492 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7493 __put_user(host_tx->tick, &target_tx->tick);
7494 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7495 __put_user(host_tx->jitter, &target_tx->jitter);
7496 __put_user(host_tx->shift, &target_tx->shift);
7497 __put_user(host_tx->stabil, &target_tx->stabil);
7498 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7499 __put_user(host_tx->calcnt, &target_tx->calcnt);
7500 __put_user(host_tx->errcnt, &target_tx->errcnt);
7501 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7502 __put_user(host_tx->tai, &target_tx->tai);
7503
7504 unlock_user_struct(target_tx, target_addr, 1);
7505 return 0;
7506 }
7507 #endif
7508
7509
7510 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7511 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7512 abi_long target_addr)
7513 {
7514 struct target__kernel_timex *target_tx;
7515
7516 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7517 offsetof(struct target__kernel_timex,
7518 time))) {
7519 return -TARGET_EFAULT;
7520 }
7521
7522 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7523 return -TARGET_EFAULT;
7524 }
7525
7526 __get_user(host_tx->modes, &target_tx->modes);
7527 __get_user(host_tx->offset, &target_tx->offset);
7528 __get_user(host_tx->freq, &target_tx->freq);
7529 __get_user(host_tx->maxerror, &target_tx->maxerror);
7530 __get_user(host_tx->esterror, &target_tx->esterror);
7531 __get_user(host_tx->status, &target_tx->status);
7532 __get_user(host_tx->constant, &target_tx->constant);
7533 __get_user(host_tx->precision, &target_tx->precision);
7534 __get_user(host_tx->tolerance, &target_tx->tolerance);
7535 __get_user(host_tx->tick, &target_tx->tick);
7536 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7537 __get_user(host_tx->jitter, &target_tx->jitter);
7538 __get_user(host_tx->shift, &target_tx->shift);
7539 __get_user(host_tx->stabil, &target_tx->stabil);
7540 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7541 __get_user(host_tx->calcnt, &target_tx->calcnt);
7542 __get_user(host_tx->errcnt, &target_tx->errcnt);
7543 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7544 __get_user(host_tx->tai, &target_tx->tai);
7545
7546 unlock_user_struct(target_tx, target_addr, 0);
7547 return 0;
7548 }
7549
7550 static inline abi_long host_to_target_timex64(abi_long target_addr,
7551 struct timex *host_tx)
7552 {
7553 struct target__kernel_timex *target_tx;
7554
7555 if (copy_to_user_timeval64(target_addr +
7556 offsetof(struct target__kernel_timex, time),
7557 &host_tx->time)) {
7558 return -TARGET_EFAULT;
7559 }
7560
7561 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7562 return -TARGET_EFAULT;
7563 }
7564
7565 __put_user(host_tx->modes, &target_tx->modes);
7566 __put_user(host_tx->offset, &target_tx->offset);
7567 __put_user(host_tx->freq, &target_tx->freq);
7568 __put_user(host_tx->maxerror, &target_tx->maxerror);
7569 __put_user(host_tx->esterror, &target_tx->esterror);
7570 __put_user(host_tx->status, &target_tx->status);
7571 __put_user(host_tx->constant, &target_tx->constant);
7572 __put_user(host_tx->precision, &target_tx->precision);
7573 __put_user(host_tx->tolerance, &target_tx->tolerance);
7574 __put_user(host_tx->tick, &target_tx->tick);
7575 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7576 __put_user(host_tx->jitter, &target_tx->jitter);
7577 __put_user(host_tx->shift, &target_tx->shift);
7578 __put_user(host_tx->stabil, &target_tx->stabil);
7579 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7580 __put_user(host_tx->calcnt, &target_tx->calcnt);
7581 __put_user(host_tx->errcnt, &target_tx->errcnt);
7582 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7583 __put_user(host_tx->tai, &target_tx->tai);
7584
7585 unlock_user_struct(target_tx, target_addr, 1);
7586 return 0;
7587 }
7588 #endif
7589
7590 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7591 #define sigev_notify_thread_id _sigev_un._tid
7592 #endif
7593
7594 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7595 abi_ulong target_addr)
7596 {
7597 struct target_sigevent *target_sevp;
7598
7599 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7600 return -TARGET_EFAULT;
7601 }
7602
7603 /* This union is awkward on 64 bit systems because it has a 32 bit
7604 * integer and a pointer in it; we follow the conversion approach
7605 * used for handling sigval types in signal.c so the guest should get
7606 * the correct value back even if we did a 64 bit byteswap and it's
7607 * using the 32 bit integer.
7608 */
7609 host_sevp->sigev_value.sival_ptr =
7610 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7611 host_sevp->sigev_signo =
7612 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7613 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7614 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7615
7616 unlock_user_struct(target_sevp, target_addr, 1);
7617 return 0;
7618 }
7619
7620 #if defined(TARGET_NR_mlockall)
7621 static inline int target_to_host_mlockall_arg(int arg)
7622 {
7623 int result = 0;
7624
7625 if (arg & TARGET_MCL_CURRENT) {
7626 result |= MCL_CURRENT;
7627 }
7628 if (arg & TARGET_MCL_FUTURE) {
7629 result |= MCL_FUTURE;
7630 }
7631 #ifdef MCL_ONFAULT
7632 if (arg & TARGET_MCL_ONFAULT) {
7633 result |= MCL_ONFAULT;
7634 }
7635 #endif
7636
7637 return result;
7638 }
7639 #endif
7640
7641 static inline int target_to_host_msync_arg(abi_long arg)
7642 {
7643 return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7644 ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7645 ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7646 (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7647 }
7648
7649 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7650 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7651 defined(TARGET_NR_newfstatat))
7652 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7653 abi_ulong target_addr,
7654 struct stat *host_st)
7655 {
7656 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7657 if (cpu_env->eabi) {
7658 struct target_eabi_stat64 *target_st;
7659
7660 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7661 return -TARGET_EFAULT;
7662 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7663 __put_user(host_st->st_dev, &target_st->st_dev);
7664 __put_user(host_st->st_ino, &target_st->st_ino);
7665 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7666 __put_user(host_st->st_ino, &target_st->__st_ino);
7667 #endif
7668 __put_user(host_st->st_mode, &target_st->st_mode);
7669 __put_user(host_st->st_nlink, &target_st->st_nlink);
7670 __put_user(host_st->st_uid, &target_st->st_uid);
7671 __put_user(host_st->st_gid, &target_st->st_gid);
7672 __put_user(host_st->st_rdev, &target_st->st_rdev);
7673 __put_user(host_st->st_size, &target_st->st_size);
7674 __put_user(host_st->st_blksize, &target_st->st_blksize);
7675 __put_user(host_st->st_blocks, &target_st->st_blocks);
7676 __put_user(host_st->st_atime, &target_st->target_st_atime);
7677 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7678 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7679 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7680 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7681 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7682 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7683 #endif
7684 unlock_user_struct(target_st, target_addr, 1);
7685 } else
7686 #endif
7687 {
7688 #if defined(TARGET_HAS_STRUCT_STAT64)
7689 struct target_stat64 *target_st;
7690 #else
7691 struct target_stat *target_st;
7692 #endif
7693
7694 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7695 return -TARGET_EFAULT;
7696 memset(target_st, 0, sizeof(*target_st));
7697 __put_user(host_st->st_dev, &target_st->st_dev);
7698 __put_user(host_st->st_ino, &target_st->st_ino);
7699 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7700 __put_user(host_st->st_ino, &target_st->__st_ino);
7701 #endif
7702 __put_user(host_st->st_mode, &target_st->st_mode);
7703 __put_user(host_st->st_nlink, &target_st->st_nlink);
7704 __put_user(host_st->st_uid, &target_st->st_uid);
7705 __put_user(host_st->st_gid, &target_st->st_gid);
7706 __put_user(host_st->st_rdev, &target_st->st_rdev);
7707 /* XXX: better use of kernel struct */
7708 __put_user(host_st->st_size, &target_st->st_size);
7709 __put_user(host_st->st_blksize, &target_st->st_blksize);
7710 __put_user(host_st->st_blocks, &target_st->st_blocks);
7711 __put_user(host_st->st_atime, &target_st->target_st_atime);
7712 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7713 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7714 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7715 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7716 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7717 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7718 #endif
7719 unlock_user_struct(target_st, target_addr, 1);
7720 }
7721
7722 return 0;
7723 }
7724 #endif
7725
7726 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7727 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7728 abi_ulong target_addr)
7729 {
7730 struct target_statx *target_stx;
7731
7732 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7733 return -TARGET_EFAULT;
7734 }
7735 memset(target_stx, 0, sizeof(*target_stx));
7736
7737 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7738 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7739 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7740 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7741 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7742 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7743 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7744 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7745 __put_user(host_stx->stx_size, &target_stx->stx_size);
7746 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7747 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7748 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7749 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7750 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7751 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7752 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7753 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7754 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7755 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7756 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7757 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7758 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7759 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7760
7761 unlock_user_struct(target_stx, target_addr, 1);
7762
7763 return 0;
7764 }
7765 #endif
7766
7767 static int do_sys_futex(int *uaddr, int op, int val,
7768 const struct timespec *timeout, int *uaddr2,
7769 int val3)
7770 {
7771 #if HOST_LONG_BITS == 64
7772 #if defined(__NR_futex)
7773 /* always a 64-bit time_t, it doesn't define _time64 version */
7774 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7775
7776 #endif
7777 #else /* HOST_LONG_BITS == 64 */
7778 #if defined(__NR_futex_time64)
7779 if (sizeof(timeout->tv_sec) == 8) {
7780 /* _time64 function on 32bit arch */
7781 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7782 }
7783 #endif
7784 #if defined(__NR_futex)
7785 /* old function on 32bit arch */
7786 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7787 #endif
7788 #endif /* HOST_LONG_BITS == 64 */
7789 g_assert_not_reached();
7790 }
7791
7792 static int do_safe_futex(int *uaddr, int op, int val,
7793 const struct timespec *timeout, int *uaddr2,
7794 int val3)
7795 {
7796 #if HOST_LONG_BITS == 64
7797 #if defined(__NR_futex)
7798 /* always a 64-bit time_t, it doesn't define _time64 version */
7799 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7800 #endif
7801 #else /* HOST_LONG_BITS == 64 */
7802 #if defined(__NR_futex_time64)
7803 if (sizeof(timeout->tv_sec) == 8) {
7804 /* _time64 function on 32bit arch */
7805 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7806 val3));
7807 }
7808 #endif
7809 #if defined(__NR_futex)
7810 /* old function on 32bit arch */
7811 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7812 #endif
7813 #endif /* HOST_LONG_BITS == 64 */
7814 return -TARGET_ENOSYS;
7815 }
7816
7817 /* ??? Using host futex calls even when target atomic operations
7818 are not really atomic probably breaks things. However implementing
7819 futexes locally would make futexes shared between multiple processes
7820 tricky. However they're probably useless because guest atomic
7821 operations won't work either. */
7822 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7823 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7824 int op, int val, target_ulong timeout,
7825 target_ulong uaddr2, int val3)
7826 {
7827 struct timespec ts, *pts = NULL;
7828 void *haddr2 = NULL;
7829 int base_op;
7830
7831 /* We assume FUTEX_* constants are the same on both host and target. */
7832 #ifdef FUTEX_CMD_MASK
7833 base_op = op & FUTEX_CMD_MASK;
7834 #else
7835 base_op = op;
7836 #endif
7837 switch (base_op) {
7838 case FUTEX_WAIT:
7839 case FUTEX_WAIT_BITSET:
7840 val = tswap32(val);
7841 break;
7842 case FUTEX_WAIT_REQUEUE_PI:
7843 val = tswap32(val);
7844 haddr2 = g2h(cpu, uaddr2);
7845 break;
7846 case FUTEX_LOCK_PI:
7847 case FUTEX_LOCK_PI2:
7848 break;
7849 case FUTEX_WAKE:
7850 case FUTEX_WAKE_BITSET:
7851 case FUTEX_TRYLOCK_PI:
7852 case FUTEX_UNLOCK_PI:
7853 timeout = 0;
7854 break;
7855 case FUTEX_FD:
7856 val = target_to_host_signal(val);
7857 timeout = 0;
7858 break;
7859 case FUTEX_CMP_REQUEUE:
7860 case FUTEX_CMP_REQUEUE_PI:
7861 val3 = tswap32(val3);
7862 /* fall through */
7863 case FUTEX_REQUEUE:
7864 case FUTEX_WAKE_OP:
7865 /*
7866 * For these, the 4th argument is not TIMEOUT, but VAL2.
7867 * But the prototype of do_safe_futex takes a pointer, so
7868 * insert casts to satisfy the compiler. We do not need
7869 * to tswap VAL2 since it's not compared to guest memory.
7870 */
7871 pts = (struct timespec *)(uintptr_t)timeout;
7872 timeout = 0;
7873 haddr2 = g2h(cpu, uaddr2);
7874 break;
7875 default:
7876 return -TARGET_ENOSYS;
7877 }
7878 if (timeout) {
7879 pts = &ts;
7880 if (time64
7881 ? target_to_host_timespec64(pts, timeout)
7882 : target_to_host_timespec(pts, timeout)) {
7883 return -TARGET_EFAULT;
7884 }
7885 }
7886 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7887 }
7888 #endif
7889
7890 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7891 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7892 abi_long handle, abi_long mount_id,
7893 abi_long flags)
7894 {
7895 struct file_handle *target_fh;
7896 struct file_handle *fh;
7897 int mid = 0;
7898 abi_long ret;
7899 char *name;
7900 unsigned int size, total_size;
7901
7902 if (get_user_s32(size, handle)) {
7903 return -TARGET_EFAULT;
7904 }
7905
7906 name = lock_user_string(pathname);
7907 if (!name) {
7908 return -TARGET_EFAULT;
7909 }
7910
7911 total_size = sizeof(struct file_handle) + size;
7912 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7913 if (!target_fh) {
7914 unlock_user(name, pathname, 0);
7915 return -TARGET_EFAULT;
7916 }
7917
7918 fh = g_malloc0(total_size);
7919 fh->handle_bytes = size;
7920
7921 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7922 unlock_user(name, pathname, 0);
7923
7924 /* man name_to_handle_at(2):
7925 * Other than the use of the handle_bytes field, the caller should treat
7926 * the file_handle structure as an opaque data type
7927 */
7928
7929 memcpy(target_fh, fh, total_size);
7930 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7931 target_fh->handle_type = tswap32(fh->handle_type);
7932 g_free(fh);
7933 unlock_user(target_fh, handle, total_size);
7934
7935 if (put_user_s32(mid, mount_id)) {
7936 return -TARGET_EFAULT;
7937 }
7938
7939 return ret;
7940
7941 }
7942 #endif
7943
7944 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7945 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7946 abi_long flags)
7947 {
7948 struct file_handle *target_fh;
7949 struct file_handle *fh;
7950 unsigned int size, total_size;
7951 abi_long ret;
7952
7953 if (get_user_s32(size, handle)) {
7954 return -TARGET_EFAULT;
7955 }
7956
7957 total_size = sizeof(struct file_handle) + size;
7958 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7959 if (!target_fh) {
7960 return -TARGET_EFAULT;
7961 }
7962
7963 fh = g_memdup(target_fh, total_size);
7964 fh->handle_bytes = size;
7965 fh->handle_type = tswap32(target_fh->handle_type);
7966
7967 ret = get_errno(open_by_handle_at(mount_fd, fh,
7968 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7969
7970 g_free(fh);
7971
7972 unlock_user(target_fh, handle, total_size);
7973
7974 return ret;
7975 }
7976 #endif
7977
7978 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7979
7980 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7981 {
7982 int host_flags;
7983 target_sigset_t *target_mask;
7984 sigset_t host_mask;
7985 abi_long ret;
7986
7987 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7988 return -TARGET_EINVAL;
7989 }
7990 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7991 return -TARGET_EFAULT;
7992 }
7993
7994 target_to_host_sigset(&host_mask, target_mask);
7995
7996 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7997
7998 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7999 if (ret >= 0) {
8000 fd_trans_register(ret, &target_signalfd_trans);
8001 }
8002
8003 unlock_user_struct(target_mask, mask, 0);
8004
8005 return ret;
8006 }
8007 #endif
8008
8009 /* Map host to target signal numbers for the wait family of syscalls.
8010 Assume all other status bits are the same. */
8011 int host_to_target_waitstatus(int status)
8012 {
8013 if (WIFSIGNALED(status)) {
8014 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8015 }
8016 if (WIFSTOPPED(status)) {
8017 return (host_to_target_signal(WSTOPSIG(status)) << 8)
8018 | (status & 0xff);
8019 }
8020 return status;
8021 }
8022
8023 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8024 {
8025 CPUState *cpu = env_cpu(cpu_env);
8026 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8027 int i;
8028
8029 for (i = 0; i < bprm->argc; i++) {
8030 size_t len = strlen(bprm->argv[i]) + 1;
8031
8032 if (write(fd, bprm->argv[i], len) != len) {
8033 return -1;
8034 }
8035 }
8036
8037 return 0;
8038 }
8039
8040 static void show_smaps(int fd, unsigned long size)
8041 {
8042 unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8043 unsigned long size_kb = size >> 10;
8044
8045 dprintf(fd, "Size: %lu kB\n"
8046 "KernelPageSize: %lu kB\n"
8047 "MMUPageSize: %lu kB\n"
8048 "Rss: 0 kB\n"
8049 "Pss: 0 kB\n"
8050 "Pss_Dirty: 0 kB\n"
8051 "Shared_Clean: 0 kB\n"
8052 "Shared_Dirty: 0 kB\n"
8053 "Private_Clean: 0 kB\n"
8054 "Private_Dirty: 0 kB\n"
8055 "Referenced: 0 kB\n"
8056 "Anonymous: 0 kB\n"
8057 "LazyFree: 0 kB\n"
8058 "AnonHugePages: 0 kB\n"
8059 "ShmemPmdMapped: 0 kB\n"
8060 "FilePmdMapped: 0 kB\n"
8061 "Shared_Hugetlb: 0 kB\n"
8062 "Private_Hugetlb: 0 kB\n"
8063 "Swap: 0 kB\n"
8064 "SwapPss: 0 kB\n"
8065 "Locked: 0 kB\n"
8066 "THPeligible: 0\n", size_kb, page_size_kb, page_size_kb);
8067 }
8068
8069 static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
8070 {
8071 CPUState *cpu = env_cpu(cpu_env);
8072 TaskState *ts = cpu->opaque;
8073 IntervalTreeRoot *map_info = read_self_maps();
8074 IntervalTreeNode *s;
8075 int count;
8076
8077 for (s = interval_tree_iter_first(map_info, 0, -1); s;
8078 s = interval_tree_iter_next(s, 0, -1)) {
8079 MapInfo *e = container_of(s, MapInfo, itree);
8080
8081 if (h2g_valid(e->itree.start)) {
8082 unsigned long min = e->itree.start;
8083 unsigned long max = e->itree.last + 1;
8084 int flags = page_get_flags(h2g(min));
8085 const char *path;
8086
8087 max = h2g_valid(max - 1) ?
8088 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8089
8090 if (!page_check_range(h2g(min), max - min, flags)) {
8091 continue;
8092 }
8093
8094 #ifdef TARGET_HPPA
8095 if (h2g(max) == ts->info->stack_limit) {
8096 #else
8097 if (h2g(min) == ts->info->stack_limit) {
8098 #endif
8099 path = "[stack]";
8100 } else {
8101 path = e->path;
8102 }
8103
8104 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8105 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8106 h2g(min), h2g(max - 1) + 1,
8107 (flags & PAGE_READ) ? 'r' : '-',
8108 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8109 (flags & PAGE_EXEC) ? 'x' : '-',
8110 e->is_priv ? 'p' : 's',
8111 (uint64_t) e->offset, e->dev, e->inode);
8112 if (path) {
8113 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8114 } else {
8115 dprintf(fd, "\n");
8116 }
8117 if (smaps) {
8118 show_smaps(fd, max - min);
8119 dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
8120 (flags & PAGE_READ) ? " rd" : "",
8121 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8122 (flags & PAGE_EXEC) ? " ex" : "",
8123 e->is_priv ? "" : " sh",
8124 (flags & PAGE_READ) ? " mr" : "",
8125 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8126 (flags & PAGE_EXEC) ? " me" : "",
8127 e->is_priv ? "" : " ms");
8128 }
8129 }
8130 }
8131
8132 free_self_maps(map_info);
8133
8134 #ifdef TARGET_VSYSCALL_PAGE
8135 /*
8136 * We only support execution from the vsyscall page.
8137 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8138 */
8139 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8140 " --xp 00000000 00:00 0",
8141 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8142 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8143 if (smaps) {
8144 show_smaps(fd, TARGET_PAGE_SIZE);
8145 dprintf(fd, "VmFlags: ex\n");
8146 }
8147 #endif
8148
8149 return 0;
8150 }
8151
8152 static int open_self_maps(CPUArchState *cpu_env, int fd)
8153 {
8154 return open_self_maps_1(cpu_env, fd, false);
8155 }
8156
8157 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8158 {
8159 return open_self_maps_1(cpu_env, fd, true);
8160 }
8161
8162 static int open_self_stat(CPUArchState *cpu_env, int fd)
8163 {
8164 CPUState *cpu = env_cpu(cpu_env);
8165 TaskState *ts = cpu->opaque;
8166 g_autoptr(GString) buf = g_string_new(NULL);
8167 int i;
8168
8169 for (i = 0; i < 44; i++) {
8170 if (i == 0) {
8171 /* pid */
8172 g_string_printf(buf, FMT_pid " ", getpid());
8173 } else if (i == 1) {
8174 /* app name */
8175 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8176 bin = bin ? bin + 1 : ts->bprm->argv[0];
8177 g_string_printf(buf, "(%.15s) ", bin);
8178 } else if (i == 2) {
8179 /* task state */
8180 g_string_assign(buf, "R "); /* we are running right now */
8181 } else if (i == 3) {
8182 /* ppid */
8183 g_string_printf(buf, FMT_pid " ", getppid());
8184 } else if (i == 21) {
8185 /* starttime */
8186 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8187 } else if (i == 27) {
8188 /* stack bottom */
8189 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8190 } else {
8191 /* for the rest, there is MasterCard */
8192 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8193 }
8194
8195 if (write(fd, buf->str, buf->len) != buf->len) {
8196 return -1;
8197 }
8198 }
8199
8200 return 0;
8201 }
8202
8203 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8204 {
8205 CPUState *cpu = env_cpu(cpu_env);
8206 TaskState *ts = cpu->opaque;
8207 abi_ulong auxv = ts->info->saved_auxv;
8208 abi_ulong len = ts->info->auxv_len;
8209 char *ptr;
8210
8211 /*
8212 * Auxiliary vector is stored in target process stack.
8213 * read in whole auxv vector and copy it to file
8214 */
8215 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8216 if (ptr != NULL) {
8217 while (len > 0) {
8218 ssize_t r;
8219 r = write(fd, ptr, len);
8220 if (r <= 0) {
8221 break;
8222 }
8223 len -= r;
8224 ptr += r;
8225 }
8226 lseek(fd, 0, SEEK_SET);
8227 unlock_user(ptr, auxv, len);
8228 }
8229
8230 return 0;
8231 }
8232
8233 static int is_proc_myself(const char *filename, const char *entry)
8234 {
8235 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8236 filename += strlen("/proc/");
8237 if (!strncmp(filename, "self/", strlen("self/"))) {
8238 filename += strlen("self/");
8239 } else if (*filename >= '1' && *filename <= '9') {
8240 char myself[80];
8241 snprintf(myself, sizeof(myself), "%d/", getpid());
8242 if (!strncmp(filename, myself, strlen(myself))) {
8243 filename += strlen(myself);
8244 } else {
8245 return 0;
8246 }
8247 } else {
8248 return 0;
8249 }
8250 if (!strcmp(filename, entry)) {
8251 return 1;
8252 }
8253 }
8254 return 0;
8255 }
8256
8257 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8258 const char *fmt, int code)
8259 {
8260 if (logfile) {
8261 CPUState *cs = env_cpu(env);
8262
8263 fprintf(logfile, fmt, code);
8264 fprintf(logfile, "Failing executable: %s\n", exec_path);
8265 cpu_dump_state(cs, logfile, 0);
8266 open_self_maps(env, fileno(logfile));
8267 }
8268 }
8269
8270 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8271 {
8272 /* dump to console */
8273 excp_dump_file(stderr, env, fmt, code);
8274
8275 /* dump to log file */
8276 if (qemu_log_separate()) {
8277 FILE *logfile = qemu_log_trylock();
8278
8279 excp_dump_file(logfile, env, fmt, code);
8280 qemu_log_unlock(logfile);
8281 }
8282 }
8283
8284 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8285 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8286 defined(TARGET_RISCV) || defined(TARGET_S390X)
8287 static int is_proc(const char *filename, const char *entry)
8288 {
8289 return strcmp(filename, entry) == 0;
8290 }
8291 #endif
8292
8293 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8294 static int open_net_route(CPUArchState *cpu_env, int fd)
8295 {
8296 FILE *fp;
8297 char *line = NULL;
8298 size_t len = 0;
8299 ssize_t read;
8300
8301 fp = fopen("/proc/net/route", "r");
8302 if (fp == NULL) {
8303 return -1;
8304 }
8305
8306 /* read header */
8307
8308 read = getline(&line, &len, fp);
8309 dprintf(fd, "%s", line);
8310
8311 /* read routes */
8312
8313 while ((read = getline(&line, &len, fp)) != -1) {
8314 char iface[16];
8315 uint32_t dest, gw, mask;
8316 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8317 int fields;
8318
8319 fields = sscanf(line,
8320 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8321 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8322 &mask, &mtu, &window, &irtt);
8323 if (fields != 11) {
8324 continue;
8325 }
8326 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8327 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8328 metric, tswap32(mask), mtu, window, irtt);
8329 }
8330
8331 free(line);
8332 fclose(fp);
8333
8334 return 0;
8335 }
8336 #endif
8337
8338 #if defined(TARGET_SPARC)
8339 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8340 {
8341 dprintf(fd, "type\t\t: sun4u\n");
8342 return 0;
8343 }
8344 #endif
8345
8346 #if defined(TARGET_HPPA)
8347 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8348 {
8349 int i, num_cpus;
8350
8351 num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8352 for (i = 0; i < num_cpus; i++) {
8353 dprintf(fd, "processor\t: %d\n", i);
8354 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8355 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8356 dprintf(fd, "capabilities\t: os32\n");
8357 dprintf(fd, "model\t\t: 9000/778/B160L - "
8358 "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8359 }
8360 return 0;
8361 }
8362 #endif
8363
8364 #if defined(TARGET_RISCV)
8365 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8366 {
8367 int i;
8368 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8369 RISCVCPU *cpu = env_archcpu(cpu_env);
8370 const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
8371 char *isa_string = riscv_isa_string(cpu);
8372 const char *mmu;
8373
8374 if (cfg->mmu) {
8375 mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48";
8376 } else {
8377 mmu = "none";
8378 }
8379
8380 for (i = 0; i < num_cpus; i++) {
8381 dprintf(fd, "processor\t: %d\n", i);
8382 dprintf(fd, "hart\t\t: %d\n", i);
8383 dprintf(fd, "isa\t\t: %s\n", isa_string);
8384 dprintf(fd, "mmu\t\t: %s\n", mmu);
8385 dprintf(fd, "uarch\t\t: qemu\n\n");
8386 }
8387
8388 g_free(isa_string);
8389 return 0;
8390 }
8391 #endif
8392
8393 #if defined(TARGET_S390X)
8394 /*
8395 * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8396 * show in /proc/cpuinfo.
8397 *
8398 * Skip the following in order to match the missing support in op_ecag():
8399 * - show_cacheinfo().
8400 * - show_cpu_topology().
8401 * - show_cpu_mhz().
8402 *
8403 * Use fixed values for certain fields:
8404 * - bogomips per cpu - from a qemu-system-s390x run.
8405 * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8406 *
8407 * Keep the code structure close to arch/s390/kernel/processor.c.
8408 */
8409
8410 static void show_facilities(int fd)
8411 {
8412 size_t sizeof_stfl_bytes = 2048;
8413 g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
8414 unsigned int bit;
8415
8416 dprintf(fd, "facilities :");
8417 s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
8418 for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
8419 if (test_be_bit(bit, stfl_bytes)) {
8420 dprintf(fd, " %d", bit);
8421 }
8422 }
8423 dprintf(fd, "\n");
8424 }
8425
8426 static int cpu_ident(unsigned long n)
8427 {
8428 return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
8429 n);
8430 }
8431
8432 static void show_cpu_summary(CPUArchState *cpu_env, int fd)
8433 {
8434 S390CPUModel *model = env_archcpu(cpu_env)->model;
8435 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8436 uint32_t elf_hwcap = get_elf_hwcap();
8437 const char *hwcap_str;
8438 int i;
8439
8440 dprintf(fd, "vendor_id : IBM/S390\n"
8441 "# processors : %i\n"
8442 "bogomips per cpu: 13370.00\n",
8443 num_cpus);
8444 dprintf(fd, "max thread id : 0\n");
8445 dprintf(fd, "features\t: ");
8446 for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
8447 if (!(elf_hwcap & (1 << i))) {
8448 continue;
8449 }
8450 hwcap_str = elf_hwcap_str(i);
8451 if (hwcap_str) {
8452 dprintf(fd, "%s ", hwcap_str);
8453 }
8454 }
8455 dprintf(fd, "\n");
8456 show_facilities(fd);
8457 for (i = 0; i < num_cpus; i++) {
8458 dprintf(fd, "processor %d: "
8459 "version = %02X, "
8460 "identification = %06X, "
8461 "machine = %04X\n",
8462 i, model->cpu_ver, cpu_ident(i), model->def->type);
8463 }
8464 }
8465
8466 static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
8467 {
8468 S390CPUModel *model = env_archcpu(cpu_env)->model;
8469
8470 dprintf(fd, "version : %02X\n", model->cpu_ver);
8471 dprintf(fd, "identification : %06X\n", cpu_ident(n));
8472 dprintf(fd, "machine : %04X\n", model->def->type);
8473 }
8474
8475 static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
8476 {
8477 dprintf(fd, "\ncpu number : %ld\n", n);
8478 show_cpu_ids(cpu_env, fd, n);
8479 }
8480
8481 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8482 {
8483 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8484 int i;
8485
8486 show_cpu_summary(cpu_env, fd);
8487 for (i = 0; i < num_cpus; i++) {
8488 show_cpuinfo(cpu_env, fd, i);
8489 }
8490 return 0;
8491 }
8492 #endif
8493
8494 #if defined(TARGET_M68K)
8495 static int open_hardware(CPUArchState *cpu_env, int fd)
8496 {
8497 dprintf(fd, "Model:\t\tqemu-m68k\n");
8498 return 0;
8499 }
8500 #endif
8501
8502 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8503 int flags, mode_t mode, bool safe)
8504 {
8505 struct fake_open {
8506 const char *filename;
8507 int (*fill)(CPUArchState *cpu_env, int fd);
8508 int (*cmp)(const char *s1, const char *s2);
8509 };
8510 const struct fake_open *fake_open;
8511 static const struct fake_open fakes[] = {
8512 { "maps", open_self_maps, is_proc_myself },
8513 { "smaps", open_self_smaps, is_proc_myself },
8514 { "stat", open_self_stat, is_proc_myself },
8515 { "auxv", open_self_auxv, is_proc_myself },
8516 { "cmdline", open_self_cmdline, is_proc_myself },
8517 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8518 { "/proc/net/route", open_net_route, is_proc },
8519 #endif
8520 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
8521 defined(TARGET_RISCV) || defined(TARGET_S390X)
8522 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8523 #endif
8524 #if defined(TARGET_M68K)
8525 { "/proc/hardware", open_hardware, is_proc },
8526 #endif
8527 { NULL, NULL, NULL }
8528 };
8529
8530 if (is_proc_myself(pathname, "exe")) {
8531 if (safe) {
8532 return safe_openat(dirfd, exec_path, flags, mode);
8533 } else {
8534 return openat(dirfd, exec_path, flags, mode);
8535 }
8536 }
8537
8538 for (fake_open = fakes; fake_open->filename; fake_open++) {
8539 if (fake_open->cmp(pathname, fake_open->filename)) {
8540 break;
8541 }
8542 }
8543
8544 if (fake_open->filename) {
8545 const char *tmpdir;
8546 char filename[PATH_MAX];
8547 int fd, r;
8548
8549 fd = memfd_create("qemu-open", 0);
8550 if (fd < 0) {
8551 if (errno != ENOSYS) {
8552 return fd;
8553 }
8554 /* create temporary file to map stat to */
8555 tmpdir = getenv("TMPDIR");
8556 if (!tmpdir)
8557 tmpdir = "/tmp";
8558 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8559 fd = mkstemp(filename);
8560 if (fd < 0) {
8561 return fd;
8562 }
8563 unlink(filename);
8564 }
8565
8566 if ((r = fake_open->fill(cpu_env, fd))) {
8567 int e = errno;
8568 close(fd);
8569 errno = e;
8570 return r;
8571 }
8572 lseek(fd, 0, SEEK_SET);
8573
8574 return fd;
8575 }
8576
8577 if (safe) {
8578 return safe_openat(dirfd, path(pathname), flags, mode);
8579 } else {
8580 return openat(dirfd, path(pathname), flags, mode);
8581 }
8582 }
8583
8584 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8585 {
8586 ssize_t ret;
8587
8588 if (!pathname || !buf) {
8589 errno = EFAULT;
8590 return -1;
8591 }
8592
8593 if (!bufsiz) {
8594 /* Short circuit this for the magic exe check. */
8595 errno = EINVAL;
8596 return -1;
8597 }
8598
8599 if (is_proc_myself((const char *)pathname, "exe")) {
8600 /*
8601 * Don't worry about sign mismatch as earlier mapping
8602 * logic would have thrown a bad address error.
8603 */
8604 ret = MIN(strlen(exec_path), bufsiz);
8605 /* We cannot NUL terminate the string. */
8606 memcpy(buf, exec_path, ret);
8607 } else {
8608 ret = readlink(path(pathname), buf, bufsiz);
8609 }
8610
8611 return ret;
8612 }
8613
8614 static int do_execv(CPUArchState *cpu_env, int dirfd,
8615 abi_long pathname, abi_long guest_argp,
8616 abi_long guest_envp, int flags, bool is_execveat)
8617 {
8618 int ret;
8619 char **argp, **envp;
8620 int argc, envc;
8621 abi_ulong gp;
8622 abi_ulong addr;
8623 char **q;
8624 void *p;
8625
8626 argc = 0;
8627
8628 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8629 if (get_user_ual(addr, gp)) {
8630 return -TARGET_EFAULT;
8631 }
8632 if (!addr) {
8633 break;
8634 }
8635 argc++;
8636 }
8637 envc = 0;
8638 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8639 if (get_user_ual(addr, gp)) {
8640 return -TARGET_EFAULT;
8641 }
8642 if (!addr) {
8643 break;
8644 }
8645 envc++;
8646 }
8647
8648 argp = g_new0(char *, argc + 1);
8649 envp = g_new0(char *, envc + 1);
8650
8651 for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8652 if (get_user_ual(addr, gp)) {
8653 goto execve_efault;
8654 }
8655 if (!addr) {
8656 break;
8657 }
8658 *q = lock_user_string(addr);
8659 if (!*q) {
8660 goto execve_efault;
8661 }
8662 }
8663 *q = NULL;
8664
8665 for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8666 if (get_user_ual(addr, gp)) {
8667 goto execve_efault;
8668 }
8669 if (!addr) {
8670 break;
8671 }
8672 *q = lock_user_string(addr);
8673 if (!*q) {
8674 goto execve_efault;
8675 }
8676 }
8677 *q = NULL;
8678
8679 /*
8680 * Although execve() is not an interruptible syscall it is
8681 * a special case where we must use the safe_syscall wrapper:
8682 * if we allow a signal to happen before we make the host
8683 * syscall then we will 'lose' it, because at the point of
8684 * execve the process leaves QEMU's control. So we use the
8685 * safe syscall wrapper to ensure that we either take the
8686 * signal as a guest signal, or else it does not happen
8687 * before the execve completes and makes it the other
8688 * program's problem.
8689 */
8690 p = lock_user_string(pathname);
8691 if (!p) {
8692 goto execve_efault;
8693 }
8694
8695 const char *exe = p;
8696 if (is_proc_myself(p, "exe")) {
8697 exe = exec_path;
8698 }
8699 ret = is_execveat
8700 ? safe_execveat(dirfd, exe, argp, envp, flags)
8701 : safe_execve(exe, argp, envp);
8702 ret = get_errno(ret);
8703
8704 unlock_user(p, pathname, 0);
8705
8706 goto execve_end;
8707
8708 execve_efault:
8709 ret = -TARGET_EFAULT;
8710
8711 execve_end:
8712 for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8713 if (get_user_ual(addr, gp) || !addr) {
8714 break;
8715 }
8716 unlock_user(*q, addr, 0);
8717 }
8718 for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8719 if (get_user_ual(addr, gp) || !addr) {
8720 break;
8721 }
8722 unlock_user(*q, addr, 0);
8723 }
8724
8725 g_free(argp);
8726 g_free(envp);
8727 return ret;
8728 }
8729
8730 #define TIMER_MAGIC 0x0caf0000
8731 #define TIMER_MAGIC_MASK 0xffff0000
8732
8733 /* Convert QEMU provided timer ID back to internal 16bit index format */
8734 static target_timer_t get_timer_id(abi_long arg)
8735 {
8736 target_timer_t timerid = arg;
8737
8738 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8739 return -TARGET_EINVAL;
8740 }
8741
8742 timerid &= 0xffff;
8743
8744 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8745 return -TARGET_EINVAL;
8746 }
8747
8748 return timerid;
8749 }
8750
8751 static int target_to_host_cpu_mask(unsigned long *host_mask,
8752 size_t host_size,
8753 abi_ulong target_addr,
8754 size_t target_size)
8755 {
8756 unsigned target_bits = sizeof(abi_ulong) * 8;
8757 unsigned host_bits = sizeof(*host_mask) * 8;
8758 abi_ulong *target_mask;
8759 unsigned i, j;
8760
8761 assert(host_size >= target_size);
8762
8763 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8764 if (!target_mask) {
8765 return -TARGET_EFAULT;
8766 }
8767 memset(host_mask, 0, host_size);
8768
8769 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8770 unsigned bit = i * target_bits;
8771 abi_ulong val;
8772
8773 __get_user(val, &target_mask[i]);
8774 for (j = 0; j < target_bits; j++, bit++) {
8775 if (val & (1UL << j)) {
8776 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8777 }
8778 }
8779 }
8780
8781 unlock_user(target_mask, target_addr, 0);
8782 return 0;
8783 }
8784
8785 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8786 size_t host_size,
8787 abi_ulong target_addr,
8788 size_t target_size)
8789 {
8790 unsigned target_bits = sizeof(abi_ulong) * 8;
8791 unsigned host_bits = sizeof(*host_mask) * 8;
8792 abi_ulong *target_mask;
8793 unsigned i, j;
8794
8795 assert(host_size >= target_size);
8796
8797 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8798 if (!target_mask) {
8799 return -TARGET_EFAULT;
8800 }
8801
8802 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8803 unsigned bit = i * target_bits;
8804 abi_ulong val = 0;
8805
8806 for (j = 0; j < target_bits; j++, bit++) {
8807 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8808 val |= 1UL << j;
8809 }
8810 }
8811 __put_user(val, &target_mask[i]);
8812 }
8813
8814 unlock_user(target_mask, target_addr, target_size);
8815 return 0;
8816 }
8817
8818 #ifdef TARGET_NR_getdents
8819 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8820 {
8821 g_autofree void *hdirp = NULL;
8822 void *tdirp;
8823 int hlen, hoff, toff;
8824 int hreclen, treclen;
8825 off64_t prev_diroff = 0;
8826
8827 hdirp = g_try_malloc(count);
8828 if (!hdirp) {
8829 return -TARGET_ENOMEM;
8830 }
8831
8832 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8833 hlen = sys_getdents(dirfd, hdirp, count);
8834 #else
8835 hlen = sys_getdents64(dirfd, hdirp, count);
8836 #endif
8837
8838 hlen = get_errno(hlen);
8839 if (is_error(hlen)) {
8840 return hlen;
8841 }
8842
8843 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8844 if (!tdirp) {
8845 return -TARGET_EFAULT;
8846 }
8847
8848 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8849 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8850 struct linux_dirent *hde = hdirp + hoff;
8851 #else
8852 struct linux_dirent64 *hde = hdirp + hoff;
8853 #endif
8854 struct target_dirent *tde = tdirp + toff;
8855 int namelen;
8856 uint8_t type;
8857
8858 namelen = strlen(hde->d_name);
8859 hreclen = hde->d_reclen;
8860 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8861 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8862
8863 if (toff + treclen > count) {
8864 /*
8865 * If the host struct is smaller than the target struct, or
8866 * requires less alignment and thus packs into less space,
8867 * then the host can return more entries than we can pass
8868 * on to the guest.
8869 */
8870 if (toff == 0) {
8871 toff = -TARGET_EINVAL; /* result buffer is too small */
8872 break;
8873 }
8874 /*
8875 * Return what we have, resetting the file pointer to the
8876 * location of the first record not returned.
8877 */
8878 lseek64(dirfd, prev_diroff, SEEK_SET);
8879 break;
8880 }
8881
8882 prev_diroff = hde->d_off;
8883 tde->d_ino = tswapal(hde->d_ino);
8884 tde->d_off = tswapal(hde->d_off);
8885 tde->d_reclen = tswap16(treclen);
8886 memcpy(tde->d_name, hde->d_name, namelen + 1);
8887
8888 /*
8889 * The getdents type is in what was formerly a padding byte at the
8890 * end of the structure.
8891 */
8892 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8893 type = *((uint8_t *)hde + hreclen - 1);
8894 #else
8895 type = hde->d_type;
8896 #endif
8897 *((uint8_t *)tde + treclen - 1) = type;
8898 }
8899
8900 unlock_user(tdirp, arg2, toff);
8901 return toff;
8902 }
8903 #endif /* TARGET_NR_getdents */
8904
8905 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8906 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8907 {
8908 g_autofree void *hdirp = NULL;
8909 void *tdirp;
8910 int hlen, hoff, toff;
8911 int hreclen, treclen;
8912 off64_t prev_diroff = 0;
8913
8914 hdirp = g_try_malloc(count);
8915 if (!hdirp) {
8916 return -TARGET_ENOMEM;
8917 }
8918
8919 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8920 if (is_error(hlen)) {
8921 return hlen;
8922 }
8923
8924 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8925 if (!tdirp) {
8926 return -TARGET_EFAULT;
8927 }
8928
8929 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8930 struct linux_dirent64 *hde = hdirp + hoff;
8931 struct target_dirent64 *tde = tdirp + toff;
8932 int namelen;
8933
8934 namelen = strlen(hde->d_name) + 1;
8935 hreclen = hde->d_reclen;
8936 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8937 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8938
8939 if (toff + treclen > count) {
8940 /*
8941 * If the host struct is smaller than the target struct, or
8942 * requires less alignment and thus packs into less space,
8943 * then the host can return more entries than we can pass
8944 * on to the guest.
8945 */
8946 if (toff == 0) {
8947 toff = -TARGET_EINVAL; /* result buffer is too small */
8948 break;
8949 }
8950 /*
8951 * Return what we have, resetting the file pointer to the
8952 * location of the first record not returned.
8953 */
8954 lseek64(dirfd, prev_diroff, SEEK_SET);
8955 break;
8956 }
8957
8958 prev_diroff = hde->d_off;
8959 tde->d_ino = tswap64(hde->d_ino);
8960 tde->d_off = tswap64(hde->d_off);
8961 tde->d_reclen = tswap16(treclen);
8962 tde->d_type = hde->d_type;
8963 memcpy(tde->d_name, hde->d_name, namelen);
8964 }
8965
8966 unlock_user(tdirp, arg2, toff);
8967 return toff;
8968 }
8969 #endif /* TARGET_NR_getdents64 */
8970
8971 #if defined(TARGET_NR_riscv_hwprobe)
8972
8973 #define RISCV_HWPROBE_KEY_MVENDORID 0
8974 #define RISCV_HWPROBE_KEY_MARCHID 1
8975 #define RISCV_HWPROBE_KEY_MIMPID 2
8976
8977 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8978 #define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8979
8980 #define RISCV_HWPROBE_KEY_IMA_EXT_0 4
8981 #define RISCV_HWPROBE_IMA_FD (1 << 0)
8982 #define RISCV_HWPROBE_IMA_C (1 << 1)
8983
8984 #define RISCV_HWPROBE_KEY_CPUPERF_0 5
8985 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
8986 #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
8987 #define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
8988 #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
8989 #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8990 #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
8991
8992 struct riscv_hwprobe {
8993 abi_llong key;
8994 abi_ullong value;
8995 };
8996
8997 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8998 struct riscv_hwprobe *pair,
8999 size_t pair_count)
9000 {
9001 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9002
9003 for (; pair_count > 0; pair_count--, pair++) {
9004 abi_llong key;
9005 abi_ullong value;
9006 __put_user(0, &pair->value);
9007 __get_user(key, &pair->key);
9008 switch (key) {
9009 case RISCV_HWPROBE_KEY_MVENDORID:
9010 __put_user(cfg->mvendorid, &pair->value);
9011 break;
9012 case RISCV_HWPROBE_KEY_MARCHID:
9013 __put_user(cfg->marchid, &pair->value);
9014 break;
9015 case RISCV_HWPROBE_KEY_MIMPID:
9016 __put_user(cfg->mimpid, &pair->value);
9017 break;
9018 case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9019 value = riscv_has_ext(env, RVI) &&
9020 riscv_has_ext(env, RVM) &&
9021 riscv_has_ext(env, RVA) ?
9022 RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9023 __put_user(value, &pair->value);
9024 break;
9025 case RISCV_HWPROBE_KEY_IMA_EXT_0:
9026 value = riscv_has_ext(env, RVF) &&
9027 riscv_has_ext(env, RVD) ?
9028 RISCV_HWPROBE_IMA_FD : 0;
9029 value |= riscv_has_ext(env, RVC) ?
9030 RISCV_HWPROBE_IMA_C : pair->value;
9031 __put_user(value, &pair->value);
9032 break;
9033 case RISCV_HWPROBE_KEY_CPUPERF_0:
9034 __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9035 break;
9036 default:
9037 __put_user(-1, &pair->key);
9038 break;
9039 }
9040 }
9041 }
9042
9043 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9044 {
9045 int ret, i, tmp;
9046 size_t host_mask_size, target_mask_size;
9047 unsigned long *host_mask;
9048
9049 /*
9050 * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9051 * arg3 contains the cpu count.
9052 */
9053 tmp = (8 * sizeof(abi_ulong));
9054 target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9055 host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9056 ~(sizeof(*host_mask) - 1);
9057
9058 host_mask = alloca(host_mask_size);
9059
9060 ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9061 arg4, target_mask_size);
9062 if (ret != 0) {
9063 return ret;
9064 }
9065
9066 for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9067 if (host_mask[i] != 0) {
9068 return 0;
9069 }
9070 }
9071 return -TARGET_EINVAL;
9072 }
9073
9074 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9075 abi_long arg2, abi_long arg3,
9076 abi_long arg4, abi_long arg5)
9077 {
9078 int ret;
9079 struct riscv_hwprobe *host_pairs;
9080
9081 /* flags must be 0 */
9082 if (arg5 != 0) {
9083 return -TARGET_EINVAL;
9084 }
9085
9086 /* check cpu_set */
9087 if (arg3 != 0) {
9088 ret = cpu_set_valid(arg3, arg4);
9089 if (ret != 0) {
9090 return ret;
9091 }
9092 } else if (arg4 != 0) {
9093 return -TARGET_EINVAL;
9094 }
9095
9096 /* no pairs */
9097 if (arg2 == 0) {
9098 return 0;
9099 }
9100
9101 host_pairs = lock_user(VERIFY_WRITE, arg1,
9102 sizeof(*host_pairs) * (size_t)arg2, 0);
9103 if (host_pairs == NULL) {
9104 return -TARGET_EFAULT;
9105 }
9106 risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9107 unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9108 return 0;
9109 }
9110 #endif /* TARGET_NR_riscv_hwprobe */
9111
9112 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9113 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9114 #endif
9115
9116 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9117 #define __NR_sys_open_tree __NR_open_tree
9118 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9119 unsigned int, __flags)
9120 #endif
9121
9122 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9123 #define __NR_sys_move_mount __NR_move_mount
9124 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9125 int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9126 #endif
9127
9128 /* This is an internal helper for do_syscall so that it is easier
9129 * to have a single return point, so that actions, such as logging
9130 * of syscall results, can be performed.
9131 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9132 */
9133 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9134 abi_long arg2, abi_long arg3, abi_long arg4,
9135 abi_long arg5, abi_long arg6, abi_long arg7,
9136 abi_long arg8)
9137 {
9138 CPUState *cpu = env_cpu(cpu_env);
9139 abi_long ret;
9140 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9141 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9142 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9143 || defined(TARGET_NR_statx)
9144 struct stat st;
9145 #endif
9146 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9147 || defined(TARGET_NR_fstatfs)
9148 struct statfs stfs;
9149 #endif
9150 void *p;
9151
9152 switch(num) {
9153 case TARGET_NR_exit:
9154 /* In old applications this may be used to implement _exit(2).
9155 However in threaded applications it is used for thread termination,
9156 and _exit_group is used for application termination.
9157 Do thread termination if we have more then one thread. */
9158
9159 if (block_signals()) {
9160 return -QEMU_ERESTARTSYS;
9161 }
9162
9163 pthread_mutex_lock(&clone_lock);
9164
9165 if (CPU_NEXT(first_cpu)) {
9166 TaskState *ts = cpu->opaque;
9167
9168 if (ts->child_tidptr) {
9169 put_user_u32(0, ts->child_tidptr);
9170 do_sys_futex(g2h(cpu, ts->child_tidptr),
9171 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9172 }
9173
9174 object_unparent(OBJECT(cpu));
9175 object_unref(OBJECT(cpu));
9176 /*
9177 * At this point the CPU should be unrealized and removed
9178 * from cpu lists. We can clean-up the rest of the thread
9179 * data without the lock held.
9180 */
9181
9182 pthread_mutex_unlock(&clone_lock);
9183
9184 thread_cpu = NULL;
9185 g_free(ts);
9186 rcu_unregister_thread();
9187 pthread_exit(NULL);
9188 }
9189
9190 pthread_mutex_unlock(&clone_lock);
9191 preexit_cleanup(cpu_env, arg1);
9192 _exit(arg1);
9193 return 0; /* avoid warning */
9194 case TARGET_NR_read:
9195 if (arg2 == 0 && arg3 == 0) {
9196 return get_errno(safe_read(arg1, 0, 0));
9197 } else {
9198 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9199 return -TARGET_EFAULT;
9200 ret = get_errno(safe_read(arg1, p, arg3));
9201 if (ret >= 0 &&
9202 fd_trans_host_to_target_data(arg1)) {
9203 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9204 }
9205 unlock_user(p, arg2, ret);
9206 }
9207 return ret;
9208 case TARGET_NR_write:
9209 if (arg2 == 0 && arg3 == 0) {
9210 return get_errno(safe_write(arg1, 0, 0));
9211 }
9212 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9213 return -TARGET_EFAULT;
9214 if (fd_trans_target_to_host_data(arg1)) {
9215 void *copy = g_malloc(arg3);
9216 memcpy(copy, p, arg3);
9217 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9218 if (ret >= 0) {
9219 ret = get_errno(safe_write(arg1, copy, ret));
9220 }
9221 g_free(copy);
9222 } else {
9223 ret = get_errno(safe_write(arg1, p, arg3));
9224 }
9225 unlock_user(p, arg2, 0);
9226 return ret;
9227
9228 #ifdef TARGET_NR_open
9229 case TARGET_NR_open:
9230 if (!(p = lock_user_string(arg1)))
9231 return -TARGET_EFAULT;
9232 ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9233 target_to_host_bitmask(arg2, fcntl_flags_tbl),
9234 arg3, true));
9235 fd_trans_unregister(ret);
9236 unlock_user(p, arg1, 0);
9237 return ret;
9238 #endif
9239 case TARGET_NR_openat:
9240 if (!(p = lock_user_string(arg2)))
9241 return -TARGET_EFAULT;
9242 ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9243 target_to_host_bitmask(arg3, fcntl_flags_tbl),
9244 arg4, true));
9245 fd_trans_unregister(ret);
9246 unlock_user(p, arg2, 0);
9247 return ret;
9248 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9249 case TARGET_NR_name_to_handle_at:
9250 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9251 return ret;
9252 #endif
9253 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9254 case TARGET_NR_open_by_handle_at:
9255 ret = do_open_by_handle_at(arg1, arg2, arg3);
9256 fd_trans_unregister(ret);
9257 return ret;
9258 #endif
9259 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9260 case TARGET_NR_pidfd_open:
9261 return get_errno(pidfd_open(arg1, arg2));
9262 #endif
9263 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9264 case TARGET_NR_pidfd_send_signal:
9265 {
9266 siginfo_t uinfo, *puinfo;
9267
9268 if (arg3) {
9269 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9270 if (!p) {
9271 return -TARGET_EFAULT;
9272 }
9273 target_to_host_siginfo(&uinfo, p);
9274 unlock_user(p, arg3, 0);
9275 puinfo = &uinfo;
9276 } else {
9277 puinfo = NULL;
9278 }
9279 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9280 puinfo, arg4));
9281 }
9282 return ret;
9283 #endif
9284 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9285 case TARGET_NR_pidfd_getfd:
9286 return get_errno(pidfd_getfd(arg1, arg2, arg3));
9287 #endif
9288 case TARGET_NR_close:
9289 fd_trans_unregister(arg1);
9290 return get_errno(close(arg1));
9291 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9292 case TARGET_NR_close_range:
9293 ret = get_errno(sys_close_range(arg1, arg2, arg3));
9294 if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9295 abi_long fd, maxfd;
9296 maxfd = MIN(arg2, target_fd_max);
9297 for (fd = arg1; fd < maxfd; fd++) {
9298 fd_trans_unregister(fd);
9299 }
9300 }
9301 return ret;
9302 #endif
9303
9304 case TARGET_NR_brk:
9305 return do_brk(arg1);
9306 #ifdef TARGET_NR_fork
9307 case TARGET_NR_fork:
9308 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9309 #endif
9310 #ifdef TARGET_NR_waitpid
9311 case TARGET_NR_waitpid:
9312 {
9313 int status;
9314 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9315 if (!is_error(ret) && arg2 && ret
9316 && put_user_s32(host_to_target_waitstatus(status), arg2))
9317 return -TARGET_EFAULT;
9318 }
9319 return ret;
9320 #endif
9321 #ifdef TARGET_NR_waitid
9322 case TARGET_NR_waitid:
9323 {
9324 siginfo_t info;
9325 info.si_pid = 0;
9326 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9327 if (!is_error(ret) && arg3 && info.si_pid != 0) {
9328 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9329 return -TARGET_EFAULT;
9330 host_to_target_siginfo(p, &info);
9331 unlock_user(p, arg3, sizeof(target_siginfo_t));
9332 }
9333 }
9334 return ret;
9335 #endif
9336 #ifdef TARGET_NR_creat /* not on alpha */
9337 case TARGET_NR_creat:
9338 if (!(p = lock_user_string(arg1)))
9339 return -TARGET_EFAULT;
9340 ret = get_errno(creat(p, arg2));
9341 fd_trans_unregister(ret);
9342 unlock_user(p, arg1, 0);
9343 return ret;
9344 #endif
9345 #ifdef TARGET_NR_link
9346 case TARGET_NR_link:
9347 {
9348 void * p2;
9349 p = lock_user_string(arg1);
9350 p2 = lock_user_string(arg2);
9351 if (!p || !p2)
9352 ret = -TARGET_EFAULT;
9353 else
9354 ret = get_errno(link(p, p2));
9355 unlock_user(p2, arg2, 0);
9356 unlock_user(p, arg1, 0);
9357 }
9358 return ret;
9359 #endif
9360 #if defined(TARGET_NR_linkat)
9361 case TARGET_NR_linkat:
9362 {
9363 void * p2 = NULL;
9364 if (!arg2 || !arg4)
9365 return -TARGET_EFAULT;
9366 p = lock_user_string(arg2);
9367 p2 = lock_user_string(arg4);
9368 if (!p || !p2)
9369 ret = -TARGET_EFAULT;
9370 else
9371 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9372 unlock_user(p, arg2, 0);
9373 unlock_user(p2, arg4, 0);
9374 }
9375 return ret;
9376 #endif
9377 #ifdef TARGET_NR_unlink
9378 case TARGET_NR_unlink:
9379 if (!(p = lock_user_string(arg1)))
9380 return -TARGET_EFAULT;
9381 ret = get_errno(unlink(p));
9382 unlock_user(p, arg1, 0);
9383 return ret;
9384 #endif
9385 #if defined(TARGET_NR_unlinkat)
9386 case TARGET_NR_unlinkat:
9387 if (!(p = lock_user_string(arg2)))
9388 return -TARGET_EFAULT;
9389 ret = get_errno(unlinkat(arg1, p, arg3));
9390 unlock_user(p, arg2, 0);
9391 return ret;
9392 #endif
9393 case TARGET_NR_execveat:
9394 return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9395 case TARGET_NR_execve:
9396 return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9397 case TARGET_NR_chdir:
9398 if (!(p = lock_user_string(arg1)))
9399 return -TARGET_EFAULT;
9400 ret = get_errno(chdir(p));
9401 unlock_user(p, arg1, 0);
9402 return ret;
9403 #ifdef TARGET_NR_time
9404 case TARGET_NR_time:
9405 {
9406 time_t host_time;
9407 ret = get_errno(time(&host_time));
9408 if (!is_error(ret)
9409 && arg1
9410 && put_user_sal(host_time, arg1))
9411 return -TARGET_EFAULT;
9412 }
9413 return ret;
9414 #endif
9415 #ifdef TARGET_NR_mknod
9416 case TARGET_NR_mknod:
9417 if (!(p = lock_user_string(arg1)))
9418 return -TARGET_EFAULT;
9419 ret = get_errno(mknod(p, arg2, arg3));
9420 unlock_user(p, arg1, 0);
9421 return ret;
9422 #endif
9423 #if defined(TARGET_NR_mknodat)
9424 case TARGET_NR_mknodat:
9425 if (!(p = lock_user_string(arg2)))
9426 return -TARGET_EFAULT;
9427 ret = get_errno(mknodat(arg1, p, arg3, arg4));
9428 unlock_user(p, arg2, 0);
9429 return ret;
9430 #endif
9431 #ifdef TARGET_NR_chmod
9432 case TARGET_NR_chmod:
9433 if (!(p = lock_user_string(arg1)))
9434 return -TARGET_EFAULT;
9435 ret = get_errno(chmod(p, arg2));
9436 unlock_user(p, arg1, 0);
9437 return ret;
9438 #endif
9439 #ifdef TARGET_NR_lseek
9440 case TARGET_NR_lseek:
9441 return get_errno(lseek(arg1, arg2, arg3));
9442 #endif
9443 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9444 /* Alpha specific */
9445 case TARGET_NR_getxpid:
9446 cpu_env->ir[IR_A4] = getppid();
9447 return get_errno(getpid());
9448 #endif
9449 #ifdef TARGET_NR_getpid
9450 case TARGET_NR_getpid:
9451 return get_errno(getpid());
9452 #endif
9453 case TARGET_NR_mount:
9454 {
9455 /* need to look at the data field */
9456 void *p2, *p3;
9457
9458 if (arg1) {
9459 p = lock_user_string(arg1);
9460 if (!p) {
9461 return -TARGET_EFAULT;
9462 }
9463 } else {
9464 p = NULL;
9465 }
9466
9467 p2 = lock_user_string(arg2);
9468 if (!p2) {
9469 if (arg1) {
9470 unlock_user(p, arg1, 0);
9471 }
9472 return -TARGET_EFAULT;
9473 }
9474
9475 if (arg3) {
9476 p3 = lock_user_string(arg3);
9477 if (!p3) {
9478 if (arg1) {
9479 unlock_user(p, arg1, 0);
9480 }
9481 unlock_user(p2, arg2, 0);
9482 return -TARGET_EFAULT;
9483 }
9484 } else {
9485 p3 = NULL;
9486 }
9487
9488 /* FIXME - arg5 should be locked, but it isn't clear how to
9489 * do that since it's not guaranteed to be a NULL-terminated
9490 * string.
9491 */
9492 if (!arg5) {
9493 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9494 } else {
9495 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9496 }
9497 ret = get_errno(ret);
9498
9499 if (arg1) {
9500 unlock_user(p, arg1, 0);
9501 }
9502 unlock_user(p2, arg2, 0);
9503 if (arg3) {
9504 unlock_user(p3, arg3, 0);
9505 }
9506 }
9507 return ret;
9508 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9509 #if defined(TARGET_NR_umount)
9510 case TARGET_NR_umount:
9511 #endif
9512 #if defined(TARGET_NR_oldumount)
9513 case TARGET_NR_oldumount:
9514 #endif
9515 if (!(p = lock_user_string(arg1)))
9516 return -TARGET_EFAULT;
9517 ret = get_errno(umount(p));
9518 unlock_user(p, arg1, 0);
9519 return ret;
9520 #endif
9521 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9522 case TARGET_NR_move_mount:
9523 {
9524 void *p2, *p4;
9525
9526 if (!arg2 || !arg4) {
9527 return -TARGET_EFAULT;
9528 }
9529
9530 p2 = lock_user_string(arg2);
9531 if (!p2) {
9532 return -TARGET_EFAULT;
9533 }
9534
9535 p4 = lock_user_string(arg4);
9536 if (!p4) {
9537 unlock_user(p2, arg2, 0);
9538 return -TARGET_EFAULT;
9539 }
9540 ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9541
9542 unlock_user(p2, arg2, 0);
9543 unlock_user(p4, arg4, 0);
9544
9545 return ret;
9546 }
9547 #endif
9548 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9549 case TARGET_NR_open_tree:
9550 {
9551 void *p2;
9552 int host_flags;
9553
9554 if (!arg2) {
9555 return -TARGET_EFAULT;
9556 }
9557
9558 p2 = lock_user_string(arg2);
9559 if (!p2) {
9560 return -TARGET_EFAULT;
9561 }
9562
9563 host_flags = arg3 & ~TARGET_O_CLOEXEC;
9564 if (arg3 & TARGET_O_CLOEXEC) {
9565 host_flags |= O_CLOEXEC;
9566 }
9567
9568 ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9569
9570 unlock_user(p2, arg2, 0);
9571
9572 return ret;
9573 }
9574 #endif
9575 #ifdef TARGET_NR_stime /* not on alpha */
9576 case TARGET_NR_stime:
9577 {
9578 struct timespec ts;
9579 ts.tv_nsec = 0;
9580 if (get_user_sal(ts.tv_sec, arg1)) {
9581 return -TARGET_EFAULT;
9582 }
9583 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9584 }
9585 #endif
9586 #ifdef TARGET_NR_alarm /* not on alpha */
9587 case TARGET_NR_alarm:
9588 return alarm(arg1);
9589 #endif
9590 #ifdef TARGET_NR_pause /* not on alpha */
9591 case TARGET_NR_pause:
9592 if (!block_signals()) {
9593 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9594 }
9595 return -TARGET_EINTR;
9596 #endif
9597 #ifdef TARGET_NR_utime
9598 case TARGET_NR_utime:
9599 {
9600 struct utimbuf tbuf, *host_tbuf;
9601 struct target_utimbuf *target_tbuf;
9602 if (arg2) {
9603 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9604 return -TARGET_EFAULT;
9605 tbuf.actime = tswapal(target_tbuf->actime);
9606 tbuf.modtime = tswapal(target_tbuf->modtime);
9607 unlock_user_struct(target_tbuf, arg2, 0);
9608 host_tbuf = &tbuf;
9609 } else {
9610 host_tbuf = NULL;
9611 }
9612 if (!(p = lock_user_string(arg1)))
9613 return -TARGET_EFAULT;
9614 ret = get_errno(utime(p, host_tbuf));
9615 unlock_user(p, arg1, 0);
9616 }
9617 return ret;
9618 #endif
9619 #ifdef TARGET_NR_utimes
9620 case TARGET_NR_utimes:
9621 {
9622 struct timeval *tvp, tv[2];
9623 if (arg2) {
9624 if (copy_from_user_timeval(&tv[0], arg2)
9625 || copy_from_user_timeval(&tv[1],
9626 arg2 + sizeof(struct target_timeval)))
9627 return -TARGET_EFAULT;
9628 tvp = tv;
9629 } else {
9630 tvp = NULL;
9631 }
9632 if (!(p = lock_user_string(arg1)))
9633 return -TARGET_EFAULT;
9634 ret = get_errno(utimes(p, tvp));
9635 unlock_user(p, arg1, 0);
9636 }
9637 return ret;
9638 #endif
9639 #if defined(TARGET_NR_futimesat)
9640 case TARGET_NR_futimesat:
9641 {
9642 struct timeval *tvp, tv[2];
9643 if (arg3) {
9644 if (copy_from_user_timeval(&tv[0], arg3)
9645 || copy_from_user_timeval(&tv[1],
9646 arg3 + sizeof(struct target_timeval)))
9647 return -TARGET_EFAULT;
9648 tvp = tv;
9649 } else {
9650 tvp = NULL;
9651 }
9652 if (!(p = lock_user_string(arg2))) {
9653 return -TARGET_EFAULT;
9654 }
9655 ret = get_errno(futimesat(arg1, path(p), tvp));
9656 unlock_user(p, arg2, 0);
9657 }
9658 return ret;
9659 #endif
9660 #ifdef TARGET_NR_access
9661 case TARGET_NR_access:
9662 if (!(p = lock_user_string(arg1))) {
9663 return -TARGET_EFAULT;
9664 }
9665 ret = get_errno(access(path(p), arg2));
9666 unlock_user(p, arg1, 0);
9667 return ret;
9668 #endif
9669 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9670 case TARGET_NR_faccessat:
9671 if (!(p = lock_user_string(arg2))) {
9672 return -TARGET_EFAULT;
9673 }
9674 ret = get_errno(faccessat(arg1, p, arg3, 0));
9675 unlock_user(p, arg2, 0);
9676 return ret;
9677 #endif
9678 #if defined(TARGET_NR_faccessat2)
9679 case TARGET_NR_faccessat2:
9680 if (!(p = lock_user_string(arg2))) {
9681 return -TARGET_EFAULT;
9682 }
9683 ret = get_errno(faccessat(arg1, p, arg3, arg4));
9684 unlock_user(p, arg2, 0);
9685 return ret;
9686 #endif
9687 #ifdef TARGET_NR_nice /* not on alpha */
9688 case TARGET_NR_nice:
9689 return get_errno(nice(arg1));
9690 #endif
9691 case TARGET_NR_sync:
9692 sync();
9693 return 0;
9694 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9695 case TARGET_NR_syncfs:
9696 return get_errno(syncfs(arg1));
9697 #endif
9698 case TARGET_NR_kill:
9699 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9700 #ifdef TARGET_NR_rename
9701 case TARGET_NR_rename:
9702 {
9703 void *p2;
9704 p = lock_user_string(arg1);
9705 p2 = lock_user_string(arg2);
9706 if (!p || !p2)
9707 ret = -TARGET_EFAULT;
9708 else
9709 ret = get_errno(rename(p, p2));
9710 unlock_user(p2, arg2, 0);
9711 unlock_user(p, arg1, 0);
9712 }
9713 return ret;
9714 #endif
9715 #if defined(TARGET_NR_renameat)
9716 case TARGET_NR_renameat:
9717 {
9718 void *p2;
9719 p = lock_user_string(arg2);
9720 p2 = lock_user_string(arg4);
9721 if (!p || !p2)
9722 ret = -TARGET_EFAULT;
9723 else
9724 ret = get_errno(renameat(arg1, p, arg3, p2));
9725 unlock_user(p2, arg4, 0);
9726 unlock_user(p, arg2, 0);
9727 }
9728 return ret;
9729 #endif
9730 #if defined(TARGET_NR_renameat2)
9731 case TARGET_NR_renameat2:
9732 {
9733 void *p2;
9734 p = lock_user_string(arg2);
9735 p2 = lock_user_string(arg4);
9736 if (!p || !p2) {
9737 ret = -TARGET_EFAULT;
9738 } else {
9739 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9740 }
9741 unlock_user(p2, arg4, 0);
9742 unlock_user(p, arg2, 0);
9743 }
9744 return ret;
9745 #endif
9746 #ifdef TARGET_NR_mkdir
9747 case TARGET_NR_mkdir:
9748 if (!(p = lock_user_string(arg1)))
9749 return -TARGET_EFAULT;
9750 ret = get_errno(mkdir(p, arg2));
9751 unlock_user(p, arg1, 0);
9752 return ret;
9753 #endif
9754 #if defined(TARGET_NR_mkdirat)
9755 case TARGET_NR_mkdirat:
9756 if (!(p = lock_user_string(arg2)))
9757 return -TARGET_EFAULT;
9758 ret = get_errno(mkdirat(arg1, p, arg3));
9759 unlock_user(p, arg2, 0);
9760 return ret;
9761 #endif
9762 #ifdef TARGET_NR_rmdir
9763 case TARGET_NR_rmdir:
9764 if (!(p = lock_user_string(arg1)))
9765 return -TARGET_EFAULT;
9766 ret = get_errno(rmdir(p));
9767 unlock_user(p, arg1, 0);
9768 return ret;
9769 #endif
9770 case TARGET_NR_dup:
9771 ret = get_errno(dup(arg1));
9772 if (ret >= 0) {
9773 fd_trans_dup(arg1, ret);
9774 }
9775 return ret;
9776 #ifdef TARGET_NR_pipe
9777 case TARGET_NR_pipe:
9778 return do_pipe(cpu_env, arg1, 0, 0);
9779 #endif
9780 #ifdef TARGET_NR_pipe2
9781 case TARGET_NR_pipe2:
9782 return do_pipe(cpu_env, arg1,
9783 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9784 #endif
9785 case TARGET_NR_times:
9786 {
9787 struct target_tms *tmsp;
9788 struct tms tms;
9789 ret = get_errno(times(&tms));
9790 if (arg1) {
9791 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9792 if (!tmsp)
9793 return -TARGET_EFAULT;
9794 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9795 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9796 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9797 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9798 }
9799 if (!is_error(ret))
9800 ret = host_to_target_clock_t(ret);
9801 }
9802 return ret;
9803 case TARGET_NR_acct:
9804 if (arg1 == 0) {
9805 ret = get_errno(acct(NULL));
9806 } else {
9807 if (!(p = lock_user_string(arg1))) {
9808 return -TARGET_EFAULT;
9809 }
9810 ret = get_errno(acct(path(p)));
9811 unlock_user(p, arg1, 0);
9812 }
9813 return ret;
9814 #ifdef TARGET_NR_umount2
9815 case TARGET_NR_umount2:
9816 if (!(p = lock_user_string(arg1)))
9817 return -TARGET_EFAULT;
9818 ret = get_errno(umount2(p, arg2));
9819 unlock_user(p, arg1, 0);
9820 return ret;
9821 #endif
9822 case TARGET_NR_ioctl:
9823 return do_ioctl(arg1, arg2, arg3);
9824 #ifdef TARGET_NR_fcntl
9825 case TARGET_NR_fcntl:
9826 return do_fcntl(arg1, arg2, arg3);
9827 #endif
9828 case TARGET_NR_setpgid:
9829 return get_errno(setpgid(arg1, arg2));
9830 case TARGET_NR_umask:
9831 return get_errno(umask(arg1));
9832 case TARGET_NR_chroot:
9833 if (!(p = lock_user_string(arg1)))
9834 return -TARGET_EFAULT;
9835 ret = get_errno(chroot(p));
9836 unlock_user(p, arg1, 0);
9837 return ret;
9838 #ifdef TARGET_NR_dup2
9839 case TARGET_NR_dup2:
9840 ret = get_errno(dup2(arg1, arg2));
9841 if (ret >= 0) {
9842 fd_trans_dup(arg1, arg2);
9843 }
9844 return ret;
9845 #endif
9846 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9847 case TARGET_NR_dup3:
9848 {
9849 int host_flags;
9850
9851 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9852 return -EINVAL;
9853 }
9854 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9855 ret = get_errno(dup3(arg1, arg2, host_flags));
9856 if (ret >= 0) {
9857 fd_trans_dup(arg1, arg2);
9858 }
9859 return ret;
9860 }
9861 #endif
9862 #ifdef TARGET_NR_getppid /* not on alpha */
9863 case TARGET_NR_getppid:
9864 return get_errno(getppid());
9865 #endif
9866 #ifdef TARGET_NR_getpgrp
9867 case TARGET_NR_getpgrp:
9868 return get_errno(getpgrp());
9869 #endif
9870 case TARGET_NR_setsid:
9871 return get_errno(setsid());
9872 #ifdef TARGET_NR_sigaction
9873 case TARGET_NR_sigaction:
9874 {
9875 #if defined(TARGET_MIPS)
9876 struct target_sigaction act, oact, *pact, *old_act;
9877
9878 if (arg2) {
9879 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9880 return -TARGET_EFAULT;
9881 act._sa_handler = old_act->_sa_handler;
9882 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9883 act.sa_flags = old_act->sa_flags;
9884 unlock_user_struct(old_act, arg2, 0);
9885 pact = &act;
9886 } else {
9887 pact = NULL;
9888 }
9889
9890 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9891
9892 if (!is_error(ret) && arg3) {
9893 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9894 return -TARGET_EFAULT;
9895 old_act->_sa_handler = oact._sa_handler;
9896 old_act->sa_flags = oact.sa_flags;
9897 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9898 old_act->sa_mask.sig[1] = 0;
9899 old_act->sa_mask.sig[2] = 0;
9900 old_act->sa_mask.sig[3] = 0;
9901 unlock_user_struct(old_act, arg3, 1);
9902 }
9903 #else
9904 struct target_old_sigaction *old_act;
9905 struct target_sigaction act, oact, *pact;
9906 if (arg2) {
9907 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9908 return -TARGET_EFAULT;
9909 act._sa_handler = old_act->_sa_handler;
9910 target_siginitset(&act.sa_mask, old_act->sa_mask);
9911 act.sa_flags = old_act->sa_flags;
9912 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9913 act.sa_restorer = old_act->sa_restorer;
9914 #endif
9915 unlock_user_struct(old_act, arg2, 0);
9916 pact = &act;
9917 } else {
9918 pact = NULL;
9919 }
9920 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9921 if (!is_error(ret) && arg3) {
9922 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9923 return -TARGET_EFAULT;
9924 old_act->_sa_handler = oact._sa_handler;
9925 old_act->sa_mask = oact.sa_mask.sig[0];
9926 old_act->sa_flags = oact.sa_flags;
9927 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9928 old_act->sa_restorer = oact.sa_restorer;
9929 #endif
9930 unlock_user_struct(old_act, arg3, 1);
9931 }
9932 #endif
9933 }
9934 return ret;
9935 #endif
9936 case TARGET_NR_rt_sigaction:
9937 {
9938 /*
9939 * For Alpha and SPARC this is a 5 argument syscall, with
9940 * a 'restorer' parameter which must be copied into the
9941 * sa_restorer field of the sigaction struct.
9942 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9943 * and arg5 is the sigsetsize.
9944 */
9945 #if defined(TARGET_ALPHA)
9946 target_ulong sigsetsize = arg4;
9947 target_ulong restorer = arg5;
9948 #elif defined(TARGET_SPARC)
9949 target_ulong restorer = arg4;
9950 target_ulong sigsetsize = arg5;
9951 #else
9952 target_ulong sigsetsize = arg4;
9953 target_ulong restorer = 0;
9954 #endif
9955 struct target_sigaction *act = NULL;
9956 struct target_sigaction *oact = NULL;
9957
9958 if (sigsetsize != sizeof(target_sigset_t)) {
9959 return -TARGET_EINVAL;
9960 }
9961 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9962 return -TARGET_EFAULT;
9963 }
9964 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9965 ret = -TARGET_EFAULT;
9966 } else {
9967 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9968 if (oact) {
9969 unlock_user_struct(oact, arg3, 1);
9970 }
9971 }
9972 if (act) {
9973 unlock_user_struct(act, arg2, 0);
9974 }
9975 }
9976 return ret;
9977 #ifdef TARGET_NR_sgetmask /* not on alpha */
9978 case TARGET_NR_sgetmask:
9979 {
9980 sigset_t cur_set;
9981 abi_ulong target_set;
9982 ret = do_sigprocmask(0, NULL, &cur_set);
9983 if (!ret) {
9984 host_to_target_old_sigset(&target_set, &cur_set);
9985 ret = target_set;
9986 }
9987 }
9988 return ret;
9989 #endif
9990 #ifdef TARGET_NR_ssetmask /* not on alpha */
9991 case TARGET_NR_ssetmask:
9992 {
9993 sigset_t set, oset;
9994 abi_ulong target_set = arg1;
9995 target_to_host_old_sigset(&set, &target_set);
9996 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9997 if (!ret) {
9998 host_to_target_old_sigset(&target_set, &oset);
9999 ret = target_set;
10000 }
10001 }
10002 return ret;
10003 #endif
10004 #ifdef TARGET_NR_sigprocmask
10005 case TARGET_NR_sigprocmask:
10006 {
10007 #if defined(TARGET_ALPHA)
10008 sigset_t set, oldset;
10009 abi_ulong mask;
10010 int how;
10011
10012 switch (arg1) {
10013 case TARGET_SIG_BLOCK:
10014 how = SIG_BLOCK;
10015 break;
10016 case TARGET_SIG_UNBLOCK:
10017 how = SIG_UNBLOCK;
10018 break;
10019 case TARGET_SIG_SETMASK:
10020 how = SIG_SETMASK;
10021 break;
10022 default:
10023 return -TARGET_EINVAL;
10024 }
10025 mask = arg2;
10026 target_to_host_old_sigset(&set, &mask);
10027
10028 ret = do_sigprocmask(how, &set, &oldset);
10029 if (!is_error(ret)) {
10030 host_to_target_old_sigset(&mask, &oldset);
10031 ret = mask;
10032 cpu_env->ir[IR_V0] = 0; /* force no error */
10033 }
10034 #else
10035 sigset_t set, oldset, *set_ptr;
10036 int how;
10037
10038 if (arg2) {
10039 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10040 if (!p) {
10041 return -TARGET_EFAULT;
10042 }
10043 target_to_host_old_sigset(&set, p);
10044 unlock_user(p, arg2, 0);
10045 set_ptr = &set;
10046 switch (arg1) {
10047 case TARGET_SIG_BLOCK:
10048 how = SIG_BLOCK;
10049 break;
10050 case TARGET_SIG_UNBLOCK:
10051 how = SIG_UNBLOCK;
10052 break;
10053 case TARGET_SIG_SETMASK:
10054 how = SIG_SETMASK;
10055 break;
10056 default:
10057 return -TARGET_EINVAL;
10058 }
10059 } else {
10060 how = 0;
10061 set_ptr = NULL;
10062 }
10063 ret = do_sigprocmask(how, set_ptr, &oldset);
10064 if (!is_error(ret) && arg3) {
10065 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10066 return -TARGET_EFAULT;
10067 host_to_target_old_sigset(p, &oldset);
10068 unlock_user(p, arg3, sizeof(target_sigset_t));
10069 }
10070 #endif
10071 }
10072 return ret;
10073 #endif
10074 case TARGET_NR_rt_sigprocmask:
10075 {
10076 int how = arg1;
10077 sigset_t set, oldset, *set_ptr;
10078
10079 if (arg4 != sizeof(target_sigset_t)) {
10080 return -TARGET_EINVAL;
10081 }
10082
10083 if (arg2) {
10084 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10085 if (!p) {
10086 return -TARGET_EFAULT;
10087 }
10088 target_to_host_sigset(&set, p);
10089 unlock_user(p, arg2, 0);
10090 set_ptr = &set;
10091 switch(how) {
10092 case TARGET_SIG_BLOCK:
10093 how = SIG_BLOCK;
10094 break;
10095 case TARGET_SIG_UNBLOCK:
10096 how = SIG_UNBLOCK;
10097 break;
10098 case TARGET_SIG_SETMASK:
10099 how = SIG_SETMASK;
10100 break;
10101 default:
10102 return -TARGET_EINVAL;
10103 }
10104 } else {
10105 how = 0;
10106 set_ptr = NULL;
10107 }
10108 ret = do_sigprocmask(how, set_ptr, &oldset);
10109 if (!is_error(ret) && arg3) {
10110 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10111 return -TARGET_EFAULT;
10112 host_to_target_sigset(p, &oldset);
10113 unlock_user(p, arg3, sizeof(target_sigset_t));
10114 }
10115 }
10116 return ret;
10117 #ifdef TARGET_NR_sigpending
10118 case TARGET_NR_sigpending:
10119 {
10120 sigset_t set;
10121 ret = get_errno(sigpending(&set));
10122 if (!is_error(ret)) {
10123 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10124 return -TARGET_EFAULT;
10125 host_to_target_old_sigset(p, &set);
10126 unlock_user(p, arg1, sizeof(target_sigset_t));
10127 }
10128 }
10129 return ret;
10130 #endif
10131 case TARGET_NR_rt_sigpending:
10132 {
10133 sigset_t set;
10134
10135 /* Yes, this check is >, not != like most. We follow the kernel's
10136 * logic and it does it like this because it implements
10137 * NR_sigpending through the same code path, and in that case
10138 * the old_sigset_t is smaller in size.
10139 */
10140 if (arg2 > sizeof(target_sigset_t)) {
10141 return -TARGET_EINVAL;
10142 }
10143
10144 ret = get_errno(sigpending(&set));
10145 if (!is_error(ret)) {
10146 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10147 return -TARGET_EFAULT;
10148 host_to_target_sigset(p, &set);
10149 unlock_user(p, arg1, sizeof(target_sigset_t));
10150 }
10151 }
10152 return ret;
10153 #ifdef TARGET_NR_sigsuspend
10154 case TARGET_NR_sigsuspend:
10155 {
10156 sigset_t *set;
10157
10158 #if defined(TARGET_ALPHA)
10159 TaskState *ts = cpu->opaque;
10160 /* target_to_host_old_sigset will bswap back */
10161 abi_ulong mask = tswapal(arg1);
10162 set = &ts->sigsuspend_mask;
10163 target_to_host_old_sigset(set, &mask);
10164 #else
10165 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10166 if (ret != 0) {
10167 return ret;
10168 }
10169 #endif
10170 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10171 finish_sigsuspend_mask(ret);
10172 }
10173 return ret;
10174 #endif
10175 case TARGET_NR_rt_sigsuspend:
10176 {
10177 sigset_t *set;
10178
10179 ret = process_sigsuspend_mask(&set, arg1, arg2);
10180 if (ret != 0) {
10181 return ret;
10182 }
10183 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10184 finish_sigsuspend_mask(ret);
10185 }
10186 return ret;
10187 #ifdef TARGET_NR_rt_sigtimedwait
10188 case TARGET_NR_rt_sigtimedwait:
10189 {
10190 sigset_t set;
10191 struct timespec uts, *puts;
10192 siginfo_t uinfo;
10193
10194 if (arg4 != sizeof(target_sigset_t)) {
10195 return -TARGET_EINVAL;
10196 }
10197
10198 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10199 return -TARGET_EFAULT;
10200 target_to_host_sigset(&set, p);
10201 unlock_user(p, arg1, 0);
10202 if (arg3) {
10203 puts = &uts;
10204 if (target_to_host_timespec(puts, arg3)) {
10205 return -TARGET_EFAULT;
10206 }
10207 } else {
10208 puts = NULL;
10209 }
10210 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10211 SIGSET_T_SIZE));
10212 if (!is_error(ret)) {
10213 if (arg2) {
10214 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10215 0);
10216 if (!p) {
10217 return -TARGET_EFAULT;
10218 }
10219 host_to_target_siginfo(p, &uinfo);
10220 unlock_user(p, arg2, sizeof(target_siginfo_t));
10221 }
10222 ret = host_to_target_signal(ret);
10223 }
10224 }
10225 return ret;
10226 #endif
10227 #ifdef TARGET_NR_rt_sigtimedwait_time64
10228 case TARGET_NR_rt_sigtimedwait_time64:
10229 {
10230 sigset_t set;
10231 struct timespec uts, *puts;
10232 siginfo_t uinfo;
10233
10234 if (arg4 != sizeof(target_sigset_t)) {
10235 return -TARGET_EINVAL;
10236 }
10237
10238 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10239 if (!p) {
10240 return -TARGET_EFAULT;
10241 }
10242 target_to_host_sigset(&set, p);
10243 unlock_user(p, arg1, 0);
10244 if (arg3) {
10245 puts = &uts;
10246 if (target_to_host_timespec64(puts, arg3)) {
10247 return -TARGET_EFAULT;
10248 }
10249 } else {
10250 puts = NULL;
10251 }
10252 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10253 SIGSET_T_SIZE));
10254 if (!is_error(ret)) {
10255 if (arg2) {
10256 p = lock_user(VERIFY_WRITE, arg2,
10257 sizeof(target_siginfo_t), 0);
10258 if (!p) {
10259 return -TARGET_EFAULT;
10260 }
10261 host_to_target_siginfo(p, &uinfo);
10262 unlock_user(p, arg2, sizeof(target_siginfo_t));
10263 }
10264 ret = host_to_target_signal(ret);
10265 }
10266 }
10267 return ret;
10268 #endif
10269 case TARGET_NR_rt_sigqueueinfo:
10270 {
10271 siginfo_t uinfo;
10272
10273 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10274 if (!p) {
10275 return -TARGET_EFAULT;
10276 }
10277 target_to_host_siginfo(&uinfo, p);
10278 unlock_user(p, arg3, 0);
10279 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10280 }
10281 return ret;
10282 case TARGET_NR_rt_tgsigqueueinfo:
10283 {
10284 siginfo_t uinfo;
10285
10286 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10287 if (!p) {
10288 return -TARGET_EFAULT;
10289 }
10290 target_to_host_siginfo(&uinfo, p);
10291 unlock_user(p, arg4, 0);
10292 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10293 }
10294 return ret;
10295 #ifdef TARGET_NR_sigreturn
10296 case TARGET_NR_sigreturn:
10297 if (block_signals()) {
10298 return -QEMU_ERESTARTSYS;
10299 }
10300 return do_sigreturn(cpu_env);
10301 #endif
10302 case TARGET_NR_rt_sigreturn:
10303 if (block_signals()) {
10304 return -QEMU_ERESTARTSYS;
10305 }
10306 return do_rt_sigreturn(cpu_env);
10307 case TARGET_NR_sethostname:
10308 if (!(p = lock_user_string(arg1)))
10309 return -TARGET_EFAULT;
10310 ret = get_errno(sethostname(p, arg2));
10311 unlock_user(p, arg1, 0);
10312 return ret;
10313 #ifdef TARGET_NR_setrlimit
10314 case TARGET_NR_setrlimit:
10315 {
10316 int resource = target_to_host_resource(arg1);
10317 struct target_rlimit *target_rlim;
10318 struct rlimit rlim;
10319 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10320 return -TARGET_EFAULT;
10321 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10322 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10323 unlock_user_struct(target_rlim, arg2, 0);
10324 /*
10325 * If we just passed through resource limit settings for memory then
10326 * they would also apply to QEMU's own allocations, and QEMU will
10327 * crash or hang or die if its allocations fail. Ideally we would
10328 * track the guest allocations in QEMU and apply the limits ourselves.
10329 * For now, just tell the guest the call succeeded but don't actually
10330 * limit anything.
10331 */
10332 if (resource != RLIMIT_AS &&
10333 resource != RLIMIT_DATA &&
10334 resource != RLIMIT_STACK) {
10335 return get_errno(setrlimit(resource, &rlim));
10336 } else {
10337 return 0;
10338 }
10339 }
10340 #endif
10341 #ifdef TARGET_NR_getrlimit
10342 case TARGET_NR_getrlimit:
10343 {
10344 int resource = target_to_host_resource(arg1);
10345 struct target_rlimit *target_rlim;
10346 struct rlimit rlim;
10347
10348 ret = get_errno(getrlimit(resource, &rlim));
10349 if (!is_error(ret)) {
10350 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10351 return -TARGET_EFAULT;
10352 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10353 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10354 unlock_user_struct(target_rlim, arg2, 1);
10355 }
10356 }
10357 return ret;
10358 #endif
10359 case TARGET_NR_getrusage:
10360 {
10361 struct rusage rusage;
10362 ret = get_errno(getrusage(arg1, &rusage));
10363 if (!is_error(ret)) {
10364 ret = host_to_target_rusage(arg2, &rusage);
10365 }
10366 }
10367 return ret;
10368 #if defined(TARGET_NR_gettimeofday)
10369 case TARGET_NR_gettimeofday:
10370 {
10371 struct timeval tv;
10372 struct timezone tz;
10373
10374 ret = get_errno(gettimeofday(&tv, &tz));
10375 if (!is_error(ret)) {
10376 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10377 return -TARGET_EFAULT;
10378 }
10379 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10380 return -TARGET_EFAULT;
10381 }
10382 }
10383 }
10384 return ret;
10385 #endif
10386 #if defined(TARGET_NR_settimeofday)
10387 case TARGET_NR_settimeofday:
10388 {
10389 struct timeval tv, *ptv = NULL;
10390 struct timezone tz, *ptz = NULL;
10391
10392 if (arg1) {
10393 if (copy_from_user_timeval(&tv, arg1)) {
10394 return -TARGET_EFAULT;
10395 }
10396 ptv = &tv;
10397 }
10398
10399 if (arg2) {
10400 if (copy_from_user_timezone(&tz, arg2)) {
10401 return -TARGET_EFAULT;
10402 }
10403 ptz = &tz;
10404 }
10405
10406 return get_errno(settimeofday(ptv, ptz));
10407 }
10408 #endif
10409 #if defined(TARGET_NR_select)
10410 case TARGET_NR_select:
10411 #if defined(TARGET_WANT_NI_OLD_SELECT)
10412 /* some architectures used to have old_select here
10413 * but now ENOSYS it.
10414 */
10415 ret = -TARGET_ENOSYS;
10416 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10417 ret = do_old_select(arg1);
10418 #else
10419 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10420 #endif
10421 return ret;
10422 #endif
10423 #ifdef TARGET_NR_pselect6
10424 case TARGET_NR_pselect6:
10425 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10426 #endif
10427 #ifdef TARGET_NR_pselect6_time64
10428 case TARGET_NR_pselect6_time64:
10429 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10430 #endif
10431 #ifdef TARGET_NR_symlink
10432 case TARGET_NR_symlink:
10433 {
10434 void *p2;
10435 p = lock_user_string(arg1);
10436 p2 = lock_user_string(arg2);
10437 if (!p || !p2)
10438 ret = -TARGET_EFAULT;
10439 else
10440 ret = get_errno(symlink(p, p2));
10441 unlock_user(p2, arg2, 0);
10442 unlock_user(p, arg1, 0);
10443 }
10444 return ret;
10445 #endif
10446 #if defined(TARGET_NR_symlinkat)
10447 case TARGET_NR_symlinkat:
10448 {
10449 void *p2;
10450 p = lock_user_string(arg1);
10451 p2 = lock_user_string(arg3);
10452 if (!p || !p2)
10453 ret = -TARGET_EFAULT;
10454 else
10455 ret = get_errno(symlinkat(p, arg2, p2));
10456 unlock_user(p2, arg3, 0);
10457 unlock_user(p, arg1, 0);
10458 }
10459 return ret;
10460 #endif
10461 #ifdef TARGET_NR_readlink
10462 case TARGET_NR_readlink:
10463 {
10464 void *p2;
10465 p = lock_user_string(arg1);
10466 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10467 ret = get_errno(do_guest_readlink(p, p2, arg3));
10468 unlock_user(p2, arg2, ret);
10469 unlock_user(p, arg1, 0);
10470 }
10471 return ret;
10472 #endif
10473 #if defined(TARGET_NR_readlinkat)
10474 case TARGET_NR_readlinkat:
10475 {
10476 void *p2;
10477 p = lock_user_string(arg2);
10478 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10479 if (!p || !p2) {
10480 ret = -TARGET_EFAULT;
10481 } else if (!arg4) {
10482 /* Short circuit this for the magic exe check. */
10483 ret = -TARGET_EINVAL;
10484 } else if (is_proc_myself((const char *)p, "exe")) {
10485 /*
10486 * Don't worry about sign mismatch as earlier mapping
10487 * logic would have thrown a bad address error.
10488 */
10489 ret = MIN(strlen(exec_path), arg4);
10490 /* We cannot NUL terminate the string. */
10491 memcpy(p2, exec_path, ret);
10492 } else {
10493 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10494 }
10495 unlock_user(p2, arg3, ret);
10496 unlock_user(p, arg2, 0);
10497 }
10498 return ret;
10499 #endif
10500 #ifdef TARGET_NR_swapon
10501 case TARGET_NR_swapon:
10502 if (!(p = lock_user_string(arg1)))
10503 return -TARGET_EFAULT;
10504 ret = get_errno(swapon(p, arg2));
10505 unlock_user(p, arg1, 0);
10506 return ret;
10507 #endif
10508 case TARGET_NR_reboot:
10509 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10510 /* arg4 must be ignored in all other cases */
10511 p = lock_user_string(arg4);
10512 if (!p) {
10513 return -TARGET_EFAULT;
10514 }
10515 ret = get_errno(reboot(arg1, arg2, arg3, p));
10516 unlock_user(p, arg4, 0);
10517 } else {
10518 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10519 }
10520 return ret;
10521 #ifdef TARGET_NR_mmap
10522 case TARGET_NR_mmap:
10523 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10524 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10525 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10526 || defined(TARGET_S390X)
10527 {
10528 abi_ulong *v;
10529 abi_ulong v1, v2, v3, v4, v5, v6;
10530 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10531 return -TARGET_EFAULT;
10532 v1 = tswapal(v[0]);
10533 v2 = tswapal(v[1]);
10534 v3 = tswapal(v[2]);
10535 v4 = tswapal(v[3]);
10536 v5 = tswapal(v[4]);
10537 v6 = tswapal(v[5]);
10538 unlock_user(v, arg1, 0);
10539 ret = get_errno(target_mmap(v1, v2, v3,
10540 target_to_host_bitmask(v4, mmap_flags_tbl),
10541 v5, v6));
10542 }
10543 #else
10544 /* mmap pointers are always untagged */
10545 ret = get_errno(target_mmap(arg1, arg2, arg3,
10546 target_to_host_bitmask(arg4, mmap_flags_tbl),
10547 arg5,
10548 arg6));
10549 #endif
10550 return ret;
10551 #endif
10552 #ifdef TARGET_NR_mmap2
10553 case TARGET_NR_mmap2:
10554 #ifndef MMAP_SHIFT
10555 #define MMAP_SHIFT 12
10556 #endif
10557 ret = target_mmap(arg1, arg2, arg3,
10558 target_to_host_bitmask(arg4, mmap_flags_tbl),
10559 arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10560 return get_errno(ret);
10561 #endif
10562 case TARGET_NR_munmap:
10563 arg1 = cpu_untagged_addr(cpu, arg1);
10564 return get_errno(target_munmap(arg1, arg2));
10565 case TARGET_NR_mprotect:
10566 arg1 = cpu_untagged_addr(cpu, arg1);
10567 {
10568 TaskState *ts = cpu->opaque;
10569 /* Special hack to detect libc making the stack executable. */
10570 if ((arg3 & PROT_GROWSDOWN)
10571 && arg1 >= ts->info->stack_limit
10572 && arg1 <= ts->info->start_stack) {
10573 arg3 &= ~PROT_GROWSDOWN;
10574 arg2 = arg2 + arg1 - ts->info->stack_limit;
10575 arg1 = ts->info->stack_limit;
10576 }
10577 }
10578 return get_errno(target_mprotect(arg1, arg2, arg3));
10579 #ifdef TARGET_NR_mremap
10580 case TARGET_NR_mremap:
10581 arg1 = cpu_untagged_addr(cpu, arg1);
10582 /* mremap new_addr (arg5) is always untagged */
10583 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10584 #endif
10585 /* ??? msync/mlock/munlock are broken for softmmu. */
10586 #ifdef TARGET_NR_msync
10587 case TARGET_NR_msync:
10588 return get_errno(msync(g2h(cpu, arg1), arg2,
10589 target_to_host_msync_arg(arg3)));
10590 #endif
10591 #ifdef TARGET_NR_mlock
10592 case TARGET_NR_mlock:
10593 return get_errno(mlock(g2h(cpu, arg1), arg2));
10594 #endif
10595 #ifdef TARGET_NR_munlock
10596 case TARGET_NR_munlock:
10597 return get_errno(munlock(g2h(cpu, arg1), arg2));
10598 #endif
10599 #ifdef TARGET_NR_mlockall
10600 case TARGET_NR_mlockall:
10601 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10602 #endif
10603 #ifdef TARGET_NR_munlockall
10604 case TARGET_NR_munlockall:
10605 return get_errno(munlockall());
10606 #endif
10607 #ifdef TARGET_NR_truncate
10608 case TARGET_NR_truncate:
10609 if (!(p = lock_user_string(arg1)))
10610 return -TARGET_EFAULT;
10611 ret = get_errno(truncate(p, arg2));
10612 unlock_user(p, arg1, 0);
10613 return ret;
10614 #endif
10615 #ifdef TARGET_NR_ftruncate
10616 case TARGET_NR_ftruncate:
10617 return get_errno(ftruncate(arg1, arg2));
10618 #endif
10619 case TARGET_NR_fchmod:
10620 return get_errno(fchmod(arg1, arg2));
10621 #if defined(TARGET_NR_fchmodat)
10622 case TARGET_NR_fchmodat:
10623 if (!(p = lock_user_string(arg2)))
10624 return -TARGET_EFAULT;
10625 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10626 unlock_user(p, arg2, 0);
10627 return ret;
10628 #endif
10629 case TARGET_NR_getpriority:
10630 /* Note that negative values are valid for getpriority, so we must
10631 differentiate based on errno settings. */
10632 errno = 0;
10633 ret = getpriority(arg1, arg2);
10634 if (ret == -1 && errno != 0) {
10635 return -host_to_target_errno(errno);
10636 }
10637 #ifdef TARGET_ALPHA
10638 /* Return value is the unbiased priority. Signal no error. */
10639 cpu_env->ir[IR_V0] = 0;
10640 #else
10641 /* Return value is a biased priority to avoid negative numbers. */
10642 ret = 20 - ret;
10643 #endif
10644 return ret;
10645 case TARGET_NR_setpriority:
10646 return get_errno(setpriority(arg1, arg2, arg3));
10647 #ifdef TARGET_NR_statfs
10648 case TARGET_NR_statfs:
10649 if (!(p = lock_user_string(arg1))) {
10650 return -TARGET_EFAULT;
10651 }
10652 ret = get_errno(statfs(path(p), &stfs));
10653 unlock_user(p, arg1, 0);
10654 convert_statfs:
10655 if (!is_error(ret)) {
10656 struct target_statfs *target_stfs;
10657
10658 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10659 return -TARGET_EFAULT;
10660 __put_user(stfs.f_type, &target_stfs->f_type);
10661 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10662 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10663 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10664 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10665 __put_user(stfs.f_files, &target_stfs->f_files);
10666 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10667 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10668 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10669 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10670 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10671 #ifdef _STATFS_F_FLAGS
10672 __put_user(stfs.f_flags, &target_stfs->f_flags);
10673 #else
10674 __put_user(0, &target_stfs->f_flags);
10675 #endif
10676 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10677 unlock_user_struct(target_stfs, arg2, 1);
10678 }
10679 return ret;
10680 #endif
10681 #ifdef TARGET_NR_fstatfs
10682 case TARGET_NR_fstatfs:
10683 ret = get_errno(fstatfs(arg1, &stfs));
10684 goto convert_statfs;
10685 #endif
10686 #ifdef TARGET_NR_statfs64
10687 case TARGET_NR_statfs64:
10688 if (!(p = lock_user_string(arg1))) {
10689 return -TARGET_EFAULT;
10690 }
10691 ret = get_errno(statfs(path(p), &stfs));
10692 unlock_user(p, arg1, 0);
10693 convert_statfs64:
10694 if (!is_error(ret)) {
10695 struct target_statfs64 *target_stfs;
10696
10697 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10698 return -TARGET_EFAULT;
10699 __put_user(stfs.f_type, &target_stfs->f_type);
10700 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10701 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10702 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10703 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10704 __put_user(stfs.f_files, &target_stfs->f_files);
10705 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10706 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10707 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10708 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10709 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10710 #ifdef _STATFS_F_FLAGS
10711 __put_user(stfs.f_flags, &target_stfs->f_flags);
10712 #else
10713 __put_user(0, &target_stfs->f_flags);
10714 #endif
10715 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10716 unlock_user_struct(target_stfs, arg3, 1);
10717 }
10718 return ret;
10719 case TARGET_NR_fstatfs64:
10720 ret = get_errno(fstatfs(arg1, &stfs));
10721 goto convert_statfs64;
10722 #endif
10723 #ifdef TARGET_NR_socketcall
10724 case TARGET_NR_socketcall:
10725 return do_socketcall(arg1, arg2);
10726 #endif
10727 #ifdef TARGET_NR_accept
10728 case TARGET_NR_accept:
10729 return do_accept4(arg1, arg2, arg3, 0);
10730 #endif
10731 #ifdef TARGET_NR_accept4
10732 case TARGET_NR_accept4:
10733 return do_accept4(arg1, arg2, arg3, arg4);
10734 #endif
10735 #ifdef TARGET_NR_bind
10736 case TARGET_NR_bind:
10737 return do_bind(arg1, arg2, arg3);
10738 #endif
10739 #ifdef TARGET_NR_connect
10740 case TARGET_NR_connect:
10741 return do_connect(arg1, arg2, arg3);
10742 #endif
10743 #ifdef TARGET_NR_getpeername
10744 case TARGET_NR_getpeername:
10745 return do_getpeername(arg1, arg2, arg3);
10746 #endif
10747 #ifdef TARGET_NR_getsockname
10748 case TARGET_NR_getsockname:
10749 return do_getsockname(arg1, arg2, arg3);
10750 #endif
10751 #ifdef TARGET_NR_getsockopt
10752 case TARGET_NR_getsockopt:
10753 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10754 #endif
10755 #ifdef TARGET_NR_listen
10756 case TARGET_NR_listen:
10757 return get_errno(listen(arg1, arg2));
10758 #endif
10759 #ifdef TARGET_NR_recv
10760 case TARGET_NR_recv:
10761 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10762 #endif
10763 #ifdef TARGET_NR_recvfrom
10764 case TARGET_NR_recvfrom:
10765 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10766 #endif
10767 #ifdef TARGET_NR_recvmsg
10768 case TARGET_NR_recvmsg:
10769 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10770 #endif
10771 #ifdef TARGET_NR_send
10772 case TARGET_NR_send:
10773 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10774 #endif
10775 #ifdef TARGET_NR_sendmsg
10776 case TARGET_NR_sendmsg:
10777 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10778 #endif
10779 #ifdef TARGET_NR_sendmmsg
10780 case TARGET_NR_sendmmsg:
10781 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10782 #endif
10783 #ifdef TARGET_NR_recvmmsg
10784 case TARGET_NR_recvmmsg:
10785 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10786 #endif
10787 #ifdef TARGET_NR_sendto
10788 case TARGET_NR_sendto:
10789 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10790 #endif
10791 #ifdef TARGET_NR_shutdown
10792 case TARGET_NR_shutdown:
10793 return get_errno(shutdown(arg1, arg2));
10794 #endif
10795 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10796 case TARGET_NR_getrandom:
10797 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10798 if (!p) {
10799 return -TARGET_EFAULT;
10800 }
10801 ret = get_errno(getrandom(p, arg2, arg3));
10802 unlock_user(p, arg1, ret);
10803 return ret;
10804 #endif
10805 #ifdef TARGET_NR_socket
10806 case TARGET_NR_socket:
10807 return do_socket(arg1, arg2, arg3);
10808 #endif
10809 #ifdef TARGET_NR_socketpair
10810 case TARGET_NR_socketpair:
10811 return do_socketpair(arg1, arg2, arg3, arg4);
10812 #endif
10813 #ifdef TARGET_NR_setsockopt
10814 case TARGET_NR_setsockopt:
10815 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10816 #endif
10817 #if defined(TARGET_NR_syslog)
10818 case TARGET_NR_syslog:
10819 {
10820 int len = arg2;
10821
10822 switch (arg1) {
10823 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10824 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10825 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10826 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10827 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10828 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10829 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10830 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10831 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10832 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10833 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10834 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10835 {
10836 if (len < 0) {
10837 return -TARGET_EINVAL;
10838 }
10839 if (len == 0) {
10840 return 0;
10841 }
10842 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10843 if (!p) {
10844 return -TARGET_EFAULT;
10845 }
10846 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10847 unlock_user(p, arg2, arg3);
10848 }
10849 return ret;
10850 default:
10851 return -TARGET_EINVAL;
10852 }
10853 }
10854 break;
10855 #endif
10856 case TARGET_NR_setitimer:
10857 {
10858 struct itimerval value, ovalue, *pvalue;
10859
10860 if (arg2) {
10861 pvalue = &value;
10862 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10863 || copy_from_user_timeval(&pvalue->it_value,
10864 arg2 + sizeof(struct target_timeval)))
10865 return -TARGET_EFAULT;
10866 } else {
10867 pvalue = NULL;
10868 }
10869 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10870 if (!is_error(ret) && arg3) {
10871 if (copy_to_user_timeval(arg3,
10872 &ovalue.it_interval)
10873 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10874 &ovalue.it_value))
10875 return -TARGET_EFAULT;
10876 }
10877 }
10878 return ret;
10879 case TARGET_NR_getitimer:
10880 {
10881 struct itimerval value;
10882
10883 ret = get_errno(getitimer(arg1, &value));
10884 if (!is_error(ret) && arg2) {
10885 if (copy_to_user_timeval(arg2,
10886 &value.it_interval)
10887 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10888 &value.it_value))
10889 return -TARGET_EFAULT;
10890 }
10891 }
10892 return ret;
10893 #ifdef TARGET_NR_stat
10894 case TARGET_NR_stat:
10895 if (!(p = lock_user_string(arg1))) {
10896 return -TARGET_EFAULT;
10897 }
10898 ret = get_errno(stat(path(p), &st));
10899 unlock_user(p, arg1, 0);
10900 goto do_stat;
10901 #endif
10902 #ifdef TARGET_NR_lstat
10903 case TARGET_NR_lstat:
10904 if (!(p = lock_user_string(arg1))) {
10905 return -TARGET_EFAULT;
10906 }
10907 ret = get_errno(lstat(path(p), &st));
10908 unlock_user(p, arg1, 0);
10909 goto do_stat;
10910 #endif
10911 #ifdef TARGET_NR_fstat
10912 case TARGET_NR_fstat:
10913 {
10914 ret = get_errno(fstat(arg1, &st));
10915 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10916 do_stat:
10917 #endif
10918 if (!is_error(ret)) {
10919 struct target_stat *target_st;
10920
10921 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10922 return -TARGET_EFAULT;
10923 memset(target_st, 0, sizeof(*target_st));
10924 __put_user(st.st_dev, &target_st->st_dev);
10925 __put_user(st.st_ino, &target_st->st_ino);
10926 __put_user(st.st_mode, &target_st->st_mode);
10927 __put_user(st.st_uid, &target_st->st_uid);
10928 __put_user(st.st_gid, &target_st->st_gid);
10929 __put_user(st.st_nlink, &target_st->st_nlink);
10930 __put_user(st.st_rdev, &target_st->st_rdev);
10931 __put_user(st.st_size, &target_st->st_size);
10932 __put_user(st.st_blksize, &target_st->st_blksize);
10933 __put_user(st.st_blocks, &target_st->st_blocks);
10934 __put_user(st.st_atime, &target_st->target_st_atime);
10935 __put_user(st.st_mtime, &target_st->target_st_mtime);
10936 __put_user(st.st_ctime, &target_st->target_st_ctime);
10937 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10938 __put_user(st.st_atim.tv_nsec,
10939 &target_st->target_st_atime_nsec);
10940 __put_user(st.st_mtim.tv_nsec,
10941 &target_st->target_st_mtime_nsec);
10942 __put_user(st.st_ctim.tv_nsec,
10943 &target_st->target_st_ctime_nsec);
10944 #endif
10945 unlock_user_struct(target_st, arg2, 1);
10946 }
10947 }
10948 return ret;
10949 #endif
10950 case TARGET_NR_vhangup:
10951 return get_errno(vhangup());
10952 #ifdef TARGET_NR_syscall
10953 case TARGET_NR_syscall:
10954 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10955 arg6, arg7, arg8, 0);
10956 #endif
10957 #if defined(TARGET_NR_wait4)
10958 case TARGET_NR_wait4:
10959 {
10960 int status;
10961 abi_long status_ptr = arg2;
10962 struct rusage rusage, *rusage_ptr;
10963 abi_ulong target_rusage = arg4;
10964 abi_long rusage_err;
10965 if (target_rusage)
10966 rusage_ptr = &rusage;
10967 else
10968 rusage_ptr = NULL;
10969 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10970 if (!is_error(ret)) {
10971 if (status_ptr && ret) {
10972 status = host_to_target_waitstatus(status);
10973 if (put_user_s32(status, status_ptr))
10974 return -TARGET_EFAULT;
10975 }
10976 if (target_rusage) {
10977 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10978 if (rusage_err) {
10979 ret = rusage_err;
10980 }
10981 }
10982 }
10983 }
10984 return ret;
10985 #endif
10986 #ifdef TARGET_NR_swapoff
10987 case TARGET_NR_swapoff:
10988 if (!(p = lock_user_string(arg1)))
10989 return -TARGET_EFAULT;
10990 ret = get_errno(swapoff(p));
10991 unlock_user(p, arg1, 0);
10992 return ret;
10993 #endif
10994 case TARGET_NR_sysinfo:
10995 {
10996 struct target_sysinfo *target_value;
10997 struct sysinfo value;
10998 ret = get_errno(sysinfo(&value));
10999 if (!is_error(ret) && arg1)
11000 {
11001 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11002 return -TARGET_EFAULT;
11003 __put_user(value.uptime, &target_value->uptime);
11004 __put_user(value.loads[0], &target_value->loads[0]);
11005 __put_user(value.loads[1], &target_value->loads[1]);
11006 __put_user(value.loads[2], &target_value->loads[2]);
11007 __put_user(value.totalram, &target_value->totalram);
11008 __put_user(value.freeram, &target_value->freeram);
11009 __put_user(value.sharedram, &target_value->sharedram);
11010 __put_user(value.bufferram, &target_value->bufferram);
11011 __put_user(value.totalswap, &target_value->totalswap);
11012 __put_user(value.freeswap, &target_value->freeswap);
11013 __put_user(value.procs, &target_value->procs);
11014 __put_user(value.totalhigh, &target_value->totalhigh);
11015 __put_user(value.freehigh, &target_value->freehigh);
11016 __put_user(value.mem_unit, &target_value->mem_unit);
11017 unlock_user_struct(target_value, arg1, 1);
11018 }
11019 }
11020 return ret;
11021 #ifdef TARGET_NR_ipc
11022 case TARGET_NR_ipc:
11023 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11024 #endif
11025 #ifdef TARGET_NR_semget
11026 case TARGET_NR_semget:
11027 return get_errno(semget(arg1, arg2, arg3));
11028 #endif
11029 #ifdef TARGET_NR_semop
11030 case TARGET_NR_semop:
11031 return do_semtimedop(arg1, arg2, arg3, 0, false);
11032 #endif
11033 #ifdef TARGET_NR_semtimedop
11034 case TARGET_NR_semtimedop:
11035 return do_semtimedop(arg1, arg2, arg3, arg4, false);
11036 #endif
11037 #ifdef TARGET_NR_semtimedop_time64
11038 case TARGET_NR_semtimedop_time64:
11039 return do_semtimedop(arg1, arg2, arg3, arg4, true);
11040 #endif
11041 #ifdef TARGET_NR_semctl
11042 case TARGET_NR_semctl:
11043 return do_semctl(arg1, arg2, arg3, arg4);
11044 #endif
11045 #ifdef TARGET_NR_msgctl
11046 case TARGET_NR_msgctl:
11047 return do_msgctl(arg1, arg2, arg3);
11048 #endif
11049 #ifdef TARGET_NR_msgget
11050 case TARGET_NR_msgget:
11051 return get_errno(msgget(arg1, arg2));
11052 #endif
11053 #ifdef TARGET_NR_msgrcv
11054 case TARGET_NR_msgrcv:
11055 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11056 #endif
11057 #ifdef TARGET_NR_msgsnd
11058 case TARGET_NR_msgsnd:
11059 return do_msgsnd(arg1, arg2, arg3, arg4);
11060 #endif
11061 #ifdef TARGET_NR_shmget
11062 case TARGET_NR_shmget:
11063 return get_errno(shmget(arg1, arg2, arg3));
11064 #endif
11065 #ifdef TARGET_NR_shmctl
11066 case TARGET_NR_shmctl:
11067 return do_shmctl(arg1, arg2, arg3);
11068 #endif
11069 #ifdef TARGET_NR_shmat
11070 case TARGET_NR_shmat:
11071 return do_shmat(cpu_env, arg1, arg2, arg3);
11072 #endif
11073 #ifdef TARGET_NR_shmdt
11074 case TARGET_NR_shmdt:
11075 return do_shmdt(arg1);
11076 #endif
11077 case TARGET_NR_fsync:
11078 return get_errno(fsync(arg1));
11079 case TARGET_NR_clone:
11080 /* Linux manages to have three different orderings for its
11081 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11082 * match the kernel's CONFIG_CLONE_* settings.
11083 * Microblaze is further special in that it uses a sixth
11084 * implicit argument to clone for the TLS pointer.
11085 */
11086 #if defined(TARGET_MICROBLAZE)
11087 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11088 #elif defined(TARGET_CLONE_BACKWARDS)
11089 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11090 #elif defined(TARGET_CLONE_BACKWARDS2)
11091 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11092 #else
11093 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11094 #endif
11095 return ret;
11096 #ifdef __NR_exit_group
11097 /* new thread calls */
11098 case TARGET_NR_exit_group:
11099 preexit_cleanup(cpu_env, arg1);
11100 return get_errno(exit_group(arg1));
11101 #endif
11102 case TARGET_NR_setdomainname:
11103 if (!(p = lock_user_string(arg1)))
11104 return -TARGET_EFAULT;
11105 ret = get_errno(setdomainname(p, arg2));
11106 unlock_user(p, arg1, 0);
11107 return ret;
11108 case TARGET_NR_uname:
11109 /* no need to transcode because we use the linux syscall */
11110 {
11111 struct new_utsname * buf;
11112
11113 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11114 return -TARGET_EFAULT;
11115 ret = get_errno(sys_uname(buf));
11116 if (!is_error(ret)) {
11117 /* Overwrite the native machine name with whatever is being
11118 emulated. */
11119 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11120 sizeof(buf->machine));
11121 /* Allow the user to override the reported release. */
11122 if (qemu_uname_release && *qemu_uname_release) {
11123 g_strlcpy(buf->release, qemu_uname_release,
11124 sizeof(buf->release));
11125 }
11126 }
11127 unlock_user_struct(buf, arg1, 1);
11128 }
11129 return ret;
11130 #ifdef TARGET_I386
11131 case TARGET_NR_modify_ldt:
11132 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11133 #if !defined(TARGET_X86_64)
11134 case TARGET_NR_vm86:
11135 return do_vm86(cpu_env, arg1, arg2);
11136 #endif
11137 #endif
11138 #if defined(TARGET_NR_adjtimex)
11139 case TARGET_NR_adjtimex:
11140 {
11141 struct timex host_buf;
11142
11143 if (target_to_host_timex(&host_buf, arg1) != 0) {
11144 return -TARGET_EFAULT;
11145 }
11146 ret = get_errno(adjtimex(&host_buf));
11147 if (!is_error(ret)) {
11148 if (host_to_target_timex(arg1, &host_buf) != 0) {
11149 return -TARGET_EFAULT;
11150 }
11151 }
11152 }
11153 return ret;
11154 #endif
11155 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11156 case TARGET_NR_clock_adjtime:
11157 {
11158 struct timex htx;
11159
11160 if (target_to_host_timex(&htx, arg2) != 0) {
11161 return -TARGET_EFAULT;
11162 }
11163 ret = get_errno(clock_adjtime(arg1, &htx));
11164 if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11165 return -TARGET_EFAULT;
11166 }
11167 }
11168 return ret;
11169 #endif
11170 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11171 case TARGET_NR_clock_adjtime64:
11172 {
11173 struct timex htx;
11174
11175 if (target_to_host_timex64(&htx, arg2) != 0) {
11176 return -TARGET_EFAULT;
11177 }
11178 ret = get_errno(clock_adjtime(arg1, &htx));
11179 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11180 return -TARGET_EFAULT;
11181 }
11182 }
11183 return ret;
11184 #endif
11185 case TARGET_NR_getpgid:
11186 return get_errno(getpgid(arg1));
11187 case TARGET_NR_fchdir:
11188 return get_errno(fchdir(arg1));
11189 case TARGET_NR_personality:
11190 return get_errno(personality(arg1));
11191 #ifdef TARGET_NR__llseek /* Not on alpha */
11192 case TARGET_NR__llseek:
11193 {
11194 int64_t res;
11195 #if !defined(__NR_llseek)
11196 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11197 if (res == -1) {
11198 ret = get_errno(res);
11199 } else {
11200 ret = 0;
11201 }
11202 #else
11203 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11204 #endif
11205 if ((ret == 0) && put_user_s64(res, arg4)) {
11206 return -TARGET_EFAULT;
11207 }
11208 }
11209 return ret;
11210 #endif
11211 #ifdef TARGET_NR_getdents
11212 case TARGET_NR_getdents:
11213 return do_getdents(arg1, arg2, arg3);
11214 #endif /* TARGET_NR_getdents */
11215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11216 case TARGET_NR_getdents64:
11217 return do_getdents64(arg1, arg2, arg3);
11218 #endif /* TARGET_NR_getdents64 */
11219 #if defined(TARGET_NR__newselect)
11220 case TARGET_NR__newselect:
11221 return do_select(arg1, arg2, arg3, arg4, arg5);
11222 #endif
11223 #ifdef TARGET_NR_poll
11224 case TARGET_NR_poll:
11225 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11226 #endif
11227 #ifdef TARGET_NR_ppoll
11228 case TARGET_NR_ppoll:
11229 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11230 #endif
11231 #ifdef TARGET_NR_ppoll_time64
11232 case TARGET_NR_ppoll_time64:
11233 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11234 #endif
11235 case TARGET_NR_flock:
11236 /* NOTE: the flock constant seems to be the same for every
11237 Linux platform */
11238 return get_errno(safe_flock(arg1, arg2));
11239 case TARGET_NR_readv:
11240 {
11241 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11242 if (vec != NULL) {
11243 ret = get_errno(safe_readv(arg1, vec, arg3));
11244 unlock_iovec(vec, arg2, arg3, 1);
11245 } else {
11246 ret = -host_to_target_errno(errno);
11247 }
11248 }
11249 return ret;
11250 case TARGET_NR_writev:
11251 {
11252 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11253 if (vec != NULL) {
11254 ret = get_errno(safe_writev(arg1, vec, arg3));
11255 unlock_iovec(vec, arg2, arg3, 0);
11256 } else {
11257 ret = -host_to_target_errno(errno);
11258 }
11259 }
11260 return ret;
11261 #if defined(TARGET_NR_preadv)
11262 case TARGET_NR_preadv:
11263 {
11264 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11265 if (vec != NULL) {
11266 unsigned long low, high;
11267
11268 target_to_host_low_high(arg4, arg5, &low, &high);
11269 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11270 unlock_iovec(vec, arg2, arg3, 1);
11271 } else {
11272 ret = -host_to_target_errno(errno);
11273 }
11274 }
11275 return ret;
11276 #endif
11277 #if defined(TARGET_NR_pwritev)
11278 case TARGET_NR_pwritev:
11279 {
11280 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11281 if (vec != NULL) {
11282 unsigned long low, high;
11283
11284 target_to_host_low_high(arg4, arg5, &low, &high);
11285 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11286 unlock_iovec(vec, arg2, arg3, 0);
11287 } else {
11288 ret = -host_to_target_errno(errno);
11289 }
11290 }
11291 return ret;
11292 #endif
11293 case TARGET_NR_getsid:
11294 return get_errno(getsid(arg1));
11295 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11296 case TARGET_NR_fdatasync:
11297 return get_errno(fdatasync(arg1));
11298 #endif
11299 case TARGET_NR_sched_getaffinity:
11300 {
11301 unsigned int mask_size;
11302 unsigned long *mask;
11303
11304 /*
11305 * sched_getaffinity needs multiples of ulong, so need to take
11306 * care of mismatches between target ulong and host ulong sizes.
11307 */
11308 if (arg2 & (sizeof(abi_ulong) - 1)) {
11309 return -TARGET_EINVAL;
11310 }
11311 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11312
11313 mask = alloca(mask_size);
11314 memset(mask, 0, mask_size);
11315 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11316
11317 if (!is_error(ret)) {
11318 if (ret > arg2) {
11319 /* More data returned than the caller's buffer will fit.
11320 * This only happens if sizeof(abi_long) < sizeof(long)
11321 * and the caller passed us a buffer holding an odd number
11322 * of abi_longs. If the host kernel is actually using the
11323 * extra 4 bytes then fail EINVAL; otherwise we can just
11324 * ignore them and only copy the interesting part.
11325 */
11326 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11327 if (numcpus > arg2 * 8) {
11328 return -TARGET_EINVAL;
11329 }
11330 ret = arg2;
11331 }
11332
11333 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11334 return -TARGET_EFAULT;
11335 }
11336 }
11337 }
11338 return ret;
11339 case TARGET_NR_sched_setaffinity:
11340 {
11341 unsigned int mask_size;
11342 unsigned long *mask;
11343
11344 /*
11345 * sched_setaffinity needs multiples of ulong, so need to take
11346 * care of mismatches between target ulong and host ulong sizes.
11347 */
11348 if (arg2 & (sizeof(abi_ulong) - 1)) {
11349 return -TARGET_EINVAL;
11350 }
11351 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11352 mask = alloca(mask_size);
11353
11354 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11355 if (ret) {
11356 return ret;
11357 }
11358
11359 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11360 }
11361 case TARGET_NR_getcpu:
11362 {
11363 unsigned cpu, node;
11364 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
11365 arg2 ? &node : NULL,
11366 NULL));
11367 if (is_error(ret)) {
11368 return ret;
11369 }
11370 if (arg1 && put_user_u32(cpu, arg1)) {
11371 return -TARGET_EFAULT;
11372 }
11373 if (arg2 && put_user_u32(node, arg2)) {
11374 return -TARGET_EFAULT;
11375 }
11376 }
11377 return ret;
11378 case TARGET_NR_sched_setparam:
11379 {
11380 struct target_sched_param *target_schp;
11381 struct sched_param schp;
11382
11383 if (arg2 == 0) {
11384 return -TARGET_EINVAL;
11385 }
11386 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11387 return -TARGET_EFAULT;
11388 }
11389 schp.sched_priority = tswap32(target_schp->sched_priority);
11390 unlock_user_struct(target_schp, arg2, 0);
11391 return get_errno(sys_sched_setparam(arg1, &schp));
11392 }
11393 case TARGET_NR_sched_getparam:
11394 {
11395 struct target_sched_param *target_schp;
11396 struct sched_param schp;
11397
11398 if (arg2 == 0) {
11399 return -TARGET_EINVAL;
11400 }
11401 ret = get_errno(sys_sched_getparam(arg1, &schp));
11402 if (!is_error(ret)) {
11403 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11404 return -TARGET_EFAULT;
11405 }
11406 target_schp->sched_priority = tswap32(schp.sched_priority);
11407 unlock_user_struct(target_schp, arg2, 1);
11408 }
11409 }
11410 return ret;
11411 case TARGET_NR_sched_setscheduler:
11412 {
11413 struct target_sched_param *target_schp;
11414 struct sched_param schp;
11415 if (arg3 == 0) {
11416 return -TARGET_EINVAL;
11417 }
11418 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11419 return -TARGET_EFAULT;
11420 }
11421 schp.sched_priority = tswap32(target_schp->sched_priority);
11422 unlock_user_struct(target_schp, arg3, 0);
11423 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11424 }
11425 case TARGET_NR_sched_getscheduler:
11426 return get_errno(sys_sched_getscheduler(arg1));
11427 case TARGET_NR_sched_getattr:
11428 {
11429 struct target_sched_attr *target_scha;
11430 struct sched_attr scha;
11431 if (arg2 == 0) {
11432 return -TARGET_EINVAL;
11433 }
11434 if (arg3 > sizeof(scha)) {
11435 arg3 = sizeof(scha);
11436 }
11437 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11438 if (!is_error(ret)) {
11439 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11440 if (!target_scha) {
11441 return -TARGET_EFAULT;
11442 }
11443 target_scha->size = tswap32(scha.size);
11444 target_scha->sched_policy = tswap32(scha.sched_policy);
11445 target_scha->sched_flags = tswap64(scha.sched_flags);
11446 target_scha->sched_nice = tswap32(scha.sched_nice);
11447 target_scha->sched_priority = tswap32(scha.sched_priority);
11448 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11449 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11450 target_scha->sched_period = tswap64(scha.sched_period);
11451 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11452 target_scha->sched_util_min = tswap32(scha.sched_util_min);
11453 target_scha->sched_util_max = tswap32(scha.sched_util_max);
11454 }
11455 unlock_user(target_scha, arg2, arg3);
11456 }
11457 return ret;
11458 }
11459 case TARGET_NR_sched_setattr:
11460 {
11461 struct target_sched_attr *target_scha;
11462 struct sched_attr scha;
11463 uint32_t size;
11464 int zeroed;
11465 if (arg2 == 0) {
11466 return -TARGET_EINVAL;
11467 }
11468 if (get_user_u32(size, arg2)) {
11469 return -TARGET_EFAULT;
11470 }
11471 if (!size) {
11472 size = offsetof(struct target_sched_attr, sched_util_min);
11473 }
11474 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11475 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11476 return -TARGET_EFAULT;
11477 }
11478 return -TARGET_E2BIG;
11479 }
11480
11481 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11482 if (zeroed < 0) {
11483 return zeroed;
11484 } else if (zeroed == 0) {
11485 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11486 return -TARGET_EFAULT;
11487 }
11488 return -TARGET_E2BIG;
11489 }
11490 if (size > sizeof(struct target_sched_attr)) {
11491 size = sizeof(struct target_sched_attr);
11492 }
11493
11494 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11495 if (!target_scha) {
11496 return -TARGET_EFAULT;
11497 }
11498 scha.size = size;
11499 scha.sched_policy = tswap32(target_scha->sched_policy);
11500 scha.sched_flags = tswap64(target_scha->sched_flags);
11501 scha.sched_nice = tswap32(target_scha->sched_nice);
11502 scha.sched_priority = tswap32(target_scha->sched_priority);
11503 scha.sched_runtime = tswap64(target_scha->sched_runtime);
11504 scha.sched_deadline = tswap64(target_scha->sched_deadline);
11505 scha.sched_period = tswap64(target_scha->sched_period);
11506 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11507 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11508 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11509 }
11510 unlock_user(target_scha, arg2, 0);
11511 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11512 }
11513 case TARGET_NR_sched_yield:
11514 return get_errno(sched_yield());
11515 case TARGET_NR_sched_get_priority_max:
11516 return get_errno(sched_get_priority_max(arg1));
11517 case TARGET_NR_sched_get_priority_min:
11518 return get_errno(sched_get_priority_min(arg1));
11519 #ifdef TARGET_NR_sched_rr_get_interval
11520 case TARGET_NR_sched_rr_get_interval:
11521 {
11522 struct timespec ts;
11523 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11524 if (!is_error(ret)) {
11525 ret = host_to_target_timespec(arg2, &ts);
11526 }
11527 }
11528 return ret;
11529 #endif
11530 #ifdef TARGET_NR_sched_rr_get_interval_time64
11531 case TARGET_NR_sched_rr_get_interval_time64:
11532 {
11533 struct timespec ts;
11534 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11535 if (!is_error(ret)) {
11536 ret = host_to_target_timespec64(arg2, &ts);
11537 }
11538 }
11539 return ret;
11540 #endif
11541 #if defined(TARGET_NR_nanosleep)
11542 case TARGET_NR_nanosleep:
11543 {
11544 struct timespec req, rem;
11545 target_to_host_timespec(&req, arg1);
11546 ret = get_errno(safe_nanosleep(&req, &rem));
11547 if (is_error(ret) && arg2) {
11548 host_to_target_timespec(arg2, &rem);
11549 }
11550 }
11551 return ret;
11552 #endif
11553 case TARGET_NR_prctl:
11554 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11555 break;
11556 #ifdef TARGET_NR_arch_prctl
11557 case TARGET_NR_arch_prctl:
11558 return do_arch_prctl(cpu_env, arg1, arg2);
11559 #endif
11560 #ifdef TARGET_NR_pread64
11561 case TARGET_NR_pread64:
11562 if (regpairs_aligned(cpu_env, num)) {
11563 arg4 = arg5;
11564 arg5 = arg6;
11565 }
11566 if (arg2 == 0 && arg3 == 0) {
11567 /* Special-case NULL buffer and zero length, which should succeed */
11568 p = 0;
11569 } else {
11570 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11571 if (!p) {
11572 return -TARGET_EFAULT;
11573 }
11574 }
11575 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11576 unlock_user(p, arg2, ret);
11577 return ret;
11578 case TARGET_NR_pwrite64:
11579 if (regpairs_aligned(cpu_env, num)) {
11580 arg4 = arg5;
11581 arg5 = arg6;
11582 }
11583 if (arg2 == 0 && arg3 == 0) {
11584 /* Special-case NULL buffer and zero length, which should succeed */
11585 p = 0;
11586 } else {
11587 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11588 if (!p) {
11589 return -TARGET_EFAULT;
11590 }
11591 }
11592 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11593 unlock_user(p, arg2, 0);
11594 return ret;
11595 #endif
11596 case TARGET_NR_getcwd:
11597 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11598 return -TARGET_EFAULT;
11599 ret = get_errno(sys_getcwd1(p, arg2));
11600 unlock_user(p, arg1, ret);
11601 return ret;
11602 case TARGET_NR_capget:
11603 case TARGET_NR_capset:
11604 {
11605 struct target_user_cap_header *target_header;
11606 struct target_user_cap_data *target_data = NULL;
11607 struct __user_cap_header_struct header;
11608 struct __user_cap_data_struct data[2];
11609 struct __user_cap_data_struct *dataptr = NULL;
11610 int i, target_datalen;
11611 int data_items = 1;
11612
11613 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11614 return -TARGET_EFAULT;
11615 }
11616 header.version = tswap32(target_header->version);
11617 header.pid = tswap32(target_header->pid);
11618
11619 if (header.version != _LINUX_CAPABILITY_VERSION) {
11620 /* Version 2 and up takes pointer to two user_data structs */
11621 data_items = 2;
11622 }
11623
11624 target_datalen = sizeof(*target_data) * data_items;
11625
11626 if (arg2) {
11627 if (num == TARGET_NR_capget) {
11628 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11629 } else {
11630 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11631 }
11632 if (!target_data) {
11633 unlock_user_struct(target_header, arg1, 0);
11634 return -TARGET_EFAULT;
11635 }
11636
11637 if (num == TARGET_NR_capset) {
11638 for (i = 0; i < data_items; i++) {
11639 data[i].effective = tswap32(target_data[i].effective);
11640 data[i].permitted = tswap32(target_data[i].permitted);
11641 data[i].inheritable = tswap32(target_data[i].inheritable);
11642 }
11643 }
11644
11645 dataptr = data;
11646 }
11647
11648 if (num == TARGET_NR_capget) {
11649 ret = get_errno(capget(&header, dataptr));
11650 } else {
11651 ret = get_errno(capset(&header, dataptr));
11652 }
11653
11654 /* The kernel always updates version for both capget and capset */
11655 target_header->version = tswap32(header.version);
11656 unlock_user_struct(target_header, arg1, 1);
11657
11658 if (arg2) {
11659 if (num == TARGET_NR_capget) {
11660 for (i = 0; i < data_items; i++) {
11661 target_data[i].effective = tswap32(data[i].effective);
11662 target_data[i].permitted = tswap32(data[i].permitted);
11663 target_data[i].inheritable = tswap32(data[i].inheritable);
11664 }
11665 unlock_user(target_data, arg2, target_datalen);
11666 } else {
11667 unlock_user(target_data, arg2, 0);
11668 }
11669 }
11670 return ret;
11671 }
11672 case TARGET_NR_sigaltstack:
11673 return do_sigaltstack(arg1, arg2, cpu_env);
11674
11675 #ifdef CONFIG_SENDFILE
11676 #ifdef TARGET_NR_sendfile
11677 case TARGET_NR_sendfile:
11678 {
11679 off_t *offp = NULL;
11680 off_t off;
11681 if (arg3) {
11682 ret = get_user_sal(off, arg3);
11683 if (is_error(ret)) {
11684 return ret;
11685 }
11686 offp = &off;
11687 }
11688 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11689 if (!is_error(ret) && arg3) {
11690 abi_long ret2 = put_user_sal(off, arg3);
11691 if (is_error(ret2)) {
11692 ret = ret2;
11693 }
11694 }
11695 return ret;
11696 }
11697 #endif
11698 #ifdef TARGET_NR_sendfile64
11699 case TARGET_NR_sendfile64:
11700 {
11701 off_t *offp = NULL;
11702 off_t off;
11703 if (arg3) {
11704 ret = get_user_s64(off, arg3);
11705 if (is_error(ret)) {
11706 return ret;
11707 }
11708 offp = &off;
11709 }
11710 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11711 if (!is_error(ret) && arg3) {
11712 abi_long ret2 = put_user_s64(off, arg3);
11713 if (is_error(ret2)) {
11714 ret = ret2;
11715 }
11716 }
11717 return ret;
11718 }
11719 #endif
11720 #endif
11721 #ifdef TARGET_NR_vfork
11722 case TARGET_NR_vfork:
11723 return get_errno(do_fork(cpu_env,
11724 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11725 0, 0, 0, 0));
11726 #endif
11727 #ifdef TARGET_NR_ugetrlimit
11728 case TARGET_NR_ugetrlimit:
11729 {
11730 struct rlimit rlim;
11731 int resource = target_to_host_resource(arg1);
11732 ret = get_errno(getrlimit(resource, &rlim));
11733 if (!is_error(ret)) {
11734 struct target_rlimit *target_rlim;
11735 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11736 return -TARGET_EFAULT;
11737 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11738 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11739 unlock_user_struct(target_rlim, arg2, 1);
11740 }
11741 return ret;
11742 }
11743 #endif
11744 #ifdef TARGET_NR_truncate64
11745 case TARGET_NR_truncate64:
11746 if (!(p = lock_user_string(arg1)))
11747 return -TARGET_EFAULT;
11748 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11749 unlock_user(p, arg1, 0);
11750 return ret;
11751 #endif
11752 #ifdef TARGET_NR_ftruncate64
11753 case TARGET_NR_ftruncate64:
11754 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11755 #endif
11756 #ifdef TARGET_NR_stat64
11757 case TARGET_NR_stat64:
11758 if (!(p = lock_user_string(arg1))) {
11759 return -TARGET_EFAULT;
11760 }
11761 ret = get_errno(stat(path(p), &st));
11762 unlock_user(p, arg1, 0);
11763 if (!is_error(ret))
11764 ret = host_to_target_stat64(cpu_env, arg2, &st);
11765 return ret;
11766 #endif
11767 #ifdef TARGET_NR_lstat64
11768 case TARGET_NR_lstat64:
11769 if (!(p = lock_user_string(arg1))) {
11770 return -TARGET_EFAULT;
11771 }
11772 ret = get_errno(lstat(path(p), &st));
11773 unlock_user(p, arg1, 0);
11774 if (!is_error(ret))
11775 ret = host_to_target_stat64(cpu_env, arg2, &st);
11776 return ret;
11777 #endif
11778 #ifdef TARGET_NR_fstat64
11779 case TARGET_NR_fstat64:
11780 ret = get_errno(fstat(arg1, &st));
11781 if (!is_error(ret))
11782 ret = host_to_target_stat64(cpu_env, arg2, &st);
11783 return ret;
11784 #endif
11785 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11786 #ifdef TARGET_NR_fstatat64
11787 case TARGET_NR_fstatat64:
11788 #endif
11789 #ifdef TARGET_NR_newfstatat
11790 case TARGET_NR_newfstatat:
11791 #endif
11792 if (!(p = lock_user_string(arg2))) {
11793 return -TARGET_EFAULT;
11794 }
11795 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11796 unlock_user(p, arg2, 0);
11797 if (!is_error(ret))
11798 ret = host_to_target_stat64(cpu_env, arg3, &st);
11799 return ret;
11800 #endif
11801 #if defined(TARGET_NR_statx)
11802 case TARGET_NR_statx:
11803 {
11804 struct target_statx *target_stx;
11805 int dirfd = arg1;
11806 int flags = arg3;
11807
11808 p = lock_user_string(arg2);
11809 if (p == NULL) {
11810 return -TARGET_EFAULT;
11811 }
11812 #if defined(__NR_statx)
11813 {
11814 /*
11815 * It is assumed that struct statx is architecture independent.
11816 */
11817 struct target_statx host_stx;
11818 int mask = arg4;
11819
11820 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11821 if (!is_error(ret)) {
11822 if (host_to_target_statx(&host_stx, arg5) != 0) {
11823 unlock_user(p, arg2, 0);
11824 return -TARGET_EFAULT;
11825 }
11826 }
11827
11828 if (ret != -TARGET_ENOSYS) {
11829 unlock_user(p, arg2, 0);
11830 return ret;
11831 }
11832 }
11833 #endif
11834 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11835 unlock_user(p, arg2, 0);
11836
11837 if (!is_error(ret)) {
11838 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11839 return -TARGET_EFAULT;
11840 }
11841 memset(target_stx, 0, sizeof(*target_stx));
11842 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11843 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11844 __put_user(st.st_ino, &target_stx->stx_ino);
11845 __put_user(st.st_mode, &target_stx->stx_mode);
11846 __put_user(st.st_uid, &target_stx->stx_uid);
11847 __put_user(st.st_gid, &target_stx->stx_gid);
11848 __put_user(st.st_nlink, &target_stx->stx_nlink);
11849 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11850 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11851 __put_user(st.st_size, &target_stx->stx_size);
11852 __put_user(st.st_blksize, &target_stx->stx_blksize);
11853 __put_user(st.st_blocks, &target_stx->stx_blocks);
11854 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11855 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11856 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11857 unlock_user_struct(target_stx, arg5, 1);
11858 }
11859 }
11860 return ret;
11861 #endif
11862 #ifdef TARGET_NR_lchown
11863 case TARGET_NR_lchown:
11864 if (!(p = lock_user_string(arg1)))
11865 return -TARGET_EFAULT;
11866 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11867 unlock_user(p, arg1, 0);
11868 return ret;
11869 #endif
11870 #ifdef TARGET_NR_getuid
11871 case TARGET_NR_getuid:
11872 return get_errno(high2lowuid(getuid()));
11873 #endif
11874 #ifdef TARGET_NR_getgid
11875 case TARGET_NR_getgid:
11876 return get_errno(high2lowgid(getgid()));
11877 #endif
11878 #ifdef TARGET_NR_geteuid
11879 case TARGET_NR_geteuid:
11880 return get_errno(high2lowuid(geteuid()));
11881 #endif
11882 #ifdef TARGET_NR_getegid
11883 case TARGET_NR_getegid:
11884 return get_errno(high2lowgid(getegid()));
11885 #endif
11886 case TARGET_NR_setreuid:
11887 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11888 case TARGET_NR_setregid:
11889 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11890 case TARGET_NR_getgroups:
11891 { /* the same code as for TARGET_NR_getgroups32 */
11892 int gidsetsize = arg1;
11893 target_id *target_grouplist;
11894 g_autofree gid_t *grouplist = NULL;
11895 int i;
11896
11897 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11898 return -TARGET_EINVAL;
11899 }
11900 if (gidsetsize > 0) {
11901 grouplist = g_try_new(gid_t, gidsetsize);
11902 if (!grouplist) {
11903 return -TARGET_ENOMEM;
11904 }
11905 }
11906 ret = get_errno(getgroups(gidsetsize, grouplist));
11907 if (!is_error(ret) && gidsetsize > 0) {
11908 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11909 gidsetsize * sizeof(target_id), 0);
11910 if (!target_grouplist) {
11911 return -TARGET_EFAULT;
11912 }
11913 for (i = 0; i < ret; i++) {
11914 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11915 }
11916 unlock_user(target_grouplist, arg2,
11917 gidsetsize * sizeof(target_id));
11918 }
11919 return ret;
11920 }
11921 case TARGET_NR_setgroups:
11922 { /* the same code as for TARGET_NR_setgroups32 */
11923 int gidsetsize = arg1;
11924 target_id *target_grouplist;
11925 g_autofree gid_t *grouplist = NULL;
11926 int i;
11927
11928 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11929 return -TARGET_EINVAL;
11930 }
11931 if (gidsetsize > 0) {
11932 grouplist = g_try_new(gid_t, gidsetsize);
11933 if (!grouplist) {
11934 return -TARGET_ENOMEM;
11935 }
11936 target_grouplist = lock_user(VERIFY_READ, arg2,
11937 gidsetsize * sizeof(target_id), 1);
11938 if (!target_grouplist) {
11939 return -TARGET_EFAULT;
11940 }
11941 for (i = 0; i < gidsetsize; i++) {
11942 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11943 }
11944 unlock_user(target_grouplist, arg2,
11945 gidsetsize * sizeof(target_id));
11946 }
11947 return get_errno(setgroups(gidsetsize, grouplist));
11948 }
11949 case TARGET_NR_fchown:
11950 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11951 #if defined(TARGET_NR_fchownat)
11952 case TARGET_NR_fchownat:
11953 if (!(p = lock_user_string(arg2)))
11954 return -TARGET_EFAULT;
11955 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11956 low2highgid(arg4), arg5));
11957 unlock_user(p, arg2, 0);
11958 return ret;
11959 #endif
11960 #ifdef TARGET_NR_setresuid
11961 case TARGET_NR_setresuid:
11962 return get_errno(sys_setresuid(low2highuid(arg1),
11963 low2highuid(arg2),
11964 low2highuid(arg3)));
11965 #endif
11966 #ifdef TARGET_NR_getresuid
11967 case TARGET_NR_getresuid:
11968 {
11969 uid_t ruid, euid, suid;
11970 ret = get_errno(getresuid(&ruid, &euid, &suid));
11971 if (!is_error(ret)) {
11972 if (put_user_id(high2lowuid(ruid), arg1)
11973 || put_user_id(high2lowuid(euid), arg2)
11974 || put_user_id(high2lowuid(suid), arg3))
11975 return -TARGET_EFAULT;
11976 }
11977 }
11978 return ret;
11979 #endif
11980 #ifdef TARGET_NR_getresgid
11981 case TARGET_NR_setresgid:
11982 return get_errno(sys_setresgid(low2highgid(arg1),
11983 low2highgid(arg2),
11984 low2highgid(arg3)));
11985 #endif
11986 #ifdef TARGET_NR_getresgid
11987 case TARGET_NR_getresgid:
11988 {
11989 gid_t rgid, egid, sgid;
11990 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11991 if (!is_error(ret)) {
11992 if (put_user_id(high2lowgid(rgid), arg1)
11993 || put_user_id(high2lowgid(egid), arg2)
11994 || put_user_id(high2lowgid(sgid), arg3))
11995 return -TARGET_EFAULT;
11996 }
11997 }
11998 return ret;
11999 #endif
12000 #ifdef TARGET_NR_chown
12001 case TARGET_NR_chown:
12002 if (!(p = lock_user_string(arg1)))
12003 return -TARGET_EFAULT;
12004 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12005 unlock_user(p, arg1, 0);
12006 return ret;
12007 #endif
12008 case TARGET_NR_setuid:
12009 return get_errno(sys_setuid(low2highuid(arg1)));
12010 case TARGET_NR_setgid:
12011 return get_errno(sys_setgid(low2highgid(arg1)));
12012 case TARGET_NR_setfsuid:
12013 return get_errno(setfsuid(arg1));
12014 case TARGET_NR_setfsgid:
12015 return get_errno(setfsgid(arg1));
12016
12017 #ifdef TARGET_NR_lchown32
12018 case TARGET_NR_lchown32:
12019 if (!(p = lock_user_string(arg1)))
12020 return -TARGET_EFAULT;
12021 ret = get_errno(lchown(p, arg2, arg3));
12022 unlock_user(p, arg1, 0);
12023 return ret;
12024 #endif
12025 #ifdef TARGET_NR_getuid32
12026 case TARGET_NR_getuid32:
12027 return get_errno(getuid());
12028 #endif
12029
12030 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12031 /* Alpha specific */
12032 case TARGET_NR_getxuid:
12033 {
12034 uid_t euid;
12035 euid=geteuid();
12036 cpu_env->ir[IR_A4]=euid;
12037 }
12038 return get_errno(getuid());
12039 #endif
12040 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12041 /* Alpha specific */
12042 case TARGET_NR_getxgid:
12043 {
12044 uid_t egid;
12045 egid=getegid();
12046 cpu_env->ir[IR_A4]=egid;
12047 }
12048 return get_errno(getgid());
12049 #endif
12050 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12051 /* Alpha specific */
12052 case TARGET_NR_osf_getsysinfo:
12053 ret = -TARGET_EOPNOTSUPP;
12054 switch (arg1) {
12055 case TARGET_GSI_IEEE_FP_CONTROL:
12056 {
12057 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12058 uint64_t swcr = cpu_env->swcr;
12059
12060 swcr &= ~SWCR_STATUS_MASK;
12061 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12062
12063 if (put_user_u64 (swcr, arg2))
12064 return -TARGET_EFAULT;
12065 ret = 0;
12066 }
12067 break;
12068
12069 /* case GSI_IEEE_STATE_AT_SIGNAL:
12070 -- Not implemented in linux kernel.
12071 case GSI_UACPROC:
12072 -- Retrieves current unaligned access state; not much used.
12073 case GSI_PROC_TYPE:
12074 -- Retrieves implver information; surely not used.
12075 case GSI_GET_HWRPB:
12076 -- Grabs a copy of the HWRPB; surely not used.
12077 */
12078 }
12079 return ret;
12080 #endif
12081 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12082 /* Alpha specific */
12083 case TARGET_NR_osf_setsysinfo:
12084 ret = -TARGET_EOPNOTSUPP;
12085 switch (arg1) {
12086 case TARGET_SSI_IEEE_FP_CONTROL:
12087 {
12088 uint64_t swcr, fpcr;
12089
12090 if (get_user_u64 (swcr, arg2)) {
12091 return -TARGET_EFAULT;
12092 }
12093
12094 /*
12095 * The kernel calls swcr_update_status to update the
12096 * status bits from the fpcr at every point that it
12097 * could be queried. Therefore, we store the status
12098 * bits only in FPCR.
12099 */
12100 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12101
12102 fpcr = cpu_alpha_load_fpcr(cpu_env);
12103 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12104 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12105 cpu_alpha_store_fpcr(cpu_env, fpcr);
12106 ret = 0;
12107 }
12108 break;
12109
12110 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12111 {
12112 uint64_t exc, fpcr, fex;
12113
12114 if (get_user_u64(exc, arg2)) {
12115 return -TARGET_EFAULT;
12116 }
12117 exc &= SWCR_STATUS_MASK;
12118 fpcr = cpu_alpha_load_fpcr(cpu_env);
12119
12120 /* Old exceptions are not signaled. */
12121 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12122 fex = exc & ~fex;
12123 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12124 fex &= (cpu_env)->swcr;
12125
12126 /* Update the hardware fpcr. */
12127 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12128 cpu_alpha_store_fpcr(cpu_env, fpcr);
12129
12130 if (fex) {
12131 int si_code = TARGET_FPE_FLTUNK;
12132 target_siginfo_t info;
12133
12134 if (fex & SWCR_TRAP_ENABLE_DNO) {
12135 si_code = TARGET_FPE_FLTUND;
12136 }
12137 if (fex & SWCR_TRAP_ENABLE_INE) {
12138 si_code = TARGET_FPE_FLTRES;
12139 }
12140 if (fex & SWCR_TRAP_ENABLE_UNF) {
12141 si_code = TARGET_FPE_FLTUND;
12142 }
12143 if (fex & SWCR_TRAP_ENABLE_OVF) {
12144 si_code = TARGET_FPE_FLTOVF;
12145 }
12146 if (fex & SWCR_TRAP_ENABLE_DZE) {
12147 si_code = TARGET_FPE_FLTDIV;
12148 }
12149 if (fex & SWCR_TRAP_ENABLE_INV) {
12150 si_code = TARGET_FPE_FLTINV;
12151 }
12152
12153 info.si_signo = SIGFPE;
12154 info.si_errno = 0;
12155 info.si_code = si_code;
12156 info._sifields._sigfault._addr = (cpu_env)->pc;
12157 queue_signal(cpu_env, info.si_signo,
12158 QEMU_SI_FAULT, &info);
12159 }
12160 ret = 0;
12161 }
12162 break;
12163
12164 /* case SSI_NVPAIRS:
12165 -- Used with SSIN_UACPROC to enable unaligned accesses.
12166 case SSI_IEEE_STATE_AT_SIGNAL:
12167 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12168 -- Not implemented in linux kernel
12169 */
12170 }
12171 return ret;
12172 #endif
12173 #ifdef TARGET_NR_osf_sigprocmask
12174 /* Alpha specific. */
12175 case TARGET_NR_osf_sigprocmask:
12176 {
12177 abi_ulong mask;
12178 int how;
12179 sigset_t set, oldset;
12180
12181 switch(arg1) {
12182 case TARGET_SIG_BLOCK:
12183 how = SIG_BLOCK;
12184 break;
12185 case TARGET_SIG_UNBLOCK:
12186 how = SIG_UNBLOCK;
12187 break;
12188 case TARGET_SIG_SETMASK:
12189 how = SIG_SETMASK;
12190 break;
12191 default:
12192 return -TARGET_EINVAL;
12193 }
12194 mask = arg2;
12195 target_to_host_old_sigset(&set, &mask);
12196 ret = do_sigprocmask(how, &set, &oldset);
12197 if (!ret) {
12198 host_to_target_old_sigset(&mask, &oldset);
12199 ret = mask;
12200 }
12201 }
12202 return ret;
12203 #endif
12204
12205 #ifdef TARGET_NR_getgid32
12206 case TARGET_NR_getgid32:
12207 return get_errno(getgid());
12208 #endif
12209 #ifdef TARGET_NR_geteuid32
12210 case TARGET_NR_geteuid32:
12211 return get_errno(geteuid());
12212 #endif
12213 #ifdef TARGET_NR_getegid32
12214 case TARGET_NR_getegid32:
12215 return get_errno(getegid());
12216 #endif
12217 #ifdef TARGET_NR_setreuid32
12218 case TARGET_NR_setreuid32:
12219 return get_errno(setreuid(arg1, arg2));
12220 #endif
12221 #ifdef TARGET_NR_setregid32
12222 case TARGET_NR_setregid32:
12223 return get_errno(setregid(arg1, arg2));
12224 #endif
12225 #ifdef TARGET_NR_getgroups32
12226 case TARGET_NR_getgroups32:
12227 { /* the same code as for TARGET_NR_getgroups */
12228 int gidsetsize = arg1;
12229 uint32_t *target_grouplist;
12230 g_autofree gid_t *grouplist = NULL;
12231 int i;
12232
12233 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12234 return -TARGET_EINVAL;
12235 }
12236 if (gidsetsize > 0) {
12237 grouplist = g_try_new(gid_t, gidsetsize);
12238 if (!grouplist) {
12239 return -TARGET_ENOMEM;
12240 }
12241 }
12242 ret = get_errno(getgroups(gidsetsize, grouplist));
12243 if (!is_error(ret) && gidsetsize > 0) {
12244 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12245 gidsetsize * 4, 0);
12246 if (!target_grouplist) {
12247 return -TARGET_EFAULT;
12248 }
12249 for (i = 0; i < ret; i++) {
12250 target_grouplist[i] = tswap32(grouplist[i]);
12251 }
12252 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12253 }
12254 return ret;
12255 }
12256 #endif
12257 #ifdef TARGET_NR_setgroups32
12258 case TARGET_NR_setgroups32:
12259 { /* the same code as for TARGET_NR_setgroups */
12260 int gidsetsize = arg1;
12261 uint32_t *target_grouplist;
12262 g_autofree gid_t *grouplist = NULL;
12263 int i;
12264
12265 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12266 return -TARGET_EINVAL;
12267 }
12268 if (gidsetsize > 0) {
12269 grouplist = g_try_new(gid_t, gidsetsize);
12270 if (!grouplist) {
12271 return -TARGET_ENOMEM;
12272 }
12273 target_grouplist = lock_user(VERIFY_READ, arg2,
12274 gidsetsize * 4, 1);
12275 if (!target_grouplist) {
12276 return -TARGET_EFAULT;
12277 }
12278 for (i = 0; i < gidsetsize; i++) {
12279 grouplist[i] = tswap32(target_grouplist[i]);
12280 }
12281 unlock_user(target_grouplist, arg2, 0);
12282 }
12283 return get_errno(setgroups(gidsetsize, grouplist));
12284 }
12285 #endif
12286 #ifdef TARGET_NR_fchown32
12287 case TARGET_NR_fchown32:
12288 return get_errno(fchown(arg1, arg2, arg3));
12289 #endif
12290 #ifdef TARGET_NR_setresuid32
12291 case TARGET_NR_setresuid32:
12292 return get_errno(sys_setresuid(arg1, arg2, arg3));
12293 #endif
12294 #ifdef TARGET_NR_getresuid32
12295 case TARGET_NR_getresuid32:
12296 {
12297 uid_t ruid, euid, suid;
12298 ret = get_errno(getresuid(&ruid, &euid, &suid));
12299 if (!is_error(ret)) {
12300 if (put_user_u32(ruid, arg1)
12301 || put_user_u32(euid, arg2)
12302 || put_user_u32(suid, arg3))
12303 return -TARGET_EFAULT;
12304 }
12305 }
12306 return ret;
12307 #endif
12308 #ifdef TARGET_NR_setresgid32
12309 case TARGET_NR_setresgid32:
12310 return get_errno(sys_setresgid(arg1, arg2, arg3));
12311 #endif
12312 #ifdef TARGET_NR_getresgid32
12313 case TARGET_NR_getresgid32:
12314 {
12315 gid_t rgid, egid, sgid;
12316 ret = get_errno(getresgid(&rgid, &egid, &sgid));
12317 if (!is_error(ret)) {
12318 if (put_user_u32(rgid, arg1)
12319 || put_user_u32(egid, arg2)
12320 || put_user_u32(sgid, arg3))
12321 return -TARGET_EFAULT;
12322 }
12323 }
12324 return ret;
12325 #endif
12326 #ifdef TARGET_NR_chown32
12327 case TARGET_NR_chown32:
12328 if (!(p = lock_user_string(arg1)))
12329 return -TARGET_EFAULT;
12330 ret = get_errno(chown(p, arg2, arg3));
12331 unlock_user(p, arg1, 0);
12332 return ret;
12333 #endif
12334 #ifdef TARGET_NR_setuid32
12335 case TARGET_NR_setuid32:
12336 return get_errno(sys_setuid(arg1));
12337 #endif
12338 #ifdef TARGET_NR_setgid32
12339 case TARGET_NR_setgid32:
12340 return get_errno(sys_setgid(arg1));
12341 #endif
12342 #ifdef TARGET_NR_setfsuid32
12343 case TARGET_NR_setfsuid32:
12344 return get_errno(setfsuid(arg1));
12345 #endif
12346 #ifdef TARGET_NR_setfsgid32
12347 case TARGET_NR_setfsgid32:
12348 return get_errno(setfsgid(arg1));
12349 #endif
12350 #ifdef TARGET_NR_mincore
12351 case TARGET_NR_mincore:
12352 {
12353 void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12354 if (!a) {
12355 return -TARGET_ENOMEM;
12356 }
12357 p = lock_user_string(arg3);
12358 if (!p) {
12359 ret = -TARGET_EFAULT;
12360 } else {
12361 ret = get_errno(mincore(a, arg2, p));
12362 unlock_user(p, arg3, ret);
12363 }
12364 unlock_user(a, arg1, 0);
12365 }
12366 return ret;
12367 #endif
12368 #ifdef TARGET_NR_arm_fadvise64_64
12369 case TARGET_NR_arm_fadvise64_64:
12370 /* arm_fadvise64_64 looks like fadvise64_64 but
12371 * with different argument order: fd, advice, offset, len
12372 * rather than the usual fd, offset, len, advice.
12373 * Note that offset and len are both 64-bit so appear as
12374 * pairs of 32-bit registers.
12375 */
12376 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12377 target_offset64(arg5, arg6), arg2);
12378 return -host_to_target_errno(ret);
12379 #endif
12380
12381 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12382
12383 #ifdef TARGET_NR_fadvise64_64
12384 case TARGET_NR_fadvise64_64:
12385 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12386 /* 6 args: fd, advice, offset (high, low), len (high, low) */
12387 ret = arg2;
12388 arg2 = arg3;
12389 arg3 = arg4;
12390 arg4 = arg5;
12391 arg5 = arg6;
12392 arg6 = ret;
12393 #else
12394 /* 6 args: fd, offset (high, low), len (high, low), advice */
12395 if (regpairs_aligned(cpu_env, num)) {
12396 /* offset is in (3,4), len in (5,6) and advice in 7 */
12397 arg2 = arg3;
12398 arg3 = arg4;
12399 arg4 = arg5;
12400 arg5 = arg6;
12401 arg6 = arg7;
12402 }
12403 #endif
12404 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12405 target_offset64(arg4, arg5), arg6);
12406 return -host_to_target_errno(ret);
12407 #endif
12408
12409 #ifdef TARGET_NR_fadvise64
12410 case TARGET_NR_fadvise64:
12411 /* 5 args: fd, offset (high, low), len, advice */
12412 if (regpairs_aligned(cpu_env, num)) {
12413 /* offset is in (3,4), len in 5 and advice in 6 */
12414 arg2 = arg3;
12415 arg3 = arg4;
12416 arg4 = arg5;
12417 arg5 = arg6;
12418 }
12419 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12420 return -host_to_target_errno(ret);
12421 #endif
12422
12423 #else /* not a 32-bit ABI */
12424 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12425 #ifdef TARGET_NR_fadvise64_64
12426 case TARGET_NR_fadvise64_64:
12427 #endif
12428 #ifdef TARGET_NR_fadvise64
12429 case TARGET_NR_fadvise64:
12430 #endif
12431 #ifdef TARGET_S390X
12432 switch (arg4) {
12433 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12434 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12435 case 6: arg4 = POSIX_FADV_DONTNEED; break;
12436 case 7: arg4 = POSIX_FADV_NOREUSE; break;
12437 default: break;
12438 }
12439 #endif
12440 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12441 #endif
12442 #endif /* end of 64-bit ABI fadvise handling */
12443
12444 #ifdef TARGET_NR_madvise
12445 case TARGET_NR_madvise:
12446 return target_madvise(arg1, arg2, arg3);
12447 #endif
12448 #ifdef TARGET_NR_fcntl64
12449 case TARGET_NR_fcntl64:
12450 {
12451 int cmd;
12452 struct flock64 fl;
12453 from_flock64_fn *copyfrom = copy_from_user_flock64;
12454 to_flock64_fn *copyto = copy_to_user_flock64;
12455
12456 #ifdef TARGET_ARM
12457 if (!cpu_env->eabi) {
12458 copyfrom = copy_from_user_oabi_flock64;
12459 copyto = copy_to_user_oabi_flock64;
12460 }
12461 #endif
12462
12463 cmd = target_to_host_fcntl_cmd(arg2);
12464 if (cmd == -TARGET_EINVAL) {
12465 return cmd;
12466 }
12467
12468 switch(arg2) {
12469 case TARGET_F_GETLK64:
12470 ret = copyfrom(&fl, arg3);
12471 if (ret) {
12472 break;
12473 }
12474 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12475 if (ret == 0) {
12476 ret = copyto(arg3, &fl);
12477 }
12478 break;
12479
12480 case TARGET_F_SETLK64:
12481 case TARGET_F_SETLKW64:
12482 ret = copyfrom(&fl, arg3);
12483 if (ret) {
12484 break;
12485 }
12486 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12487 break;
12488 default:
12489 ret = do_fcntl(arg1, arg2, arg3);
12490 break;
12491 }
12492 return ret;
12493 }
12494 #endif
12495 #ifdef TARGET_NR_cacheflush
12496 case TARGET_NR_cacheflush:
12497 /* self-modifying code is handled automatically, so nothing needed */
12498 return 0;
12499 #endif
12500 #ifdef TARGET_NR_getpagesize
12501 case TARGET_NR_getpagesize:
12502 return TARGET_PAGE_SIZE;
12503 #endif
12504 case TARGET_NR_gettid:
12505 return get_errno(sys_gettid());
12506 #ifdef TARGET_NR_readahead
12507 case TARGET_NR_readahead:
12508 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12509 if (regpairs_aligned(cpu_env, num)) {
12510 arg2 = arg3;
12511 arg3 = arg4;
12512 arg4 = arg5;
12513 }
12514 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12515 #else
12516 ret = get_errno(readahead(arg1, arg2, arg3));
12517 #endif
12518 return ret;
12519 #endif
12520 #ifdef CONFIG_ATTR
12521 #ifdef TARGET_NR_setxattr
12522 case TARGET_NR_listxattr:
12523 case TARGET_NR_llistxattr:
12524 {
12525 void *p, *b = 0;
12526 if (arg2) {
12527 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12528 if (!b) {
12529 return -TARGET_EFAULT;
12530 }
12531 }
12532 p = lock_user_string(arg1);
12533 if (p) {
12534 if (num == TARGET_NR_listxattr) {
12535 ret = get_errno(listxattr(p, b, arg3));
12536 } else {
12537 ret = get_errno(llistxattr(p, b, arg3));
12538 }
12539 } else {
12540 ret = -TARGET_EFAULT;
12541 }
12542 unlock_user(p, arg1, 0);
12543 unlock_user(b, arg2, arg3);
12544 return ret;
12545 }
12546 case TARGET_NR_flistxattr:
12547 {
12548 void *b = 0;
12549 if (arg2) {
12550 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12551 if (!b) {
12552 return -TARGET_EFAULT;
12553 }
12554 }
12555 ret = get_errno(flistxattr(arg1, b, arg3));
12556 unlock_user(b, arg2, arg3);
12557 return ret;
12558 }
12559 case TARGET_NR_setxattr:
12560 case TARGET_NR_lsetxattr:
12561 {
12562 void *p, *n, *v = 0;
12563 if (arg3) {
12564 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12565 if (!v) {
12566 return -TARGET_EFAULT;
12567 }
12568 }
12569 p = lock_user_string(arg1);
12570 n = lock_user_string(arg2);
12571 if (p && n) {
12572 if (num == TARGET_NR_setxattr) {
12573 ret = get_errno(setxattr(p, n, v, arg4, arg5));
12574 } else {
12575 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12576 }
12577 } else {
12578 ret = -TARGET_EFAULT;
12579 }
12580 unlock_user(p, arg1, 0);
12581 unlock_user(n, arg2, 0);
12582 unlock_user(v, arg3, 0);
12583 }
12584 return ret;
12585 case TARGET_NR_fsetxattr:
12586 {
12587 void *n, *v = 0;
12588 if (arg3) {
12589 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12590 if (!v) {
12591 return -TARGET_EFAULT;
12592 }
12593 }
12594 n = lock_user_string(arg2);
12595 if (n) {
12596 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12597 } else {
12598 ret = -TARGET_EFAULT;
12599 }
12600 unlock_user(n, arg2, 0);
12601 unlock_user(v, arg3, 0);
12602 }
12603 return ret;
12604 case TARGET_NR_getxattr:
12605 case TARGET_NR_lgetxattr:
12606 {
12607 void *p, *n, *v = 0;
12608 if (arg3) {
12609 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12610 if (!v) {
12611 return -TARGET_EFAULT;
12612 }
12613 }
12614 p = lock_user_string(arg1);
12615 n = lock_user_string(arg2);
12616 if (p && n) {
12617 if (num == TARGET_NR_getxattr) {
12618 ret = get_errno(getxattr(p, n, v, arg4));
12619 } else {
12620 ret = get_errno(lgetxattr(p, n, v, arg4));
12621 }
12622 } else {
12623 ret = -TARGET_EFAULT;
12624 }
12625 unlock_user(p, arg1, 0);
12626 unlock_user(n, arg2, 0);
12627 unlock_user(v, arg3, arg4);
12628 }
12629 return ret;
12630 case TARGET_NR_fgetxattr:
12631 {
12632 void *n, *v = 0;
12633 if (arg3) {
12634 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12635 if (!v) {
12636 return -TARGET_EFAULT;
12637 }
12638 }
12639 n = lock_user_string(arg2);
12640 if (n) {
12641 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12642 } else {
12643 ret = -TARGET_EFAULT;
12644 }
12645 unlock_user(n, arg2, 0);
12646 unlock_user(v, arg3, arg4);
12647 }
12648 return ret;
12649 case TARGET_NR_removexattr:
12650 case TARGET_NR_lremovexattr:
12651 {
12652 void *p, *n;
12653 p = lock_user_string(arg1);
12654 n = lock_user_string(arg2);
12655 if (p && n) {
12656 if (num == TARGET_NR_removexattr) {
12657 ret = get_errno(removexattr(p, n));
12658 } else {
12659 ret = get_errno(lremovexattr(p, n));
12660 }
12661 } else {
12662 ret = -TARGET_EFAULT;
12663 }
12664 unlock_user(p, arg1, 0);
12665 unlock_user(n, arg2, 0);
12666 }
12667 return ret;
12668 case TARGET_NR_fremovexattr:
12669 {
12670 void *n;
12671 n = lock_user_string(arg2);
12672 if (n) {
12673 ret = get_errno(fremovexattr(arg1, n));
12674 } else {
12675 ret = -TARGET_EFAULT;
12676 }
12677 unlock_user(n, arg2, 0);
12678 }
12679 return ret;
12680 #endif
12681 #endif /* CONFIG_ATTR */
12682 #ifdef TARGET_NR_set_thread_area
12683 case TARGET_NR_set_thread_area:
12684 #if defined(TARGET_MIPS)
12685 cpu_env->active_tc.CP0_UserLocal = arg1;
12686 return 0;
12687 #elif defined(TARGET_CRIS)
12688 if (arg1 & 0xff)
12689 ret = -TARGET_EINVAL;
12690 else {
12691 cpu_env->pregs[PR_PID] = arg1;
12692 ret = 0;
12693 }
12694 return ret;
12695 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12696 return do_set_thread_area(cpu_env, arg1);
12697 #elif defined(TARGET_M68K)
12698 {
12699 TaskState *ts = cpu->opaque;
12700 ts->tp_value = arg1;
12701 return 0;
12702 }
12703 #else
12704 return -TARGET_ENOSYS;
12705 #endif
12706 #endif
12707 #ifdef TARGET_NR_get_thread_area
12708 case TARGET_NR_get_thread_area:
12709 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12710 return do_get_thread_area(cpu_env, arg1);
12711 #elif defined(TARGET_M68K)
12712 {
12713 TaskState *ts = cpu->opaque;
12714 return ts->tp_value;
12715 }
12716 #else
12717 return -TARGET_ENOSYS;
12718 #endif
12719 #endif
12720 #ifdef TARGET_NR_getdomainname
12721 case TARGET_NR_getdomainname:
12722 return -TARGET_ENOSYS;
12723 #endif
12724
12725 #ifdef TARGET_NR_clock_settime
12726 case TARGET_NR_clock_settime:
12727 {
12728 struct timespec ts;
12729
12730 ret = target_to_host_timespec(&ts, arg2);
12731 if (!is_error(ret)) {
12732 ret = get_errno(clock_settime(arg1, &ts));
12733 }
12734 return ret;
12735 }
12736 #endif
12737 #ifdef TARGET_NR_clock_settime64
12738 case TARGET_NR_clock_settime64:
12739 {
12740 struct timespec ts;
12741
12742 ret = target_to_host_timespec64(&ts, arg2);
12743 if (!is_error(ret)) {
12744 ret = get_errno(clock_settime(arg1, &ts));
12745 }
12746 return ret;
12747 }
12748 #endif
12749 #ifdef TARGET_NR_clock_gettime
12750 case TARGET_NR_clock_gettime:
12751 {
12752 struct timespec ts;
12753 ret = get_errno(clock_gettime(arg1, &ts));
12754 if (!is_error(ret)) {
12755 ret = host_to_target_timespec(arg2, &ts);
12756 }
12757 return ret;
12758 }
12759 #endif
12760 #ifdef TARGET_NR_clock_gettime64
12761 case TARGET_NR_clock_gettime64:
12762 {
12763 struct timespec ts;
12764 ret = get_errno(clock_gettime(arg1, &ts));
12765 if (!is_error(ret)) {
12766 ret = host_to_target_timespec64(arg2, &ts);
12767 }
12768 return ret;
12769 }
12770 #endif
12771 #ifdef TARGET_NR_clock_getres
12772 case TARGET_NR_clock_getres:
12773 {
12774 struct timespec ts;
12775 ret = get_errno(clock_getres(arg1, &ts));
12776 if (!is_error(ret)) {
12777 host_to_target_timespec(arg2, &ts);
12778 }
12779 return ret;
12780 }
12781 #endif
12782 #ifdef TARGET_NR_clock_getres_time64
12783 case TARGET_NR_clock_getres_time64:
12784 {
12785 struct timespec ts;
12786 ret = get_errno(clock_getres(arg1, &ts));
12787 if (!is_error(ret)) {
12788 host_to_target_timespec64(arg2, &ts);
12789 }
12790 return ret;
12791 }
12792 #endif
12793 #ifdef TARGET_NR_clock_nanosleep
12794 case TARGET_NR_clock_nanosleep:
12795 {
12796 struct timespec ts;
12797 if (target_to_host_timespec(&ts, arg3)) {
12798 return -TARGET_EFAULT;
12799 }
12800 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12801 &ts, arg4 ? &ts : NULL));
12802 /*
12803 * if the call is interrupted by a signal handler, it fails
12804 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12805 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12806 */
12807 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12808 host_to_target_timespec(arg4, &ts)) {
12809 return -TARGET_EFAULT;
12810 }
12811
12812 return ret;
12813 }
12814 #endif
12815 #ifdef TARGET_NR_clock_nanosleep_time64
12816 case TARGET_NR_clock_nanosleep_time64:
12817 {
12818 struct timespec ts;
12819
12820 if (target_to_host_timespec64(&ts, arg3)) {
12821 return -TARGET_EFAULT;
12822 }
12823
12824 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12825 &ts, arg4 ? &ts : NULL));
12826
12827 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12828 host_to_target_timespec64(arg4, &ts)) {
12829 return -TARGET_EFAULT;
12830 }
12831 return ret;
12832 }
12833 #endif
12834
12835 #if defined(TARGET_NR_set_tid_address)
12836 case TARGET_NR_set_tid_address:
12837 {
12838 TaskState *ts = cpu->opaque;
12839 ts->child_tidptr = arg1;
12840 /* do not call host set_tid_address() syscall, instead return tid() */
12841 return get_errno(sys_gettid());
12842 }
12843 #endif
12844
12845 case TARGET_NR_tkill:
12846 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12847
12848 case TARGET_NR_tgkill:
12849 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12850 target_to_host_signal(arg3)));
12851
12852 #ifdef TARGET_NR_set_robust_list
12853 case TARGET_NR_set_robust_list:
12854 case TARGET_NR_get_robust_list:
12855 /* The ABI for supporting robust futexes has userspace pass
12856 * the kernel a pointer to a linked list which is updated by
12857 * userspace after the syscall; the list is walked by the kernel
12858 * when the thread exits. Since the linked list in QEMU guest
12859 * memory isn't a valid linked list for the host and we have
12860 * no way to reliably intercept the thread-death event, we can't
12861 * support these. Silently return ENOSYS so that guest userspace
12862 * falls back to a non-robust futex implementation (which should
12863 * be OK except in the corner case of the guest crashing while
12864 * holding a mutex that is shared with another process via
12865 * shared memory).
12866 */
12867 return -TARGET_ENOSYS;
12868 #endif
12869
12870 #if defined(TARGET_NR_utimensat)
12871 case TARGET_NR_utimensat:
12872 {
12873 struct timespec *tsp, ts[2];
12874 if (!arg3) {
12875 tsp = NULL;
12876 } else {
12877 if (target_to_host_timespec(ts, arg3)) {
12878 return -TARGET_EFAULT;
12879 }
12880 if (target_to_host_timespec(ts + 1, arg3 +
12881 sizeof(struct target_timespec))) {
12882 return -TARGET_EFAULT;
12883 }
12884 tsp = ts;
12885 }
12886 if (!arg2)
12887 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12888 else {
12889 if (!(p = lock_user_string(arg2))) {
12890 return -TARGET_EFAULT;
12891 }
12892 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12893 unlock_user(p, arg2, 0);
12894 }
12895 }
12896 return ret;
12897 #endif
12898 #ifdef TARGET_NR_utimensat_time64
12899 case TARGET_NR_utimensat_time64:
12900 {
12901 struct timespec *tsp, ts[2];
12902 if (!arg3) {
12903 tsp = NULL;
12904 } else {
12905 if (target_to_host_timespec64(ts, arg3)) {
12906 return -TARGET_EFAULT;
12907 }
12908 if (target_to_host_timespec64(ts + 1, arg3 +
12909 sizeof(struct target__kernel_timespec))) {
12910 return -TARGET_EFAULT;
12911 }
12912 tsp = ts;
12913 }
12914 if (!arg2)
12915 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12916 else {
12917 p = lock_user_string(arg2);
12918 if (!p) {
12919 return -TARGET_EFAULT;
12920 }
12921 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12922 unlock_user(p, arg2, 0);
12923 }
12924 }
12925 return ret;
12926 #endif
12927 #ifdef TARGET_NR_futex
12928 case TARGET_NR_futex:
12929 return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12930 #endif
12931 #ifdef TARGET_NR_futex_time64
12932 case TARGET_NR_futex_time64:
12933 return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12934 #endif
12935 #ifdef CONFIG_INOTIFY
12936 #if defined(TARGET_NR_inotify_init)
12937 case TARGET_NR_inotify_init:
12938 ret = get_errno(inotify_init());
12939 if (ret >= 0) {
12940 fd_trans_register(ret, &target_inotify_trans);
12941 }
12942 return ret;
12943 #endif
12944 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12945 case TARGET_NR_inotify_init1:
12946 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12947 fcntl_flags_tbl)));
12948 if (ret >= 0) {
12949 fd_trans_register(ret, &target_inotify_trans);
12950 }
12951 return ret;
12952 #endif
12953 #if defined(TARGET_NR_inotify_add_watch)
12954 case TARGET_NR_inotify_add_watch:
12955 p = lock_user_string(arg2);
12956 ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12957 unlock_user(p, arg2, 0);
12958 return ret;
12959 #endif
12960 #if defined(TARGET_NR_inotify_rm_watch)
12961 case TARGET_NR_inotify_rm_watch:
12962 return get_errno(inotify_rm_watch(arg1, arg2));
12963 #endif
12964 #endif
12965
12966 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12967 case TARGET_NR_mq_open:
12968 {
12969 struct mq_attr posix_mq_attr;
12970 struct mq_attr *pposix_mq_attr;
12971 int host_flags;
12972
12973 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12974 pposix_mq_attr = NULL;
12975 if (arg4) {
12976 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12977 return -TARGET_EFAULT;
12978 }
12979 pposix_mq_attr = &posix_mq_attr;
12980 }
12981 p = lock_user_string(arg1 - 1);
12982 if (!p) {
12983 return -TARGET_EFAULT;
12984 }
12985 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12986 unlock_user (p, arg1, 0);
12987 }
12988 return ret;
12989
12990 case TARGET_NR_mq_unlink:
12991 p = lock_user_string(arg1 - 1);
12992 if (!p) {
12993 return -TARGET_EFAULT;
12994 }
12995 ret = get_errno(mq_unlink(p));
12996 unlock_user (p, arg1, 0);
12997 return ret;
12998
12999 #ifdef TARGET_NR_mq_timedsend
13000 case TARGET_NR_mq_timedsend:
13001 {
13002 struct timespec ts;
13003
13004 p = lock_user (VERIFY_READ, arg2, arg3, 1);
13005 if (arg5 != 0) {
13006 if (target_to_host_timespec(&ts, arg5)) {
13007 return -TARGET_EFAULT;
13008 }
13009 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13010 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13011 return -TARGET_EFAULT;
13012 }
13013 } else {
13014 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13015 }
13016 unlock_user (p, arg2, arg3);
13017 }
13018 return ret;
13019 #endif
13020 #ifdef TARGET_NR_mq_timedsend_time64
13021 case TARGET_NR_mq_timedsend_time64:
13022 {
13023 struct timespec ts;
13024
13025 p = lock_user(VERIFY_READ, arg2, arg3, 1);
13026 if (arg5 != 0) {
13027 if (target_to_host_timespec64(&ts, arg5)) {
13028 return -TARGET_EFAULT;
13029 }
13030 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13031 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13032 return -TARGET_EFAULT;
13033 }
13034 } else {
13035 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13036 }
13037 unlock_user(p, arg2, arg3);
13038 }
13039 return ret;
13040 #endif
13041
13042 #ifdef TARGET_NR_mq_timedreceive
13043 case TARGET_NR_mq_timedreceive:
13044 {
13045 struct timespec ts;
13046 unsigned int prio;
13047
13048 p = lock_user (VERIFY_READ, arg2, arg3, 1);
13049 if (arg5 != 0) {
13050 if (target_to_host_timespec(&ts, arg5)) {
13051 return -TARGET_EFAULT;
13052 }
13053 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13054 &prio, &ts));
13055 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13056 return -TARGET_EFAULT;
13057 }
13058 } else {
13059 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13060 &prio, NULL));
13061 }
13062 unlock_user (p, arg2, arg3);
13063 if (arg4 != 0)
13064 put_user_u32(prio, arg4);
13065 }
13066 return ret;
13067 #endif
13068 #ifdef TARGET_NR_mq_timedreceive_time64
13069 case TARGET_NR_mq_timedreceive_time64:
13070 {
13071 struct timespec ts;
13072 unsigned int prio;
13073
13074 p = lock_user(VERIFY_READ, arg2, arg3, 1);
13075 if (arg5 != 0) {
13076 if (target_to_host_timespec64(&ts, arg5)) {
13077 return -TARGET_EFAULT;
13078 }
13079 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13080 &prio, &ts));
13081 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13082 return -TARGET_EFAULT;
13083 }
13084 } else {
13085 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13086 &prio, NULL));
13087 }
13088 unlock_user(p, arg2, arg3);
13089 if (arg4 != 0) {
13090 put_user_u32(prio, arg4);
13091 }
13092 }
13093 return ret;
13094 #endif
13095
13096 /* Not implemented for now... */
13097 /* case TARGET_NR_mq_notify: */
13098 /* break; */
13099
13100 case TARGET_NR_mq_getsetattr:
13101 {
13102 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13103 ret = 0;
13104 if (arg2 != 0) {
13105 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13106 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13107 &posix_mq_attr_out));
13108 } else if (arg3 != 0) {
13109 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13110 }
13111 if (ret == 0 && arg3 != 0) {
13112 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13113 }
13114 }
13115 return ret;
13116 #endif
13117
13118 #ifdef CONFIG_SPLICE
13119 #ifdef TARGET_NR_tee
13120 case TARGET_NR_tee:
13121 {
13122 ret = get_errno(tee(arg1,arg2,arg3,arg4));
13123 }
13124 return ret;
13125 #endif
13126 #ifdef TARGET_NR_splice
13127 case TARGET_NR_splice:
13128 {
13129 loff_t loff_in, loff_out;
13130 loff_t *ploff_in = NULL, *ploff_out = NULL;
13131 if (arg2) {
13132 if (get_user_u64(loff_in, arg2)) {
13133 return -TARGET_EFAULT;
13134 }
13135 ploff_in = &loff_in;
13136 }
13137 if (arg4) {
13138 if (get_user_u64(loff_out, arg4)) {
13139 return -TARGET_EFAULT;
13140 }
13141 ploff_out = &loff_out;
13142 }
13143 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13144 if (arg2) {
13145 if (put_user_u64(loff_in, arg2)) {
13146 return -TARGET_EFAULT;
13147 }
13148 }
13149 if (arg4) {
13150 if (put_user_u64(loff_out, arg4)) {
13151 return -TARGET_EFAULT;
13152 }
13153 }
13154 }
13155 return ret;
13156 #endif
13157 #ifdef TARGET_NR_vmsplice
13158 case TARGET_NR_vmsplice:
13159 {
13160 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13161 if (vec != NULL) {
13162 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13163 unlock_iovec(vec, arg2, arg3, 0);
13164 } else {
13165 ret = -host_to_target_errno(errno);
13166 }
13167 }
13168 return ret;
13169 #endif
13170 #endif /* CONFIG_SPLICE */
13171 #ifdef CONFIG_EVENTFD
13172 #if defined(TARGET_NR_eventfd)
13173 case TARGET_NR_eventfd:
13174 ret = get_errno(eventfd(arg1, 0));
13175 if (ret >= 0) {
13176 fd_trans_register(ret, &target_eventfd_trans);
13177 }
13178 return ret;
13179 #endif
13180 #if defined(TARGET_NR_eventfd2)
13181 case TARGET_NR_eventfd2:
13182 {
13183 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13184 if (arg2 & TARGET_O_NONBLOCK) {
13185 host_flags |= O_NONBLOCK;
13186 }
13187 if (arg2 & TARGET_O_CLOEXEC) {
13188 host_flags |= O_CLOEXEC;
13189 }
13190 ret = get_errno(eventfd(arg1, host_flags));
13191 if (ret >= 0) {
13192 fd_trans_register(ret, &target_eventfd_trans);
13193 }
13194 return ret;
13195 }
13196 #endif
13197 #endif /* CONFIG_EVENTFD */
13198 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13199 case TARGET_NR_fallocate:
13200 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13201 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13202 target_offset64(arg5, arg6)));
13203 #else
13204 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13205 #endif
13206 return ret;
13207 #endif
13208 #if defined(CONFIG_SYNC_FILE_RANGE)
13209 #if defined(TARGET_NR_sync_file_range)
13210 case TARGET_NR_sync_file_range:
13211 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13212 #if defined(TARGET_MIPS)
13213 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13214 target_offset64(arg5, arg6), arg7));
13215 #else
13216 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13217 target_offset64(arg4, arg5), arg6));
13218 #endif /* !TARGET_MIPS */
13219 #else
13220 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13221 #endif
13222 return ret;
13223 #endif
13224 #if defined(TARGET_NR_sync_file_range2) || \
13225 defined(TARGET_NR_arm_sync_file_range)
13226 #if defined(TARGET_NR_sync_file_range2)
13227 case TARGET_NR_sync_file_range2:
13228 #endif
13229 #if defined(TARGET_NR_arm_sync_file_range)
13230 case TARGET_NR_arm_sync_file_range:
13231 #endif
13232 /* This is like sync_file_range but the arguments are reordered */
13233 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13234 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13235 target_offset64(arg5, arg6), arg2));
13236 #else
13237 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13238 #endif
13239 return ret;
13240 #endif
13241 #endif
13242 #if defined(TARGET_NR_signalfd4)
13243 case TARGET_NR_signalfd4:
13244 return do_signalfd4(arg1, arg2, arg4);
13245 #endif
13246 #if defined(TARGET_NR_signalfd)
13247 case TARGET_NR_signalfd:
13248 return do_signalfd4(arg1, arg2, 0);
13249 #endif
13250 #if defined(CONFIG_EPOLL)
13251 #if defined(TARGET_NR_epoll_create)
13252 case TARGET_NR_epoll_create:
13253 return get_errno(epoll_create(arg1));
13254 #endif
13255 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13256 case TARGET_NR_epoll_create1:
13257 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13258 #endif
13259 #if defined(TARGET_NR_epoll_ctl)
13260 case TARGET_NR_epoll_ctl:
13261 {
13262 struct epoll_event ep;
13263 struct epoll_event *epp = 0;
13264 if (arg4) {
13265 if (arg2 != EPOLL_CTL_DEL) {
13266 struct target_epoll_event *target_ep;
13267 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13268 return -TARGET_EFAULT;
13269 }
13270 ep.events = tswap32(target_ep->events);
13271 /*
13272 * The epoll_data_t union is just opaque data to the kernel,
13273 * so we transfer all 64 bits across and need not worry what
13274 * actual data type it is.
13275 */
13276 ep.data.u64 = tswap64(target_ep->data.u64);
13277 unlock_user_struct(target_ep, arg4, 0);
13278 }
13279 /*
13280 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13281 * non-null pointer, even though this argument is ignored.
13282 *
13283 */
13284 epp = &ep;
13285 }
13286 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13287 }
13288 #endif
13289
13290 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13291 #if defined(TARGET_NR_epoll_wait)
13292 case TARGET_NR_epoll_wait:
13293 #endif
13294 #if defined(TARGET_NR_epoll_pwait)
13295 case TARGET_NR_epoll_pwait:
13296 #endif
13297 {
13298 struct target_epoll_event *target_ep;
13299 struct epoll_event *ep;
13300 int epfd = arg1;
13301 int maxevents = arg3;
13302 int timeout = arg4;
13303
13304 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13305 return -TARGET_EINVAL;
13306 }
13307
13308 target_ep = lock_user(VERIFY_WRITE, arg2,
13309 maxevents * sizeof(struct target_epoll_event), 1);
13310 if (!target_ep) {
13311 return -TARGET_EFAULT;
13312 }
13313
13314 ep = g_try_new(struct epoll_event, maxevents);
13315 if (!ep) {
13316 unlock_user(target_ep, arg2, 0);
13317 return -TARGET_ENOMEM;
13318 }
13319
13320 switch (num) {
13321 #if defined(TARGET_NR_epoll_pwait)
13322 case TARGET_NR_epoll_pwait:
13323 {
13324 sigset_t *set = NULL;
13325
13326 if (arg5) {
13327 ret = process_sigsuspend_mask(&set, arg5, arg6);
13328 if (ret != 0) {
13329 break;
13330 }
13331 }
13332
13333 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13334 set, SIGSET_T_SIZE));
13335
13336 if (set) {
13337 finish_sigsuspend_mask(ret);
13338 }
13339 break;
13340 }
13341 #endif
13342 #if defined(TARGET_NR_epoll_wait)
13343 case TARGET_NR_epoll_wait:
13344 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13345 NULL, 0));
13346 break;
13347 #endif
13348 default:
13349 ret = -TARGET_ENOSYS;
13350 }
13351 if (!is_error(ret)) {
13352 int i;
13353 for (i = 0; i < ret; i++) {
13354 target_ep[i].events = tswap32(ep[i].events);
13355 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13356 }
13357 unlock_user(target_ep, arg2,
13358 ret * sizeof(struct target_epoll_event));
13359 } else {
13360 unlock_user(target_ep, arg2, 0);
13361 }
13362 g_free(ep);
13363 return ret;
13364 }
13365 #endif
13366 #endif
13367 #ifdef TARGET_NR_prlimit64
13368 case TARGET_NR_prlimit64:
13369 {
13370 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13371 struct target_rlimit64 *target_rnew, *target_rold;
13372 struct host_rlimit64 rnew, rold, *rnewp = 0;
13373 int resource = target_to_host_resource(arg2);
13374
13375 if (arg3 && (resource != RLIMIT_AS &&
13376 resource != RLIMIT_DATA &&
13377 resource != RLIMIT_STACK)) {
13378 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13379 return -TARGET_EFAULT;
13380 }
13381 __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13382 __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13383 unlock_user_struct(target_rnew, arg3, 0);
13384 rnewp = &rnew;
13385 }
13386
13387 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13388 if (!is_error(ret) && arg4) {
13389 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13390 return -TARGET_EFAULT;
13391 }
13392 __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13393 __put_user(rold.rlim_max, &target_rold->rlim_max);
13394 unlock_user_struct(target_rold, arg4, 1);
13395 }
13396 return ret;
13397 }
13398 #endif
13399 #ifdef TARGET_NR_gethostname
13400 case TARGET_NR_gethostname:
13401 {
13402 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13403 if (name) {
13404 ret = get_errno(gethostname(name, arg2));
13405 unlock_user(name, arg1, arg2);
13406 } else {
13407 ret = -TARGET_EFAULT;
13408 }
13409 return ret;
13410 }
13411 #endif
13412 #ifdef TARGET_NR_atomic_cmpxchg_32
13413 case TARGET_NR_atomic_cmpxchg_32:
13414 {
13415 /* should use start_exclusive from main.c */
13416 abi_ulong mem_value;
13417 if (get_user_u32(mem_value, arg6)) {
13418 target_siginfo_t info;
13419 info.si_signo = SIGSEGV;
13420 info.si_errno = 0;
13421 info.si_code = TARGET_SEGV_MAPERR;
13422 info._sifields._sigfault._addr = arg6;
13423 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13424 ret = 0xdeadbeef;
13425
13426 }
13427 if (mem_value == arg2)
13428 put_user_u32(arg1, arg6);
13429 return mem_value;
13430 }
13431 #endif
13432 #ifdef TARGET_NR_atomic_barrier
13433 case TARGET_NR_atomic_barrier:
13434 /* Like the kernel implementation and the
13435 qemu arm barrier, no-op this? */
13436 return 0;
13437 #endif
13438
13439 #ifdef TARGET_NR_timer_create
13440 case TARGET_NR_timer_create:
13441 {
13442 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13443
13444 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13445
13446 int clkid = arg1;
13447 int timer_index = next_free_host_timer();
13448
13449 if (timer_index < 0) {
13450 ret = -TARGET_EAGAIN;
13451 } else {
13452 timer_t *phtimer = g_posix_timers + timer_index;
13453
13454 if (arg2) {
13455 phost_sevp = &host_sevp;
13456 ret = target_to_host_sigevent(phost_sevp, arg2);
13457 if (ret != 0) {
13458 free_host_timer_slot(timer_index);
13459 return ret;
13460 }
13461 }
13462
13463 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13464 if (ret) {
13465 free_host_timer_slot(timer_index);
13466 } else {
13467 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13468 timer_delete(*phtimer);
13469 free_host_timer_slot(timer_index);
13470 return -TARGET_EFAULT;
13471 }
13472 }
13473 }
13474 return ret;
13475 }
13476 #endif
13477
13478 #ifdef TARGET_NR_timer_settime
13479 case TARGET_NR_timer_settime:
13480 {
13481 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13482 * struct itimerspec * old_value */
13483 target_timer_t timerid = get_timer_id(arg1);
13484
13485 if (timerid < 0) {
13486 ret = timerid;
13487 } else if (arg3 == 0) {
13488 ret = -TARGET_EINVAL;
13489 } else {
13490 timer_t htimer = g_posix_timers[timerid];
13491 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13492
13493 if (target_to_host_itimerspec(&hspec_new, arg3)) {
13494 return -TARGET_EFAULT;
13495 }
13496 ret = get_errno(
13497 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13498 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13499 return -TARGET_EFAULT;
13500 }
13501 }
13502 return ret;
13503 }
13504 #endif
13505
13506 #ifdef TARGET_NR_timer_settime64
13507 case TARGET_NR_timer_settime64:
13508 {
13509 target_timer_t timerid = get_timer_id(arg1);
13510
13511 if (timerid < 0) {
13512 ret = timerid;
13513 } else if (arg3 == 0) {
13514 ret = -TARGET_EINVAL;
13515 } else {
13516 timer_t htimer = g_posix_timers[timerid];
13517 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13518
13519 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13520 return -TARGET_EFAULT;
13521 }
13522 ret = get_errno(
13523 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13524 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13525 return -TARGET_EFAULT;
13526 }
13527 }
13528 return ret;
13529 }
13530 #endif
13531
13532 #ifdef TARGET_NR_timer_gettime
13533 case TARGET_NR_timer_gettime:
13534 {
13535 /* args: timer_t timerid, struct itimerspec *curr_value */
13536 target_timer_t timerid = get_timer_id(arg1);
13537
13538 if (timerid < 0) {
13539 ret = timerid;
13540 } else if (!arg2) {
13541 ret = -TARGET_EFAULT;
13542 } else {
13543 timer_t htimer = g_posix_timers[timerid];
13544 struct itimerspec hspec;
13545 ret = get_errno(timer_gettime(htimer, &hspec));
13546
13547 if (host_to_target_itimerspec(arg2, &hspec)) {
13548 ret = -TARGET_EFAULT;
13549 }
13550 }
13551 return ret;
13552 }
13553 #endif
13554
13555 #ifdef TARGET_NR_timer_gettime64
13556 case TARGET_NR_timer_gettime64:
13557 {
13558 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13559 target_timer_t timerid = get_timer_id(arg1);
13560
13561 if (timerid < 0) {
13562 ret = timerid;
13563 } else if (!arg2) {
13564 ret = -TARGET_EFAULT;
13565 } else {
13566 timer_t htimer = g_posix_timers[timerid];
13567 struct itimerspec hspec;
13568 ret = get_errno(timer_gettime(htimer, &hspec));
13569
13570 if (host_to_target_itimerspec64(arg2, &hspec)) {
13571 ret = -TARGET_EFAULT;
13572 }
13573 }
13574 return ret;
13575 }
13576 #endif
13577
13578 #ifdef TARGET_NR_timer_getoverrun
13579 case TARGET_NR_timer_getoverrun:
13580 {
13581 /* args: timer_t timerid */
13582 target_timer_t timerid = get_timer_id(arg1);
13583
13584 if (timerid < 0) {
13585 ret = timerid;
13586 } else {
13587 timer_t htimer = g_posix_timers[timerid];
13588 ret = get_errno(timer_getoverrun(htimer));
13589 }
13590 return ret;
13591 }
13592 #endif
13593
13594 #ifdef TARGET_NR_timer_delete
13595 case TARGET_NR_timer_delete:
13596 {
13597 /* args: timer_t timerid */
13598 target_timer_t timerid = get_timer_id(arg1);
13599
13600 if (timerid < 0) {
13601 ret = timerid;
13602 } else {
13603 timer_t htimer = g_posix_timers[timerid];
13604 ret = get_errno(timer_delete(htimer));
13605 free_host_timer_slot(timerid);
13606 }
13607 return ret;
13608 }
13609 #endif
13610
13611 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13612 case TARGET_NR_timerfd_create:
13613 ret = get_errno(timerfd_create(arg1,
13614 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13615 if (ret >= 0) {
13616 fd_trans_register(ret, &target_timerfd_trans);
13617 }
13618 return ret;
13619 #endif
13620
13621 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13622 case TARGET_NR_timerfd_gettime:
13623 {
13624 struct itimerspec its_curr;
13625
13626 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13627
13628 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13629 return -TARGET_EFAULT;
13630 }
13631 }
13632 return ret;
13633 #endif
13634
13635 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13636 case TARGET_NR_timerfd_gettime64:
13637 {
13638 struct itimerspec its_curr;
13639
13640 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13641
13642 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13643 return -TARGET_EFAULT;
13644 }
13645 }
13646 return ret;
13647 #endif
13648
13649 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13650 case TARGET_NR_timerfd_settime:
13651 {
13652 struct itimerspec its_new, its_old, *p_new;
13653
13654 if (arg3) {
13655 if (target_to_host_itimerspec(&its_new, arg3)) {
13656 return -TARGET_EFAULT;
13657 }
13658 p_new = &its_new;
13659 } else {
13660 p_new = NULL;
13661 }
13662
13663 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13664
13665 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13666 return -TARGET_EFAULT;
13667 }
13668 }
13669 return ret;
13670 #endif
13671
13672 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13673 case TARGET_NR_timerfd_settime64:
13674 {
13675 struct itimerspec its_new, its_old, *p_new;
13676
13677 if (arg3) {
13678 if (target_to_host_itimerspec64(&its_new, arg3)) {
13679 return -TARGET_EFAULT;
13680 }
13681 p_new = &its_new;
13682 } else {
13683 p_new = NULL;
13684 }
13685
13686 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13687
13688 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13689 return -TARGET_EFAULT;
13690 }
13691 }
13692 return ret;
13693 #endif
13694
13695 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13696 case TARGET_NR_ioprio_get:
13697 return get_errno(ioprio_get(arg1, arg2));
13698 #endif
13699
13700 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13701 case TARGET_NR_ioprio_set:
13702 return get_errno(ioprio_set(arg1, arg2, arg3));
13703 #endif
13704
13705 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13706 case TARGET_NR_setns:
13707 return get_errno(setns(arg1, arg2));
13708 #endif
13709 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13710 case TARGET_NR_unshare:
13711 return get_errno(unshare(arg1));
13712 #endif
13713 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13714 case TARGET_NR_kcmp:
13715 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13716 #endif
13717 #ifdef TARGET_NR_swapcontext
13718 case TARGET_NR_swapcontext:
13719 /* PowerPC specific. */
13720 return do_swapcontext(cpu_env, arg1, arg2, arg3);
13721 #endif
13722 #ifdef TARGET_NR_memfd_create
13723 case TARGET_NR_memfd_create:
13724 p = lock_user_string(arg1);
13725 if (!p) {
13726 return -TARGET_EFAULT;
13727 }
13728 ret = get_errno(memfd_create(p, arg2));
13729 fd_trans_unregister(ret);
13730 unlock_user(p, arg1, 0);
13731 return ret;
13732 #endif
13733 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13734 case TARGET_NR_membarrier:
13735 return get_errno(membarrier(arg1, arg2));
13736 #endif
13737
13738 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13739 case TARGET_NR_copy_file_range:
13740 {
13741 loff_t inoff, outoff;
13742 loff_t *pinoff = NULL, *poutoff = NULL;
13743
13744 if (arg2) {
13745 if (get_user_u64(inoff, arg2)) {
13746 return -TARGET_EFAULT;
13747 }
13748 pinoff = &inoff;
13749 }
13750 if (arg4) {
13751 if (get_user_u64(outoff, arg4)) {
13752 return -TARGET_EFAULT;
13753 }
13754 poutoff = &outoff;
13755 }
13756 /* Do not sign-extend the count parameter. */
13757 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13758 (abi_ulong)arg5, arg6));
13759 if (!is_error(ret) && ret > 0) {
13760 if (arg2) {
13761 if (put_user_u64(inoff, arg2)) {
13762 return -TARGET_EFAULT;
13763 }
13764 }
13765 if (arg4) {
13766 if (put_user_u64(outoff, arg4)) {
13767 return -TARGET_EFAULT;
13768 }
13769 }
13770 }
13771 }
13772 return ret;
13773 #endif
13774
13775 #if defined(TARGET_NR_pivot_root)
13776 case TARGET_NR_pivot_root:
13777 {
13778 void *p2;
13779 p = lock_user_string(arg1); /* new_root */
13780 p2 = lock_user_string(arg2); /* put_old */
13781 if (!p || !p2) {
13782 ret = -TARGET_EFAULT;
13783 } else {
13784 ret = get_errno(pivot_root(p, p2));
13785 }
13786 unlock_user(p2, arg2, 0);
13787 unlock_user(p, arg1, 0);
13788 }
13789 return ret;
13790 #endif
13791
13792 #if defined(TARGET_NR_riscv_hwprobe)
13793 case TARGET_NR_riscv_hwprobe:
13794 return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13795 #endif
13796
13797 default:
13798 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13799 return -TARGET_ENOSYS;
13800 }
13801 return ret;
13802 }
13803
13804 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13805 abi_long arg2, abi_long arg3, abi_long arg4,
13806 abi_long arg5, abi_long arg6, abi_long arg7,
13807 abi_long arg8)
13808 {
13809 CPUState *cpu = env_cpu(cpu_env);
13810 abi_long ret;
13811
13812 #ifdef DEBUG_ERESTARTSYS
13813 /* Debug-only code for exercising the syscall-restart code paths
13814 * in the per-architecture cpu main loops: restart every syscall
13815 * the guest makes once before letting it through.
13816 */
13817 {
13818 static bool flag;
13819 flag = !flag;
13820 if (flag) {
13821 return -QEMU_ERESTARTSYS;
13822 }
13823 }
13824 #endif
13825
13826 record_syscall_start(cpu, num, arg1,
13827 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13828
13829 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13830 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13831 }
13832
13833 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13834 arg5, arg6, arg7, arg8);
13835
13836 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13837 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13838 arg3, arg4, arg5, arg6);
13839 }
13840
13841 record_syscall_return(cpu, num, ret);
13842 return ret;
13843 }