]> git.proxmox.com Git - mirror_qemu.git/blob - linux-user/syscall.c
linux-user: Widen target_mmap offset argument to off_t
[mirror_qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146
147 #ifndef CLONE_IO
148 #define CLONE_IO 0x80000000 /* Clone io context */
149 #endif
150
151 /* We can't directly call the host clone syscall, because this will
152 * badly confuse libc (breaking mutexes, for example). So we must
153 * divide clone flags into:
154 * * flag combinations that look like pthread_create()
155 * * flag combinations that look like fork()
156 * * flags we can implement within QEMU itself
157 * * flags we can't support and will return an error for
158 */
159 /* For thread creation, all these flags must be present; for
160 * fork, none must be present.
161 */
162 #define CLONE_THREAD_FLAGS \
163 (CLONE_VM | CLONE_FS | CLONE_FILES | \
164 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165
166 /* These flags are ignored:
167 * CLONE_DETACHED is now ignored by the kernel;
168 * CLONE_IO is just an optimisation hint to the I/O scheduler
169 */
170 #define CLONE_IGNORED_FLAGS \
171 (CLONE_DETACHED | CLONE_IO)
172
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS \
179 (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS \
184 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
185 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186
187 #define CLONE_INVALID_FORK_FLAGS \
188 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189
190 #define CLONE_INVALID_THREAD_FLAGS \
191 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
192 CLONE_IGNORED_FLAGS))
193
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195 * have almost all been allocated. We cannot support any of
196 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198 * The checks against the invalid thread masks above will catch these.
199 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200 */
201
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203 * once. This exercises the codepaths for restart.
204 */
205 //#define DEBUG_ERESTARTSYS
206
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220
221 #define _syscall0(type,name) \
222 static type name (void) \
223 { \
224 return syscall(__NR_##name); \
225 }
226
227 #define _syscall1(type,name,type1,arg1) \
228 static type name (type1 arg1) \
229 { \
230 return syscall(__NR_##name, arg1); \
231 }
232
233 #define _syscall2(type,name,type1,arg1,type2,arg2) \
234 static type name (type1 arg1,type2 arg2) \
235 { \
236 return syscall(__NR_##name, arg1, arg2); \
237 }
238
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
240 static type name (type1 arg1,type2 arg2,type3 arg3) \
241 { \
242 return syscall(__NR_##name, arg1, arg2, arg3); \
243 }
244
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
247 { \
248 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
249 }
250
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
252 type5,arg5) \
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
254 { \
255 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
256 }
257
258
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
260 type5,arg5,type6,arg6) \
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
262 type6 arg6) \
263 { \
264 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
265 }
266
267
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300
301 /* For the 64-bit guest on 32-bit host case we must emulate
302 * getdents using getdents64, because otherwise the host
303 * might hand us back more dirent records than we can fit
304 * into the guest buffer after structure format conversion.
305 * Otherwise we emulate getdents with getdents if the host has it.
306 */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
321 loff_t *, res, uint, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325 siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339 const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343 const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350 unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357 unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360 unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363 uint32_t size;
364 uint32_t sched_policy;
365 uint64_t sched_flags;
366 int32_t sched_nice;
367 uint32_t sched_priority;
368 uint64_t sched_runtime;
369 uint64_t sched_deadline;
370 uint64_t sched_period;
371 uint32_t sched_util_min;
372 uint32_t sched_util_max;
373 };
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376 unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379 unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384 const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387 struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390 const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394 void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396 struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398 struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411 unsigned long, idx1, unsigned long, idx2)
412 #endif
413
414 /*
415 * It is assumed that struct statx is architecture independent.
416 */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419 unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424
425 static const bitmask_transtbl fcntl_flags_tbl[] = {
426 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
427 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
428 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
429 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
430 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
431 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
432 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
433 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
434 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
435 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
436 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
437 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
439 #if defined(O_DIRECT)
440 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
441 #endif
442 #if defined(O_NOATIME)
443 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
444 #endif
445 #if defined(O_CLOEXEC)
446 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
447 #endif
448 #if defined(O_PATH)
449 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
450 #endif
451 #if defined(O_TMPFILE)
452 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
453 #endif
454 /* Don't terminate the list prematurely on 64-bit host+guest. */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457 #endif
458 { 0, 0, 0, 0 }
459 };
460
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467 const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470 const struct timespec times[2], int flags)
471 {
472 errno = ENOSYS;
473 return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
477
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482 const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485 int newfd, const char *new, int flags)
486 {
487 if (flags == 0) {
488 return renameat(oldfd, old, newfd, new);
489 }
490 errno = ENOSYS;
491 return -1;
492 }
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY */
505
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
512 struct host_rlimit64 {
513 uint64_t rlim_cur;
514 uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517 const struct host_rlimit64 *, new_limit,
518 struct host_rlimit64 *, old_limit)
519 #endif
520
521
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527
528 static inline int next_free_host_timer(void)
529 {
530 int k;
531 for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532 if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533 return k;
534 }
535 }
536 return -1;
537 }
538
539 static inline void free_host_timer_slot(int id)
540 {
541 qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
543 #endif
544
545 static inline int host_to_target_errno(int host_errno)
546 {
547 switch (host_errno) {
548 #define E(X) case X: return TARGET_##X;
549 #include "errnos.c.inc"
550 #undef E
551 default:
552 return host_errno;
553 }
554 }
555
556 static inline int target_to_host_errno(int target_errno)
557 {
558 switch (target_errno) {
559 #define E(X) case TARGET_##X: return X;
560 #include "errnos.c.inc"
561 #undef E
562 default:
563 return target_errno;
564 }
565 }
566
567 abi_long get_errno(abi_long ret)
568 {
569 if (ret == -1)
570 return -host_to_target_errno(errno);
571 else
572 return ret;
573 }
574
575 const char *target_strerror(int err)
576 {
577 if (err == QEMU_ERESTARTSYS) {
578 return "To be restarted";
579 }
580 if (err == QEMU_ESIGRETURN) {
581 return "Successful exit from sigreturn";
582 }
583
584 return strerror(target_to_host_errno(err));
585 }
586
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589 int i;
590 uint8_t b;
591 if (usize <= ksize) {
592 return 1;
593 }
594 for (i = ksize; i < usize; i++) {
595 if (get_user_u8(b, addr + i)) {
596 return -TARGET_EFAULT;
597 }
598 if (b != 0) {
599 return 0;
600 }
601 }
602 return 1;
603 }
604
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608 return safe_syscall(__NR_##name); \
609 }
610
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614 return safe_syscall(__NR_##name, arg1); \
615 }
616
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620 return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630 type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637 type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639 type5 arg5) \
640 { \
641 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645 type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647 type5 arg5, type6 arg6) \
648 { \
649 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
651
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655 int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658 struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661 int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664 char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672 struct timespec *, tsp, const sigset_t *, sigmask,
673 size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676 int, maxevents, int, timeout, const sigset_t *, sigmask,
677 size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680 const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684 const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693 unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695 unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697 socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707 const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710 int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713 struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716 defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718 const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723 void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726 void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731 int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735 long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739 unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742 defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744 size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747 defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749 size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753 int, outfd, loff_t *, poutoff, size_t, length,
754 unsigned int, flags)
755 #endif
756
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758 * "third argument might be integer or pointer or not present" behaviour of
759 * the libc function.
760 */
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
764 * use the flock64 struct rather than unsuffixed flock
765 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
766 */
767 #ifdef __NR_fcntl64
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
769 #else
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
771 #endif
772
773 static inline int host_to_target_sock_type(int host_type)
774 {
775 int target_type;
776
777 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
778 case SOCK_DGRAM:
779 target_type = TARGET_SOCK_DGRAM;
780 break;
781 case SOCK_STREAM:
782 target_type = TARGET_SOCK_STREAM;
783 break;
784 default:
785 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
786 break;
787 }
788
789 #if defined(SOCK_CLOEXEC)
790 if (host_type & SOCK_CLOEXEC) {
791 target_type |= TARGET_SOCK_CLOEXEC;
792 }
793 #endif
794
795 #if defined(SOCK_NONBLOCK)
796 if (host_type & SOCK_NONBLOCK) {
797 target_type |= TARGET_SOCK_NONBLOCK;
798 }
799 #endif
800
801 return target_type;
802 }
803
804 static abi_ulong target_brk;
805 static abi_ulong brk_page;
806
807 void target_set_brk(abi_ulong new_brk)
808 {
809 target_brk = TARGET_PAGE_ALIGN(new_brk);
810 brk_page = HOST_PAGE_ALIGN(target_brk);
811 }
812
813 /* do_brk() must return target values and target errnos. */
814 abi_long do_brk(abi_ulong brk_val)
815 {
816 abi_long mapped_addr;
817 abi_ulong new_alloc_size;
818 abi_ulong new_brk, new_host_brk_page;
819
820 /* brk pointers are always untagged */
821
822 /* return old brk value if brk_val unchanged or zero */
823 if (!brk_val || brk_val == target_brk) {
824 return target_brk;
825 }
826
827 new_brk = TARGET_PAGE_ALIGN(brk_val);
828 new_host_brk_page = HOST_PAGE_ALIGN(brk_val);
829
830 /* brk_val and old target_brk might be on the same page */
831 if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
832 if (brk_val > target_brk) {
833 /* empty remaining bytes in (possibly larger) host page */
834 memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
835 }
836 target_brk = brk_val;
837 return target_brk;
838 }
839
840 /* Release heap if necesary */
841 if (new_brk < target_brk) {
842 /* empty remaining bytes in (possibly larger) host page */
843 memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);
844
845 /* free unused host pages and set new brk_page */
846 target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
847 brk_page = new_host_brk_page;
848
849 target_brk = brk_val;
850 return target_brk;
851 }
852
853 /* We need to allocate more memory after the brk... Note that
854 * we don't use MAP_FIXED because that will map over the top of
855 * any existing mapping (like the one with the host libc or qemu
856 * itself); instead we treat "mapped but at wrong address" as
857 * a failure and unmap again.
858 */
859 new_alloc_size = new_host_brk_page - brk_page;
860 if (new_alloc_size) {
861 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
862 PROT_READ|PROT_WRITE,
863 MAP_ANON|MAP_PRIVATE, 0, 0));
864 } else {
865 mapped_addr = brk_page;
866 }
867
868 if (mapped_addr == brk_page) {
869 /* Heap contents are initialized to zero, as for anonymous
870 * mapped pages. Technically the new pages are already
871 * initialized to zero since they *are* anonymous mapped
872 * pages, however we have to take care with the contents that
873 * come from the remaining part of the previous page: it may
874 * contains garbage data due to a previous heap usage (grown
875 * then shrunken). */
876 memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
877
878 target_brk = brk_val;
879 brk_page = new_host_brk_page;
880 return target_brk;
881 } else if (mapped_addr != -1) {
882 /* Mapped but at wrong address, meaning there wasn't actually
883 * enough space for this brk.
884 */
885 target_munmap(mapped_addr, new_alloc_size);
886 mapped_addr = -1;
887 }
888
889 #if defined(TARGET_ALPHA)
890 /* We (partially) emulate OSF/1 on Alpha, which requires we
891 return a proper errno, not an unchanged brk value. */
892 return -TARGET_ENOMEM;
893 #endif
894 /* For everything else, return the previous break. */
895 return target_brk;
896 }
897
898 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
899 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
900 static inline abi_long copy_from_user_fdset(fd_set *fds,
901 abi_ulong target_fds_addr,
902 int n)
903 {
904 int i, nw, j, k;
905 abi_ulong b, *target_fds;
906
907 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
908 if (!(target_fds = lock_user(VERIFY_READ,
909 target_fds_addr,
910 sizeof(abi_ulong) * nw,
911 1)))
912 return -TARGET_EFAULT;
913
914 FD_ZERO(fds);
915 k = 0;
916 for (i = 0; i < nw; i++) {
917 /* grab the abi_ulong */
918 __get_user(b, &target_fds[i]);
919 for (j = 0; j < TARGET_ABI_BITS; j++) {
920 /* check the bit inside the abi_ulong */
921 if ((b >> j) & 1)
922 FD_SET(k, fds);
923 k++;
924 }
925 }
926
927 unlock_user(target_fds, target_fds_addr, 0);
928
929 return 0;
930 }
931
932 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
933 abi_ulong target_fds_addr,
934 int n)
935 {
936 if (target_fds_addr) {
937 if (copy_from_user_fdset(fds, target_fds_addr, n))
938 return -TARGET_EFAULT;
939 *fds_ptr = fds;
940 } else {
941 *fds_ptr = NULL;
942 }
943 return 0;
944 }
945
946 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
947 const fd_set *fds,
948 int n)
949 {
950 int i, nw, j, k;
951 abi_long v;
952 abi_ulong *target_fds;
953
954 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
955 if (!(target_fds = lock_user(VERIFY_WRITE,
956 target_fds_addr,
957 sizeof(abi_ulong) * nw,
958 0)))
959 return -TARGET_EFAULT;
960
961 k = 0;
962 for (i = 0; i < nw; i++) {
963 v = 0;
964 for (j = 0; j < TARGET_ABI_BITS; j++) {
965 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
966 k++;
967 }
968 __put_user(v, &target_fds[i]);
969 }
970
971 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
972
973 return 0;
974 }
975 #endif
976
977 #if defined(__alpha__)
978 #define HOST_HZ 1024
979 #else
980 #define HOST_HZ 100
981 #endif
982
983 static inline abi_long host_to_target_clock_t(long ticks)
984 {
985 #if HOST_HZ == TARGET_HZ
986 return ticks;
987 #else
988 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
989 #endif
990 }
991
992 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
993 const struct rusage *rusage)
994 {
995 struct target_rusage *target_rusage;
996
997 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
998 return -TARGET_EFAULT;
999 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1000 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1001 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1002 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1003 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1004 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1005 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1006 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1007 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1008 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1009 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1010 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1011 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1012 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1013 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1014 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1015 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1016 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1017 unlock_user_struct(target_rusage, target_addr, 1);
1018
1019 return 0;
1020 }
1021
1022 #ifdef TARGET_NR_setrlimit
1023 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1024 {
1025 abi_ulong target_rlim_swap;
1026 rlim_t result;
1027
1028 target_rlim_swap = tswapal(target_rlim);
1029 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1030 return RLIM_INFINITY;
1031
1032 result = target_rlim_swap;
1033 if (target_rlim_swap != (rlim_t)result)
1034 return RLIM_INFINITY;
1035
1036 return result;
1037 }
1038 #endif
1039
1040 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1041 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1042 {
1043 abi_ulong target_rlim_swap;
1044 abi_ulong result;
1045
1046 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1047 target_rlim_swap = TARGET_RLIM_INFINITY;
1048 else
1049 target_rlim_swap = rlim;
1050 result = tswapal(target_rlim_swap);
1051
1052 return result;
1053 }
1054 #endif
1055
1056 static inline int target_to_host_resource(int code)
1057 {
1058 switch (code) {
1059 case TARGET_RLIMIT_AS:
1060 return RLIMIT_AS;
1061 case TARGET_RLIMIT_CORE:
1062 return RLIMIT_CORE;
1063 case TARGET_RLIMIT_CPU:
1064 return RLIMIT_CPU;
1065 case TARGET_RLIMIT_DATA:
1066 return RLIMIT_DATA;
1067 case TARGET_RLIMIT_FSIZE:
1068 return RLIMIT_FSIZE;
1069 case TARGET_RLIMIT_LOCKS:
1070 return RLIMIT_LOCKS;
1071 case TARGET_RLIMIT_MEMLOCK:
1072 return RLIMIT_MEMLOCK;
1073 case TARGET_RLIMIT_MSGQUEUE:
1074 return RLIMIT_MSGQUEUE;
1075 case TARGET_RLIMIT_NICE:
1076 return RLIMIT_NICE;
1077 case TARGET_RLIMIT_NOFILE:
1078 return RLIMIT_NOFILE;
1079 case TARGET_RLIMIT_NPROC:
1080 return RLIMIT_NPROC;
1081 case TARGET_RLIMIT_RSS:
1082 return RLIMIT_RSS;
1083 case TARGET_RLIMIT_RTPRIO:
1084 return RLIMIT_RTPRIO;
1085 #ifdef RLIMIT_RTTIME
1086 case TARGET_RLIMIT_RTTIME:
1087 return RLIMIT_RTTIME;
1088 #endif
1089 case TARGET_RLIMIT_SIGPENDING:
1090 return RLIMIT_SIGPENDING;
1091 case TARGET_RLIMIT_STACK:
1092 return RLIMIT_STACK;
1093 default:
1094 return code;
1095 }
1096 }
1097
1098 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1099 abi_ulong target_tv_addr)
1100 {
1101 struct target_timeval *target_tv;
1102
1103 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1104 return -TARGET_EFAULT;
1105 }
1106
1107 __get_user(tv->tv_sec, &target_tv->tv_sec);
1108 __get_user(tv->tv_usec, &target_tv->tv_usec);
1109
1110 unlock_user_struct(target_tv, target_tv_addr, 0);
1111
1112 return 0;
1113 }
1114
1115 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1116 const struct timeval *tv)
1117 {
1118 struct target_timeval *target_tv;
1119
1120 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1121 return -TARGET_EFAULT;
1122 }
1123
1124 __put_user(tv->tv_sec, &target_tv->tv_sec);
1125 __put_user(tv->tv_usec, &target_tv->tv_usec);
1126
1127 unlock_user_struct(target_tv, target_tv_addr, 1);
1128
1129 return 0;
1130 }
1131
1132 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1133 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1134 abi_ulong target_tv_addr)
1135 {
1136 struct target__kernel_sock_timeval *target_tv;
1137
1138 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1139 return -TARGET_EFAULT;
1140 }
1141
1142 __get_user(tv->tv_sec, &target_tv->tv_sec);
1143 __get_user(tv->tv_usec, &target_tv->tv_usec);
1144
1145 unlock_user_struct(target_tv, target_tv_addr, 0);
1146
1147 return 0;
1148 }
1149 #endif
1150
1151 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1152 const struct timeval *tv)
1153 {
1154 struct target__kernel_sock_timeval *target_tv;
1155
1156 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1157 return -TARGET_EFAULT;
1158 }
1159
1160 __put_user(tv->tv_sec, &target_tv->tv_sec);
1161 __put_user(tv->tv_usec, &target_tv->tv_usec);
1162
1163 unlock_user_struct(target_tv, target_tv_addr, 1);
1164
1165 return 0;
1166 }
1167
1168 #if defined(TARGET_NR_futex) || \
1169 defined(TARGET_NR_rt_sigtimedwait) || \
1170 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1171 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1172 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1173 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1174 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1175 defined(TARGET_NR_timer_settime) || \
1176 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1177 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1178 abi_ulong target_addr)
1179 {
1180 struct target_timespec *target_ts;
1181
1182 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1183 return -TARGET_EFAULT;
1184 }
1185 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1186 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1187 unlock_user_struct(target_ts, target_addr, 0);
1188 return 0;
1189 }
1190 #endif
1191
1192 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1193 defined(TARGET_NR_timer_settime64) || \
1194 defined(TARGET_NR_mq_timedsend_time64) || \
1195 defined(TARGET_NR_mq_timedreceive_time64) || \
1196 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1197 defined(TARGET_NR_clock_nanosleep_time64) || \
1198 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1199 defined(TARGET_NR_utimensat) || \
1200 defined(TARGET_NR_utimensat_time64) || \
1201 defined(TARGET_NR_semtimedop_time64) || \
1202 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1203 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1204 abi_ulong target_addr)
1205 {
1206 struct target__kernel_timespec *target_ts;
1207
1208 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1209 return -TARGET_EFAULT;
1210 }
1211 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1212 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1213 /* in 32bit mode, this drops the padding */
1214 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1215 unlock_user_struct(target_ts, target_addr, 0);
1216 return 0;
1217 }
1218 #endif
1219
1220 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1221 struct timespec *host_ts)
1222 {
1223 struct target_timespec *target_ts;
1224
1225 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1226 return -TARGET_EFAULT;
1227 }
1228 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1229 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1230 unlock_user_struct(target_ts, target_addr, 1);
1231 return 0;
1232 }
1233
1234 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1235 struct timespec *host_ts)
1236 {
1237 struct target__kernel_timespec *target_ts;
1238
1239 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1240 return -TARGET_EFAULT;
1241 }
1242 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1243 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1244 unlock_user_struct(target_ts, target_addr, 1);
1245 return 0;
1246 }
1247
1248 #if defined(TARGET_NR_gettimeofday)
1249 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1250 struct timezone *tz)
1251 {
1252 struct target_timezone *target_tz;
1253
1254 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1255 return -TARGET_EFAULT;
1256 }
1257
1258 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1259 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1260
1261 unlock_user_struct(target_tz, target_tz_addr, 1);
1262
1263 return 0;
1264 }
1265 #endif
1266
1267 #if defined(TARGET_NR_settimeofday)
1268 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1269 abi_ulong target_tz_addr)
1270 {
1271 struct target_timezone *target_tz;
1272
1273 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1274 return -TARGET_EFAULT;
1275 }
1276
1277 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1278 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1279
1280 unlock_user_struct(target_tz, target_tz_addr, 0);
1281
1282 return 0;
1283 }
1284 #endif
1285
1286 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1287 #include <mqueue.h>
1288
1289 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1290 abi_ulong target_mq_attr_addr)
1291 {
1292 struct target_mq_attr *target_mq_attr;
1293
1294 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1295 target_mq_attr_addr, 1))
1296 return -TARGET_EFAULT;
1297
1298 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1299 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1300 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1301 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1302
1303 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1304
1305 return 0;
1306 }
1307
1308 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1309 const struct mq_attr *attr)
1310 {
1311 struct target_mq_attr *target_mq_attr;
1312
1313 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1314 target_mq_attr_addr, 0))
1315 return -TARGET_EFAULT;
1316
1317 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1318 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1319 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1320 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1321
1322 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1323
1324 return 0;
1325 }
1326 #endif
1327
1328 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1329 /* do_select() must return target values and target errnos. */
1330 static abi_long do_select(int n,
1331 abi_ulong rfd_addr, abi_ulong wfd_addr,
1332 abi_ulong efd_addr, abi_ulong target_tv_addr)
1333 {
1334 fd_set rfds, wfds, efds;
1335 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1336 struct timeval tv;
1337 struct timespec ts, *ts_ptr;
1338 abi_long ret;
1339
1340 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1341 if (ret) {
1342 return ret;
1343 }
1344 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1345 if (ret) {
1346 return ret;
1347 }
1348 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1349 if (ret) {
1350 return ret;
1351 }
1352
1353 if (target_tv_addr) {
1354 if (copy_from_user_timeval(&tv, target_tv_addr))
1355 return -TARGET_EFAULT;
1356 ts.tv_sec = tv.tv_sec;
1357 ts.tv_nsec = tv.tv_usec * 1000;
1358 ts_ptr = &ts;
1359 } else {
1360 ts_ptr = NULL;
1361 }
1362
1363 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1364 ts_ptr, NULL));
1365
1366 if (!is_error(ret)) {
1367 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1368 return -TARGET_EFAULT;
1369 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1370 return -TARGET_EFAULT;
1371 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1372 return -TARGET_EFAULT;
1373
1374 if (target_tv_addr) {
1375 tv.tv_sec = ts.tv_sec;
1376 tv.tv_usec = ts.tv_nsec / 1000;
1377 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1378 return -TARGET_EFAULT;
1379 }
1380 }
1381 }
1382
1383 return ret;
1384 }
1385
1386 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1387 static abi_long do_old_select(abi_ulong arg1)
1388 {
1389 struct target_sel_arg_struct *sel;
1390 abi_ulong inp, outp, exp, tvp;
1391 long nsel;
1392
1393 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1394 return -TARGET_EFAULT;
1395 }
1396
1397 nsel = tswapal(sel->n);
1398 inp = tswapal(sel->inp);
1399 outp = tswapal(sel->outp);
1400 exp = tswapal(sel->exp);
1401 tvp = tswapal(sel->tvp);
1402
1403 unlock_user_struct(sel, arg1, 0);
1404
1405 return do_select(nsel, inp, outp, exp, tvp);
1406 }
1407 #endif
1408 #endif
1409
1410 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1411 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1412 abi_long arg4, abi_long arg5, abi_long arg6,
1413 bool time64)
1414 {
1415 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1416 fd_set rfds, wfds, efds;
1417 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1418 struct timespec ts, *ts_ptr;
1419 abi_long ret;
1420
1421 /*
1422 * The 6th arg is actually two args smashed together,
1423 * so we cannot use the C library.
1424 */
1425 struct {
1426 sigset_t *set;
1427 size_t size;
1428 } sig, *sig_ptr;
1429
1430 abi_ulong arg_sigset, arg_sigsize, *arg7;
1431
1432 n = arg1;
1433 rfd_addr = arg2;
1434 wfd_addr = arg3;
1435 efd_addr = arg4;
1436 ts_addr = arg5;
1437
1438 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1439 if (ret) {
1440 return ret;
1441 }
1442 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1443 if (ret) {
1444 return ret;
1445 }
1446 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1447 if (ret) {
1448 return ret;
1449 }
1450
1451 /*
1452 * This takes a timespec, and not a timeval, so we cannot
1453 * use the do_select() helper ...
1454 */
1455 if (ts_addr) {
1456 if (time64) {
1457 if (target_to_host_timespec64(&ts, ts_addr)) {
1458 return -TARGET_EFAULT;
1459 }
1460 } else {
1461 if (target_to_host_timespec(&ts, ts_addr)) {
1462 return -TARGET_EFAULT;
1463 }
1464 }
1465 ts_ptr = &ts;
1466 } else {
1467 ts_ptr = NULL;
1468 }
1469
1470 /* Extract the two packed args for the sigset */
1471 sig_ptr = NULL;
1472 if (arg6) {
1473 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1474 if (!arg7) {
1475 return -TARGET_EFAULT;
1476 }
1477 arg_sigset = tswapal(arg7[0]);
1478 arg_sigsize = tswapal(arg7[1]);
1479 unlock_user(arg7, arg6, 0);
1480
1481 if (arg_sigset) {
1482 ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1483 if (ret != 0) {
1484 return ret;
1485 }
1486 sig_ptr = &sig;
1487 sig.size = SIGSET_T_SIZE;
1488 }
1489 }
1490
1491 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1492 ts_ptr, sig_ptr));
1493
1494 if (sig_ptr) {
1495 finish_sigsuspend_mask(ret);
1496 }
1497
1498 if (!is_error(ret)) {
1499 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1500 return -TARGET_EFAULT;
1501 }
1502 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1503 return -TARGET_EFAULT;
1504 }
1505 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1506 return -TARGET_EFAULT;
1507 }
1508 if (time64) {
1509 if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1510 return -TARGET_EFAULT;
1511 }
1512 } else {
1513 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1514 return -TARGET_EFAULT;
1515 }
1516 }
1517 }
1518 return ret;
1519 }
1520 #endif
1521
1522 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1523 defined(TARGET_NR_ppoll_time64)
1524 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1525 abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1526 {
1527 struct target_pollfd *target_pfd;
1528 unsigned int nfds = arg2;
1529 struct pollfd *pfd;
1530 unsigned int i;
1531 abi_long ret;
1532
1533 pfd = NULL;
1534 target_pfd = NULL;
1535 if (nfds) {
1536 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1537 return -TARGET_EINVAL;
1538 }
1539 target_pfd = lock_user(VERIFY_WRITE, arg1,
1540 sizeof(struct target_pollfd) * nfds, 1);
1541 if (!target_pfd) {
1542 return -TARGET_EFAULT;
1543 }
1544
1545 pfd = alloca(sizeof(struct pollfd) * nfds);
1546 for (i = 0; i < nfds; i++) {
1547 pfd[i].fd = tswap32(target_pfd[i].fd);
1548 pfd[i].events = tswap16(target_pfd[i].events);
1549 }
1550 }
1551 if (ppoll) {
1552 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1553 sigset_t *set = NULL;
1554
1555 if (arg3) {
1556 if (time64) {
1557 if (target_to_host_timespec64(timeout_ts, arg3)) {
1558 unlock_user(target_pfd, arg1, 0);
1559 return -TARGET_EFAULT;
1560 }
1561 } else {
1562 if (target_to_host_timespec(timeout_ts, arg3)) {
1563 unlock_user(target_pfd, arg1, 0);
1564 return -TARGET_EFAULT;
1565 }
1566 }
1567 } else {
1568 timeout_ts = NULL;
1569 }
1570
1571 if (arg4) {
1572 ret = process_sigsuspend_mask(&set, arg4, arg5);
1573 if (ret != 0) {
1574 unlock_user(target_pfd, arg1, 0);
1575 return ret;
1576 }
1577 }
1578
1579 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1580 set, SIGSET_T_SIZE));
1581
1582 if (set) {
1583 finish_sigsuspend_mask(ret);
1584 }
1585 if (!is_error(ret) && arg3) {
1586 if (time64) {
1587 if (host_to_target_timespec64(arg3, timeout_ts)) {
1588 return -TARGET_EFAULT;
1589 }
1590 } else {
1591 if (host_to_target_timespec(arg3, timeout_ts)) {
1592 return -TARGET_EFAULT;
1593 }
1594 }
1595 }
1596 } else {
1597 struct timespec ts, *pts;
1598
1599 if (arg3 >= 0) {
1600 /* Convert ms to secs, ns */
1601 ts.tv_sec = arg3 / 1000;
1602 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1603 pts = &ts;
1604 } else {
1605 /* -ve poll() timeout means "infinite" */
1606 pts = NULL;
1607 }
1608 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1609 }
1610
1611 if (!is_error(ret)) {
1612 for (i = 0; i < nfds; i++) {
1613 target_pfd[i].revents = tswap16(pfd[i].revents);
1614 }
1615 }
1616 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1617 return ret;
1618 }
1619 #endif
1620
1621 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1622 int flags, int is_pipe2)
1623 {
1624 int host_pipe[2];
1625 abi_long ret;
1626 ret = pipe2(host_pipe, flags);
1627
1628 if (is_error(ret))
1629 return get_errno(ret);
1630
1631 /* Several targets have special calling conventions for the original
1632 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1633 if (!is_pipe2) {
1634 #if defined(TARGET_ALPHA)
1635 cpu_env->ir[IR_A4] = host_pipe[1];
1636 return host_pipe[0];
1637 #elif defined(TARGET_MIPS)
1638 cpu_env->active_tc.gpr[3] = host_pipe[1];
1639 return host_pipe[0];
1640 #elif defined(TARGET_SH4)
1641 cpu_env->gregs[1] = host_pipe[1];
1642 return host_pipe[0];
1643 #elif defined(TARGET_SPARC)
1644 cpu_env->regwptr[1] = host_pipe[1];
1645 return host_pipe[0];
1646 #endif
1647 }
1648
1649 if (put_user_s32(host_pipe[0], pipedes)
1650 || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1651 return -TARGET_EFAULT;
1652 return get_errno(ret);
1653 }
1654
1655 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1656 abi_ulong target_addr,
1657 socklen_t len)
1658 {
1659 struct target_ip_mreqn *target_smreqn;
1660
1661 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1662 if (!target_smreqn)
1663 return -TARGET_EFAULT;
1664 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1665 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1666 if (len == sizeof(struct target_ip_mreqn))
1667 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1668 unlock_user(target_smreqn, target_addr, 0);
1669
1670 return 0;
1671 }
1672
1673 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1674 abi_ulong target_addr,
1675 socklen_t len)
1676 {
1677 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1678 sa_family_t sa_family;
1679 struct target_sockaddr *target_saddr;
1680
1681 if (fd_trans_target_to_host_addr(fd)) {
1682 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1683 }
1684
1685 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1686 if (!target_saddr)
1687 return -TARGET_EFAULT;
1688
1689 sa_family = tswap16(target_saddr->sa_family);
1690
1691 /* Oops. The caller might send a incomplete sun_path; sun_path
1692 * must be terminated by \0 (see the manual page), but
1693 * unfortunately it is quite common to specify sockaddr_un
1694 * length as "strlen(x->sun_path)" while it should be
1695 * "strlen(...) + 1". We'll fix that here if needed.
1696 * Linux kernel has a similar feature.
1697 */
1698
1699 if (sa_family == AF_UNIX) {
1700 if (len < unix_maxlen && len > 0) {
1701 char *cp = (char*)target_saddr;
1702
1703 if ( cp[len-1] && !cp[len] )
1704 len++;
1705 }
1706 if (len > unix_maxlen)
1707 len = unix_maxlen;
1708 }
1709
1710 memcpy(addr, target_saddr, len);
1711 addr->sa_family = sa_family;
1712 if (sa_family == AF_NETLINK) {
1713 struct sockaddr_nl *nladdr;
1714
1715 nladdr = (struct sockaddr_nl *)addr;
1716 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1717 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1718 } else if (sa_family == AF_PACKET) {
1719 struct target_sockaddr_ll *lladdr;
1720
1721 lladdr = (struct target_sockaddr_ll *)addr;
1722 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1723 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1724 } else if (sa_family == AF_INET6) {
1725 struct sockaddr_in6 *in6addr;
1726
1727 in6addr = (struct sockaddr_in6 *)addr;
1728 in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1729 }
1730 unlock_user(target_saddr, target_addr, 0);
1731
1732 return 0;
1733 }
1734
1735 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1736 struct sockaddr *addr,
1737 socklen_t len)
1738 {
1739 struct target_sockaddr *target_saddr;
1740
1741 if (len == 0) {
1742 return 0;
1743 }
1744 assert(addr);
1745
1746 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1747 if (!target_saddr)
1748 return -TARGET_EFAULT;
1749 memcpy(target_saddr, addr, len);
1750 if (len >= offsetof(struct target_sockaddr, sa_family) +
1751 sizeof(target_saddr->sa_family)) {
1752 target_saddr->sa_family = tswap16(addr->sa_family);
1753 }
1754 if (addr->sa_family == AF_NETLINK &&
1755 len >= sizeof(struct target_sockaddr_nl)) {
1756 struct target_sockaddr_nl *target_nl =
1757 (struct target_sockaddr_nl *)target_saddr;
1758 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1759 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1760 } else if (addr->sa_family == AF_PACKET) {
1761 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1762 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1763 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1764 } else if (addr->sa_family == AF_INET6 &&
1765 len >= sizeof(struct target_sockaddr_in6)) {
1766 struct target_sockaddr_in6 *target_in6 =
1767 (struct target_sockaddr_in6 *)target_saddr;
1768 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1769 }
1770 unlock_user(target_saddr, target_addr, len);
1771
1772 return 0;
1773 }
1774
1775 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1776 struct target_msghdr *target_msgh)
1777 {
1778 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1779 abi_long msg_controllen;
1780 abi_ulong target_cmsg_addr;
1781 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1782 socklen_t space = 0;
1783
1784 msg_controllen = tswapal(target_msgh->msg_controllen);
1785 if (msg_controllen < sizeof (struct target_cmsghdr))
1786 goto the_end;
1787 target_cmsg_addr = tswapal(target_msgh->msg_control);
1788 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1789 target_cmsg_start = target_cmsg;
1790 if (!target_cmsg)
1791 return -TARGET_EFAULT;
1792
1793 while (cmsg && target_cmsg) {
1794 void *data = CMSG_DATA(cmsg);
1795 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1796
1797 int len = tswapal(target_cmsg->cmsg_len)
1798 - sizeof(struct target_cmsghdr);
1799
1800 space += CMSG_SPACE(len);
1801 if (space > msgh->msg_controllen) {
1802 space -= CMSG_SPACE(len);
1803 /* This is a QEMU bug, since we allocated the payload
1804 * area ourselves (unlike overflow in host-to-target
1805 * conversion, which is just the guest giving us a buffer
1806 * that's too small). It can't happen for the payload types
1807 * we currently support; if it becomes an issue in future
1808 * we would need to improve our allocation strategy to
1809 * something more intelligent than "twice the size of the
1810 * target buffer we're reading from".
1811 */
1812 qemu_log_mask(LOG_UNIMP,
1813 ("Unsupported ancillary data %d/%d: "
1814 "unhandled msg size\n"),
1815 tswap32(target_cmsg->cmsg_level),
1816 tswap32(target_cmsg->cmsg_type));
1817 break;
1818 }
1819
1820 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1821 cmsg->cmsg_level = SOL_SOCKET;
1822 } else {
1823 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1824 }
1825 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1826 cmsg->cmsg_len = CMSG_LEN(len);
1827
1828 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1829 int *fd = (int *)data;
1830 int *target_fd = (int *)target_data;
1831 int i, numfds = len / sizeof(int);
1832
1833 for (i = 0; i < numfds; i++) {
1834 __get_user(fd[i], target_fd + i);
1835 }
1836 } else if (cmsg->cmsg_level == SOL_SOCKET
1837 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1838 struct ucred *cred = (struct ucred *)data;
1839 struct target_ucred *target_cred =
1840 (struct target_ucred *)target_data;
1841
1842 __get_user(cred->pid, &target_cred->pid);
1843 __get_user(cred->uid, &target_cred->uid);
1844 __get_user(cred->gid, &target_cred->gid);
1845 } else if (cmsg->cmsg_level == SOL_ALG) {
1846 uint32_t *dst = (uint32_t *)data;
1847
1848 memcpy(dst, target_data, len);
1849 /* fix endianess of first 32-bit word */
1850 if (len >= sizeof(uint32_t)) {
1851 *dst = tswap32(*dst);
1852 }
1853 } else {
1854 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1855 cmsg->cmsg_level, cmsg->cmsg_type);
1856 memcpy(data, target_data, len);
1857 }
1858
1859 cmsg = CMSG_NXTHDR(msgh, cmsg);
1860 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1861 target_cmsg_start);
1862 }
1863 unlock_user(target_cmsg, target_cmsg_addr, 0);
1864 the_end:
1865 msgh->msg_controllen = space;
1866 return 0;
1867 }
1868
1869 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1870 struct msghdr *msgh)
1871 {
1872 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1873 abi_long msg_controllen;
1874 abi_ulong target_cmsg_addr;
1875 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1876 socklen_t space = 0;
1877
1878 msg_controllen = tswapal(target_msgh->msg_controllen);
1879 if (msg_controllen < sizeof (struct target_cmsghdr))
1880 goto the_end;
1881 target_cmsg_addr = tswapal(target_msgh->msg_control);
1882 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1883 target_cmsg_start = target_cmsg;
1884 if (!target_cmsg)
1885 return -TARGET_EFAULT;
1886
1887 while (cmsg && target_cmsg) {
1888 void *data = CMSG_DATA(cmsg);
1889 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1890
1891 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1892 int tgt_len, tgt_space;
1893
1894 /* We never copy a half-header but may copy half-data;
1895 * this is Linux's behaviour in put_cmsg(). Note that
1896 * truncation here is a guest problem (which we report
1897 * to the guest via the CTRUNC bit), unlike truncation
1898 * in target_to_host_cmsg, which is a QEMU bug.
1899 */
1900 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1901 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1902 break;
1903 }
1904
1905 if (cmsg->cmsg_level == SOL_SOCKET) {
1906 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1907 } else {
1908 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1909 }
1910 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1911
1912 /* Payload types which need a different size of payload on
1913 * the target must adjust tgt_len here.
1914 */
1915 tgt_len = len;
1916 switch (cmsg->cmsg_level) {
1917 case SOL_SOCKET:
1918 switch (cmsg->cmsg_type) {
1919 case SO_TIMESTAMP:
1920 tgt_len = sizeof(struct target_timeval);
1921 break;
1922 default:
1923 break;
1924 }
1925 break;
1926 default:
1927 break;
1928 }
1929
1930 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1931 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1932 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1933 }
1934
1935 /* We must now copy-and-convert len bytes of payload
1936 * into tgt_len bytes of destination space. Bear in mind
1937 * that in both source and destination we may be dealing
1938 * with a truncated value!
1939 */
1940 switch (cmsg->cmsg_level) {
1941 case SOL_SOCKET:
1942 switch (cmsg->cmsg_type) {
1943 case SCM_RIGHTS:
1944 {
1945 int *fd = (int *)data;
1946 int *target_fd = (int *)target_data;
1947 int i, numfds = tgt_len / sizeof(int);
1948
1949 for (i = 0; i < numfds; i++) {
1950 __put_user(fd[i], target_fd + i);
1951 }
1952 break;
1953 }
1954 case SO_TIMESTAMP:
1955 {
1956 struct timeval *tv = (struct timeval *)data;
1957 struct target_timeval *target_tv =
1958 (struct target_timeval *)target_data;
1959
1960 if (len != sizeof(struct timeval) ||
1961 tgt_len != sizeof(struct target_timeval)) {
1962 goto unimplemented;
1963 }
1964
1965 /* copy struct timeval to target */
1966 __put_user(tv->tv_sec, &target_tv->tv_sec);
1967 __put_user(tv->tv_usec, &target_tv->tv_usec);
1968 break;
1969 }
1970 case SCM_CREDENTIALS:
1971 {
1972 struct ucred *cred = (struct ucred *)data;
1973 struct target_ucred *target_cred =
1974 (struct target_ucred *)target_data;
1975
1976 __put_user(cred->pid, &target_cred->pid);
1977 __put_user(cred->uid, &target_cred->uid);
1978 __put_user(cred->gid, &target_cred->gid);
1979 break;
1980 }
1981 default:
1982 goto unimplemented;
1983 }
1984 break;
1985
1986 case SOL_IP:
1987 switch (cmsg->cmsg_type) {
1988 case IP_TTL:
1989 {
1990 uint32_t *v = (uint32_t *)data;
1991 uint32_t *t_int = (uint32_t *)target_data;
1992
1993 if (len != sizeof(uint32_t) ||
1994 tgt_len != sizeof(uint32_t)) {
1995 goto unimplemented;
1996 }
1997 __put_user(*v, t_int);
1998 break;
1999 }
2000 case IP_RECVERR:
2001 {
2002 struct errhdr_t {
2003 struct sock_extended_err ee;
2004 struct sockaddr_in offender;
2005 };
2006 struct errhdr_t *errh = (struct errhdr_t *)data;
2007 struct errhdr_t *target_errh =
2008 (struct errhdr_t *)target_data;
2009
2010 if (len != sizeof(struct errhdr_t) ||
2011 tgt_len != sizeof(struct errhdr_t)) {
2012 goto unimplemented;
2013 }
2014 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2015 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2016 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2017 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2018 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2019 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2020 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2021 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2022 (void *) &errh->offender, sizeof(errh->offender));
2023 break;
2024 }
2025 default:
2026 goto unimplemented;
2027 }
2028 break;
2029
2030 case SOL_IPV6:
2031 switch (cmsg->cmsg_type) {
2032 case IPV6_HOPLIMIT:
2033 {
2034 uint32_t *v = (uint32_t *)data;
2035 uint32_t *t_int = (uint32_t *)target_data;
2036
2037 if (len != sizeof(uint32_t) ||
2038 tgt_len != sizeof(uint32_t)) {
2039 goto unimplemented;
2040 }
2041 __put_user(*v, t_int);
2042 break;
2043 }
2044 case IPV6_RECVERR:
2045 {
2046 struct errhdr6_t {
2047 struct sock_extended_err ee;
2048 struct sockaddr_in6 offender;
2049 };
2050 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2051 struct errhdr6_t *target_errh =
2052 (struct errhdr6_t *)target_data;
2053
2054 if (len != sizeof(struct errhdr6_t) ||
2055 tgt_len != sizeof(struct errhdr6_t)) {
2056 goto unimplemented;
2057 }
2058 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2059 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2060 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2061 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2062 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2063 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2064 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2065 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2066 (void *) &errh->offender, sizeof(errh->offender));
2067 break;
2068 }
2069 default:
2070 goto unimplemented;
2071 }
2072 break;
2073
2074 default:
2075 unimplemented:
2076 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2077 cmsg->cmsg_level, cmsg->cmsg_type);
2078 memcpy(target_data, data, MIN(len, tgt_len));
2079 if (tgt_len > len) {
2080 memset(target_data + len, 0, tgt_len - len);
2081 }
2082 }
2083
2084 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2085 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2086 if (msg_controllen < tgt_space) {
2087 tgt_space = msg_controllen;
2088 }
2089 msg_controllen -= tgt_space;
2090 space += tgt_space;
2091 cmsg = CMSG_NXTHDR(msgh, cmsg);
2092 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2093 target_cmsg_start);
2094 }
2095 unlock_user(target_cmsg, target_cmsg_addr, space);
2096 the_end:
2097 target_msgh->msg_controllen = tswapal(space);
2098 return 0;
2099 }
2100
2101 /* do_setsockopt() Must return target values and target errnos. */
2102 static abi_long do_setsockopt(int sockfd, int level, int optname,
2103 abi_ulong optval_addr, socklen_t optlen)
2104 {
2105 abi_long ret;
2106 int val;
2107 struct ip_mreqn *ip_mreq;
2108 struct ip_mreq_source *ip_mreq_source;
2109
2110 switch(level) {
2111 case SOL_TCP:
2112 case SOL_UDP:
2113 /* TCP and UDP options all take an 'int' value. */
2114 if (optlen < sizeof(uint32_t))
2115 return -TARGET_EINVAL;
2116
2117 if (get_user_u32(val, optval_addr))
2118 return -TARGET_EFAULT;
2119 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2120 break;
2121 case SOL_IP:
2122 switch(optname) {
2123 case IP_TOS:
2124 case IP_TTL:
2125 case IP_HDRINCL:
2126 case IP_ROUTER_ALERT:
2127 case IP_RECVOPTS:
2128 case IP_RETOPTS:
2129 case IP_PKTINFO:
2130 case IP_MTU_DISCOVER:
2131 case IP_RECVERR:
2132 case IP_RECVTTL:
2133 case IP_RECVTOS:
2134 #ifdef IP_FREEBIND
2135 case IP_FREEBIND:
2136 #endif
2137 case IP_MULTICAST_TTL:
2138 case IP_MULTICAST_LOOP:
2139 val = 0;
2140 if (optlen >= sizeof(uint32_t)) {
2141 if (get_user_u32(val, optval_addr))
2142 return -TARGET_EFAULT;
2143 } else if (optlen >= 1) {
2144 if (get_user_u8(val, optval_addr))
2145 return -TARGET_EFAULT;
2146 }
2147 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2148 break;
2149 case IP_ADD_MEMBERSHIP:
2150 case IP_DROP_MEMBERSHIP:
2151 if (optlen < sizeof (struct target_ip_mreq) ||
2152 optlen > sizeof (struct target_ip_mreqn))
2153 return -TARGET_EINVAL;
2154
2155 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2156 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2157 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2158 break;
2159
2160 case IP_BLOCK_SOURCE:
2161 case IP_UNBLOCK_SOURCE:
2162 case IP_ADD_SOURCE_MEMBERSHIP:
2163 case IP_DROP_SOURCE_MEMBERSHIP:
2164 if (optlen != sizeof (struct target_ip_mreq_source))
2165 return -TARGET_EINVAL;
2166
2167 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2168 if (!ip_mreq_source) {
2169 return -TARGET_EFAULT;
2170 }
2171 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2172 unlock_user (ip_mreq_source, optval_addr, 0);
2173 break;
2174
2175 default:
2176 goto unimplemented;
2177 }
2178 break;
2179 case SOL_IPV6:
2180 switch (optname) {
2181 case IPV6_MTU_DISCOVER:
2182 case IPV6_MTU:
2183 case IPV6_V6ONLY:
2184 case IPV6_RECVPKTINFO:
2185 case IPV6_UNICAST_HOPS:
2186 case IPV6_MULTICAST_HOPS:
2187 case IPV6_MULTICAST_LOOP:
2188 case IPV6_RECVERR:
2189 case IPV6_RECVHOPLIMIT:
2190 case IPV6_2292HOPLIMIT:
2191 case IPV6_CHECKSUM:
2192 case IPV6_ADDRFORM:
2193 case IPV6_2292PKTINFO:
2194 case IPV6_RECVTCLASS:
2195 case IPV6_RECVRTHDR:
2196 case IPV6_2292RTHDR:
2197 case IPV6_RECVHOPOPTS:
2198 case IPV6_2292HOPOPTS:
2199 case IPV6_RECVDSTOPTS:
2200 case IPV6_2292DSTOPTS:
2201 case IPV6_TCLASS:
2202 case IPV6_ADDR_PREFERENCES:
2203 #ifdef IPV6_RECVPATHMTU
2204 case IPV6_RECVPATHMTU:
2205 #endif
2206 #ifdef IPV6_TRANSPARENT
2207 case IPV6_TRANSPARENT:
2208 #endif
2209 #ifdef IPV6_FREEBIND
2210 case IPV6_FREEBIND:
2211 #endif
2212 #ifdef IPV6_RECVORIGDSTADDR
2213 case IPV6_RECVORIGDSTADDR:
2214 #endif
2215 val = 0;
2216 if (optlen < sizeof(uint32_t)) {
2217 return -TARGET_EINVAL;
2218 }
2219 if (get_user_u32(val, optval_addr)) {
2220 return -TARGET_EFAULT;
2221 }
2222 ret = get_errno(setsockopt(sockfd, level, optname,
2223 &val, sizeof(val)));
2224 break;
2225 case IPV6_PKTINFO:
2226 {
2227 struct in6_pktinfo pki;
2228
2229 if (optlen < sizeof(pki)) {
2230 return -TARGET_EINVAL;
2231 }
2232
2233 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2234 return -TARGET_EFAULT;
2235 }
2236
2237 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2238
2239 ret = get_errno(setsockopt(sockfd, level, optname,
2240 &pki, sizeof(pki)));
2241 break;
2242 }
2243 case IPV6_ADD_MEMBERSHIP:
2244 case IPV6_DROP_MEMBERSHIP:
2245 {
2246 struct ipv6_mreq ipv6mreq;
2247
2248 if (optlen < sizeof(ipv6mreq)) {
2249 return -TARGET_EINVAL;
2250 }
2251
2252 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2253 return -TARGET_EFAULT;
2254 }
2255
2256 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2257
2258 ret = get_errno(setsockopt(sockfd, level, optname,
2259 &ipv6mreq, sizeof(ipv6mreq)));
2260 break;
2261 }
2262 default:
2263 goto unimplemented;
2264 }
2265 break;
2266 case SOL_ICMPV6:
2267 switch (optname) {
2268 case ICMPV6_FILTER:
2269 {
2270 struct icmp6_filter icmp6f;
2271
2272 if (optlen > sizeof(icmp6f)) {
2273 optlen = sizeof(icmp6f);
2274 }
2275
2276 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2277 return -TARGET_EFAULT;
2278 }
2279
2280 for (val = 0; val < 8; val++) {
2281 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2282 }
2283
2284 ret = get_errno(setsockopt(sockfd, level, optname,
2285 &icmp6f, optlen));
2286 break;
2287 }
2288 default:
2289 goto unimplemented;
2290 }
2291 break;
2292 case SOL_RAW:
2293 switch (optname) {
2294 case ICMP_FILTER:
2295 case IPV6_CHECKSUM:
2296 /* those take an u32 value */
2297 if (optlen < sizeof(uint32_t)) {
2298 return -TARGET_EINVAL;
2299 }
2300
2301 if (get_user_u32(val, optval_addr)) {
2302 return -TARGET_EFAULT;
2303 }
2304 ret = get_errno(setsockopt(sockfd, level, optname,
2305 &val, sizeof(val)));
2306 break;
2307
2308 default:
2309 goto unimplemented;
2310 }
2311 break;
2312 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2313 case SOL_ALG:
2314 switch (optname) {
2315 case ALG_SET_KEY:
2316 {
2317 char *alg_key = g_malloc(optlen);
2318
2319 if (!alg_key) {
2320 return -TARGET_ENOMEM;
2321 }
2322 if (copy_from_user(alg_key, optval_addr, optlen)) {
2323 g_free(alg_key);
2324 return -TARGET_EFAULT;
2325 }
2326 ret = get_errno(setsockopt(sockfd, level, optname,
2327 alg_key, optlen));
2328 g_free(alg_key);
2329 break;
2330 }
2331 case ALG_SET_AEAD_AUTHSIZE:
2332 {
2333 ret = get_errno(setsockopt(sockfd, level, optname,
2334 NULL, optlen));
2335 break;
2336 }
2337 default:
2338 goto unimplemented;
2339 }
2340 break;
2341 #endif
2342 case TARGET_SOL_SOCKET:
2343 switch (optname) {
2344 case TARGET_SO_RCVTIMEO:
2345 {
2346 struct timeval tv;
2347
2348 optname = SO_RCVTIMEO;
2349
2350 set_timeout:
2351 if (optlen != sizeof(struct target_timeval)) {
2352 return -TARGET_EINVAL;
2353 }
2354
2355 if (copy_from_user_timeval(&tv, optval_addr)) {
2356 return -TARGET_EFAULT;
2357 }
2358
2359 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2360 &tv, sizeof(tv)));
2361 return ret;
2362 }
2363 case TARGET_SO_SNDTIMEO:
2364 optname = SO_SNDTIMEO;
2365 goto set_timeout;
2366 case TARGET_SO_ATTACH_FILTER:
2367 {
2368 struct target_sock_fprog *tfprog;
2369 struct target_sock_filter *tfilter;
2370 struct sock_fprog fprog;
2371 struct sock_filter *filter;
2372 int i;
2373
2374 if (optlen != sizeof(*tfprog)) {
2375 return -TARGET_EINVAL;
2376 }
2377 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2378 return -TARGET_EFAULT;
2379 }
2380 if (!lock_user_struct(VERIFY_READ, tfilter,
2381 tswapal(tfprog->filter), 0)) {
2382 unlock_user_struct(tfprog, optval_addr, 1);
2383 return -TARGET_EFAULT;
2384 }
2385
2386 fprog.len = tswap16(tfprog->len);
2387 filter = g_try_new(struct sock_filter, fprog.len);
2388 if (filter == NULL) {
2389 unlock_user_struct(tfilter, tfprog->filter, 1);
2390 unlock_user_struct(tfprog, optval_addr, 1);
2391 return -TARGET_ENOMEM;
2392 }
2393 for (i = 0; i < fprog.len; i++) {
2394 filter[i].code = tswap16(tfilter[i].code);
2395 filter[i].jt = tfilter[i].jt;
2396 filter[i].jf = tfilter[i].jf;
2397 filter[i].k = tswap32(tfilter[i].k);
2398 }
2399 fprog.filter = filter;
2400
2401 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2402 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2403 g_free(filter);
2404
2405 unlock_user_struct(tfilter, tfprog->filter, 1);
2406 unlock_user_struct(tfprog, optval_addr, 1);
2407 return ret;
2408 }
2409 case TARGET_SO_BINDTODEVICE:
2410 {
2411 char *dev_ifname, *addr_ifname;
2412
2413 if (optlen > IFNAMSIZ - 1) {
2414 optlen = IFNAMSIZ - 1;
2415 }
2416 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2417 if (!dev_ifname) {
2418 return -TARGET_EFAULT;
2419 }
2420 optname = SO_BINDTODEVICE;
2421 addr_ifname = alloca(IFNAMSIZ);
2422 memcpy(addr_ifname, dev_ifname, optlen);
2423 addr_ifname[optlen] = 0;
2424 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2425 addr_ifname, optlen));
2426 unlock_user (dev_ifname, optval_addr, 0);
2427 return ret;
2428 }
2429 case TARGET_SO_LINGER:
2430 {
2431 struct linger lg;
2432 struct target_linger *tlg;
2433
2434 if (optlen != sizeof(struct target_linger)) {
2435 return -TARGET_EINVAL;
2436 }
2437 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2438 return -TARGET_EFAULT;
2439 }
2440 __get_user(lg.l_onoff, &tlg->l_onoff);
2441 __get_user(lg.l_linger, &tlg->l_linger);
2442 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2443 &lg, sizeof(lg)));
2444 unlock_user_struct(tlg, optval_addr, 0);
2445 return ret;
2446 }
2447 /* Options with 'int' argument. */
2448 case TARGET_SO_DEBUG:
2449 optname = SO_DEBUG;
2450 break;
2451 case TARGET_SO_REUSEADDR:
2452 optname = SO_REUSEADDR;
2453 break;
2454 #ifdef SO_REUSEPORT
2455 case TARGET_SO_REUSEPORT:
2456 optname = SO_REUSEPORT;
2457 break;
2458 #endif
2459 case TARGET_SO_TYPE:
2460 optname = SO_TYPE;
2461 break;
2462 case TARGET_SO_ERROR:
2463 optname = SO_ERROR;
2464 break;
2465 case TARGET_SO_DONTROUTE:
2466 optname = SO_DONTROUTE;
2467 break;
2468 case TARGET_SO_BROADCAST:
2469 optname = SO_BROADCAST;
2470 break;
2471 case TARGET_SO_SNDBUF:
2472 optname = SO_SNDBUF;
2473 break;
2474 case TARGET_SO_SNDBUFFORCE:
2475 optname = SO_SNDBUFFORCE;
2476 break;
2477 case TARGET_SO_RCVBUF:
2478 optname = SO_RCVBUF;
2479 break;
2480 case TARGET_SO_RCVBUFFORCE:
2481 optname = SO_RCVBUFFORCE;
2482 break;
2483 case TARGET_SO_KEEPALIVE:
2484 optname = SO_KEEPALIVE;
2485 break;
2486 case TARGET_SO_OOBINLINE:
2487 optname = SO_OOBINLINE;
2488 break;
2489 case TARGET_SO_NO_CHECK:
2490 optname = SO_NO_CHECK;
2491 break;
2492 case TARGET_SO_PRIORITY:
2493 optname = SO_PRIORITY;
2494 break;
2495 #ifdef SO_BSDCOMPAT
2496 case TARGET_SO_BSDCOMPAT:
2497 optname = SO_BSDCOMPAT;
2498 break;
2499 #endif
2500 case TARGET_SO_PASSCRED:
2501 optname = SO_PASSCRED;
2502 break;
2503 case TARGET_SO_PASSSEC:
2504 optname = SO_PASSSEC;
2505 break;
2506 case TARGET_SO_TIMESTAMP:
2507 optname = SO_TIMESTAMP;
2508 break;
2509 case TARGET_SO_RCVLOWAT:
2510 optname = SO_RCVLOWAT;
2511 break;
2512 default:
2513 goto unimplemented;
2514 }
2515 if (optlen < sizeof(uint32_t))
2516 return -TARGET_EINVAL;
2517
2518 if (get_user_u32(val, optval_addr))
2519 return -TARGET_EFAULT;
2520 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2521 break;
2522 #ifdef SOL_NETLINK
2523 case SOL_NETLINK:
2524 switch (optname) {
2525 case NETLINK_PKTINFO:
2526 case NETLINK_ADD_MEMBERSHIP:
2527 case NETLINK_DROP_MEMBERSHIP:
2528 case NETLINK_BROADCAST_ERROR:
2529 case NETLINK_NO_ENOBUFS:
2530 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2531 case NETLINK_LISTEN_ALL_NSID:
2532 case NETLINK_CAP_ACK:
2533 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2534 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2535 case NETLINK_EXT_ACK:
2536 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2537 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2538 case NETLINK_GET_STRICT_CHK:
2539 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2540 break;
2541 default:
2542 goto unimplemented;
2543 }
2544 val = 0;
2545 if (optlen < sizeof(uint32_t)) {
2546 return -TARGET_EINVAL;
2547 }
2548 if (get_user_u32(val, optval_addr)) {
2549 return -TARGET_EFAULT;
2550 }
2551 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2552 sizeof(val)));
2553 break;
2554 #endif /* SOL_NETLINK */
2555 default:
2556 unimplemented:
2557 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2558 level, optname);
2559 ret = -TARGET_ENOPROTOOPT;
2560 }
2561 return ret;
2562 }
2563
2564 /* do_getsockopt() Must return target values and target errnos. */
2565 static abi_long do_getsockopt(int sockfd, int level, int optname,
2566 abi_ulong optval_addr, abi_ulong optlen)
2567 {
2568 abi_long ret;
2569 int len, val;
2570 socklen_t lv;
2571
2572 switch(level) {
2573 case TARGET_SOL_SOCKET:
2574 level = SOL_SOCKET;
2575 switch (optname) {
2576 /* These don't just return a single integer */
2577 case TARGET_SO_PEERNAME:
2578 goto unimplemented;
2579 case TARGET_SO_RCVTIMEO: {
2580 struct timeval tv;
2581 socklen_t tvlen;
2582
2583 optname = SO_RCVTIMEO;
2584
2585 get_timeout:
2586 if (get_user_u32(len, optlen)) {
2587 return -TARGET_EFAULT;
2588 }
2589 if (len < 0) {
2590 return -TARGET_EINVAL;
2591 }
2592
2593 tvlen = sizeof(tv);
2594 ret = get_errno(getsockopt(sockfd, level, optname,
2595 &tv, &tvlen));
2596 if (ret < 0) {
2597 return ret;
2598 }
2599 if (len > sizeof(struct target_timeval)) {
2600 len = sizeof(struct target_timeval);
2601 }
2602 if (copy_to_user_timeval(optval_addr, &tv)) {
2603 return -TARGET_EFAULT;
2604 }
2605 if (put_user_u32(len, optlen)) {
2606 return -TARGET_EFAULT;
2607 }
2608 break;
2609 }
2610 case TARGET_SO_SNDTIMEO:
2611 optname = SO_SNDTIMEO;
2612 goto get_timeout;
2613 case TARGET_SO_PEERCRED: {
2614 struct ucred cr;
2615 socklen_t crlen;
2616 struct target_ucred *tcr;
2617
2618 if (get_user_u32(len, optlen)) {
2619 return -TARGET_EFAULT;
2620 }
2621 if (len < 0) {
2622 return -TARGET_EINVAL;
2623 }
2624
2625 crlen = sizeof(cr);
2626 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2627 &cr, &crlen));
2628 if (ret < 0) {
2629 return ret;
2630 }
2631 if (len > crlen) {
2632 len = crlen;
2633 }
2634 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2635 return -TARGET_EFAULT;
2636 }
2637 __put_user(cr.pid, &tcr->pid);
2638 __put_user(cr.uid, &tcr->uid);
2639 __put_user(cr.gid, &tcr->gid);
2640 unlock_user_struct(tcr, optval_addr, 1);
2641 if (put_user_u32(len, optlen)) {
2642 return -TARGET_EFAULT;
2643 }
2644 break;
2645 }
2646 case TARGET_SO_PEERSEC: {
2647 char *name;
2648
2649 if (get_user_u32(len, optlen)) {
2650 return -TARGET_EFAULT;
2651 }
2652 if (len < 0) {
2653 return -TARGET_EINVAL;
2654 }
2655 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2656 if (!name) {
2657 return -TARGET_EFAULT;
2658 }
2659 lv = len;
2660 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2661 name, &lv));
2662 if (put_user_u32(lv, optlen)) {
2663 ret = -TARGET_EFAULT;
2664 }
2665 unlock_user(name, optval_addr, lv);
2666 break;
2667 }
2668 case TARGET_SO_LINGER:
2669 {
2670 struct linger lg;
2671 socklen_t lglen;
2672 struct target_linger *tlg;
2673
2674 if (get_user_u32(len, optlen)) {
2675 return -TARGET_EFAULT;
2676 }
2677 if (len < 0) {
2678 return -TARGET_EINVAL;
2679 }
2680
2681 lglen = sizeof(lg);
2682 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2683 &lg, &lglen));
2684 if (ret < 0) {
2685 return ret;
2686 }
2687 if (len > lglen) {
2688 len = lglen;
2689 }
2690 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2691 return -TARGET_EFAULT;
2692 }
2693 __put_user(lg.l_onoff, &tlg->l_onoff);
2694 __put_user(lg.l_linger, &tlg->l_linger);
2695 unlock_user_struct(tlg, optval_addr, 1);
2696 if (put_user_u32(len, optlen)) {
2697 return -TARGET_EFAULT;
2698 }
2699 break;
2700 }
2701 /* Options with 'int' argument. */
2702 case TARGET_SO_DEBUG:
2703 optname = SO_DEBUG;
2704 goto int_case;
2705 case TARGET_SO_REUSEADDR:
2706 optname = SO_REUSEADDR;
2707 goto int_case;
2708 #ifdef SO_REUSEPORT
2709 case TARGET_SO_REUSEPORT:
2710 optname = SO_REUSEPORT;
2711 goto int_case;
2712 #endif
2713 case TARGET_SO_TYPE:
2714 optname = SO_TYPE;
2715 goto int_case;
2716 case TARGET_SO_ERROR:
2717 optname = SO_ERROR;
2718 goto int_case;
2719 case TARGET_SO_DONTROUTE:
2720 optname = SO_DONTROUTE;
2721 goto int_case;
2722 case TARGET_SO_BROADCAST:
2723 optname = SO_BROADCAST;
2724 goto int_case;
2725 case TARGET_SO_SNDBUF:
2726 optname = SO_SNDBUF;
2727 goto int_case;
2728 case TARGET_SO_RCVBUF:
2729 optname = SO_RCVBUF;
2730 goto int_case;
2731 case TARGET_SO_KEEPALIVE:
2732 optname = SO_KEEPALIVE;
2733 goto int_case;
2734 case TARGET_SO_OOBINLINE:
2735 optname = SO_OOBINLINE;
2736 goto int_case;
2737 case TARGET_SO_NO_CHECK:
2738 optname = SO_NO_CHECK;
2739 goto int_case;
2740 case TARGET_SO_PRIORITY:
2741 optname = SO_PRIORITY;
2742 goto int_case;
2743 #ifdef SO_BSDCOMPAT
2744 case TARGET_SO_BSDCOMPAT:
2745 optname = SO_BSDCOMPAT;
2746 goto int_case;
2747 #endif
2748 case TARGET_SO_PASSCRED:
2749 optname = SO_PASSCRED;
2750 goto int_case;
2751 case TARGET_SO_TIMESTAMP:
2752 optname = SO_TIMESTAMP;
2753 goto int_case;
2754 case TARGET_SO_RCVLOWAT:
2755 optname = SO_RCVLOWAT;
2756 goto int_case;
2757 case TARGET_SO_ACCEPTCONN:
2758 optname = SO_ACCEPTCONN;
2759 goto int_case;
2760 case TARGET_SO_PROTOCOL:
2761 optname = SO_PROTOCOL;
2762 goto int_case;
2763 case TARGET_SO_DOMAIN:
2764 optname = SO_DOMAIN;
2765 goto int_case;
2766 default:
2767 goto int_case;
2768 }
2769 break;
2770 case SOL_TCP:
2771 case SOL_UDP:
2772 /* TCP and UDP options all take an 'int' value. */
2773 int_case:
2774 if (get_user_u32(len, optlen))
2775 return -TARGET_EFAULT;
2776 if (len < 0)
2777 return -TARGET_EINVAL;
2778 lv = sizeof(lv);
2779 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2780 if (ret < 0)
2781 return ret;
2782 switch (optname) {
2783 case SO_TYPE:
2784 val = host_to_target_sock_type(val);
2785 break;
2786 case SO_ERROR:
2787 val = host_to_target_errno(val);
2788 break;
2789 }
2790 if (len > lv)
2791 len = lv;
2792 if (len == 4) {
2793 if (put_user_u32(val, optval_addr))
2794 return -TARGET_EFAULT;
2795 } else {
2796 if (put_user_u8(val, optval_addr))
2797 return -TARGET_EFAULT;
2798 }
2799 if (put_user_u32(len, optlen))
2800 return -TARGET_EFAULT;
2801 break;
2802 case SOL_IP:
2803 switch(optname) {
2804 case IP_TOS:
2805 case IP_TTL:
2806 case IP_HDRINCL:
2807 case IP_ROUTER_ALERT:
2808 case IP_RECVOPTS:
2809 case IP_RETOPTS:
2810 case IP_PKTINFO:
2811 case IP_MTU_DISCOVER:
2812 case IP_RECVERR:
2813 case IP_RECVTOS:
2814 #ifdef IP_FREEBIND
2815 case IP_FREEBIND:
2816 #endif
2817 case IP_MULTICAST_TTL:
2818 case IP_MULTICAST_LOOP:
2819 if (get_user_u32(len, optlen))
2820 return -TARGET_EFAULT;
2821 if (len < 0)
2822 return -TARGET_EINVAL;
2823 lv = sizeof(lv);
2824 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2825 if (ret < 0)
2826 return ret;
2827 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2828 len = 1;
2829 if (put_user_u32(len, optlen)
2830 || put_user_u8(val, optval_addr))
2831 return -TARGET_EFAULT;
2832 } else {
2833 if (len > sizeof(int))
2834 len = sizeof(int);
2835 if (put_user_u32(len, optlen)
2836 || put_user_u32(val, optval_addr))
2837 return -TARGET_EFAULT;
2838 }
2839 break;
2840 default:
2841 ret = -TARGET_ENOPROTOOPT;
2842 break;
2843 }
2844 break;
2845 case SOL_IPV6:
2846 switch (optname) {
2847 case IPV6_MTU_DISCOVER:
2848 case IPV6_MTU:
2849 case IPV6_V6ONLY:
2850 case IPV6_RECVPKTINFO:
2851 case IPV6_UNICAST_HOPS:
2852 case IPV6_MULTICAST_HOPS:
2853 case IPV6_MULTICAST_LOOP:
2854 case IPV6_RECVERR:
2855 case IPV6_RECVHOPLIMIT:
2856 case IPV6_2292HOPLIMIT:
2857 case IPV6_CHECKSUM:
2858 case IPV6_ADDRFORM:
2859 case IPV6_2292PKTINFO:
2860 case IPV6_RECVTCLASS:
2861 case IPV6_RECVRTHDR:
2862 case IPV6_2292RTHDR:
2863 case IPV6_RECVHOPOPTS:
2864 case IPV6_2292HOPOPTS:
2865 case IPV6_RECVDSTOPTS:
2866 case IPV6_2292DSTOPTS:
2867 case IPV6_TCLASS:
2868 case IPV6_ADDR_PREFERENCES:
2869 #ifdef IPV6_RECVPATHMTU
2870 case IPV6_RECVPATHMTU:
2871 #endif
2872 #ifdef IPV6_TRANSPARENT
2873 case IPV6_TRANSPARENT:
2874 #endif
2875 #ifdef IPV6_FREEBIND
2876 case IPV6_FREEBIND:
2877 #endif
2878 #ifdef IPV6_RECVORIGDSTADDR
2879 case IPV6_RECVORIGDSTADDR:
2880 #endif
2881 if (get_user_u32(len, optlen))
2882 return -TARGET_EFAULT;
2883 if (len < 0)
2884 return -TARGET_EINVAL;
2885 lv = sizeof(lv);
2886 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2887 if (ret < 0)
2888 return ret;
2889 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2890 len = 1;
2891 if (put_user_u32(len, optlen)
2892 || put_user_u8(val, optval_addr))
2893 return -TARGET_EFAULT;
2894 } else {
2895 if (len > sizeof(int))
2896 len = sizeof(int);
2897 if (put_user_u32(len, optlen)
2898 || put_user_u32(val, optval_addr))
2899 return -TARGET_EFAULT;
2900 }
2901 break;
2902 default:
2903 ret = -TARGET_ENOPROTOOPT;
2904 break;
2905 }
2906 break;
2907 #ifdef SOL_NETLINK
2908 case SOL_NETLINK:
2909 switch (optname) {
2910 case NETLINK_PKTINFO:
2911 case NETLINK_BROADCAST_ERROR:
2912 case NETLINK_NO_ENOBUFS:
2913 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2914 case NETLINK_LISTEN_ALL_NSID:
2915 case NETLINK_CAP_ACK:
2916 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2917 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2918 case NETLINK_EXT_ACK:
2919 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2920 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2921 case NETLINK_GET_STRICT_CHK:
2922 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2923 if (get_user_u32(len, optlen)) {
2924 return -TARGET_EFAULT;
2925 }
2926 if (len != sizeof(val)) {
2927 return -TARGET_EINVAL;
2928 }
2929 lv = len;
2930 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2931 if (ret < 0) {
2932 return ret;
2933 }
2934 if (put_user_u32(lv, optlen)
2935 || put_user_u32(val, optval_addr)) {
2936 return -TARGET_EFAULT;
2937 }
2938 break;
2939 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2940 case NETLINK_LIST_MEMBERSHIPS:
2941 {
2942 uint32_t *results;
2943 int i;
2944 if (get_user_u32(len, optlen)) {
2945 return -TARGET_EFAULT;
2946 }
2947 if (len < 0) {
2948 return -TARGET_EINVAL;
2949 }
2950 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2951 if (!results && len > 0) {
2952 return -TARGET_EFAULT;
2953 }
2954 lv = len;
2955 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2956 if (ret < 0) {
2957 unlock_user(results, optval_addr, 0);
2958 return ret;
2959 }
2960 /* swap host endianess to target endianess. */
2961 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2962 results[i] = tswap32(results[i]);
2963 }
2964 if (put_user_u32(lv, optlen)) {
2965 return -TARGET_EFAULT;
2966 }
2967 unlock_user(results, optval_addr, 0);
2968 break;
2969 }
2970 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2971 default:
2972 goto unimplemented;
2973 }
2974 break;
2975 #endif /* SOL_NETLINK */
2976 default:
2977 unimplemented:
2978 qemu_log_mask(LOG_UNIMP,
2979 "getsockopt level=%d optname=%d not yet supported\n",
2980 level, optname);
2981 ret = -TARGET_EOPNOTSUPP;
2982 break;
2983 }
2984 return ret;
2985 }
2986
2987 /* Convert target low/high pair representing file offset into the host
2988 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2989 * as the kernel doesn't handle them either.
2990 */
2991 static void target_to_host_low_high(abi_ulong tlow,
2992 abi_ulong thigh,
2993 unsigned long *hlow,
2994 unsigned long *hhigh)
2995 {
2996 uint64_t off = tlow |
2997 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2998 TARGET_LONG_BITS / 2;
2999
3000 *hlow = off;
3001 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3002 }
3003
3004 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3005 abi_ulong count, int copy)
3006 {
3007 struct target_iovec *target_vec;
3008 struct iovec *vec;
3009 abi_ulong total_len, max_len;
3010 int i;
3011 int err = 0;
3012 bool bad_address = false;
3013
3014 if (count == 0) {
3015 errno = 0;
3016 return NULL;
3017 }
3018 if (count > IOV_MAX) {
3019 errno = EINVAL;
3020 return NULL;
3021 }
3022
3023 vec = g_try_new0(struct iovec, count);
3024 if (vec == NULL) {
3025 errno = ENOMEM;
3026 return NULL;
3027 }
3028
3029 target_vec = lock_user(VERIFY_READ, target_addr,
3030 count * sizeof(struct target_iovec), 1);
3031 if (target_vec == NULL) {
3032 err = EFAULT;
3033 goto fail2;
3034 }
3035
3036 /* ??? If host page size > target page size, this will result in a
3037 value larger than what we can actually support. */
3038 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3039 total_len = 0;
3040
3041 for (i = 0; i < count; i++) {
3042 abi_ulong base = tswapal(target_vec[i].iov_base);
3043 abi_long len = tswapal(target_vec[i].iov_len);
3044
3045 if (len < 0) {
3046 err = EINVAL;
3047 goto fail;
3048 } else if (len == 0) {
3049 /* Zero length pointer is ignored. */
3050 vec[i].iov_base = 0;
3051 } else {
3052 vec[i].iov_base = lock_user(type, base, len, copy);
3053 /* If the first buffer pointer is bad, this is a fault. But
3054 * subsequent bad buffers will result in a partial write; this
3055 * is realized by filling the vector with null pointers and
3056 * zero lengths. */
3057 if (!vec[i].iov_base) {
3058 if (i == 0) {
3059 err = EFAULT;
3060 goto fail;
3061 } else {
3062 bad_address = true;
3063 }
3064 }
3065 if (bad_address) {
3066 len = 0;
3067 }
3068 if (len > max_len - total_len) {
3069 len = max_len - total_len;
3070 }
3071 }
3072 vec[i].iov_len = len;
3073 total_len += len;
3074 }
3075
3076 unlock_user(target_vec, target_addr, 0);
3077 return vec;
3078
3079 fail:
3080 while (--i >= 0) {
3081 if (tswapal(target_vec[i].iov_len) > 0) {
3082 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3083 }
3084 }
3085 unlock_user(target_vec, target_addr, 0);
3086 fail2:
3087 g_free(vec);
3088 errno = err;
3089 return NULL;
3090 }
3091
3092 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3093 abi_ulong count, int copy)
3094 {
3095 struct target_iovec *target_vec;
3096 int i;
3097
3098 target_vec = lock_user(VERIFY_READ, target_addr,
3099 count * sizeof(struct target_iovec), 1);
3100 if (target_vec) {
3101 for (i = 0; i < count; i++) {
3102 abi_ulong base = tswapal(target_vec[i].iov_base);
3103 abi_long len = tswapal(target_vec[i].iov_len);
3104 if (len < 0) {
3105 break;
3106 }
3107 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3108 }
3109 unlock_user(target_vec, target_addr, 0);
3110 }
3111
3112 g_free(vec);
3113 }
3114
3115 static inline int target_to_host_sock_type(int *type)
3116 {
3117 int host_type = 0;
3118 int target_type = *type;
3119
3120 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3121 case TARGET_SOCK_DGRAM:
3122 host_type = SOCK_DGRAM;
3123 break;
3124 case TARGET_SOCK_STREAM:
3125 host_type = SOCK_STREAM;
3126 break;
3127 default:
3128 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3129 break;
3130 }
3131 if (target_type & TARGET_SOCK_CLOEXEC) {
3132 #if defined(SOCK_CLOEXEC)
3133 host_type |= SOCK_CLOEXEC;
3134 #else
3135 return -TARGET_EINVAL;
3136 #endif
3137 }
3138 if (target_type & TARGET_SOCK_NONBLOCK) {
3139 #if defined(SOCK_NONBLOCK)
3140 host_type |= SOCK_NONBLOCK;
3141 #elif !defined(O_NONBLOCK)
3142 return -TARGET_EINVAL;
3143 #endif
3144 }
3145 *type = host_type;
3146 return 0;
3147 }
3148
3149 /* Try to emulate socket type flags after socket creation. */
3150 static int sock_flags_fixup(int fd, int target_type)
3151 {
3152 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3153 if (target_type & TARGET_SOCK_NONBLOCK) {
3154 int flags = fcntl(fd, F_GETFL);
3155 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3156 close(fd);
3157 return -TARGET_EINVAL;
3158 }
3159 }
3160 #endif
3161 return fd;
3162 }
3163
3164 /* do_socket() Must return target values and target errnos. */
3165 static abi_long do_socket(int domain, int type, int protocol)
3166 {
3167 int target_type = type;
3168 int ret;
3169
3170 ret = target_to_host_sock_type(&type);
3171 if (ret) {
3172 return ret;
3173 }
3174
3175 if (domain == PF_NETLINK && !(
3176 #ifdef CONFIG_RTNETLINK
3177 protocol == NETLINK_ROUTE ||
3178 #endif
3179 protocol == NETLINK_KOBJECT_UEVENT ||
3180 protocol == NETLINK_AUDIT)) {
3181 return -TARGET_EPROTONOSUPPORT;
3182 }
3183
3184 if (domain == AF_PACKET ||
3185 (domain == AF_INET && type == SOCK_PACKET)) {
3186 protocol = tswap16(protocol);
3187 }
3188
3189 ret = get_errno(socket(domain, type, protocol));
3190 if (ret >= 0) {
3191 ret = sock_flags_fixup(ret, target_type);
3192 if (type == SOCK_PACKET) {
3193 /* Manage an obsolete case :
3194 * if socket type is SOCK_PACKET, bind by name
3195 */
3196 fd_trans_register(ret, &target_packet_trans);
3197 } else if (domain == PF_NETLINK) {
3198 switch (protocol) {
3199 #ifdef CONFIG_RTNETLINK
3200 case NETLINK_ROUTE:
3201 fd_trans_register(ret, &target_netlink_route_trans);
3202 break;
3203 #endif
3204 case NETLINK_KOBJECT_UEVENT:
3205 /* nothing to do: messages are strings */
3206 break;
3207 case NETLINK_AUDIT:
3208 fd_trans_register(ret, &target_netlink_audit_trans);
3209 break;
3210 default:
3211 g_assert_not_reached();
3212 }
3213 }
3214 }
3215 return ret;
3216 }
3217
3218 /* do_bind() Must return target values and target errnos. */
3219 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3220 socklen_t addrlen)
3221 {
3222 void *addr;
3223 abi_long ret;
3224
3225 if ((int)addrlen < 0) {
3226 return -TARGET_EINVAL;
3227 }
3228
3229 addr = alloca(addrlen+1);
3230
3231 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3232 if (ret)
3233 return ret;
3234
3235 return get_errno(bind(sockfd, addr, addrlen));
3236 }
3237
3238 /* do_connect() Must return target values and target errnos. */
3239 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3240 socklen_t addrlen)
3241 {
3242 void *addr;
3243 abi_long ret;
3244
3245 if ((int)addrlen < 0) {
3246 return -TARGET_EINVAL;
3247 }
3248
3249 addr = alloca(addrlen+1);
3250
3251 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3252 if (ret)
3253 return ret;
3254
3255 return get_errno(safe_connect(sockfd, addr, addrlen));
3256 }
3257
3258 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3259 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3260 int flags, int send)
3261 {
3262 abi_long ret, len;
3263 struct msghdr msg;
3264 abi_ulong count;
3265 struct iovec *vec;
3266 abi_ulong target_vec;
3267
3268 if (msgp->msg_name) {
3269 msg.msg_namelen = tswap32(msgp->msg_namelen);
3270 msg.msg_name = alloca(msg.msg_namelen+1);
3271 ret = target_to_host_sockaddr(fd, msg.msg_name,
3272 tswapal(msgp->msg_name),
3273 msg.msg_namelen);
3274 if (ret == -TARGET_EFAULT) {
3275 /* For connected sockets msg_name and msg_namelen must
3276 * be ignored, so returning EFAULT immediately is wrong.
3277 * Instead, pass a bad msg_name to the host kernel, and
3278 * let it decide whether to return EFAULT or not.
3279 */
3280 msg.msg_name = (void *)-1;
3281 } else if (ret) {
3282 goto out2;
3283 }
3284 } else {
3285 msg.msg_name = NULL;
3286 msg.msg_namelen = 0;
3287 }
3288 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3289 msg.msg_control = alloca(msg.msg_controllen);
3290 memset(msg.msg_control, 0, msg.msg_controllen);
3291
3292 msg.msg_flags = tswap32(msgp->msg_flags);
3293
3294 count = tswapal(msgp->msg_iovlen);
3295 target_vec = tswapal(msgp->msg_iov);
3296
3297 if (count > IOV_MAX) {
3298 /* sendrcvmsg returns a different errno for this condition than
3299 * readv/writev, so we must catch it here before lock_iovec() does.
3300 */
3301 ret = -TARGET_EMSGSIZE;
3302 goto out2;
3303 }
3304
3305 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3306 target_vec, count, send);
3307 if (vec == NULL) {
3308 ret = -host_to_target_errno(errno);
3309 /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3310 if (!send || ret) {
3311 goto out2;
3312 }
3313 }
3314 msg.msg_iovlen = count;
3315 msg.msg_iov = vec;
3316
3317 if (send) {
3318 if (fd_trans_target_to_host_data(fd)) {
3319 void *host_msg;
3320
3321 host_msg = g_malloc(msg.msg_iov->iov_len);
3322 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3323 ret = fd_trans_target_to_host_data(fd)(host_msg,
3324 msg.msg_iov->iov_len);
3325 if (ret >= 0) {
3326 msg.msg_iov->iov_base = host_msg;
3327 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3328 }
3329 g_free(host_msg);
3330 } else {
3331 ret = target_to_host_cmsg(&msg, msgp);
3332 if (ret == 0) {
3333 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3334 }
3335 }
3336 } else {
3337 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3338 if (!is_error(ret)) {
3339 len = ret;
3340 if (fd_trans_host_to_target_data(fd)) {
3341 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3342 MIN(msg.msg_iov->iov_len, len));
3343 }
3344 if (!is_error(ret)) {
3345 ret = host_to_target_cmsg(msgp, &msg);
3346 }
3347 if (!is_error(ret)) {
3348 msgp->msg_namelen = tswap32(msg.msg_namelen);
3349 msgp->msg_flags = tswap32(msg.msg_flags);
3350 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3351 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3352 msg.msg_name, msg.msg_namelen);
3353 if (ret) {
3354 goto out;
3355 }
3356 }
3357
3358 ret = len;
3359 }
3360 }
3361 }
3362
3363 out:
3364 if (vec) {
3365 unlock_iovec(vec, target_vec, count, !send);
3366 }
3367 out2:
3368 return ret;
3369 }
3370
3371 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3372 int flags, int send)
3373 {
3374 abi_long ret;
3375 struct target_msghdr *msgp;
3376
3377 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3378 msgp,
3379 target_msg,
3380 send ? 1 : 0)) {
3381 return -TARGET_EFAULT;
3382 }
3383 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3384 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3385 return ret;
3386 }
3387
3388 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3389 * so it might not have this *mmsg-specific flag either.
3390 */
3391 #ifndef MSG_WAITFORONE
3392 #define MSG_WAITFORONE 0x10000
3393 #endif
3394
3395 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3396 unsigned int vlen, unsigned int flags,
3397 int send)
3398 {
3399 struct target_mmsghdr *mmsgp;
3400 abi_long ret = 0;
3401 int i;
3402
3403 if (vlen > UIO_MAXIOV) {
3404 vlen = UIO_MAXIOV;
3405 }
3406
3407 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3408 if (!mmsgp) {
3409 return -TARGET_EFAULT;
3410 }
3411
3412 for (i = 0; i < vlen; i++) {
3413 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3414 if (is_error(ret)) {
3415 break;
3416 }
3417 mmsgp[i].msg_len = tswap32(ret);
3418 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3419 if (flags & MSG_WAITFORONE) {
3420 flags |= MSG_DONTWAIT;
3421 }
3422 }
3423
3424 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3425
3426 /* Return number of datagrams sent if we sent any at all;
3427 * otherwise return the error.
3428 */
3429 if (i) {
3430 return i;
3431 }
3432 return ret;
3433 }
3434
3435 /* do_accept4() Must return target values and target errnos. */
3436 static abi_long do_accept4(int fd, abi_ulong target_addr,
3437 abi_ulong target_addrlen_addr, int flags)
3438 {
3439 socklen_t addrlen, ret_addrlen;
3440 void *addr;
3441 abi_long ret;
3442 int host_flags;
3443
3444 if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3445 return -TARGET_EINVAL;
3446 }
3447
3448 host_flags = 0;
3449 if (flags & TARGET_SOCK_NONBLOCK) {
3450 host_flags |= SOCK_NONBLOCK;
3451 }
3452 if (flags & TARGET_SOCK_CLOEXEC) {
3453 host_flags |= SOCK_CLOEXEC;
3454 }
3455
3456 if (target_addr == 0) {
3457 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3458 }
3459
3460 /* linux returns EFAULT if addrlen pointer is invalid */
3461 if (get_user_u32(addrlen, target_addrlen_addr))
3462 return -TARGET_EFAULT;
3463
3464 if ((int)addrlen < 0) {
3465 return -TARGET_EINVAL;
3466 }
3467
3468 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3469 return -TARGET_EFAULT;
3470 }
3471
3472 addr = alloca(addrlen);
3473
3474 ret_addrlen = addrlen;
3475 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3476 if (!is_error(ret)) {
3477 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3478 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3479 ret = -TARGET_EFAULT;
3480 }
3481 }
3482 return ret;
3483 }
3484
3485 /* do_getpeername() Must return target values and target errnos. */
3486 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3487 abi_ulong target_addrlen_addr)
3488 {
3489 socklen_t addrlen, ret_addrlen;
3490 void *addr;
3491 abi_long ret;
3492
3493 if (get_user_u32(addrlen, target_addrlen_addr))
3494 return -TARGET_EFAULT;
3495
3496 if ((int)addrlen < 0) {
3497 return -TARGET_EINVAL;
3498 }
3499
3500 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3501 return -TARGET_EFAULT;
3502 }
3503
3504 addr = alloca(addrlen);
3505
3506 ret_addrlen = addrlen;
3507 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3508 if (!is_error(ret)) {
3509 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3510 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3511 ret = -TARGET_EFAULT;
3512 }
3513 }
3514 return ret;
3515 }
3516
3517 /* do_getsockname() Must return target values and target errnos. */
3518 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3519 abi_ulong target_addrlen_addr)
3520 {
3521 socklen_t addrlen, ret_addrlen;
3522 void *addr;
3523 abi_long ret;
3524
3525 if (get_user_u32(addrlen, target_addrlen_addr))
3526 return -TARGET_EFAULT;
3527
3528 if ((int)addrlen < 0) {
3529 return -TARGET_EINVAL;
3530 }
3531
3532 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3533 return -TARGET_EFAULT;
3534 }
3535
3536 addr = alloca(addrlen);
3537
3538 ret_addrlen = addrlen;
3539 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3540 if (!is_error(ret)) {
3541 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3542 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3543 ret = -TARGET_EFAULT;
3544 }
3545 }
3546 return ret;
3547 }
3548
3549 /* do_socketpair() Must return target values and target errnos. */
3550 static abi_long do_socketpair(int domain, int type, int protocol,
3551 abi_ulong target_tab_addr)
3552 {
3553 int tab[2];
3554 abi_long ret;
3555
3556 target_to_host_sock_type(&type);
3557
3558 ret = get_errno(socketpair(domain, type, protocol, tab));
3559 if (!is_error(ret)) {
3560 if (put_user_s32(tab[0], target_tab_addr)
3561 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3562 ret = -TARGET_EFAULT;
3563 }
3564 return ret;
3565 }
3566
3567 /* do_sendto() Must return target values and target errnos. */
3568 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3569 abi_ulong target_addr, socklen_t addrlen)
3570 {
3571 void *addr;
3572 void *host_msg;
3573 void *copy_msg = NULL;
3574 abi_long ret;
3575
3576 if ((int)addrlen < 0) {
3577 return -TARGET_EINVAL;
3578 }
3579
3580 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3581 if (!host_msg)
3582 return -TARGET_EFAULT;
3583 if (fd_trans_target_to_host_data(fd)) {
3584 copy_msg = host_msg;
3585 host_msg = g_malloc(len);
3586 memcpy(host_msg, copy_msg, len);
3587 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3588 if (ret < 0) {
3589 goto fail;
3590 }
3591 }
3592 if (target_addr) {
3593 addr = alloca(addrlen+1);
3594 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3595 if (ret) {
3596 goto fail;
3597 }
3598 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3599 } else {
3600 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3601 }
3602 fail:
3603 if (copy_msg) {
3604 g_free(host_msg);
3605 host_msg = copy_msg;
3606 }
3607 unlock_user(host_msg, msg, 0);
3608 return ret;
3609 }
3610
3611 /* do_recvfrom() Must return target values and target errnos. */
3612 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3613 abi_ulong target_addr,
3614 abi_ulong target_addrlen)
3615 {
3616 socklen_t addrlen, ret_addrlen;
3617 void *addr;
3618 void *host_msg;
3619 abi_long ret;
3620
3621 if (!msg) {
3622 host_msg = NULL;
3623 } else {
3624 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3625 if (!host_msg) {
3626 return -TARGET_EFAULT;
3627 }
3628 }
3629 if (target_addr) {
3630 if (get_user_u32(addrlen, target_addrlen)) {
3631 ret = -TARGET_EFAULT;
3632 goto fail;
3633 }
3634 if ((int)addrlen < 0) {
3635 ret = -TARGET_EINVAL;
3636 goto fail;
3637 }
3638 addr = alloca(addrlen);
3639 ret_addrlen = addrlen;
3640 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3641 addr, &ret_addrlen));
3642 } else {
3643 addr = NULL; /* To keep compiler quiet. */
3644 addrlen = 0; /* To keep compiler quiet. */
3645 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3646 }
3647 if (!is_error(ret)) {
3648 if (fd_trans_host_to_target_data(fd)) {
3649 abi_long trans;
3650 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3651 if (is_error(trans)) {
3652 ret = trans;
3653 goto fail;
3654 }
3655 }
3656 if (target_addr) {
3657 host_to_target_sockaddr(target_addr, addr,
3658 MIN(addrlen, ret_addrlen));
3659 if (put_user_u32(ret_addrlen, target_addrlen)) {
3660 ret = -TARGET_EFAULT;
3661 goto fail;
3662 }
3663 }
3664 unlock_user(host_msg, msg, len);
3665 } else {
3666 fail:
3667 unlock_user(host_msg, msg, 0);
3668 }
3669 return ret;
3670 }
3671
3672 #ifdef TARGET_NR_socketcall
3673 /* do_socketcall() must return target values and target errnos. */
3674 static abi_long do_socketcall(int num, abi_ulong vptr)
3675 {
3676 static const unsigned nargs[] = { /* number of arguments per operation */
3677 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3678 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3679 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3680 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3681 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3682 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3683 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3684 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3685 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3686 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3687 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3688 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3689 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3690 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3691 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3692 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3693 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3694 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3695 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3696 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3697 };
3698 abi_long a[6]; /* max 6 args */
3699 unsigned i;
3700
3701 /* check the range of the first argument num */
3702 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3703 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3704 return -TARGET_EINVAL;
3705 }
3706 /* ensure we have space for args */
3707 if (nargs[num] > ARRAY_SIZE(a)) {
3708 return -TARGET_EINVAL;
3709 }
3710 /* collect the arguments in a[] according to nargs[] */
3711 for (i = 0; i < nargs[num]; ++i) {
3712 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3713 return -TARGET_EFAULT;
3714 }
3715 }
3716 /* now when we have the args, invoke the appropriate underlying function */
3717 switch (num) {
3718 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3719 return do_socket(a[0], a[1], a[2]);
3720 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3721 return do_bind(a[0], a[1], a[2]);
3722 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3723 return do_connect(a[0], a[1], a[2]);
3724 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3725 return get_errno(listen(a[0], a[1]));
3726 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3727 return do_accept4(a[0], a[1], a[2], 0);
3728 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3729 return do_getsockname(a[0], a[1], a[2]);
3730 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3731 return do_getpeername(a[0], a[1], a[2]);
3732 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3733 return do_socketpair(a[0], a[1], a[2], a[3]);
3734 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3735 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3736 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3737 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3738 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3739 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3740 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3741 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3742 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3743 return get_errno(shutdown(a[0], a[1]));
3744 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3745 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3746 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3747 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3748 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3749 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3750 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3751 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3752 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3753 return do_accept4(a[0], a[1], a[2], a[3]);
3754 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3755 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3756 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3757 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3758 default:
3759 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3760 return -TARGET_EINVAL;
3761 }
3762 }
3763 #endif
3764
3765 #define N_SHM_REGIONS 32
3766
3767 static struct shm_region {
3768 abi_ulong start;
3769 abi_ulong size;
3770 bool in_use;
3771 } shm_regions[N_SHM_REGIONS];
3772
3773 #ifndef TARGET_SEMID64_DS
3774 /* asm-generic version of this struct */
3775 struct target_semid64_ds
3776 {
3777 struct target_ipc_perm sem_perm;
3778 abi_ulong sem_otime;
3779 #if TARGET_ABI_BITS == 32
3780 abi_ulong __unused1;
3781 #endif
3782 abi_ulong sem_ctime;
3783 #if TARGET_ABI_BITS == 32
3784 abi_ulong __unused2;
3785 #endif
3786 abi_ulong sem_nsems;
3787 abi_ulong __unused3;
3788 abi_ulong __unused4;
3789 };
3790 #endif
3791
3792 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3793 abi_ulong target_addr)
3794 {
3795 struct target_ipc_perm *target_ip;
3796 struct target_semid64_ds *target_sd;
3797
3798 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3799 return -TARGET_EFAULT;
3800 target_ip = &(target_sd->sem_perm);
3801 host_ip->__key = tswap32(target_ip->__key);
3802 host_ip->uid = tswap32(target_ip->uid);
3803 host_ip->gid = tswap32(target_ip->gid);
3804 host_ip->cuid = tswap32(target_ip->cuid);
3805 host_ip->cgid = tswap32(target_ip->cgid);
3806 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3807 host_ip->mode = tswap32(target_ip->mode);
3808 #else
3809 host_ip->mode = tswap16(target_ip->mode);
3810 #endif
3811 #if defined(TARGET_PPC)
3812 host_ip->__seq = tswap32(target_ip->__seq);
3813 #else
3814 host_ip->__seq = tswap16(target_ip->__seq);
3815 #endif
3816 unlock_user_struct(target_sd, target_addr, 0);
3817 return 0;
3818 }
3819
3820 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3821 struct ipc_perm *host_ip)
3822 {
3823 struct target_ipc_perm *target_ip;
3824 struct target_semid64_ds *target_sd;
3825
3826 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3827 return -TARGET_EFAULT;
3828 target_ip = &(target_sd->sem_perm);
3829 target_ip->__key = tswap32(host_ip->__key);
3830 target_ip->uid = tswap32(host_ip->uid);
3831 target_ip->gid = tswap32(host_ip->gid);
3832 target_ip->cuid = tswap32(host_ip->cuid);
3833 target_ip->cgid = tswap32(host_ip->cgid);
3834 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3835 target_ip->mode = tswap32(host_ip->mode);
3836 #else
3837 target_ip->mode = tswap16(host_ip->mode);
3838 #endif
3839 #if defined(TARGET_PPC)
3840 target_ip->__seq = tswap32(host_ip->__seq);
3841 #else
3842 target_ip->__seq = tswap16(host_ip->__seq);
3843 #endif
3844 unlock_user_struct(target_sd, target_addr, 1);
3845 return 0;
3846 }
3847
3848 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3849 abi_ulong target_addr)
3850 {
3851 struct target_semid64_ds *target_sd;
3852
3853 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3854 return -TARGET_EFAULT;
3855 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3856 return -TARGET_EFAULT;
3857 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3858 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3859 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3860 unlock_user_struct(target_sd, target_addr, 0);
3861 return 0;
3862 }
3863
3864 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3865 struct semid_ds *host_sd)
3866 {
3867 struct target_semid64_ds *target_sd;
3868
3869 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3870 return -TARGET_EFAULT;
3871 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3872 return -TARGET_EFAULT;
3873 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3874 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3875 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3876 unlock_user_struct(target_sd, target_addr, 1);
3877 return 0;
3878 }
3879
3880 struct target_seminfo {
3881 int semmap;
3882 int semmni;
3883 int semmns;
3884 int semmnu;
3885 int semmsl;
3886 int semopm;
3887 int semume;
3888 int semusz;
3889 int semvmx;
3890 int semaem;
3891 };
3892
3893 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3894 struct seminfo *host_seminfo)
3895 {
3896 struct target_seminfo *target_seminfo;
3897 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3898 return -TARGET_EFAULT;
3899 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3900 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3901 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3902 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3903 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3904 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3905 __put_user(host_seminfo->semume, &target_seminfo->semume);
3906 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3907 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3908 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3909 unlock_user_struct(target_seminfo, target_addr, 1);
3910 return 0;
3911 }
3912
3913 union semun {
3914 int val;
3915 struct semid_ds *buf;
3916 unsigned short *array;
3917 struct seminfo *__buf;
3918 };
3919
3920 union target_semun {
3921 int val;
3922 abi_ulong buf;
3923 abi_ulong array;
3924 abi_ulong __buf;
3925 };
3926
3927 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3928 abi_ulong target_addr)
3929 {
3930 int nsems;
3931 unsigned short *array;
3932 union semun semun;
3933 struct semid_ds semid_ds;
3934 int i, ret;
3935
3936 semun.buf = &semid_ds;
3937
3938 ret = semctl(semid, 0, IPC_STAT, semun);
3939 if (ret == -1)
3940 return get_errno(ret);
3941
3942 nsems = semid_ds.sem_nsems;
3943
3944 *host_array = g_try_new(unsigned short, nsems);
3945 if (!*host_array) {
3946 return -TARGET_ENOMEM;
3947 }
3948 array = lock_user(VERIFY_READ, target_addr,
3949 nsems*sizeof(unsigned short), 1);
3950 if (!array) {
3951 g_free(*host_array);
3952 return -TARGET_EFAULT;
3953 }
3954
3955 for(i=0; i<nsems; i++) {
3956 __get_user((*host_array)[i], &array[i]);
3957 }
3958 unlock_user(array, target_addr, 0);
3959
3960 return 0;
3961 }
3962
3963 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3964 unsigned short **host_array)
3965 {
3966 int nsems;
3967 unsigned short *array;
3968 union semun semun;
3969 struct semid_ds semid_ds;
3970 int i, ret;
3971
3972 semun.buf = &semid_ds;
3973
3974 ret = semctl(semid, 0, IPC_STAT, semun);
3975 if (ret == -1)
3976 return get_errno(ret);
3977
3978 nsems = semid_ds.sem_nsems;
3979
3980 array = lock_user(VERIFY_WRITE, target_addr,
3981 nsems*sizeof(unsigned short), 0);
3982 if (!array)
3983 return -TARGET_EFAULT;
3984
3985 for(i=0; i<nsems; i++) {
3986 __put_user((*host_array)[i], &array[i]);
3987 }
3988 g_free(*host_array);
3989 unlock_user(array, target_addr, 1);
3990
3991 return 0;
3992 }
3993
3994 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3995 abi_ulong target_arg)
3996 {
3997 union target_semun target_su = { .buf = target_arg };
3998 union semun arg;
3999 struct semid_ds dsarg;
4000 unsigned short *array = NULL;
4001 struct seminfo seminfo;
4002 abi_long ret = -TARGET_EINVAL;
4003 abi_long err;
4004 cmd &= 0xff;
4005
4006 switch( cmd ) {
4007 case GETVAL:
4008 case SETVAL:
4009 /* In 64 bit cross-endian situations, we will erroneously pick up
4010 * the wrong half of the union for the "val" element. To rectify
4011 * this, the entire 8-byte structure is byteswapped, followed by
4012 * a swap of the 4 byte val field. In other cases, the data is
4013 * already in proper host byte order. */
4014 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4015 target_su.buf = tswapal(target_su.buf);
4016 arg.val = tswap32(target_su.val);
4017 } else {
4018 arg.val = target_su.val;
4019 }
4020 ret = get_errno(semctl(semid, semnum, cmd, arg));
4021 break;
4022 case GETALL:
4023 case SETALL:
4024 err = target_to_host_semarray(semid, &array, target_su.array);
4025 if (err)
4026 return err;
4027 arg.array = array;
4028 ret = get_errno(semctl(semid, semnum, cmd, arg));
4029 err = host_to_target_semarray(semid, target_su.array, &array);
4030 if (err)
4031 return err;
4032 break;
4033 case IPC_STAT:
4034 case IPC_SET:
4035 case SEM_STAT:
4036 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4037 if (err)
4038 return err;
4039 arg.buf = &dsarg;
4040 ret = get_errno(semctl(semid, semnum, cmd, arg));
4041 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4042 if (err)
4043 return err;
4044 break;
4045 case IPC_INFO:
4046 case SEM_INFO:
4047 arg.__buf = &seminfo;
4048 ret = get_errno(semctl(semid, semnum, cmd, arg));
4049 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4050 if (err)
4051 return err;
4052 break;
4053 case IPC_RMID:
4054 case GETPID:
4055 case GETNCNT:
4056 case GETZCNT:
4057 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4058 break;
4059 }
4060
4061 return ret;
4062 }
4063
4064 struct target_sembuf {
4065 unsigned short sem_num;
4066 short sem_op;
4067 short sem_flg;
4068 };
4069
4070 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4071 abi_ulong target_addr,
4072 unsigned nsops)
4073 {
4074 struct target_sembuf *target_sembuf;
4075 int i;
4076
4077 target_sembuf = lock_user(VERIFY_READ, target_addr,
4078 nsops*sizeof(struct target_sembuf), 1);
4079 if (!target_sembuf)
4080 return -TARGET_EFAULT;
4081
4082 for(i=0; i<nsops; i++) {
4083 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4084 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4085 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4086 }
4087
4088 unlock_user(target_sembuf, target_addr, 0);
4089
4090 return 0;
4091 }
4092
4093 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4094 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4095
4096 /*
4097 * This macro is required to handle the s390 variants, which passes the
4098 * arguments in a different order than default.
4099 */
4100 #ifdef __s390x__
4101 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4102 (__nsops), (__timeout), (__sops)
4103 #else
4104 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4105 (__nsops), 0, (__sops), (__timeout)
4106 #endif
4107
4108 static inline abi_long do_semtimedop(int semid,
4109 abi_long ptr,
4110 unsigned nsops,
4111 abi_long timeout, bool time64)
4112 {
4113 struct sembuf *sops;
4114 struct timespec ts, *pts = NULL;
4115 abi_long ret;
4116
4117 if (timeout) {
4118 pts = &ts;
4119 if (time64) {
4120 if (target_to_host_timespec64(pts, timeout)) {
4121 return -TARGET_EFAULT;
4122 }
4123 } else {
4124 if (target_to_host_timespec(pts, timeout)) {
4125 return -TARGET_EFAULT;
4126 }
4127 }
4128 }
4129
4130 if (nsops > TARGET_SEMOPM) {
4131 return -TARGET_E2BIG;
4132 }
4133
4134 sops = g_new(struct sembuf, nsops);
4135
4136 if (target_to_host_sembuf(sops, ptr, nsops)) {
4137 g_free(sops);
4138 return -TARGET_EFAULT;
4139 }
4140
4141 ret = -TARGET_ENOSYS;
4142 #ifdef __NR_semtimedop
4143 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4144 #endif
4145 #ifdef __NR_ipc
4146 if (ret == -TARGET_ENOSYS) {
4147 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4148 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4149 }
4150 #endif
4151 g_free(sops);
4152 return ret;
4153 }
4154 #endif
4155
4156 struct target_msqid_ds
4157 {
4158 struct target_ipc_perm msg_perm;
4159 abi_ulong msg_stime;
4160 #if TARGET_ABI_BITS == 32
4161 abi_ulong __unused1;
4162 #endif
4163 abi_ulong msg_rtime;
4164 #if TARGET_ABI_BITS == 32
4165 abi_ulong __unused2;
4166 #endif
4167 abi_ulong msg_ctime;
4168 #if TARGET_ABI_BITS == 32
4169 abi_ulong __unused3;
4170 #endif
4171 abi_ulong __msg_cbytes;
4172 abi_ulong msg_qnum;
4173 abi_ulong msg_qbytes;
4174 abi_ulong msg_lspid;
4175 abi_ulong msg_lrpid;
4176 abi_ulong __unused4;
4177 abi_ulong __unused5;
4178 };
4179
4180 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4181 abi_ulong target_addr)
4182 {
4183 struct target_msqid_ds *target_md;
4184
4185 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4186 return -TARGET_EFAULT;
4187 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4188 return -TARGET_EFAULT;
4189 host_md->msg_stime = tswapal(target_md->msg_stime);
4190 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4191 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4192 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4193 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4194 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4195 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4196 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4197 unlock_user_struct(target_md, target_addr, 0);
4198 return 0;
4199 }
4200
4201 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4202 struct msqid_ds *host_md)
4203 {
4204 struct target_msqid_ds *target_md;
4205
4206 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4207 return -TARGET_EFAULT;
4208 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4209 return -TARGET_EFAULT;
4210 target_md->msg_stime = tswapal(host_md->msg_stime);
4211 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4212 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4213 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4214 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4215 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4216 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4217 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4218 unlock_user_struct(target_md, target_addr, 1);
4219 return 0;
4220 }
4221
4222 struct target_msginfo {
4223 int msgpool;
4224 int msgmap;
4225 int msgmax;
4226 int msgmnb;
4227 int msgmni;
4228 int msgssz;
4229 int msgtql;
4230 unsigned short int msgseg;
4231 };
4232
4233 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4234 struct msginfo *host_msginfo)
4235 {
4236 struct target_msginfo *target_msginfo;
4237 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4238 return -TARGET_EFAULT;
4239 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4240 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4241 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4242 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4243 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4244 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4245 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4246 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4247 unlock_user_struct(target_msginfo, target_addr, 1);
4248 return 0;
4249 }
4250
4251 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4252 {
4253 struct msqid_ds dsarg;
4254 struct msginfo msginfo;
4255 abi_long ret = -TARGET_EINVAL;
4256
4257 cmd &= 0xff;
4258
4259 switch (cmd) {
4260 case IPC_STAT:
4261 case IPC_SET:
4262 case MSG_STAT:
4263 if (target_to_host_msqid_ds(&dsarg,ptr))
4264 return -TARGET_EFAULT;
4265 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4266 if (host_to_target_msqid_ds(ptr,&dsarg))
4267 return -TARGET_EFAULT;
4268 break;
4269 case IPC_RMID:
4270 ret = get_errno(msgctl(msgid, cmd, NULL));
4271 break;
4272 case IPC_INFO:
4273 case MSG_INFO:
4274 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4275 if (host_to_target_msginfo(ptr, &msginfo))
4276 return -TARGET_EFAULT;
4277 break;
4278 }
4279
4280 return ret;
4281 }
4282
4283 struct target_msgbuf {
4284 abi_long mtype;
4285 char mtext[1];
4286 };
4287
4288 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4289 ssize_t msgsz, int msgflg)
4290 {
4291 struct target_msgbuf *target_mb;
4292 struct msgbuf *host_mb;
4293 abi_long ret = 0;
4294
4295 if (msgsz < 0) {
4296 return -TARGET_EINVAL;
4297 }
4298
4299 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4300 return -TARGET_EFAULT;
4301 host_mb = g_try_malloc(msgsz + sizeof(long));
4302 if (!host_mb) {
4303 unlock_user_struct(target_mb, msgp, 0);
4304 return -TARGET_ENOMEM;
4305 }
4306 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4307 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4308 ret = -TARGET_ENOSYS;
4309 #ifdef __NR_msgsnd
4310 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4311 #endif
4312 #ifdef __NR_ipc
4313 if (ret == -TARGET_ENOSYS) {
4314 #ifdef __s390x__
4315 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4316 host_mb));
4317 #else
4318 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4319 host_mb, 0));
4320 #endif
4321 }
4322 #endif
4323 g_free(host_mb);
4324 unlock_user_struct(target_mb, msgp, 0);
4325
4326 return ret;
4327 }
4328
4329 #ifdef __NR_ipc
4330 #if defined(__sparc__)
4331 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4332 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4333 #elif defined(__s390x__)
4334 /* The s390 sys_ipc variant has only five parameters. */
4335 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4336 ((long int[]){(long int)__msgp, __msgtyp})
4337 #else
4338 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4339 ((long int[]){(long int)__msgp, __msgtyp}), 0
4340 #endif
4341 #endif
4342
4343 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4344 ssize_t msgsz, abi_long msgtyp,
4345 int msgflg)
4346 {
4347 struct target_msgbuf *target_mb;
4348 char *target_mtext;
4349 struct msgbuf *host_mb;
4350 abi_long ret = 0;
4351
4352 if (msgsz < 0) {
4353 return -TARGET_EINVAL;
4354 }
4355
4356 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4357 return -TARGET_EFAULT;
4358
4359 host_mb = g_try_malloc(msgsz + sizeof(long));
4360 if (!host_mb) {
4361 ret = -TARGET_ENOMEM;
4362 goto end;
4363 }
4364 ret = -TARGET_ENOSYS;
4365 #ifdef __NR_msgrcv
4366 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4367 #endif
4368 #ifdef __NR_ipc
4369 if (ret == -TARGET_ENOSYS) {
4370 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4371 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4372 }
4373 #endif
4374
4375 if (ret > 0) {
4376 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4377 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4378 if (!target_mtext) {
4379 ret = -TARGET_EFAULT;
4380 goto end;
4381 }
4382 memcpy(target_mb->mtext, host_mb->mtext, ret);
4383 unlock_user(target_mtext, target_mtext_addr, ret);
4384 }
4385
4386 target_mb->mtype = tswapal(host_mb->mtype);
4387
4388 end:
4389 if (target_mb)
4390 unlock_user_struct(target_mb, msgp, 1);
4391 g_free(host_mb);
4392 return ret;
4393 }
4394
4395 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4396 abi_ulong target_addr)
4397 {
4398 struct target_shmid_ds *target_sd;
4399
4400 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4401 return -TARGET_EFAULT;
4402 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4403 return -TARGET_EFAULT;
4404 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4405 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4406 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4407 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4408 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4409 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4410 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4411 unlock_user_struct(target_sd, target_addr, 0);
4412 return 0;
4413 }
4414
4415 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4416 struct shmid_ds *host_sd)
4417 {
4418 struct target_shmid_ds *target_sd;
4419
4420 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4421 return -TARGET_EFAULT;
4422 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4423 return -TARGET_EFAULT;
4424 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4425 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4426 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4427 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4428 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4429 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4430 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4431 unlock_user_struct(target_sd, target_addr, 1);
4432 return 0;
4433 }
4434
4435 struct target_shminfo {
4436 abi_ulong shmmax;
4437 abi_ulong shmmin;
4438 abi_ulong shmmni;
4439 abi_ulong shmseg;
4440 abi_ulong shmall;
4441 };
4442
4443 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4444 struct shminfo *host_shminfo)
4445 {
4446 struct target_shminfo *target_shminfo;
4447 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4448 return -TARGET_EFAULT;
4449 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4450 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4451 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4452 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4453 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4454 unlock_user_struct(target_shminfo, target_addr, 1);
4455 return 0;
4456 }
4457
4458 struct target_shm_info {
4459 int used_ids;
4460 abi_ulong shm_tot;
4461 abi_ulong shm_rss;
4462 abi_ulong shm_swp;
4463 abi_ulong swap_attempts;
4464 abi_ulong swap_successes;
4465 };
4466
4467 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4468 struct shm_info *host_shm_info)
4469 {
4470 struct target_shm_info *target_shm_info;
4471 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4472 return -TARGET_EFAULT;
4473 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4474 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4475 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4476 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4477 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4478 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4479 unlock_user_struct(target_shm_info, target_addr, 1);
4480 return 0;
4481 }
4482
4483 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4484 {
4485 struct shmid_ds dsarg;
4486 struct shminfo shminfo;
4487 struct shm_info shm_info;
4488 abi_long ret = -TARGET_EINVAL;
4489
4490 cmd &= 0xff;
4491
4492 switch(cmd) {
4493 case IPC_STAT:
4494 case IPC_SET:
4495 case SHM_STAT:
4496 if (target_to_host_shmid_ds(&dsarg, buf))
4497 return -TARGET_EFAULT;
4498 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4499 if (host_to_target_shmid_ds(buf, &dsarg))
4500 return -TARGET_EFAULT;
4501 break;
4502 case IPC_INFO:
4503 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4504 if (host_to_target_shminfo(buf, &shminfo))
4505 return -TARGET_EFAULT;
4506 break;
4507 case SHM_INFO:
4508 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4509 if (host_to_target_shm_info(buf, &shm_info))
4510 return -TARGET_EFAULT;
4511 break;
4512 case IPC_RMID:
4513 case SHM_LOCK:
4514 case SHM_UNLOCK:
4515 ret = get_errno(shmctl(shmid, cmd, NULL));
4516 break;
4517 }
4518
4519 return ret;
4520 }
4521
4522 #ifndef TARGET_FORCE_SHMLBA
4523 /* For most architectures, SHMLBA is the same as the page size;
4524 * some architectures have larger values, in which case they should
4525 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4526 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4527 * and defining its own value for SHMLBA.
4528 *
4529 * The kernel also permits SHMLBA to be set by the architecture to a
4530 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4531 * this means that addresses are rounded to the large size if
4532 * SHM_RND is set but addresses not aligned to that size are not rejected
4533 * as long as they are at least page-aligned. Since the only architecture
4534 * which uses this is ia64 this code doesn't provide for that oddity.
4535 */
4536 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4537 {
4538 return TARGET_PAGE_SIZE;
4539 }
4540 #endif
4541
4542 static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
4543 abi_ulong shmaddr, int shmflg)
4544 {
4545 CPUState *cpu = env_cpu(cpu_env);
4546 abi_ulong raddr;
4547 void *host_raddr;
4548 struct shmid_ds shm_info;
4549 int i, ret;
4550 abi_ulong shmlba;
4551
4552 /* shmat pointers are always untagged */
4553
4554 /* find out the length of the shared memory segment */
4555 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4556 if (is_error(ret)) {
4557 /* can't get length, bail out */
4558 return ret;
4559 }
4560
4561 shmlba = target_shmlba(cpu_env);
4562
4563 if (shmaddr & (shmlba - 1)) {
4564 if (shmflg & SHM_RND) {
4565 shmaddr &= ~(shmlba - 1);
4566 } else {
4567 return -TARGET_EINVAL;
4568 }
4569 }
4570 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4571 return -TARGET_EINVAL;
4572 }
4573
4574 mmap_lock();
4575
4576 /*
4577 * We're mapping shared memory, so ensure we generate code for parallel
4578 * execution and flush old translations. This will work up to the level
4579 * supported by the host -- anything that requires EXCP_ATOMIC will not
4580 * be atomic with respect to an external process.
4581 */
4582 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4583 cpu->tcg_cflags |= CF_PARALLEL;
4584 tb_flush(cpu);
4585 }
4586
4587 if (shmaddr)
4588 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4589 else {
4590 abi_ulong mmap_start;
4591
4592 /* In order to use the host shmat, we need to honor host SHMLBA. */
4593 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4594
4595 if (mmap_start == -1) {
4596 errno = ENOMEM;
4597 host_raddr = (void *)-1;
4598 } else
4599 host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4600 shmflg | SHM_REMAP);
4601 }
4602
4603 if (host_raddr == (void *)-1) {
4604 mmap_unlock();
4605 return get_errno((intptr_t)host_raddr);
4606 }
4607 raddr = h2g((uintptr_t)host_raddr);
4608
4609 page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
4610 PAGE_VALID | PAGE_RESET | PAGE_READ |
4611 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4612
4613 for (i = 0; i < N_SHM_REGIONS; i++) {
4614 if (!shm_regions[i].in_use) {
4615 shm_regions[i].in_use = true;
4616 shm_regions[i].start = raddr;
4617 shm_regions[i].size = shm_info.shm_segsz;
4618 break;
4619 }
4620 }
4621
4622 mmap_unlock();
4623 return raddr;
4624 }
4625
4626 static inline abi_long do_shmdt(abi_ulong shmaddr)
4627 {
4628 int i;
4629 abi_long rv;
4630
4631 /* shmdt pointers are always untagged */
4632
4633 mmap_lock();
4634
4635 for (i = 0; i < N_SHM_REGIONS; ++i) {
4636 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4637 shm_regions[i].in_use = false;
4638 page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4639 break;
4640 }
4641 }
4642 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4643
4644 mmap_unlock();
4645
4646 return rv;
4647 }
4648
4649 #ifdef TARGET_NR_ipc
4650 /* ??? This only works with linear mappings. */
4651 /* do_ipc() must return target values and target errnos. */
4652 static abi_long do_ipc(CPUArchState *cpu_env,
4653 unsigned int call, abi_long first,
4654 abi_long second, abi_long third,
4655 abi_long ptr, abi_long fifth)
4656 {
4657 int version;
4658 abi_long ret = 0;
4659
4660 version = call >> 16;
4661 call &= 0xffff;
4662
4663 switch (call) {
4664 case IPCOP_semop:
4665 ret = do_semtimedop(first, ptr, second, 0, false);
4666 break;
4667 case IPCOP_semtimedop:
4668 /*
4669 * The s390 sys_ipc variant has only five parameters instead of six
4670 * (as for default variant) and the only difference is the handling of
4671 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4672 * to a struct timespec where the generic variant uses fifth parameter.
4673 */
4674 #if defined(TARGET_S390X)
4675 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4676 #else
4677 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4678 #endif
4679 break;
4680
4681 case IPCOP_semget:
4682 ret = get_errno(semget(first, second, third));
4683 break;
4684
4685 case IPCOP_semctl: {
4686 /* The semun argument to semctl is passed by value, so dereference the
4687 * ptr argument. */
4688 abi_ulong atptr;
4689 get_user_ual(atptr, ptr);
4690 ret = do_semctl(first, second, third, atptr);
4691 break;
4692 }
4693
4694 case IPCOP_msgget:
4695 ret = get_errno(msgget(first, second));
4696 break;
4697
4698 case IPCOP_msgsnd:
4699 ret = do_msgsnd(first, ptr, second, third);
4700 break;
4701
4702 case IPCOP_msgctl:
4703 ret = do_msgctl(first, second, ptr);
4704 break;
4705
4706 case IPCOP_msgrcv:
4707 switch (version) {
4708 case 0:
4709 {
4710 struct target_ipc_kludge {
4711 abi_long msgp;
4712 abi_long msgtyp;
4713 } *tmp;
4714
4715 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4716 ret = -TARGET_EFAULT;
4717 break;
4718 }
4719
4720 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4721
4722 unlock_user_struct(tmp, ptr, 0);
4723 break;
4724 }
4725 default:
4726 ret = do_msgrcv(first, ptr, second, fifth, third);
4727 }
4728 break;
4729
4730 case IPCOP_shmat:
4731 switch (version) {
4732 default:
4733 {
4734 abi_ulong raddr;
4735 raddr = do_shmat(cpu_env, first, ptr, second);
4736 if (is_error(raddr))
4737 return get_errno(raddr);
4738 if (put_user_ual(raddr, third))
4739 return -TARGET_EFAULT;
4740 break;
4741 }
4742 case 1:
4743 ret = -TARGET_EINVAL;
4744 break;
4745 }
4746 break;
4747 case IPCOP_shmdt:
4748 ret = do_shmdt(ptr);
4749 break;
4750
4751 case IPCOP_shmget:
4752 /* IPC_* flag values are the same on all linux platforms */
4753 ret = get_errno(shmget(first, second, third));
4754 break;
4755
4756 /* IPC_* and SHM_* command values are the same on all linux platforms */
4757 case IPCOP_shmctl:
4758 ret = do_shmctl(first, second, ptr);
4759 break;
4760 default:
4761 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4762 call, version);
4763 ret = -TARGET_ENOSYS;
4764 break;
4765 }
4766 return ret;
4767 }
4768 #endif
4769
4770 /* kernel structure types definitions */
4771
4772 #define STRUCT(name, ...) STRUCT_ ## name,
4773 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4774 enum {
4775 #include "syscall_types.h"
4776 STRUCT_MAX
4777 };
4778 #undef STRUCT
4779 #undef STRUCT_SPECIAL
4780
4781 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4782 #define STRUCT_SPECIAL(name)
4783 #include "syscall_types.h"
4784 #undef STRUCT
4785 #undef STRUCT_SPECIAL
4786
4787 #define MAX_STRUCT_SIZE 4096
4788
4789 #ifdef CONFIG_FIEMAP
4790 /* So fiemap access checks don't overflow on 32 bit systems.
4791 * This is very slightly smaller than the limit imposed by
4792 * the underlying kernel.
4793 */
4794 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4795 / sizeof(struct fiemap_extent))
4796
4797 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4798 int fd, int cmd, abi_long arg)
4799 {
4800 /* The parameter for this ioctl is a struct fiemap followed
4801 * by an array of struct fiemap_extent whose size is set
4802 * in fiemap->fm_extent_count. The array is filled in by the
4803 * ioctl.
4804 */
4805 int target_size_in, target_size_out;
4806 struct fiemap *fm;
4807 const argtype *arg_type = ie->arg_type;
4808 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4809 void *argptr, *p;
4810 abi_long ret;
4811 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4812 uint32_t outbufsz;
4813 int free_fm = 0;
4814
4815 assert(arg_type[0] == TYPE_PTR);
4816 assert(ie->access == IOC_RW);
4817 arg_type++;
4818 target_size_in = thunk_type_size(arg_type, 0);
4819 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4820 if (!argptr) {
4821 return -TARGET_EFAULT;
4822 }
4823 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4824 unlock_user(argptr, arg, 0);
4825 fm = (struct fiemap *)buf_temp;
4826 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4827 return -TARGET_EINVAL;
4828 }
4829
4830 outbufsz = sizeof (*fm) +
4831 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4832
4833 if (outbufsz > MAX_STRUCT_SIZE) {
4834 /* We can't fit all the extents into the fixed size buffer.
4835 * Allocate one that is large enough and use it instead.
4836 */
4837 fm = g_try_malloc(outbufsz);
4838 if (!fm) {
4839 return -TARGET_ENOMEM;
4840 }
4841 memcpy(fm, buf_temp, sizeof(struct fiemap));
4842 free_fm = 1;
4843 }
4844 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4845 if (!is_error(ret)) {
4846 target_size_out = target_size_in;
4847 /* An extent_count of 0 means we were only counting the extents
4848 * so there are no structs to copy
4849 */
4850 if (fm->fm_extent_count != 0) {
4851 target_size_out += fm->fm_mapped_extents * extent_size;
4852 }
4853 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4854 if (!argptr) {
4855 ret = -TARGET_EFAULT;
4856 } else {
4857 /* Convert the struct fiemap */
4858 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4859 if (fm->fm_extent_count != 0) {
4860 p = argptr + target_size_in;
4861 /* ...and then all the struct fiemap_extents */
4862 for (i = 0; i < fm->fm_mapped_extents; i++) {
4863 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4864 THUNK_TARGET);
4865 p += extent_size;
4866 }
4867 }
4868 unlock_user(argptr, arg, target_size_out);
4869 }
4870 }
4871 if (free_fm) {
4872 g_free(fm);
4873 }
4874 return ret;
4875 }
4876 #endif
4877
4878 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4879 int fd, int cmd, abi_long arg)
4880 {
4881 const argtype *arg_type = ie->arg_type;
4882 int target_size;
4883 void *argptr;
4884 int ret;
4885 struct ifconf *host_ifconf;
4886 uint32_t outbufsz;
4887 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4888 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4889 int target_ifreq_size;
4890 int nb_ifreq;
4891 int free_buf = 0;
4892 int i;
4893 int target_ifc_len;
4894 abi_long target_ifc_buf;
4895 int host_ifc_len;
4896 char *host_ifc_buf;
4897
4898 assert(arg_type[0] == TYPE_PTR);
4899 assert(ie->access == IOC_RW);
4900
4901 arg_type++;
4902 target_size = thunk_type_size(arg_type, 0);
4903
4904 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4905 if (!argptr)
4906 return -TARGET_EFAULT;
4907 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4908 unlock_user(argptr, arg, 0);
4909
4910 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4911 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4912 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4913
4914 if (target_ifc_buf != 0) {
4915 target_ifc_len = host_ifconf->ifc_len;
4916 nb_ifreq = target_ifc_len / target_ifreq_size;
4917 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4918
4919 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4920 if (outbufsz > MAX_STRUCT_SIZE) {
4921 /*
4922 * We can't fit all the extents into the fixed size buffer.
4923 * Allocate one that is large enough and use it instead.
4924 */
4925 host_ifconf = g_try_malloc(outbufsz);
4926 if (!host_ifconf) {
4927 return -TARGET_ENOMEM;
4928 }
4929 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4930 free_buf = 1;
4931 }
4932 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4933
4934 host_ifconf->ifc_len = host_ifc_len;
4935 } else {
4936 host_ifc_buf = NULL;
4937 }
4938 host_ifconf->ifc_buf = host_ifc_buf;
4939
4940 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4941 if (!is_error(ret)) {
4942 /* convert host ifc_len to target ifc_len */
4943
4944 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4945 target_ifc_len = nb_ifreq * target_ifreq_size;
4946 host_ifconf->ifc_len = target_ifc_len;
4947
4948 /* restore target ifc_buf */
4949
4950 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4951
4952 /* copy struct ifconf to target user */
4953
4954 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4955 if (!argptr)
4956 return -TARGET_EFAULT;
4957 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4958 unlock_user(argptr, arg, target_size);
4959
4960 if (target_ifc_buf != 0) {
4961 /* copy ifreq[] to target user */
4962 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4963 for (i = 0; i < nb_ifreq ; i++) {
4964 thunk_convert(argptr + i * target_ifreq_size,
4965 host_ifc_buf + i * sizeof(struct ifreq),
4966 ifreq_arg_type, THUNK_TARGET);
4967 }
4968 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4969 }
4970 }
4971
4972 if (free_buf) {
4973 g_free(host_ifconf);
4974 }
4975
4976 return ret;
4977 }
4978
4979 #if defined(CONFIG_USBFS)
4980 #if HOST_LONG_BITS > 64
4981 #error USBDEVFS thunks do not support >64 bit hosts yet.
4982 #endif
4983 struct live_urb {
4984 uint64_t target_urb_adr;
4985 uint64_t target_buf_adr;
4986 char *target_buf_ptr;
4987 struct usbdevfs_urb host_urb;
4988 };
4989
4990 static GHashTable *usbdevfs_urb_hashtable(void)
4991 {
4992 static GHashTable *urb_hashtable;
4993
4994 if (!urb_hashtable) {
4995 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4996 }
4997 return urb_hashtable;
4998 }
4999
5000 static void urb_hashtable_insert(struct live_urb *urb)
5001 {
5002 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5003 g_hash_table_insert(urb_hashtable, urb, urb);
5004 }
5005
5006 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5007 {
5008 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5009 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5010 }
5011
5012 static void urb_hashtable_remove(struct live_urb *urb)
5013 {
5014 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5015 g_hash_table_remove(urb_hashtable, urb);
5016 }
5017
5018 static abi_long
5019 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5020 int fd, int cmd, abi_long arg)
5021 {
5022 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5023 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5024 struct live_urb *lurb;
5025 void *argptr;
5026 uint64_t hurb;
5027 int target_size;
5028 uintptr_t target_urb_adr;
5029 abi_long ret;
5030
5031 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5032
5033 memset(buf_temp, 0, sizeof(uint64_t));
5034 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5035 if (is_error(ret)) {
5036 return ret;
5037 }
5038
5039 memcpy(&hurb, buf_temp, sizeof(uint64_t));
5040 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5041 if (!lurb->target_urb_adr) {
5042 return -TARGET_EFAULT;
5043 }
5044 urb_hashtable_remove(lurb);
5045 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5046 lurb->host_urb.buffer_length);
5047 lurb->target_buf_ptr = NULL;
5048
5049 /* restore the guest buffer pointer */
5050 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5051
5052 /* update the guest urb struct */
5053 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5054 if (!argptr) {
5055 g_free(lurb);
5056 return -TARGET_EFAULT;
5057 }
5058 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5059 unlock_user(argptr, lurb->target_urb_adr, target_size);
5060
5061 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5062 /* write back the urb handle */
5063 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5064 if (!argptr) {
5065 g_free(lurb);
5066 return -TARGET_EFAULT;
5067 }
5068
5069 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5070 target_urb_adr = lurb->target_urb_adr;
5071 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5072 unlock_user(argptr, arg, target_size);
5073
5074 g_free(lurb);
5075 return ret;
5076 }
5077
5078 static abi_long
5079 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5080 uint8_t *buf_temp __attribute__((unused)),
5081 int fd, int cmd, abi_long arg)
5082 {
5083 struct live_urb *lurb;
5084
5085 /* map target address back to host URB with metadata. */
5086 lurb = urb_hashtable_lookup(arg);
5087 if (!lurb) {
5088 return -TARGET_EFAULT;
5089 }
5090 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5091 }
5092
5093 static abi_long
5094 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5095 int fd, int cmd, abi_long arg)
5096 {
5097 const argtype *arg_type = ie->arg_type;
5098 int target_size;
5099 abi_long ret;
5100 void *argptr;
5101 int rw_dir;
5102 struct live_urb *lurb;
5103
5104 /*
5105 * each submitted URB needs to map to a unique ID for the
5106 * kernel, and that unique ID needs to be a pointer to
5107 * host memory. hence, we need to malloc for each URB.
5108 * isochronous transfers have a variable length struct.
5109 */
5110 arg_type++;
5111 target_size = thunk_type_size(arg_type, THUNK_TARGET);
5112
5113 /* construct host copy of urb and metadata */
5114 lurb = g_try_new0(struct live_urb, 1);
5115 if (!lurb) {
5116 return -TARGET_ENOMEM;
5117 }
5118
5119 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5120 if (!argptr) {
5121 g_free(lurb);
5122 return -TARGET_EFAULT;
5123 }
5124 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5125 unlock_user(argptr, arg, 0);
5126
5127 lurb->target_urb_adr = arg;
5128 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5129
5130 /* buffer space used depends on endpoint type so lock the entire buffer */
5131 /* control type urbs should check the buffer contents for true direction */
5132 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5133 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5134 lurb->host_urb.buffer_length, 1);
5135 if (lurb->target_buf_ptr == NULL) {
5136 g_free(lurb);
5137 return -TARGET_EFAULT;
5138 }
5139
5140 /* update buffer pointer in host copy */
5141 lurb->host_urb.buffer = lurb->target_buf_ptr;
5142
5143 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5144 if (is_error(ret)) {
5145 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5146 g_free(lurb);
5147 } else {
5148 urb_hashtable_insert(lurb);
5149 }
5150
5151 return ret;
5152 }
5153 #endif /* CONFIG_USBFS */
5154
5155 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5156 int cmd, abi_long arg)
5157 {
5158 void *argptr;
5159 struct dm_ioctl *host_dm;
5160 abi_long guest_data;
5161 uint32_t guest_data_size;
5162 int target_size;
5163 const argtype *arg_type = ie->arg_type;
5164 abi_long ret;
5165 void *big_buf = NULL;
5166 char *host_data;
5167
5168 arg_type++;
5169 target_size = thunk_type_size(arg_type, 0);
5170 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5171 if (!argptr) {
5172 ret = -TARGET_EFAULT;
5173 goto out;
5174 }
5175 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5176 unlock_user(argptr, arg, 0);
5177
5178 /* buf_temp is too small, so fetch things into a bigger buffer */
5179 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5180 memcpy(big_buf, buf_temp, target_size);
5181 buf_temp = big_buf;
5182 host_dm = big_buf;
5183
5184 guest_data = arg + host_dm->data_start;
5185 if ((guest_data - arg) < 0) {
5186 ret = -TARGET_EINVAL;
5187 goto out;
5188 }
5189 guest_data_size = host_dm->data_size - host_dm->data_start;
5190 host_data = (char*)host_dm + host_dm->data_start;
5191
5192 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5193 if (!argptr) {
5194 ret = -TARGET_EFAULT;
5195 goto out;
5196 }
5197
5198 switch (ie->host_cmd) {
5199 case DM_REMOVE_ALL:
5200 case DM_LIST_DEVICES:
5201 case DM_DEV_CREATE:
5202 case DM_DEV_REMOVE:
5203 case DM_DEV_SUSPEND:
5204 case DM_DEV_STATUS:
5205 case DM_DEV_WAIT:
5206 case DM_TABLE_STATUS:
5207 case DM_TABLE_CLEAR:
5208 case DM_TABLE_DEPS:
5209 case DM_LIST_VERSIONS:
5210 /* no input data */
5211 break;
5212 case DM_DEV_RENAME:
5213 case DM_DEV_SET_GEOMETRY:
5214 /* data contains only strings */
5215 memcpy(host_data, argptr, guest_data_size);
5216 break;
5217 case DM_TARGET_MSG:
5218 memcpy(host_data, argptr, guest_data_size);
5219 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5220 break;
5221 case DM_TABLE_LOAD:
5222 {
5223 void *gspec = argptr;
5224 void *cur_data = host_data;
5225 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5226 int spec_size = thunk_type_size(arg_type, 0);
5227 int i;
5228
5229 for (i = 0; i < host_dm->target_count; i++) {
5230 struct dm_target_spec *spec = cur_data;
5231 uint32_t next;
5232 int slen;
5233
5234 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5235 slen = strlen((char*)gspec + spec_size) + 1;
5236 next = spec->next;
5237 spec->next = sizeof(*spec) + slen;
5238 strcpy((char*)&spec[1], gspec + spec_size);
5239 gspec += next;
5240 cur_data += spec->next;
5241 }
5242 break;
5243 }
5244 default:
5245 ret = -TARGET_EINVAL;
5246 unlock_user(argptr, guest_data, 0);
5247 goto out;
5248 }
5249 unlock_user(argptr, guest_data, 0);
5250
5251 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5252 if (!is_error(ret)) {
5253 guest_data = arg + host_dm->data_start;
5254 guest_data_size = host_dm->data_size - host_dm->data_start;
5255 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5256 switch (ie->host_cmd) {
5257 case DM_REMOVE_ALL:
5258 case DM_DEV_CREATE:
5259 case DM_DEV_REMOVE:
5260 case DM_DEV_RENAME:
5261 case DM_DEV_SUSPEND:
5262 case DM_DEV_STATUS:
5263 case DM_TABLE_LOAD:
5264 case DM_TABLE_CLEAR:
5265 case DM_TARGET_MSG:
5266 case DM_DEV_SET_GEOMETRY:
5267 /* no return data */
5268 break;
5269 case DM_LIST_DEVICES:
5270 {
5271 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5272 uint32_t remaining_data = guest_data_size;
5273 void *cur_data = argptr;
5274 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5275 int nl_size = 12; /* can't use thunk_size due to alignment */
5276
5277 while (1) {
5278 uint32_t next = nl->next;
5279 if (next) {
5280 nl->next = nl_size + (strlen(nl->name) + 1);
5281 }
5282 if (remaining_data < nl->next) {
5283 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5284 break;
5285 }
5286 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5287 strcpy(cur_data + nl_size, nl->name);
5288 cur_data += nl->next;
5289 remaining_data -= nl->next;
5290 if (!next) {
5291 break;
5292 }
5293 nl = (void*)nl + next;
5294 }
5295 break;
5296 }
5297 case DM_DEV_WAIT:
5298 case DM_TABLE_STATUS:
5299 {
5300 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5301 void *cur_data = argptr;
5302 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5303 int spec_size = thunk_type_size(arg_type, 0);
5304 int i;
5305
5306 for (i = 0; i < host_dm->target_count; i++) {
5307 uint32_t next = spec->next;
5308 int slen = strlen((char*)&spec[1]) + 1;
5309 spec->next = (cur_data - argptr) + spec_size + slen;
5310 if (guest_data_size < spec->next) {
5311 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5312 break;
5313 }
5314 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5315 strcpy(cur_data + spec_size, (char*)&spec[1]);
5316 cur_data = argptr + spec->next;
5317 spec = (void*)host_dm + host_dm->data_start + next;
5318 }
5319 break;
5320 }
5321 case DM_TABLE_DEPS:
5322 {
5323 void *hdata = (void*)host_dm + host_dm->data_start;
5324 int count = *(uint32_t*)hdata;
5325 uint64_t *hdev = hdata + 8;
5326 uint64_t *gdev = argptr + 8;
5327 int i;
5328
5329 *(uint32_t*)argptr = tswap32(count);
5330 for (i = 0; i < count; i++) {
5331 *gdev = tswap64(*hdev);
5332 gdev++;
5333 hdev++;
5334 }
5335 break;
5336 }
5337 case DM_LIST_VERSIONS:
5338 {
5339 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5340 uint32_t remaining_data = guest_data_size;
5341 void *cur_data = argptr;
5342 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5343 int vers_size = thunk_type_size(arg_type, 0);
5344
5345 while (1) {
5346 uint32_t next = vers->next;
5347 if (next) {
5348 vers->next = vers_size + (strlen(vers->name) + 1);
5349 }
5350 if (remaining_data < vers->next) {
5351 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5352 break;
5353 }
5354 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5355 strcpy(cur_data + vers_size, vers->name);
5356 cur_data += vers->next;
5357 remaining_data -= vers->next;
5358 if (!next) {
5359 break;
5360 }
5361 vers = (void*)vers + next;
5362 }
5363 break;
5364 }
5365 default:
5366 unlock_user(argptr, guest_data, 0);
5367 ret = -TARGET_EINVAL;
5368 goto out;
5369 }
5370 unlock_user(argptr, guest_data, guest_data_size);
5371
5372 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5373 if (!argptr) {
5374 ret = -TARGET_EFAULT;
5375 goto out;
5376 }
5377 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5378 unlock_user(argptr, arg, target_size);
5379 }
5380 out:
5381 g_free(big_buf);
5382 return ret;
5383 }
5384
5385 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5386 int cmd, abi_long arg)
5387 {
5388 void *argptr;
5389 int target_size;
5390 const argtype *arg_type = ie->arg_type;
5391 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5392 abi_long ret;
5393
5394 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5395 struct blkpg_partition host_part;
5396
5397 /* Read and convert blkpg */
5398 arg_type++;
5399 target_size = thunk_type_size(arg_type, 0);
5400 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5401 if (!argptr) {
5402 ret = -TARGET_EFAULT;
5403 goto out;
5404 }
5405 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5406 unlock_user(argptr, arg, 0);
5407
5408 switch (host_blkpg->op) {
5409 case BLKPG_ADD_PARTITION:
5410 case BLKPG_DEL_PARTITION:
5411 /* payload is struct blkpg_partition */
5412 break;
5413 default:
5414 /* Unknown opcode */
5415 ret = -TARGET_EINVAL;
5416 goto out;
5417 }
5418
5419 /* Read and convert blkpg->data */
5420 arg = (abi_long)(uintptr_t)host_blkpg->data;
5421 target_size = thunk_type_size(part_arg_type, 0);
5422 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5423 if (!argptr) {
5424 ret = -TARGET_EFAULT;
5425 goto out;
5426 }
5427 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5428 unlock_user(argptr, arg, 0);
5429
5430 /* Swizzle the data pointer to our local copy and call! */
5431 host_blkpg->data = &host_part;
5432 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5433
5434 out:
5435 return ret;
5436 }
5437
5438 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5439 int fd, int cmd, abi_long arg)
5440 {
5441 const argtype *arg_type = ie->arg_type;
5442 const StructEntry *se;
5443 const argtype *field_types;
5444 const int *dst_offsets, *src_offsets;
5445 int target_size;
5446 void *argptr;
5447 abi_ulong *target_rt_dev_ptr = NULL;
5448 unsigned long *host_rt_dev_ptr = NULL;
5449 abi_long ret;
5450 int i;
5451
5452 assert(ie->access == IOC_W);
5453 assert(*arg_type == TYPE_PTR);
5454 arg_type++;
5455 assert(*arg_type == TYPE_STRUCT);
5456 target_size = thunk_type_size(arg_type, 0);
5457 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5458 if (!argptr) {
5459 return -TARGET_EFAULT;
5460 }
5461 arg_type++;
5462 assert(*arg_type == (int)STRUCT_rtentry);
5463 se = struct_entries + *arg_type++;
5464 assert(se->convert[0] == NULL);
5465 /* convert struct here to be able to catch rt_dev string */
5466 field_types = se->field_types;
5467 dst_offsets = se->field_offsets[THUNK_HOST];
5468 src_offsets = se->field_offsets[THUNK_TARGET];
5469 for (i = 0; i < se->nb_fields; i++) {
5470 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5471 assert(*field_types == TYPE_PTRVOID);
5472 target_rt_dev_ptr = argptr + src_offsets[i];
5473 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5474 if (*target_rt_dev_ptr != 0) {
5475 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5476 tswapal(*target_rt_dev_ptr));
5477 if (!*host_rt_dev_ptr) {
5478 unlock_user(argptr, arg, 0);
5479 return -TARGET_EFAULT;
5480 }
5481 } else {
5482 *host_rt_dev_ptr = 0;
5483 }
5484 field_types++;
5485 continue;
5486 }
5487 field_types = thunk_convert(buf_temp + dst_offsets[i],
5488 argptr + src_offsets[i],
5489 field_types, THUNK_HOST);
5490 }
5491 unlock_user(argptr, arg, 0);
5492
5493 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5494
5495 assert(host_rt_dev_ptr != NULL);
5496 assert(target_rt_dev_ptr != NULL);
5497 if (*host_rt_dev_ptr != 0) {
5498 unlock_user((void *)*host_rt_dev_ptr,
5499 *target_rt_dev_ptr, 0);
5500 }
5501 return ret;
5502 }
5503
5504 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5505 int fd, int cmd, abi_long arg)
5506 {
5507 int sig = target_to_host_signal(arg);
5508 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5509 }
5510
5511 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5512 int fd, int cmd, abi_long arg)
5513 {
5514 struct timeval tv;
5515 abi_long ret;
5516
5517 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5518 if (is_error(ret)) {
5519 return ret;
5520 }
5521
5522 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5523 if (copy_to_user_timeval(arg, &tv)) {
5524 return -TARGET_EFAULT;
5525 }
5526 } else {
5527 if (copy_to_user_timeval64(arg, &tv)) {
5528 return -TARGET_EFAULT;
5529 }
5530 }
5531
5532 return ret;
5533 }
5534
5535 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5536 int fd, int cmd, abi_long arg)
5537 {
5538 struct timespec ts;
5539 abi_long ret;
5540
5541 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5542 if (is_error(ret)) {
5543 return ret;
5544 }
5545
5546 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5547 if (host_to_target_timespec(arg, &ts)) {
5548 return -TARGET_EFAULT;
5549 }
5550 } else{
5551 if (host_to_target_timespec64(arg, &ts)) {
5552 return -TARGET_EFAULT;
5553 }
5554 }
5555
5556 return ret;
5557 }
5558
5559 #ifdef TIOCGPTPEER
5560 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5561 int fd, int cmd, abi_long arg)
5562 {
5563 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5564 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5565 }
5566 #endif
5567
5568 #ifdef HAVE_DRM_H
5569
5570 static void unlock_drm_version(struct drm_version *host_ver,
5571 struct target_drm_version *target_ver,
5572 bool copy)
5573 {
5574 unlock_user(host_ver->name, target_ver->name,
5575 copy ? host_ver->name_len : 0);
5576 unlock_user(host_ver->date, target_ver->date,
5577 copy ? host_ver->date_len : 0);
5578 unlock_user(host_ver->desc, target_ver->desc,
5579 copy ? host_ver->desc_len : 0);
5580 }
5581
5582 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5583 struct target_drm_version *target_ver)
5584 {
5585 memset(host_ver, 0, sizeof(*host_ver));
5586
5587 __get_user(host_ver->name_len, &target_ver->name_len);
5588 if (host_ver->name_len) {
5589 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5590 target_ver->name_len, 0);
5591 if (!host_ver->name) {
5592 return -EFAULT;
5593 }
5594 }
5595
5596 __get_user(host_ver->date_len, &target_ver->date_len);
5597 if (host_ver->date_len) {
5598 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5599 target_ver->date_len, 0);
5600 if (!host_ver->date) {
5601 goto err;
5602 }
5603 }
5604
5605 __get_user(host_ver->desc_len, &target_ver->desc_len);
5606 if (host_ver->desc_len) {
5607 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5608 target_ver->desc_len, 0);
5609 if (!host_ver->desc) {
5610 goto err;
5611 }
5612 }
5613
5614 return 0;
5615 err:
5616 unlock_drm_version(host_ver, target_ver, false);
5617 return -EFAULT;
5618 }
5619
5620 static inline void host_to_target_drmversion(
5621 struct target_drm_version *target_ver,
5622 struct drm_version *host_ver)
5623 {
5624 __put_user(host_ver->version_major, &target_ver->version_major);
5625 __put_user(host_ver->version_minor, &target_ver->version_minor);
5626 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5627 __put_user(host_ver->name_len, &target_ver->name_len);
5628 __put_user(host_ver->date_len, &target_ver->date_len);
5629 __put_user(host_ver->desc_len, &target_ver->desc_len);
5630 unlock_drm_version(host_ver, target_ver, true);
5631 }
5632
5633 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5634 int fd, int cmd, abi_long arg)
5635 {
5636 struct drm_version *ver;
5637 struct target_drm_version *target_ver;
5638 abi_long ret;
5639
5640 switch (ie->host_cmd) {
5641 case DRM_IOCTL_VERSION:
5642 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5643 return -TARGET_EFAULT;
5644 }
5645 ver = (struct drm_version *)buf_temp;
5646 ret = target_to_host_drmversion(ver, target_ver);
5647 if (!is_error(ret)) {
5648 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5649 if (is_error(ret)) {
5650 unlock_drm_version(ver, target_ver, false);
5651 } else {
5652 host_to_target_drmversion(target_ver, ver);
5653 }
5654 }
5655 unlock_user_struct(target_ver, arg, 0);
5656 return ret;
5657 }
5658 return -TARGET_ENOSYS;
5659 }
5660
5661 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5662 struct drm_i915_getparam *gparam,
5663 int fd, abi_long arg)
5664 {
5665 abi_long ret;
5666 int value;
5667 struct target_drm_i915_getparam *target_gparam;
5668
5669 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5670 return -TARGET_EFAULT;
5671 }
5672
5673 __get_user(gparam->param, &target_gparam->param);
5674 gparam->value = &value;
5675 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5676 put_user_s32(value, target_gparam->value);
5677
5678 unlock_user_struct(target_gparam, arg, 0);
5679 return ret;
5680 }
5681
5682 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5683 int fd, int cmd, abi_long arg)
5684 {
5685 switch (ie->host_cmd) {
5686 case DRM_IOCTL_I915_GETPARAM:
5687 return do_ioctl_drm_i915_getparam(ie,
5688 (struct drm_i915_getparam *)buf_temp,
5689 fd, arg);
5690 default:
5691 return -TARGET_ENOSYS;
5692 }
5693 }
5694
5695 #endif
5696
5697 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5698 int fd, int cmd, abi_long arg)
5699 {
5700 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5701 struct tun_filter *target_filter;
5702 char *target_addr;
5703
5704 assert(ie->access == IOC_W);
5705
5706 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5707 if (!target_filter) {
5708 return -TARGET_EFAULT;
5709 }
5710 filter->flags = tswap16(target_filter->flags);
5711 filter->count = tswap16(target_filter->count);
5712 unlock_user(target_filter, arg, 0);
5713
5714 if (filter->count) {
5715 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5716 MAX_STRUCT_SIZE) {
5717 return -TARGET_EFAULT;
5718 }
5719
5720 target_addr = lock_user(VERIFY_READ,
5721 arg + offsetof(struct tun_filter, addr),
5722 filter->count * ETH_ALEN, 1);
5723 if (!target_addr) {
5724 return -TARGET_EFAULT;
5725 }
5726 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5727 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5728 }
5729
5730 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5731 }
5732
5733 IOCTLEntry ioctl_entries[] = {
5734 #define IOCTL(cmd, access, ...) \
5735 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5736 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5737 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5738 #define IOCTL_IGNORE(cmd) \
5739 { TARGET_ ## cmd, 0, #cmd },
5740 #include "ioctls.h"
5741 { 0, 0, },
5742 };
5743
5744 /* ??? Implement proper locking for ioctls. */
5745 /* do_ioctl() Must return target values and target errnos. */
5746 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5747 {
5748 const IOCTLEntry *ie;
5749 const argtype *arg_type;
5750 abi_long ret;
5751 uint8_t buf_temp[MAX_STRUCT_SIZE];
5752 int target_size;
5753 void *argptr;
5754
5755 ie = ioctl_entries;
5756 for(;;) {
5757 if (ie->target_cmd == 0) {
5758 qemu_log_mask(
5759 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5760 return -TARGET_ENOTTY;
5761 }
5762 if (ie->target_cmd == cmd)
5763 break;
5764 ie++;
5765 }
5766 arg_type = ie->arg_type;
5767 if (ie->do_ioctl) {
5768 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5769 } else if (!ie->host_cmd) {
5770 /* Some architectures define BSD ioctls in their headers
5771 that are not implemented in Linux. */
5772 return -TARGET_ENOTTY;
5773 }
5774
5775 switch(arg_type[0]) {
5776 case TYPE_NULL:
5777 /* no argument */
5778 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5779 break;
5780 case TYPE_PTRVOID:
5781 case TYPE_INT:
5782 case TYPE_LONG:
5783 case TYPE_ULONG:
5784 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5785 break;
5786 case TYPE_PTR:
5787 arg_type++;
5788 target_size = thunk_type_size(arg_type, 0);
5789 switch(ie->access) {
5790 case IOC_R:
5791 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5792 if (!is_error(ret)) {
5793 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5794 if (!argptr)
5795 return -TARGET_EFAULT;
5796 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5797 unlock_user(argptr, arg, target_size);
5798 }
5799 break;
5800 case IOC_W:
5801 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5802 if (!argptr)
5803 return -TARGET_EFAULT;
5804 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5805 unlock_user(argptr, arg, 0);
5806 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5807 break;
5808 default:
5809 case IOC_RW:
5810 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5811 if (!argptr)
5812 return -TARGET_EFAULT;
5813 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5814 unlock_user(argptr, arg, 0);
5815 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5816 if (!is_error(ret)) {
5817 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5818 if (!argptr)
5819 return -TARGET_EFAULT;
5820 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5821 unlock_user(argptr, arg, target_size);
5822 }
5823 break;
5824 }
5825 break;
5826 default:
5827 qemu_log_mask(LOG_UNIMP,
5828 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5829 (long)cmd, arg_type[0]);
5830 ret = -TARGET_ENOTTY;
5831 break;
5832 }
5833 return ret;
5834 }
5835
5836 static const bitmask_transtbl iflag_tbl[] = {
5837 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5838 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5839 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5840 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5841 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5842 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5843 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5844 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5845 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5846 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5847 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5848 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5849 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5850 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5851 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5852 { 0, 0, 0, 0 }
5853 };
5854
5855 static const bitmask_transtbl oflag_tbl[] = {
5856 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5857 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5858 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5859 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5860 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5861 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5862 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5863 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5864 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5865 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5866 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5867 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5868 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5869 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5870 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5871 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5872 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5873 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5874 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5875 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5876 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5877 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5878 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5879 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5880 { 0, 0, 0, 0 }
5881 };
5882
5883 static const bitmask_transtbl cflag_tbl[] = {
5884 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5885 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5886 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5887 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5888 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5889 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5890 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5891 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5892 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5893 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5894 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5895 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5896 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5897 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5898 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5899 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5900 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5901 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5902 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5903 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5904 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5905 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5906 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5907 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5908 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5909 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5910 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5911 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5912 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5913 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5914 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5915 { 0, 0, 0, 0 }
5916 };
5917
5918 static const bitmask_transtbl lflag_tbl[] = {
5919 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5920 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5921 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5922 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5923 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5924 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5925 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5926 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5927 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5928 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5929 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5930 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5931 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5932 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5933 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5934 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5935 { 0, 0, 0, 0 }
5936 };
5937
5938 static void target_to_host_termios (void *dst, const void *src)
5939 {
5940 struct host_termios *host = dst;
5941 const struct target_termios *target = src;
5942
5943 host->c_iflag =
5944 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5945 host->c_oflag =
5946 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5947 host->c_cflag =
5948 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5949 host->c_lflag =
5950 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5951 host->c_line = target->c_line;
5952
5953 memset(host->c_cc, 0, sizeof(host->c_cc));
5954 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5955 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5956 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5957 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5958 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5959 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5960 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5961 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5962 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5963 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5964 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5965 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5966 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5967 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5968 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5969 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5970 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5971 }
5972
5973 static void host_to_target_termios (void *dst, const void *src)
5974 {
5975 struct target_termios *target = dst;
5976 const struct host_termios *host = src;
5977
5978 target->c_iflag =
5979 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5980 target->c_oflag =
5981 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5982 target->c_cflag =
5983 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5984 target->c_lflag =
5985 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5986 target->c_line = host->c_line;
5987
5988 memset(target->c_cc, 0, sizeof(target->c_cc));
5989 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5990 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5991 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5992 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5993 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5994 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5995 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5996 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5997 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5998 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5999 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6000 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6001 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6002 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6003 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6004 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6005 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6006 }
6007
6008 static const StructEntry struct_termios_def = {
6009 .convert = { host_to_target_termios, target_to_host_termios },
6010 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6011 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6012 .print = print_termios,
6013 };
6014
6015 /* If the host does not provide these bits, they may be safely discarded. */
6016 #ifndef MAP_SYNC
6017 #define MAP_SYNC 0
6018 #endif
6019 #ifndef MAP_UNINITIALIZED
6020 #define MAP_UNINITIALIZED 0
6021 #endif
6022
6023 static const bitmask_transtbl mmap_flags_tbl[] = {
6024 { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
6025 { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
6026 { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
6027 MAP_TYPE, MAP_SHARED_VALIDATE },
6028 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6029 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6030 MAP_ANONYMOUS, MAP_ANONYMOUS },
6031 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6032 MAP_GROWSDOWN, MAP_GROWSDOWN },
6033 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6034 MAP_DENYWRITE, MAP_DENYWRITE },
6035 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6036 MAP_EXECUTABLE, MAP_EXECUTABLE },
6037 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6038 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6039 MAP_NORESERVE, MAP_NORESERVE },
6040 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6041 /* MAP_STACK had been ignored by the kernel for quite some time.
6042 Recognize it for the target insofar as we do not want to pass
6043 it through to the host. */
6044 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6045 { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
6046 { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
6047 { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
6048 { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
6049 MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
6050 { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
6051 MAP_UNINITIALIZED, MAP_UNINITIALIZED },
6052 { 0, 0, 0, 0 }
6053 };
6054
6055 /*
6056 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6057 * TARGET_I386 is defined if TARGET_X86_64 is defined
6058 */
6059 #if defined(TARGET_I386)
6060
6061 /* NOTE: there is really one LDT for all the threads */
6062 static uint8_t *ldt_table;
6063
6064 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6065 {
6066 int size;
6067 void *p;
6068
6069 if (!ldt_table)
6070 return 0;
6071 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6072 if (size > bytecount)
6073 size = bytecount;
6074 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6075 if (!p)
6076 return -TARGET_EFAULT;
6077 /* ??? Should this by byteswapped? */
6078 memcpy(p, ldt_table, size);
6079 unlock_user(p, ptr, size);
6080 return size;
6081 }
6082
6083 /* XXX: add locking support */
6084 static abi_long write_ldt(CPUX86State *env,
6085 abi_ulong ptr, unsigned long bytecount, int oldmode)
6086 {
6087 struct target_modify_ldt_ldt_s ldt_info;
6088 struct target_modify_ldt_ldt_s *target_ldt_info;
6089 int seg_32bit, contents, read_exec_only, limit_in_pages;
6090 int seg_not_present, useable, lm;
6091 uint32_t *lp, entry_1, entry_2;
6092
6093 if (bytecount != sizeof(ldt_info))
6094 return -TARGET_EINVAL;
6095 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6096 return -TARGET_EFAULT;
6097 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6098 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6099 ldt_info.limit = tswap32(target_ldt_info->limit);
6100 ldt_info.flags = tswap32(target_ldt_info->flags);
6101 unlock_user_struct(target_ldt_info, ptr, 0);
6102
6103 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6104 return -TARGET_EINVAL;
6105 seg_32bit = ldt_info.flags & 1;
6106 contents = (ldt_info.flags >> 1) & 3;
6107 read_exec_only = (ldt_info.flags >> 3) & 1;
6108 limit_in_pages = (ldt_info.flags >> 4) & 1;
6109 seg_not_present = (ldt_info.flags >> 5) & 1;
6110 useable = (ldt_info.flags >> 6) & 1;
6111 #ifdef TARGET_ABI32
6112 lm = 0;
6113 #else
6114 lm = (ldt_info.flags >> 7) & 1;
6115 #endif
6116 if (contents == 3) {
6117 if (oldmode)
6118 return -TARGET_EINVAL;
6119 if (seg_not_present == 0)
6120 return -TARGET_EINVAL;
6121 }
6122 /* allocate the LDT */
6123 if (!ldt_table) {
6124 env->ldt.base = target_mmap(0,
6125 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6126 PROT_READ|PROT_WRITE,
6127 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6128 if (env->ldt.base == -1)
6129 return -TARGET_ENOMEM;
6130 memset(g2h_untagged(env->ldt.base), 0,
6131 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6132 env->ldt.limit = 0xffff;
6133 ldt_table = g2h_untagged(env->ldt.base);
6134 }
6135
6136 /* NOTE: same code as Linux kernel */
6137 /* Allow LDTs to be cleared by the user. */
6138 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6139 if (oldmode ||
6140 (contents == 0 &&
6141 read_exec_only == 1 &&
6142 seg_32bit == 0 &&
6143 limit_in_pages == 0 &&
6144 seg_not_present == 1 &&
6145 useable == 0 )) {
6146 entry_1 = 0;
6147 entry_2 = 0;
6148 goto install;
6149 }
6150 }
6151
6152 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6153 (ldt_info.limit & 0x0ffff);
6154 entry_2 = (ldt_info.base_addr & 0xff000000) |
6155 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6156 (ldt_info.limit & 0xf0000) |
6157 ((read_exec_only ^ 1) << 9) |
6158 (contents << 10) |
6159 ((seg_not_present ^ 1) << 15) |
6160 (seg_32bit << 22) |
6161 (limit_in_pages << 23) |
6162 (lm << 21) |
6163 0x7000;
6164 if (!oldmode)
6165 entry_2 |= (useable << 20);
6166
6167 /* Install the new entry ... */
6168 install:
6169 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6170 lp[0] = tswap32(entry_1);
6171 lp[1] = tswap32(entry_2);
6172 return 0;
6173 }
6174
6175 /* specific and weird i386 syscalls */
6176 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6177 unsigned long bytecount)
6178 {
6179 abi_long ret;
6180
6181 switch (func) {
6182 case 0:
6183 ret = read_ldt(ptr, bytecount);
6184 break;
6185 case 1:
6186 ret = write_ldt(env, ptr, bytecount, 1);
6187 break;
6188 case 0x11:
6189 ret = write_ldt(env, ptr, bytecount, 0);
6190 break;
6191 default:
6192 ret = -TARGET_ENOSYS;
6193 break;
6194 }
6195 return ret;
6196 }
6197
6198 #if defined(TARGET_ABI32)
6199 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6200 {
6201 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6202 struct target_modify_ldt_ldt_s ldt_info;
6203 struct target_modify_ldt_ldt_s *target_ldt_info;
6204 int seg_32bit, contents, read_exec_only, limit_in_pages;
6205 int seg_not_present, useable, lm;
6206 uint32_t *lp, entry_1, entry_2;
6207 int i;
6208
6209 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6210 if (!target_ldt_info)
6211 return -TARGET_EFAULT;
6212 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6213 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6214 ldt_info.limit = tswap32(target_ldt_info->limit);
6215 ldt_info.flags = tswap32(target_ldt_info->flags);
6216 if (ldt_info.entry_number == -1) {
6217 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6218 if (gdt_table[i] == 0) {
6219 ldt_info.entry_number = i;
6220 target_ldt_info->entry_number = tswap32(i);
6221 break;
6222 }
6223 }
6224 }
6225 unlock_user_struct(target_ldt_info, ptr, 1);
6226
6227 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6228 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6229 return -TARGET_EINVAL;
6230 seg_32bit = ldt_info.flags & 1;
6231 contents = (ldt_info.flags >> 1) & 3;
6232 read_exec_only = (ldt_info.flags >> 3) & 1;
6233 limit_in_pages = (ldt_info.flags >> 4) & 1;
6234 seg_not_present = (ldt_info.flags >> 5) & 1;
6235 useable = (ldt_info.flags >> 6) & 1;
6236 #ifdef TARGET_ABI32
6237 lm = 0;
6238 #else
6239 lm = (ldt_info.flags >> 7) & 1;
6240 #endif
6241
6242 if (contents == 3) {
6243 if (seg_not_present == 0)
6244 return -TARGET_EINVAL;
6245 }
6246
6247 /* NOTE: same code as Linux kernel */
6248 /* Allow LDTs to be cleared by the user. */
6249 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6250 if ((contents == 0 &&
6251 read_exec_only == 1 &&
6252 seg_32bit == 0 &&
6253 limit_in_pages == 0 &&
6254 seg_not_present == 1 &&
6255 useable == 0 )) {
6256 entry_1 = 0;
6257 entry_2 = 0;
6258 goto install;
6259 }
6260 }
6261
6262 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6263 (ldt_info.limit & 0x0ffff);
6264 entry_2 = (ldt_info.base_addr & 0xff000000) |
6265 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6266 (ldt_info.limit & 0xf0000) |
6267 ((read_exec_only ^ 1) << 9) |
6268 (contents << 10) |
6269 ((seg_not_present ^ 1) << 15) |
6270 (seg_32bit << 22) |
6271 (limit_in_pages << 23) |
6272 (useable << 20) |
6273 (lm << 21) |
6274 0x7000;
6275
6276 /* Install the new entry ... */
6277 install:
6278 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6279 lp[0] = tswap32(entry_1);
6280 lp[1] = tswap32(entry_2);
6281 return 0;
6282 }
6283
6284 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6285 {
6286 struct target_modify_ldt_ldt_s *target_ldt_info;
6287 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6288 uint32_t base_addr, limit, flags;
6289 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6290 int seg_not_present, useable, lm;
6291 uint32_t *lp, entry_1, entry_2;
6292
6293 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6294 if (!target_ldt_info)
6295 return -TARGET_EFAULT;
6296 idx = tswap32(target_ldt_info->entry_number);
6297 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6298 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6299 unlock_user_struct(target_ldt_info, ptr, 1);
6300 return -TARGET_EINVAL;
6301 }
6302 lp = (uint32_t *)(gdt_table + idx);
6303 entry_1 = tswap32(lp[0]);
6304 entry_2 = tswap32(lp[1]);
6305
6306 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6307 contents = (entry_2 >> 10) & 3;
6308 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6309 seg_32bit = (entry_2 >> 22) & 1;
6310 limit_in_pages = (entry_2 >> 23) & 1;
6311 useable = (entry_2 >> 20) & 1;
6312 #ifdef TARGET_ABI32
6313 lm = 0;
6314 #else
6315 lm = (entry_2 >> 21) & 1;
6316 #endif
6317 flags = (seg_32bit << 0) | (contents << 1) |
6318 (read_exec_only << 3) | (limit_in_pages << 4) |
6319 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6320 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6321 base_addr = (entry_1 >> 16) |
6322 (entry_2 & 0xff000000) |
6323 ((entry_2 & 0xff) << 16);
6324 target_ldt_info->base_addr = tswapal(base_addr);
6325 target_ldt_info->limit = tswap32(limit);
6326 target_ldt_info->flags = tswap32(flags);
6327 unlock_user_struct(target_ldt_info, ptr, 1);
6328 return 0;
6329 }
6330
6331 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6332 {
6333 return -TARGET_ENOSYS;
6334 }
6335 #else
6336 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6337 {
6338 abi_long ret = 0;
6339 abi_ulong val;
6340 int idx;
6341
6342 switch(code) {
6343 case TARGET_ARCH_SET_GS:
6344 case TARGET_ARCH_SET_FS:
6345 if (code == TARGET_ARCH_SET_GS)
6346 idx = R_GS;
6347 else
6348 idx = R_FS;
6349 cpu_x86_load_seg(env, idx, 0);
6350 env->segs[idx].base = addr;
6351 break;
6352 case TARGET_ARCH_GET_GS:
6353 case TARGET_ARCH_GET_FS:
6354 if (code == TARGET_ARCH_GET_GS)
6355 idx = R_GS;
6356 else
6357 idx = R_FS;
6358 val = env->segs[idx].base;
6359 if (put_user(val, addr, abi_ulong))
6360 ret = -TARGET_EFAULT;
6361 break;
6362 default:
6363 ret = -TARGET_EINVAL;
6364 break;
6365 }
6366 return ret;
6367 }
6368 #endif /* defined(TARGET_ABI32 */
6369 #endif /* defined(TARGET_I386) */
6370
6371 /*
6372 * These constants are generic. Supply any that are missing from the host.
6373 */
6374 #ifndef PR_SET_NAME
6375 # define PR_SET_NAME 15
6376 # define PR_GET_NAME 16
6377 #endif
6378 #ifndef PR_SET_FP_MODE
6379 # define PR_SET_FP_MODE 45
6380 # define PR_GET_FP_MODE 46
6381 # define PR_FP_MODE_FR (1 << 0)
6382 # define PR_FP_MODE_FRE (1 << 1)
6383 #endif
6384 #ifndef PR_SVE_SET_VL
6385 # define PR_SVE_SET_VL 50
6386 # define PR_SVE_GET_VL 51
6387 # define PR_SVE_VL_LEN_MASK 0xffff
6388 # define PR_SVE_VL_INHERIT (1 << 17)
6389 #endif
6390 #ifndef PR_PAC_RESET_KEYS
6391 # define PR_PAC_RESET_KEYS 54
6392 # define PR_PAC_APIAKEY (1 << 0)
6393 # define PR_PAC_APIBKEY (1 << 1)
6394 # define PR_PAC_APDAKEY (1 << 2)
6395 # define PR_PAC_APDBKEY (1 << 3)
6396 # define PR_PAC_APGAKEY (1 << 4)
6397 #endif
6398 #ifndef PR_SET_TAGGED_ADDR_CTRL
6399 # define PR_SET_TAGGED_ADDR_CTRL 55
6400 # define PR_GET_TAGGED_ADDR_CTRL 56
6401 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6402 #endif
6403 #ifndef PR_MTE_TCF_SHIFT
6404 # define PR_MTE_TCF_SHIFT 1
6405 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6406 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6407 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6408 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6409 # define PR_MTE_TAG_SHIFT 3
6410 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6411 #endif
6412 #ifndef PR_SET_IO_FLUSHER
6413 # define PR_SET_IO_FLUSHER 57
6414 # define PR_GET_IO_FLUSHER 58
6415 #endif
6416 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6417 # define PR_SET_SYSCALL_USER_DISPATCH 59
6418 #endif
6419 #ifndef PR_SME_SET_VL
6420 # define PR_SME_SET_VL 63
6421 # define PR_SME_GET_VL 64
6422 # define PR_SME_VL_LEN_MASK 0xffff
6423 # define PR_SME_VL_INHERIT (1 << 17)
6424 #endif
6425
6426 #include "target_prctl.h"
6427
6428 static abi_long do_prctl_inval0(CPUArchState *env)
6429 {
6430 return -TARGET_EINVAL;
6431 }
6432
6433 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6434 {
6435 return -TARGET_EINVAL;
6436 }
6437
6438 #ifndef do_prctl_get_fp_mode
6439 #define do_prctl_get_fp_mode do_prctl_inval0
6440 #endif
6441 #ifndef do_prctl_set_fp_mode
6442 #define do_prctl_set_fp_mode do_prctl_inval1
6443 #endif
6444 #ifndef do_prctl_sve_get_vl
6445 #define do_prctl_sve_get_vl do_prctl_inval0
6446 #endif
6447 #ifndef do_prctl_sve_set_vl
6448 #define do_prctl_sve_set_vl do_prctl_inval1
6449 #endif
6450 #ifndef do_prctl_reset_keys
6451 #define do_prctl_reset_keys do_prctl_inval1
6452 #endif
6453 #ifndef do_prctl_set_tagged_addr_ctrl
6454 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6455 #endif
6456 #ifndef do_prctl_get_tagged_addr_ctrl
6457 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6458 #endif
6459 #ifndef do_prctl_get_unalign
6460 #define do_prctl_get_unalign do_prctl_inval1
6461 #endif
6462 #ifndef do_prctl_set_unalign
6463 #define do_prctl_set_unalign do_prctl_inval1
6464 #endif
6465 #ifndef do_prctl_sme_get_vl
6466 #define do_prctl_sme_get_vl do_prctl_inval0
6467 #endif
6468 #ifndef do_prctl_sme_set_vl
6469 #define do_prctl_sme_set_vl do_prctl_inval1
6470 #endif
6471
6472 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6473 abi_long arg3, abi_long arg4, abi_long arg5)
6474 {
6475 abi_long ret;
6476
6477 switch (option) {
6478 case PR_GET_PDEATHSIG:
6479 {
6480 int deathsig;
6481 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6482 arg3, arg4, arg5));
6483 if (!is_error(ret) &&
6484 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6485 return -TARGET_EFAULT;
6486 }
6487 return ret;
6488 }
6489 case PR_SET_PDEATHSIG:
6490 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6491 arg3, arg4, arg5));
6492 case PR_GET_NAME:
6493 {
6494 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6495 if (!name) {
6496 return -TARGET_EFAULT;
6497 }
6498 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6499 arg3, arg4, arg5));
6500 unlock_user(name, arg2, 16);
6501 return ret;
6502 }
6503 case PR_SET_NAME:
6504 {
6505 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6506 if (!name) {
6507 return -TARGET_EFAULT;
6508 }
6509 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6510 arg3, arg4, arg5));
6511 unlock_user(name, arg2, 0);
6512 return ret;
6513 }
6514 case PR_GET_FP_MODE:
6515 return do_prctl_get_fp_mode(env);
6516 case PR_SET_FP_MODE:
6517 return do_prctl_set_fp_mode(env, arg2);
6518 case PR_SVE_GET_VL:
6519 return do_prctl_sve_get_vl(env);
6520 case PR_SVE_SET_VL:
6521 return do_prctl_sve_set_vl(env, arg2);
6522 case PR_SME_GET_VL:
6523 return do_prctl_sme_get_vl(env);
6524 case PR_SME_SET_VL:
6525 return do_prctl_sme_set_vl(env, arg2);
6526 case PR_PAC_RESET_KEYS:
6527 if (arg3 || arg4 || arg5) {
6528 return -TARGET_EINVAL;
6529 }
6530 return do_prctl_reset_keys(env, arg2);
6531 case PR_SET_TAGGED_ADDR_CTRL:
6532 if (arg3 || arg4 || arg5) {
6533 return -TARGET_EINVAL;
6534 }
6535 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6536 case PR_GET_TAGGED_ADDR_CTRL:
6537 if (arg2 || arg3 || arg4 || arg5) {
6538 return -TARGET_EINVAL;
6539 }
6540 return do_prctl_get_tagged_addr_ctrl(env);
6541
6542 case PR_GET_UNALIGN:
6543 return do_prctl_get_unalign(env, arg2);
6544 case PR_SET_UNALIGN:
6545 return do_prctl_set_unalign(env, arg2);
6546
6547 case PR_CAP_AMBIENT:
6548 case PR_CAPBSET_READ:
6549 case PR_CAPBSET_DROP:
6550 case PR_GET_DUMPABLE:
6551 case PR_SET_DUMPABLE:
6552 case PR_GET_KEEPCAPS:
6553 case PR_SET_KEEPCAPS:
6554 case PR_GET_SECUREBITS:
6555 case PR_SET_SECUREBITS:
6556 case PR_GET_TIMING:
6557 case PR_SET_TIMING:
6558 case PR_GET_TIMERSLACK:
6559 case PR_SET_TIMERSLACK:
6560 case PR_MCE_KILL:
6561 case PR_MCE_KILL_GET:
6562 case PR_GET_NO_NEW_PRIVS:
6563 case PR_SET_NO_NEW_PRIVS:
6564 case PR_GET_IO_FLUSHER:
6565 case PR_SET_IO_FLUSHER:
6566 /* Some prctl options have no pointer arguments and we can pass on. */
6567 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6568
6569 case PR_GET_CHILD_SUBREAPER:
6570 case PR_SET_CHILD_SUBREAPER:
6571 case PR_GET_SPECULATION_CTRL:
6572 case PR_SET_SPECULATION_CTRL:
6573 case PR_GET_TID_ADDRESS:
6574 /* TODO */
6575 return -TARGET_EINVAL;
6576
6577 case PR_GET_FPEXC:
6578 case PR_SET_FPEXC:
6579 /* Was used for SPE on PowerPC. */
6580 return -TARGET_EINVAL;
6581
6582 case PR_GET_ENDIAN:
6583 case PR_SET_ENDIAN:
6584 case PR_GET_FPEMU:
6585 case PR_SET_FPEMU:
6586 case PR_SET_MM:
6587 case PR_GET_SECCOMP:
6588 case PR_SET_SECCOMP:
6589 case PR_SET_SYSCALL_USER_DISPATCH:
6590 case PR_GET_THP_DISABLE:
6591 case PR_SET_THP_DISABLE:
6592 case PR_GET_TSC:
6593 case PR_SET_TSC:
6594 /* Disable to prevent the target disabling stuff we need. */
6595 return -TARGET_EINVAL;
6596
6597 default:
6598 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6599 option);
6600 return -TARGET_EINVAL;
6601 }
6602 }
6603
6604 #define NEW_STACK_SIZE 0x40000
6605
6606
6607 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6608 typedef struct {
6609 CPUArchState *env;
6610 pthread_mutex_t mutex;
6611 pthread_cond_t cond;
6612 pthread_t thread;
6613 uint32_t tid;
6614 abi_ulong child_tidptr;
6615 abi_ulong parent_tidptr;
6616 sigset_t sigmask;
6617 } new_thread_info;
6618
6619 static void *clone_func(void *arg)
6620 {
6621 new_thread_info *info = arg;
6622 CPUArchState *env;
6623 CPUState *cpu;
6624 TaskState *ts;
6625
6626 rcu_register_thread();
6627 tcg_register_thread();
6628 env = info->env;
6629 cpu = env_cpu(env);
6630 thread_cpu = cpu;
6631 ts = (TaskState *)cpu->opaque;
6632 info->tid = sys_gettid();
6633 task_settid(ts);
6634 if (info->child_tidptr)
6635 put_user_u32(info->tid, info->child_tidptr);
6636 if (info->parent_tidptr)
6637 put_user_u32(info->tid, info->parent_tidptr);
6638 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6639 /* Enable signals. */
6640 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6641 /* Signal to the parent that we're ready. */
6642 pthread_mutex_lock(&info->mutex);
6643 pthread_cond_broadcast(&info->cond);
6644 pthread_mutex_unlock(&info->mutex);
6645 /* Wait until the parent has finished initializing the tls state. */
6646 pthread_mutex_lock(&clone_lock);
6647 pthread_mutex_unlock(&clone_lock);
6648 cpu_loop(env);
6649 /* never exits */
6650 return NULL;
6651 }
6652
6653 /* do_fork() Must return host values and target errnos (unlike most
6654 do_*() functions). */
6655 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6656 abi_ulong parent_tidptr, target_ulong newtls,
6657 abi_ulong child_tidptr)
6658 {
6659 CPUState *cpu = env_cpu(env);
6660 int ret;
6661 TaskState *ts;
6662 CPUState *new_cpu;
6663 CPUArchState *new_env;
6664 sigset_t sigmask;
6665
6666 flags &= ~CLONE_IGNORED_FLAGS;
6667
6668 /* Emulate vfork() with fork() */
6669 if (flags & CLONE_VFORK)
6670 flags &= ~(CLONE_VFORK | CLONE_VM);
6671
6672 if (flags & CLONE_VM) {
6673 TaskState *parent_ts = (TaskState *)cpu->opaque;
6674 new_thread_info info;
6675 pthread_attr_t attr;
6676
6677 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6678 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6679 return -TARGET_EINVAL;
6680 }
6681
6682 ts = g_new0(TaskState, 1);
6683 init_task_state(ts);
6684
6685 /* Grab a mutex so that thread setup appears atomic. */
6686 pthread_mutex_lock(&clone_lock);
6687
6688 /*
6689 * If this is our first additional thread, we need to ensure we
6690 * generate code for parallel execution and flush old translations.
6691 * Do this now so that the copy gets CF_PARALLEL too.
6692 */
6693 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6694 cpu->tcg_cflags |= CF_PARALLEL;
6695 tb_flush(cpu);
6696 }
6697
6698 /* we create a new CPU instance. */
6699 new_env = cpu_copy(env);
6700 /* Init regs that differ from the parent. */
6701 cpu_clone_regs_child(new_env, newsp, flags);
6702 cpu_clone_regs_parent(env, flags);
6703 new_cpu = env_cpu(new_env);
6704 new_cpu->opaque = ts;
6705 ts->bprm = parent_ts->bprm;
6706 ts->info = parent_ts->info;
6707 ts->signal_mask = parent_ts->signal_mask;
6708
6709 if (flags & CLONE_CHILD_CLEARTID) {
6710 ts->child_tidptr = child_tidptr;
6711 }
6712
6713 if (flags & CLONE_SETTLS) {
6714 cpu_set_tls (new_env, newtls);
6715 }
6716
6717 memset(&info, 0, sizeof(info));
6718 pthread_mutex_init(&info.mutex, NULL);
6719 pthread_mutex_lock(&info.mutex);
6720 pthread_cond_init(&info.cond, NULL);
6721 info.env = new_env;
6722 if (flags & CLONE_CHILD_SETTID) {
6723 info.child_tidptr = child_tidptr;
6724 }
6725 if (flags & CLONE_PARENT_SETTID) {
6726 info.parent_tidptr = parent_tidptr;
6727 }
6728
6729 ret = pthread_attr_init(&attr);
6730 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6731 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6732 /* It is not safe to deliver signals until the child has finished
6733 initializing, so temporarily block all signals. */
6734 sigfillset(&sigmask);
6735 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6736 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6737
6738 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6739 /* TODO: Free new CPU state if thread creation failed. */
6740
6741 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6742 pthread_attr_destroy(&attr);
6743 if (ret == 0) {
6744 /* Wait for the child to initialize. */
6745 pthread_cond_wait(&info.cond, &info.mutex);
6746 ret = info.tid;
6747 } else {
6748 ret = -1;
6749 }
6750 pthread_mutex_unlock(&info.mutex);
6751 pthread_cond_destroy(&info.cond);
6752 pthread_mutex_destroy(&info.mutex);
6753 pthread_mutex_unlock(&clone_lock);
6754 } else {
6755 /* if no CLONE_VM, we consider it is a fork */
6756 if (flags & CLONE_INVALID_FORK_FLAGS) {
6757 return -TARGET_EINVAL;
6758 }
6759
6760 /* We can't support custom termination signals */
6761 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6762 return -TARGET_EINVAL;
6763 }
6764
6765 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6766 if (flags & CLONE_PIDFD) {
6767 return -TARGET_EINVAL;
6768 }
6769 #endif
6770
6771 /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6772 if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6773 return -TARGET_EINVAL;
6774 }
6775
6776 if (block_signals()) {
6777 return -QEMU_ERESTARTSYS;
6778 }
6779
6780 fork_start();
6781 ret = fork();
6782 if (ret == 0) {
6783 /* Child Process. */
6784 cpu_clone_regs_child(env, newsp, flags);
6785 fork_end(1);
6786 /* There is a race condition here. The parent process could
6787 theoretically read the TID in the child process before the child
6788 tid is set. This would require using either ptrace
6789 (not implemented) or having *_tidptr to point at a shared memory
6790 mapping. We can't repeat the spinlock hack used above because
6791 the child process gets its own copy of the lock. */
6792 if (flags & CLONE_CHILD_SETTID)
6793 put_user_u32(sys_gettid(), child_tidptr);
6794 if (flags & CLONE_PARENT_SETTID)
6795 put_user_u32(sys_gettid(), parent_tidptr);
6796 ts = (TaskState *)cpu->opaque;
6797 if (flags & CLONE_SETTLS)
6798 cpu_set_tls (env, newtls);
6799 if (flags & CLONE_CHILD_CLEARTID)
6800 ts->child_tidptr = child_tidptr;
6801 } else {
6802 cpu_clone_regs_parent(env, flags);
6803 if (flags & CLONE_PIDFD) {
6804 int pid_fd = 0;
6805 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6806 int pid_child = ret;
6807 pid_fd = pidfd_open(pid_child, 0);
6808 if (pid_fd >= 0) {
6809 fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6810 | FD_CLOEXEC);
6811 } else {
6812 pid_fd = 0;
6813 }
6814 #endif
6815 put_user_u32(pid_fd, parent_tidptr);
6816 }
6817 fork_end(0);
6818 }
6819 g_assert(!cpu_in_exclusive_context(cpu));
6820 }
6821 return ret;
6822 }
6823
6824 /* warning : doesn't handle linux specific flags... */
6825 static int target_to_host_fcntl_cmd(int cmd)
6826 {
6827 int ret;
6828
6829 switch(cmd) {
6830 case TARGET_F_DUPFD:
6831 case TARGET_F_GETFD:
6832 case TARGET_F_SETFD:
6833 case TARGET_F_GETFL:
6834 case TARGET_F_SETFL:
6835 case TARGET_F_OFD_GETLK:
6836 case TARGET_F_OFD_SETLK:
6837 case TARGET_F_OFD_SETLKW:
6838 ret = cmd;
6839 break;
6840 case TARGET_F_GETLK:
6841 ret = F_GETLK64;
6842 break;
6843 case TARGET_F_SETLK:
6844 ret = F_SETLK64;
6845 break;
6846 case TARGET_F_SETLKW:
6847 ret = F_SETLKW64;
6848 break;
6849 case TARGET_F_GETOWN:
6850 ret = F_GETOWN;
6851 break;
6852 case TARGET_F_SETOWN:
6853 ret = F_SETOWN;
6854 break;
6855 case TARGET_F_GETSIG:
6856 ret = F_GETSIG;
6857 break;
6858 case TARGET_F_SETSIG:
6859 ret = F_SETSIG;
6860 break;
6861 #if TARGET_ABI_BITS == 32
6862 case TARGET_F_GETLK64:
6863 ret = F_GETLK64;
6864 break;
6865 case TARGET_F_SETLK64:
6866 ret = F_SETLK64;
6867 break;
6868 case TARGET_F_SETLKW64:
6869 ret = F_SETLKW64;
6870 break;
6871 #endif
6872 case TARGET_F_SETLEASE:
6873 ret = F_SETLEASE;
6874 break;
6875 case TARGET_F_GETLEASE:
6876 ret = F_GETLEASE;
6877 break;
6878 #ifdef F_DUPFD_CLOEXEC
6879 case TARGET_F_DUPFD_CLOEXEC:
6880 ret = F_DUPFD_CLOEXEC;
6881 break;
6882 #endif
6883 case TARGET_F_NOTIFY:
6884 ret = F_NOTIFY;
6885 break;
6886 #ifdef F_GETOWN_EX
6887 case TARGET_F_GETOWN_EX:
6888 ret = F_GETOWN_EX;
6889 break;
6890 #endif
6891 #ifdef F_SETOWN_EX
6892 case TARGET_F_SETOWN_EX:
6893 ret = F_SETOWN_EX;
6894 break;
6895 #endif
6896 #ifdef F_SETPIPE_SZ
6897 case TARGET_F_SETPIPE_SZ:
6898 ret = F_SETPIPE_SZ;
6899 break;
6900 case TARGET_F_GETPIPE_SZ:
6901 ret = F_GETPIPE_SZ;
6902 break;
6903 #endif
6904 #ifdef F_ADD_SEALS
6905 case TARGET_F_ADD_SEALS:
6906 ret = F_ADD_SEALS;
6907 break;
6908 case TARGET_F_GET_SEALS:
6909 ret = F_GET_SEALS;
6910 break;
6911 #endif
6912 default:
6913 ret = -TARGET_EINVAL;
6914 break;
6915 }
6916
6917 #if defined(__powerpc64__)
6918 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6919 * is not supported by kernel. The glibc fcntl call actually adjusts
6920 * them to 5, 6 and 7 before making the syscall(). Since we make the
6921 * syscall directly, adjust to what is supported by the kernel.
6922 */
6923 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6924 ret -= F_GETLK64 - 5;
6925 }
6926 #endif
6927
6928 return ret;
6929 }
6930
6931 #define FLOCK_TRANSTBL \
6932 switch (type) { \
6933 TRANSTBL_CONVERT(F_RDLCK); \
6934 TRANSTBL_CONVERT(F_WRLCK); \
6935 TRANSTBL_CONVERT(F_UNLCK); \
6936 }
6937
6938 static int target_to_host_flock(int type)
6939 {
6940 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6941 FLOCK_TRANSTBL
6942 #undef TRANSTBL_CONVERT
6943 return -TARGET_EINVAL;
6944 }
6945
6946 static int host_to_target_flock(int type)
6947 {
6948 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6949 FLOCK_TRANSTBL
6950 #undef TRANSTBL_CONVERT
6951 /* if we don't know how to convert the value coming
6952 * from the host we copy to the target field as-is
6953 */
6954 return type;
6955 }
6956
6957 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6958 abi_ulong target_flock_addr)
6959 {
6960 struct target_flock *target_fl;
6961 int l_type;
6962
6963 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6964 return -TARGET_EFAULT;
6965 }
6966
6967 __get_user(l_type, &target_fl->l_type);
6968 l_type = target_to_host_flock(l_type);
6969 if (l_type < 0) {
6970 return l_type;
6971 }
6972 fl->l_type = l_type;
6973 __get_user(fl->l_whence, &target_fl->l_whence);
6974 __get_user(fl->l_start, &target_fl->l_start);
6975 __get_user(fl->l_len, &target_fl->l_len);
6976 __get_user(fl->l_pid, &target_fl->l_pid);
6977 unlock_user_struct(target_fl, target_flock_addr, 0);
6978 return 0;
6979 }
6980
6981 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6982 const struct flock64 *fl)
6983 {
6984 struct target_flock *target_fl;
6985 short l_type;
6986
6987 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6988 return -TARGET_EFAULT;
6989 }
6990
6991 l_type = host_to_target_flock(fl->l_type);
6992 __put_user(l_type, &target_fl->l_type);
6993 __put_user(fl->l_whence, &target_fl->l_whence);
6994 __put_user(fl->l_start, &target_fl->l_start);
6995 __put_user(fl->l_len, &target_fl->l_len);
6996 __put_user(fl->l_pid, &target_fl->l_pid);
6997 unlock_user_struct(target_fl, target_flock_addr, 1);
6998 return 0;
6999 }
7000
7001 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
7002 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
7003
7004 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
7005 struct target_oabi_flock64 {
7006 abi_short l_type;
7007 abi_short l_whence;
7008 abi_llong l_start;
7009 abi_llong l_len;
7010 abi_int l_pid;
7011 } QEMU_PACKED;
7012
7013 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
7014 abi_ulong target_flock_addr)
7015 {
7016 struct target_oabi_flock64 *target_fl;
7017 int l_type;
7018
7019 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7020 return -TARGET_EFAULT;
7021 }
7022
7023 __get_user(l_type, &target_fl->l_type);
7024 l_type = target_to_host_flock(l_type);
7025 if (l_type < 0) {
7026 return l_type;
7027 }
7028 fl->l_type = l_type;
7029 __get_user(fl->l_whence, &target_fl->l_whence);
7030 __get_user(fl->l_start, &target_fl->l_start);
7031 __get_user(fl->l_len, &target_fl->l_len);
7032 __get_user(fl->l_pid, &target_fl->l_pid);
7033 unlock_user_struct(target_fl, target_flock_addr, 0);
7034 return 0;
7035 }
7036
7037 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7038 const struct flock64 *fl)
7039 {
7040 struct target_oabi_flock64 *target_fl;
7041 short l_type;
7042
7043 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7044 return -TARGET_EFAULT;
7045 }
7046
7047 l_type = host_to_target_flock(fl->l_type);
7048 __put_user(l_type, &target_fl->l_type);
7049 __put_user(fl->l_whence, &target_fl->l_whence);
7050 __put_user(fl->l_start, &target_fl->l_start);
7051 __put_user(fl->l_len, &target_fl->l_len);
7052 __put_user(fl->l_pid, &target_fl->l_pid);
7053 unlock_user_struct(target_fl, target_flock_addr, 1);
7054 return 0;
7055 }
7056 #endif
7057
7058 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7059 abi_ulong target_flock_addr)
7060 {
7061 struct target_flock64 *target_fl;
7062 int l_type;
7063
7064 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7065 return -TARGET_EFAULT;
7066 }
7067
7068 __get_user(l_type, &target_fl->l_type);
7069 l_type = target_to_host_flock(l_type);
7070 if (l_type < 0) {
7071 return l_type;
7072 }
7073 fl->l_type = l_type;
7074 __get_user(fl->l_whence, &target_fl->l_whence);
7075 __get_user(fl->l_start, &target_fl->l_start);
7076 __get_user(fl->l_len, &target_fl->l_len);
7077 __get_user(fl->l_pid, &target_fl->l_pid);
7078 unlock_user_struct(target_fl, target_flock_addr, 0);
7079 return 0;
7080 }
7081
7082 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7083 const struct flock64 *fl)
7084 {
7085 struct target_flock64 *target_fl;
7086 short l_type;
7087
7088 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7089 return -TARGET_EFAULT;
7090 }
7091
7092 l_type = host_to_target_flock(fl->l_type);
7093 __put_user(l_type, &target_fl->l_type);
7094 __put_user(fl->l_whence, &target_fl->l_whence);
7095 __put_user(fl->l_start, &target_fl->l_start);
7096 __put_user(fl->l_len, &target_fl->l_len);
7097 __put_user(fl->l_pid, &target_fl->l_pid);
7098 unlock_user_struct(target_fl, target_flock_addr, 1);
7099 return 0;
7100 }
7101
7102 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7103 {
7104 struct flock64 fl64;
7105 #ifdef F_GETOWN_EX
7106 struct f_owner_ex fox;
7107 struct target_f_owner_ex *target_fox;
7108 #endif
7109 abi_long ret;
7110 int host_cmd = target_to_host_fcntl_cmd(cmd);
7111
7112 if (host_cmd == -TARGET_EINVAL)
7113 return host_cmd;
7114
7115 switch(cmd) {
7116 case TARGET_F_GETLK:
7117 ret = copy_from_user_flock(&fl64, arg);
7118 if (ret) {
7119 return ret;
7120 }
7121 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7122 if (ret == 0) {
7123 ret = copy_to_user_flock(arg, &fl64);
7124 }
7125 break;
7126
7127 case TARGET_F_SETLK:
7128 case TARGET_F_SETLKW:
7129 ret = copy_from_user_flock(&fl64, arg);
7130 if (ret) {
7131 return ret;
7132 }
7133 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7134 break;
7135
7136 case TARGET_F_GETLK64:
7137 case TARGET_F_OFD_GETLK:
7138 ret = copy_from_user_flock64(&fl64, arg);
7139 if (ret) {
7140 return ret;
7141 }
7142 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7143 if (ret == 0) {
7144 ret = copy_to_user_flock64(arg, &fl64);
7145 }
7146 break;
7147 case TARGET_F_SETLK64:
7148 case TARGET_F_SETLKW64:
7149 case TARGET_F_OFD_SETLK:
7150 case TARGET_F_OFD_SETLKW:
7151 ret = copy_from_user_flock64(&fl64, arg);
7152 if (ret) {
7153 return ret;
7154 }
7155 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7156 break;
7157
7158 case TARGET_F_GETFL:
7159 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7160 if (ret >= 0) {
7161 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7162 /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7163 if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7164 ret |= TARGET_O_LARGEFILE;
7165 }
7166 }
7167 break;
7168
7169 case TARGET_F_SETFL:
7170 ret = get_errno(safe_fcntl(fd, host_cmd,
7171 target_to_host_bitmask(arg,
7172 fcntl_flags_tbl)));
7173 break;
7174
7175 #ifdef F_GETOWN_EX
7176 case TARGET_F_GETOWN_EX:
7177 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7178 if (ret >= 0) {
7179 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7180 return -TARGET_EFAULT;
7181 target_fox->type = tswap32(fox.type);
7182 target_fox->pid = tswap32(fox.pid);
7183 unlock_user_struct(target_fox, arg, 1);
7184 }
7185 break;
7186 #endif
7187
7188 #ifdef F_SETOWN_EX
7189 case TARGET_F_SETOWN_EX:
7190 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7191 return -TARGET_EFAULT;
7192 fox.type = tswap32(target_fox->type);
7193 fox.pid = tswap32(target_fox->pid);
7194 unlock_user_struct(target_fox, arg, 0);
7195 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7196 break;
7197 #endif
7198
7199 case TARGET_F_SETSIG:
7200 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7201 break;
7202
7203 case TARGET_F_GETSIG:
7204 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7205 break;
7206
7207 case TARGET_F_SETOWN:
7208 case TARGET_F_GETOWN:
7209 case TARGET_F_SETLEASE:
7210 case TARGET_F_GETLEASE:
7211 case TARGET_F_SETPIPE_SZ:
7212 case TARGET_F_GETPIPE_SZ:
7213 case TARGET_F_ADD_SEALS:
7214 case TARGET_F_GET_SEALS:
7215 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7216 break;
7217
7218 default:
7219 ret = get_errno(safe_fcntl(fd, cmd, arg));
7220 break;
7221 }
7222 return ret;
7223 }
7224
7225 #ifdef USE_UID16
7226
7227 static inline int high2lowuid(int uid)
7228 {
7229 if (uid > 65535)
7230 return 65534;
7231 else
7232 return uid;
7233 }
7234
7235 static inline int high2lowgid(int gid)
7236 {
7237 if (gid > 65535)
7238 return 65534;
7239 else
7240 return gid;
7241 }
7242
7243 static inline int low2highuid(int uid)
7244 {
7245 if ((int16_t)uid == -1)
7246 return -1;
7247 else
7248 return uid;
7249 }
7250
7251 static inline int low2highgid(int gid)
7252 {
7253 if ((int16_t)gid == -1)
7254 return -1;
7255 else
7256 return gid;
7257 }
7258 static inline int tswapid(int id)
7259 {
7260 return tswap16(id);
7261 }
7262
7263 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7264
7265 #else /* !USE_UID16 */
7266 static inline int high2lowuid(int uid)
7267 {
7268 return uid;
7269 }
7270 static inline int high2lowgid(int gid)
7271 {
7272 return gid;
7273 }
7274 static inline int low2highuid(int uid)
7275 {
7276 return uid;
7277 }
7278 static inline int low2highgid(int gid)
7279 {
7280 return gid;
7281 }
7282 static inline int tswapid(int id)
7283 {
7284 return tswap32(id);
7285 }
7286
7287 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7288
7289 #endif /* USE_UID16 */
7290
7291 /* We must do direct syscalls for setting UID/GID, because we want to
7292 * implement the Linux system call semantics of "change only for this thread",
7293 * not the libc/POSIX semantics of "change for all threads in process".
7294 * (See http://ewontfix.com/17/ for more details.)
7295 * We use the 32-bit version of the syscalls if present; if it is not
7296 * then either the host architecture supports 32-bit UIDs natively with
7297 * the standard syscall, or the 16-bit UID is the best we can do.
7298 */
7299 #ifdef __NR_setuid32
7300 #define __NR_sys_setuid __NR_setuid32
7301 #else
7302 #define __NR_sys_setuid __NR_setuid
7303 #endif
7304 #ifdef __NR_setgid32
7305 #define __NR_sys_setgid __NR_setgid32
7306 #else
7307 #define __NR_sys_setgid __NR_setgid
7308 #endif
7309 #ifdef __NR_setresuid32
7310 #define __NR_sys_setresuid __NR_setresuid32
7311 #else
7312 #define __NR_sys_setresuid __NR_setresuid
7313 #endif
7314 #ifdef __NR_setresgid32
7315 #define __NR_sys_setresgid __NR_setresgid32
7316 #else
7317 #define __NR_sys_setresgid __NR_setresgid
7318 #endif
7319
7320 _syscall1(int, sys_setuid, uid_t, uid)
7321 _syscall1(int, sys_setgid, gid_t, gid)
7322 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7323 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7324
7325 void syscall_init(void)
7326 {
7327 IOCTLEntry *ie;
7328 const argtype *arg_type;
7329 int size;
7330
7331 thunk_init(STRUCT_MAX);
7332
7333 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7334 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7335 #include "syscall_types.h"
7336 #undef STRUCT
7337 #undef STRUCT_SPECIAL
7338
7339 /* we patch the ioctl size if necessary. We rely on the fact that
7340 no ioctl has all the bits at '1' in the size field */
7341 ie = ioctl_entries;
7342 while (ie->target_cmd != 0) {
7343 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7344 TARGET_IOC_SIZEMASK) {
7345 arg_type = ie->arg_type;
7346 if (arg_type[0] != TYPE_PTR) {
7347 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7348 ie->target_cmd);
7349 exit(1);
7350 }
7351 arg_type++;
7352 size = thunk_type_size(arg_type, 0);
7353 ie->target_cmd = (ie->target_cmd &
7354 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7355 (size << TARGET_IOC_SIZESHIFT);
7356 }
7357
7358 /* automatic consistency check if same arch */
7359 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7360 (defined(__x86_64__) && defined(TARGET_X86_64))
7361 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7362 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7363 ie->name, ie->target_cmd, ie->host_cmd);
7364 }
7365 #endif
7366 ie++;
7367 }
7368 }
7369
7370 #ifdef TARGET_NR_truncate64
7371 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7372 abi_long arg2,
7373 abi_long arg3,
7374 abi_long arg4)
7375 {
7376 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7377 arg2 = arg3;
7378 arg3 = arg4;
7379 }
7380 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7381 }
7382 #endif
7383
7384 #ifdef TARGET_NR_ftruncate64
7385 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7386 abi_long arg2,
7387 abi_long arg3,
7388 abi_long arg4)
7389 {
7390 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7391 arg2 = arg3;
7392 arg3 = arg4;
7393 }
7394 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7395 }
7396 #endif
7397
7398 #if defined(TARGET_NR_timer_settime) || \
7399 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7400 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7401 abi_ulong target_addr)
7402 {
7403 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7404 offsetof(struct target_itimerspec,
7405 it_interval)) ||
7406 target_to_host_timespec(&host_its->it_value, target_addr +
7407 offsetof(struct target_itimerspec,
7408 it_value))) {
7409 return -TARGET_EFAULT;
7410 }
7411
7412 return 0;
7413 }
7414 #endif
7415
7416 #if defined(TARGET_NR_timer_settime64) || \
7417 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7418 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7419 abi_ulong target_addr)
7420 {
7421 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7422 offsetof(struct target__kernel_itimerspec,
7423 it_interval)) ||
7424 target_to_host_timespec64(&host_its->it_value, target_addr +
7425 offsetof(struct target__kernel_itimerspec,
7426 it_value))) {
7427 return -TARGET_EFAULT;
7428 }
7429
7430 return 0;
7431 }
7432 #endif
7433
7434 #if ((defined(TARGET_NR_timerfd_gettime) || \
7435 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7436 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7437 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7438 struct itimerspec *host_its)
7439 {
7440 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7441 it_interval),
7442 &host_its->it_interval) ||
7443 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7444 it_value),
7445 &host_its->it_value)) {
7446 return -TARGET_EFAULT;
7447 }
7448 return 0;
7449 }
7450 #endif
7451
7452 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7453 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7454 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7455 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7456 struct itimerspec *host_its)
7457 {
7458 if (host_to_target_timespec64(target_addr +
7459 offsetof(struct target__kernel_itimerspec,
7460 it_interval),
7461 &host_its->it_interval) ||
7462 host_to_target_timespec64(target_addr +
7463 offsetof(struct target__kernel_itimerspec,
7464 it_value),
7465 &host_its->it_value)) {
7466 return -TARGET_EFAULT;
7467 }
7468 return 0;
7469 }
7470 #endif
7471
7472 #if defined(TARGET_NR_adjtimex) || \
7473 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7474 static inline abi_long target_to_host_timex(struct timex *host_tx,
7475 abi_long target_addr)
7476 {
7477 struct target_timex *target_tx;
7478
7479 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7480 return -TARGET_EFAULT;
7481 }
7482
7483 __get_user(host_tx->modes, &target_tx->modes);
7484 __get_user(host_tx->offset, &target_tx->offset);
7485 __get_user(host_tx->freq, &target_tx->freq);
7486 __get_user(host_tx->maxerror, &target_tx->maxerror);
7487 __get_user(host_tx->esterror, &target_tx->esterror);
7488 __get_user(host_tx->status, &target_tx->status);
7489 __get_user(host_tx->constant, &target_tx->constant);
7490 __get_user(host_tx->precision, &target_tx->precision);
7491 __get_user(host_tx->tolerance, &target_tx->tolerance);
7492 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7493 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7494 __get_user(host_tx->tick, &target_tx->tick);
7495 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7496 __get_user(host_tx->jitter, &target_tx->jitter);
7497 __get_user(host_tx->shift, &target_tx->shift);
7498 __get_user(host_tx->stabil, &target_tx->stabil);
7499 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7500 __get_user(host_tx->calcnt, &target_tx->calcnt);
7501 __get_user(host_tx->errcnt, &target_tx->errcnt);
7502 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7503 __get_user(host_tx->tai, &target_tx->tai);
7504
7505 unlock_user_struct(target_tx, target_addr, 0);
7506 return 0;
7507 }
7508
7509 static inline abi_long host_to_target_timex(abi_long target_addr,
7510 struct timex *host_tx)
7511 {
7512 struct target_timex *target_tx;
7513
7514 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7515 return -TARGET_EFAULT;
7516 }
7517
7518 __put_user(host_tx->modes, &target_tx->modes);
7519 __put_user(host_tx->offset, &target_tx->offset);
7520 __put_user(host_tx->freq, &target_tx->freq);
7521 __put_user(host_tx->maxerror, &target_tx->maxerror);
7522 __put_user(host_tx->esterror, &target_tx->esterror);
7523 __put_user(host_tx->status, &target_tx->status);
7524 __put_user(host_tx->constant, &target_tx->constant);
7525 __put_user(host_tx->precision, &target_tx->precision);
7526 __put_user(host_tx->tolerance, &target_tx->tolerance);
7527 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7528 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7529 __put_user(host_tx->tick, &target_tx->tick);
7530 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7531 __put_user(host_tx->jitter, &target_tx->jitter);
7532 __put_user(host_tx->shift, &target_tx->shift);
7533 __put_user(host_tx->stabil, &target_tx->stabil);
7534 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7535 __put_user(host_tx->calcnt, &target_tx->calcnt);
7536 __put_user(host_tx->errcnt, &target_tx->errcnt);
7537 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7538 __put_user(host_tx->tai, &target_tx->tai);
7539
7540 unlock_user_struct(target_tx, target_addr, 1);
7541 return 0;
7542 }
7543 #endif
7544
7545
7546 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7547 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7548 abi_long target_addr)
7549 {
7550 struct target__kernel_timex *target_tx;
7551
7552 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7553 offsetof(struct target__kernel_timex,
7554 time))) {
7555 return -TARGET_EFAULT;
7556 }
7557
7558 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7559 return -TARGET_EFAULT;
7560 }
7561
7562 __get_user(host_tx->modes, &target_tx->modes);
7563 __get_user(host_tx->offset, &target_tx->offset);
7564 __get_user(host_tx->freq, &target_tx->freq);
7565 __get_user(host_tx->maxerror, &target_tx->maxerror);
7566 __get_user(host_tx->esterror, &target_tx->esterror);
7567 __get_user(host_tx->status, &target_tx->status);
7568 __get_user(host_tx->constant, &target_tx->constant);
7569 __get_user(host_tx->precision, &target_tx->precision);
7570 __get_user(host_tx->tolerance, &target_tx->tolerance);
7571 __get_user(host_tx->tick, &target_tx->tick);
7572 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7573 __get_user(host_tx->jitter, &target_tx->jitter);
7574 __get_user(host_tx->shift, &target_tx->shift);
7575 __get_user(host_tx->stabil, &target_tx->stabil);
7576 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7577 __get_user(host_tx->calcnt, &target_tx->calcnt);
7578 __get_user(host_tx->errcnt, &target_tx->errcnt);
7579 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7580 __get_user(host_tx->tai, &target_tx->tai);
7581
7582 unlock_user_struct(target_tx, target_addr, 0);
7583 return 0;
7584 }
7585
7586 static inline abi_long host_to_target_timex64(abi_long target_addr,
7587 struct timex *host_tx)
7588 {
7589 struct target__kernel_timex *target_tx;
7590
7591 if (copy_to_user_timeval64(target_addr +
7592 offsetof(struct target__kernel_timex, time),
7593 &host_tx->time)) {
7594 return -TARGET_EFAULT;
7595 }
7596
7597 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7598 return -TARGET_EFAULT;
7599 }
7600
7601 __put_user(host_tx->modes, &target_tx->modes);
7602 __put_user(host_tx->offset, &target_tx->offset);
7603 __put_user(host_tx->freq, &target_tx->freq);
7604 __put_user(host_tx->maxerror, &target_tx->maxerror);
7605 __put_user(host_tx->esterror, &target_tx->esterror);
7606 __put_user(host_tx->status, &target_tx->status);
7607 __put_user(host_tx->constant, &target_tx->constant);
7608 __put_user(host_tx->precision, &target_tx->precision);
7609 __put_user(host_tx->tolerance, &target_tx->tolerance);
7610 __put_user(host_tx->tick, &target_tx->tick);
7611 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7612 __put_user(host_tx->jitter, &target_tx->jitter);
7613 __put_user(host_tx->shift, &target_tx->shift);
7614 __put_user(host_tx->stabil, &target_tx->stabil);
7615 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7616 __put_user(host_tx->calcnt, &target_tx->calcnt);
7617 __put_user(host_tx->errcnt, &target_tx->errcnt);
7618 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7619 __put_user(host_tx->tai, &target_tx->tai);
7620
7621 unlock_user_struct(target_tx, target_addr, 1);
7622 return 0;
7623 }
7624 #endif
7625
7626 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7627 #define sigev_notify_thread_id _sigev_un._tid
7628 #endif
7629
7630 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7631 abi_ulong target_addr)
7632 {
7633 struct target_sigevent *target_sevp;
7634
7635 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7636 return -TARGET_EFAULT;
7637 }
7638
7639 /* This union is awkward on 64 bit systems because it has a 32 bit
7640 * integer and a pointer in it; we follow the conversion approach
7641 * used for handling sigval types in signal.c so the guest should get
7642 * the correct value back even if we did a 64 bit byteswap and it's
7643 * using the 32 bit integer.
7644 */
7645 host_sevp->sigev_value.sival_ptr =
7646 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7647 host_sevp->sigev_signo =
7648 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7649 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7650 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7651
7652 unlock_user_struct(target_sevp, target_addr, 1);
7653 return 0;
7654 }
7655
7656 #if defined(TARGET_NR_mlockall)
7657 static inline int target_to_host_mlockall_arg(int arg)
7658 {
7659 int result = 0;
7660
7661 if (arg & TARGET_MCL_CURRENT) {
7662 result |= MCL_CURRENT;
7663 }
7664 if (arg & TARGET_MCL_FUTURE) {
7665 result |= MCL_FUTURE;
7666 }
7667 #ifdef MCL_ONFAULT
7668 if (arg & TARGET_MCL_ONFAULT) {
7669 result |= MCL_ONFAULT;
7670 }
7671 #endif
7672
7673 return result;
7674 }
7675 #endif
7676
7677 static inline int target_to_host_msync_arg(abi_long arg)
7678 {
7679 return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7680 ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7681 ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7682 (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7683 }
7684
7685 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7686 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7687 defined(TARGET_NR_newfstatat))
7688 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7689 abi_ulong target_addr,
7690 struct stat *host_st)
7691 {
7692 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7693 if (cpu_env->eabi) {
7694 struct target_eabi_stat64 *target_st;
7695
7696 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7697 return -TARGET_EFAULT;
7698 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7699 __put_user(host_st->st_dev, &target_st->st_dev);
7700 __put_user(host_st->st_ino, &target_st->st_ino);
7701 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7702 __put_user(host_st->st_ino, &target_st->__st_ino);
7703 #endif
7704 __put_user(host_st->st_mode, &target_st->st_mode);
7705 __put_user(host_st->st_nlink, &target_st->st_nlink);
7706 __put_user(host_st->st_uid, &target_st->st_uid);
7707 __put_user(host_st->st_gid, &target_st->st_gid);
7708 __put_user(host_st->st_rdev, &target_st->st_rdev);
7709 __put_user(host_st->st_size, &target_st->st_size);
7710 __put_user(host_st->st_blksize, &target_st->st_blksize);
7711 __put_user(host_st->st_blocks, &target_st->st_blocks);
7712 __put_user(host_st->st_atime, &target_st->target_st_atime);
7713 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7714 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7715 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7716 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7717 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7718 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7719 #endif
7720 unlock_user_struct(target_st, target_addr, 1);
7721 } else
7722 #endif
7723 {
7724 #if defined(TARGET_HAS_STRUCT_STAT64)
7725 struct target_stat64 *target_st;
7726 #else
7727 struct target_stat *target_st;
7728 #endif
7729
7730 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7731 return -TARGET_EFAULT;
7732 memset(target_st, 0, sizeof(*target_st));
7733 __put_user(host_st->st_dev, &target_st->st_dev);
7734 __put_user(host_st->st_ino, &target_st->st_ino);
7735 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7736 __put_user(host_st->st_ino, &target_st->__st_ino);
7737 #endif
7738 __put_user(host_st->st_mode, &target_st->st_mode);
7739 __put_user(host_st->st_nlink, &target_st->st_nlink);
7740 __put_user(host_st->st_uid, &target_st->st_uid);
7741 __put_user(host_st->st_gid, &target_st->st_gid);
7742 __put_user(host_st->st_rdev, &target_st->st_rdev);
7743 /* XXX: better use of kernel struct */
7744 __put_user(host_st->st_size, &target_st->st_size);
7745 __put_user(host_st->st_blksize, &target_st->st_blksize);
7746 __put_user(host_st->st_blocks, &target_st->st_blocks);
7747 __put_user(host_st->st_atime, &target_st->target_st_atime);
7748 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7749 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7750 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7751 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7752 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7753 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7754 #endif
7755 unlock_user_struct(target_st, target_addr, 1);
7756 }
7757
7758 return 0;
7759 }
7760 #endif
7761
7762 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7763 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7764 abi_ulong target_addr)
7765 {
7766 struct target_statx *target_stx;
7767
7768 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7769 return -TARGET_EFAULT;
7770 }
7771 memset(target_stx, 0, sizeof(*target_stx));
7772
7773 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7774 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7775 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7776 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7777 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7778 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7779 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7780 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7781 __put_user(host_stx->stx_size, &target_stx->stx_size);
7782 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7783 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7784 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7785 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7786 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7787 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7788 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7789 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7790 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7791 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7792 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7793 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7794 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7795 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7796
7797 unlock_user_struct(target_stx, target_addr, 1);
7798
7799 return 0;
7800 }
7801 #endif
7802
7803 static int do_sys_futex(int *uaddr, int op, int val,
7804 const struct timespec *timeout, int *uaddr2,
7805 int val3)
7806 {
7807 #if HOST_LONG_BITS == 64
7808 #if defined(__NR_futex)
7809 /* always a 64-bit time_t, it doesn't define _time64 version */
7810 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7811
7812 #endif
7813 #else /* HOST_LONG_BITS == 64 */
7814 #if defined(__NR_futex_time64)
7815 if (sizeof(timeout->tv_sec) == 8) {
7816 /* _time64 function on 32bit arch */
7817 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7818 }
7819 #endif
7820 #if defined(__NR_futex)
7821 /* old function on 32bit arch */
7822 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7823 #endif
7824 #endif /* HOST_LONG_BITS == 64 */
7825 g_assert_not_reached();
7826 }
7827
7828 static int do_safe_futex(int *uaddr, int op, int val,
7829 const struct timespec *timeout, int *uaddr2,
7830 int val3)
7831 {
7832 #if HOST_LONG_BITS == 64
7833 #if defined(__NR_futex)
7834 /* always a 64-bit time_t, it doesn't define _time64 version */
7835 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7836 #endif
7837 #else /* HOST_LONG_BITS == 64 */
7838 #if defined(__NR_futex_time64)
7839 if (sizeof(timeout->tv_sec) == 8) {
7840 /* _time64 function on 32bit arch */
7841 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7842 val3));
7843 }
7844 #endif
7845 #if defined(__NR_futex)
7846 /* old function on 32bit arch */
7847 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7848 #endif
7849 #endif /* HOST_LONG_BITS == 64 */
7850 return -TARGET_ENOSYS;
7851 }
7852
7853 /* ??? Using host futex calls even when target atomic operations
7854 are not really atomic probably breaks things. However implementing
7855 futexes locally would make futexes shared between multiple processes
7856 tricky. However they're probably useless because guest atomic
7857 operations won't work either. */
7858 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7859 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7860 int op, int val, target_ulong timeout,
7861 target_ulong uaddr2, int val3)
7862 {
7863 struct timespec ts, *pts = NULL;
7864 void *haddr2 = NULL;
7865 int base_op;
7866
7867 /* We assume FUTEX_* constants are the same on both host and target. */
7868 #ifdef FUTEX_CMD_MASK
7869 base_op = op & FUTEX_CMD_MASK;
7870 #else
7871 base_op = op;
7872 #endif
7873 switch (base_op) {
7874 case FUTEX_WAIT:
7875 case FUTEX_WAIT_BITSET:
7876 val = tswap32(val);
7877 break;
7878 case FUTEX_WAIT_REQUEUE_PI:
7879 val = tswap32(val);
7880 haddr2 = g2h(cpu, uaddr2);
7881 break;
7882 case FUTEX_LOCK_PI:
7883 case FUTEX_LOCK_PI2:
7884 break;
7885 case FUTEX_WAKE:
7886 case FUTEX_WAKE_BITSET:
7887 case FUTEX_TRYLOCK_PI:
7888 case FUTEX_UNLOCK_PI:
7889 timeout = 0;
7890 break;
7891 case FUTEX_FD:
7892 val = target_to_host_signal(val);
7893 timeout = 0;
7894 break;
7895 case FUTEX_CMP_REQUEUE:
7896 case FUTEX_CMP_REQUEUE_PI:
7897 val3 = tswap32(val3);
7898 /* fall through */
7899 case FUTEX_REQUEUE:
7900 case FUTEX_WAKE_OP:
7901 /*
7902 * For these, the 4th argument is not TIMEOUT, but VAL2.
7903 * But the prototype of do_safe_futex takes a pointer, so
7904 * insert casts to satisfy the compiler. We do not need
7905 * to tswap VAL2 since it's not compared to guest memory.
7906 */
7907 pts = (struct timespec *)(uintptr_t)timeout;
7908 timeout = 0;
7909 haddr2 = g2h(cpu, uaddr2);
7910 break;
7911 default:
7912 return -TARGET_ENOSYS;
7913 }
7914 if (timeout) {
7915 pts = &ts;
7916 if (time64
7917 ? target_to_host_timespec64(pts, timeout)
7918 : target_to_host_timespec(pts, timeout)) {
7919 return -TARGET_EFAULT;
7920 }
7921 }
7922 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7923 }
7924 #endif
7925
7926 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7927 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7928 abi_long handle, abi_long mount_id,
7929 abi_long flags)
7930 {
7931 struct file_handle *target_fh;
7932 struct file_handle *fh;
7933 int mid = 0;
7934 abi_long ret;
7935 char *name;
7936 unsigned int size, total_size;
7937
7938 if (get_user_s32(size, handle)) {
7939 return -TARGET_EFAULT;
7940 }
7941
7942 name = lock_user_string(pathname);
7943 if (!name) {
7944 return -TARGET_EFAULT;
7945 }
7946
7947 total_size = sizeof(struct file_handle) + size;
7948 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7949 if (!target_fh) {
7950 unlock_user(name, pathname, 0);
7951 return -TARGET_EFAULT;
7952 }
7953
7954 fh = g_malloc0(total_size);
7955 fh->handle_bytes = size;
7956
7957 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7958 unlock_user(name, pathname, 0);
7959
7960 /* man name_to_handle_at(2):
7961 * Other than the use of the handle_bytes field, the caller should treat
7962 * the file_handle structure as an opaque data type
7963 */
7964
7965 memcpy(target_fh, fh, total_size);
7966 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7967 target_fh->handle_type = tswap32(fh->handle_type);
7968 g_free(fh);
7969 unlock_user(target_fh, handle, total_size);
7970
7971 if (put_user_s32(mid, mount_id)) {
7972 return -TARGET_EFAULT;
7973 }
7974
7975 return ret;
7976
7977 }
7978 #endif
7979
7980 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7981 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7982 abi_long flags)
7983 {
7984 struct file_handle *target_fh;
7985 struct file_handle *fh;
7986 unsigned int size, total_size;
7987 abi_long ret;
7988
7989 if (get_user_s32(size, handle)) {
7990 return -TARGET_EFAULT;
7991 }
7992
7993 total_size = sizeof(struct file_handle) + size;
7994 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7995 if (!target_fh) {
7996 return -TARGET_EFAULT;
7997 }
7998
7999 fh = g_memdup(target_fh, total_size);
8000 fh->handle_bytes = size;
8001 fh->handle_type = tswap32(target_fh->handle_type);
8002
8003 ret = get_errno(open_by_handle_at(mount_fd, fh,
8004 target_to_host_bitmask(flags, fcntl_flags_tbl)));
8005
8006 g_free(fh);
8007
8008 unlock_user(target_fh, handle, total_size);
8009
8010 return ret;
8011 }
8012 #endif
8013
8014 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
8015
8016 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
8017 {
8018 int host_flags;
8019 target_sigset_t *target_mask;
8020 sigset_t host_mask;
8021 abi_long ret;
8022
8023 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
8024 return -TARGET_EINVAL;
8025 }
8026 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
8027 return -TARGET_EFAULT;
8028 }
8029
8030 target_to_host_sigset(&host_mask, target_mask);
8031
8032 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8033
8034 ret = get_errno(signalfd(fd, &host_mask, host_flags));
8035 if (ret >= 0) {
8036 fd_trans_register(ret, &target_signalfd_trans);
8037 }
8038
8039 unlock_user_struct(target_mask, mask, 0);
8040
8041 return ret;
8042 }
8043 #endif
8044
8045 /* Map host to target signal numbers for the wait family of syscalls.
8046 Assume all other status bits are the same. */
8047 int host_to_target_waitstatus(int status)
8048 {
8049 if (WIFSIGNALED(status)) {
8050 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8051 }
8052 if (WIFSTOPPED(status)) {
8053 return (host_to_target_signal(WSTOPSIG(status)) << 8)
8054 | (status & 0xff);
8055 }
8056 return status;
8057 }
8058
8059 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8060 {
8061 CPUState *cpu = env_cpu(cpu_env);
8062 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8063 int i;
8064
8065 for (i = 0; i < bprm->argc; i++) {
8066 size_t len = strlen(bprm->argv[i]) + 1;
8067
8068 if (write(fd, bprm->argv[i], len) != len) {
8069 return -1;
8070 }
8071 }
8072
8073 return 0;
8074 }
8075
8076 static void show_smaps(int fd, unsigned long size)
8077 {
8078 unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8079 unsigned long size_kb = size >> 10;
8080
8081 dprintf(fd, "Size: %lu kB\n"
8082 "KernelPageSize: %lu kB\n"
8083 "MMUPageSize: %lu kB\n"
8084 "Rss: 0 kB\n"
8085 "Pss: 0 kB\n"
8086 "Pss_Dirty: 0 kB\n"
8087 "Shared_Clean: 0 kB\n"
8088 "Shared_Dirty: 0 kB\n"
8089 "Private_Clean: 0 kB\n"
8090 "Private_Dirty: 0 kB\n"
8091 "Referenced: 0 kB\n"
8092 "Anonymous: 0 kB\n"
8093 "LazyFree: 0 kB\n"
8094 "AnonHugePages: 0 kB\n"
8095 "ShmemPmdMapped: 0 kB\n"
8096 "FilePmdMapped: 0 kB\n"
8097 "Shared_Hugetlb: 0 kB\n"
8098 "Private_Hugetlb: 0 kB\n"
8099 "Swap: 0 kB\n"
8100 "SwapPss: 0 kB\n"
8101 "Locked: 0 kB\n"
8102 "THPeligible: 0\n", size_kb, page_size_kb, page_size_kb);
8103 }
8104
8105 static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
8106 {
8107 CPUState *cpu = env_cpu(cpu_env);
8108 TaskState *ts = cpu->opaque;
8109 GSList *map_info = read_self_maps();
8110 GSList *s;
8111 int count;
8112
8113 for (s = map_info; s; s = g_slist_next(s)) {
8114 MapInfo *e = (MapInfo *) s->data;
8115
8116 if (h2g_valid(e->start)) {
8117 unsigned long min = e->start;
8118 unsigned long max = e->end;
8119 int flags = page_get_flags(h2g(min));
8120 const char *path;
8121
8122 max = h2g_valid(max - 1) ?
8123 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8124
8125 if (page_check_range(h2g(min), max - min, flags) == -1) {
8126 continue;
8127 }
8128
8129 #ifdef TARGET_HPPA
8130 if (h2g(max) == ts->info->stack_limit) {
8131 #else
8132 if (h2g(min) == ts->info->stack_limit) {
8133 #endif
8134 path = "[stack]";
8135 } else {
8136 path = e->path;
8137 }
8138
8139 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8140 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8141 h2g(min), h2g(max - 1) + 1,
8142 (flags & PAGE_READ) ? 'r' : '-',
8143 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8144 (flags & PAGE_EXEC) ? 'x' : '-',
8145 e->is_priv ? 'p' : 's',
8146 (uint64_t) e->offset, e->dev, e->inode);
8147 if (path) {
8148 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8149 } else {
8150 dprintf(fd, "\n");
8151 }
8152 if (smaps) {
8153 show_smaps(fd, max - min);
8154 dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
8155 (flags & PAGE_READ) ? " rd" : "",
8156 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8157 (flags & PAGE_EXEC) ? " ex" : "",
8158 e->is_priv ? "" : " sh",
8159 (flags & PAGE_READ) ? " mr" : "",
8160 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8161 (flags & PAGE_EXEC) ? " me" : "",
8162 e->is_priv ? "" : " ms");
8163 }
8164 }
8165 }
8166
8167 free_self_maps(map_info);
8168
8169 #ifdef TARGET_VSYSCALL_PAGE
8170 /*
8171 * We only support execution from the vsyscall page.
8172 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8173 */
8174 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8175 " --xp 00000000 00:00 0",
8176 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8177 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8178 if (smaps) {
8179 show_smaps(fd, TARGET_PAGE_SIZE);
8180 dprintf(fd, "VmFlags: ex\n");
8181 }
8182 #endif
8183
8184 return 0;
8185 }
8186
8187 static int open_self_maps(CPUArchState *cpu_env, int fd)
8188 {
8189 return open_self_maps_1(cpu_env, fd, false);
8190 }
8191
8192 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8193 {
8194 return open_self_maps_1(cpu_env, fd, true);
8195 }
8196
8197 static int open_self_stat(CPUArchState *cpu_env, int fd)
8198 {
8199 CPUState *cpu = env_cpu(cpu_env);
8200 TaskState *ts = cpu->opaque;
8201 g_autoptr(GString) buf = g_string_new(NULL);
8202 int i;
8203
8204 for (i = 0; i < 44; i++) {
8205 if (i == 0) {
8206 /* pid */
8207 g_string_printf(buf, FMT_pid " ", getpid());
8208 } else if (i == 1) {
8209 /* app name */
8210 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8211 bin = bin ? bin + 1 : ts->bprm->argv[0];
8212 g_string_printf(buf, "(%.15s) ", bin);
8213 } else if (i == 2) {
8214 /* task state */
8215 g_string_assign(buf, "R "); /* we are running right now */
8216 } else if (i == 3) {
8217 /* ppid */
8218 g_string_printf(buf, FMT_pid " ", getppid());
8219 } else if (i == 21) {
8220 /* starttime */
8221 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8222 } else if (i == 27) {
8223 /* stack bottom */
8224 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8225 } else {
8226 /* for the rest, there is MasterCard */
8227 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8228 }
8229
8230 if (write(fd, buf->str, buf->len) != buf->len) {
8231 return -1;
8232 }
8233 }
8234
8235 return 0;
8236 }
8237
8238 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8239 {
8240 CPUState *cpu = env_cpu(cpu_env);
8241 TaskState *ts = cpu->opaque;
8242 abi_ulong auxv = ts->info->saved_auxv;
8243 abi_ulong len = ts->info->auxv_len;
8244 char *ptr;
8245
8246 /*
8247 * Auxiliary vector is stored in target process stack.
8248 * read in whole auxv vector and copy it to file
8249 */
8250 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8251 if (ptr != NULL) {
8252 while (len > 0) {
8253 ssize_t r;
8254 r = write(fd, ptr, len);
8255 if (r <= 0) {
8256 break;
8257 }
8258 len -= r;
8259 ptr += r;
8260 }
8261 lseek(fd, 0, SEEK_SET);
8262 unlock_user(ptr, auxv, len);
8263 }
8264
8265 return 0;
8266 }
8267
8268 static int is_proc_myself(const char *filename, const char *entry)
8269 {
8270 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8271 filename += strlen("/proc/");
8272 if (!strncmp(filename, "self/", strlen("self/"))) {
8273 filename += strlen("self/");
8274 } else if (*filename >= '1' && *filename <= '9') {
8275 char myself[80];
8276 snprintf(myself, sizeof(myself), "%d/", getpid());
8277 if (!strncmp(filename, myself, strlen(myself))) {
8278 filename += strlen(myself);
8279 } else {
8280 return 0;
8281 }
8282 } else {
8283 return 0;
8284 }
8285 if (!strcmp(filename, entry)) {
8286 return 1;
8287 }
8288 }
8289 return 0;
8290 }
8291
8292 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8293 const char *fmt, int code)
8294 {
8295 if (logfile) {
8296 CPUState *cs = env_cpu(env);
8297
8298 fprintf(logfile, fmt, code);
8299 fprintf(logfile, "Failing executable: %s\n", exec_path);
8300 cpu_dump_state(cs, logfile, 0);
8301 open_self_maps(env, fileno(logfile));
8302 }
8303 }
8304
8305 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8306 {
8307 /* dump to console */
8308 excp_dump_file(stderr, env, fmt, code);
8309
8310 /* dump to log file */
8311 if (qemu_log_separate()) {
8312 FILE *logfile = qemu_log_trylock();
8313
8314 excp_dump_file(logfile, env, fmt, code);
8315 qemu_log_unlock(logfile);
8316 }
8317 }
8318
8319 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8320 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8321 defined(TARGET_RISCV) || defined(TARGET_S390X)
8322 static int is_proc(const char *filename, const char *entry)
8323 {
8324 return strcmp(filename, entry) == 0;
8325 }
8326 #endif
8327
8328 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8329 static int open_net_route(CPUArchState *cpu_env, int fd)
8330 {
8331 FILE *fp;
8332 char *line = NULL;
8333 size_t len = 0;
8334 ssize_t read;
8335
8336 fp = fopen("/proc/net/route", "r");
8337 if (fp == NULL) {
8338 return -1;
8339 }
8340
8341 /* read header */
8342
8343 read = getline(&line, &len, fp);
8344 dprintf(fd, "%s", line);
8345
8346 /* read routes */
8347
8348 while ((read = getline(&line, &len, fp)) != -1) {
8349 char iface[16];
8350 uint32_t dest, gw, mask;
8351 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8352 int fields;
8353
8354 fields = sscanf(line,
8355 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8356 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8357 &mask, &mtu, &window, &irtt);
8358 if (fields != 11) {
8359 continue;
8360 }
8361 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8362 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8363 metric, tswap32(mask), mtu, window, irtt);
8364 }
8365
8366 free(line);
8367 fclose(fp);
8368
8369 return 0;
8370 }
8371 #endif
8372
8373 #if defined(TARGET_SPARC)
8374 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8375 {
8376 dprintf(fd, "type\t\t: sun4u\n");
8377 return 0;
8378 }
8379 #endif
8380
8381 #if defined(TARGET_HPPA)
8382 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8383 {
8384 int i, num_cpus;
8385
8386 num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8387 for (i = 0; i < num_cpus; i++) {
8388 dprintf(fd, "processor\t: %d\n", i);
8389 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8390 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8391 dprintf(fd, "capabilities\t: os32\n");
8392 dprintf(fd, "model\t\t: 9000/778/B160L - "
8393 "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8394 }
8395 return 0;
8396 }
8397 #endif
8398
8399 #if defined(TARGET_RISCV)
8400 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8401 {
8402 int i;
8403 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8404 RISCVCPU *cpu = env_archcpu(cpu_env);
8405 const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
8406 char *isa_string = riscv_isa_string(cpu);
8407 const char *mmu;
8408
8409 if (cfg->mmu) {
8410 mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48";
8411 } else {
8412 mmu = "none";
8413 }
8414
8415 for (i = 0; i < num_cpus; i++) {
8416 dprintf(fd, "processor\t: %d\n", i);
8417 dprintf(fd, "hart\t\t: %d\n", i);
8418 dprintf(fd, "isa\t\t: %s\n", isa_string);
8419 dprintf(fd, "mmu\t\t: %s\n", mmu);
8420 dprintf(fd, "uarch\t\t: qemu\n\n");
8421 }
8422
8423 g_free(isa_string);
8424 return 0;
8425 }
8426 #endif
8427
8428 #if defined(TARGET_S390X)
8429 /*
8430 * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8431 * show in /proc/cpuinfo.
8432 *
8433 * Skip the following in order to match the missing support in op_ecag():
8434 * - show_cacheinfo().
8435 * - show_cpu_topology().
8436 * - show_cpu_mhz().
8437 *
8438 * Use fixed values for certain fields:
8439 * - bogomips per cpu - from a qemu-system-s390x run.
8440 * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8441 *
8442 * Keep the code structure close to arch/s390/kernel/processor.c.
8443 */
8444
8445 static void show_facilities(int fd)
8446 {
8447 size_t sizeof_stfl_bytes = 2048;
8448 g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
8449 unsigned int bit;
8450
8451 dprintf(fd, "facilities :");
8452 s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
8453 for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
8454 if (test_be_bit(bit, stfl_bytes)) {
8455 dprintf(fd, " %d", bit);
8456 }
8457 }
8458 dprintf(fd, "\n");
8459 }
8460
8461 static int cpu_ident(unsigned long n)
8462 {
8463 return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
8464 n);
8465 }
8466
8467 static void show_cpu_summary(CPUArchState *cpu_env, int fd)
8468 {
8469 S390CPUModel *model = env_archcpu(cpu_env)->model;
8470 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8471 uint32_t elf_hwcap = get_elf_hwcap();
8472 const char *hwcap_str;
8473 int i;
8474
8475 dprintf(fd, "vendor_id : IBM/S390\n"
8476 "# processors : %i\n"
8477 "bogomips per cpu: 13370.00\n",
8478 num_cpus);
8479 dprintf(fd, "max thread id : 0\n");
8480 dprintf(fd, "features\t: ");
8481 for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
8482 if (!(elf_hwcap & (1 << i))) {
8483 continue;
8484 }
8485 hwcap_str = elf_hwcap_str(i);
8486 if (hwcap_str) {
8487 dprintf(fd, "%s ", hwcap_str);
8488 }
8489 }
8490 dprintf(fd, "\n");
8491 show_facilities(fd);
8492 for (i = 0; i < num_cpus; i++) {
8493 dprintf(fd, "processor %d: "
8494 "version = %02X, "
8495 "identification = %06X, "
8496 "machine = %04X\n",
8497 i, model->cpu_ver, cpu_ident(i), model->def->type);
8498 }
8499 }
8500
8501 static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
8502 {
8503 S390CPUModel *model = env_archcpu(cpu_env)->model;
8504
8505 dprintf(fd, "version : %02X\n", model->cpu_ver);
8506 dprintf(fd, "identification : %06X\n", cpu_ident(n));
8507 dprintf(fd, "machine : %04X\n", model->def->type);
8508 }
8509
8510 static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
8511 {
8512 dprintf(fd, "\ncpu number : %ld\n", n);
8513 show_cpu_ids(cpu_env, fd, n);
8514 }
8515
8516 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8517 {
8518 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8519 int i;
8520
8521 show_cpu_summary(cpu_env, fd);
8522 for (i = 0; i < num_cpus; i++) {
8523 show_cpuinfo(cpu_env, fd, i);
8524 }
8525 return 0;
8526 }
8527 #endif
8528
8529 #if defined(TARGET_M68K)
8530 static int open_hardware(CPUArchState *cpu_env, int fd)
8531 {
8532 dprintf(fd, "Model:\t\tqemu-m68k\n");
8533 return 0;
8534 }
8535 #endif
8536
8537 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8538 int flags, mode_t mode, bool safe)
8539 {
8540 struct fake_open {
8541 const char *filename;
8542 int (*fill)(CPUArchState *cpu_env, int fd);
8543 int (*cmp)(const char *s1, const char *s2);
8544 };
8545 const struct fake_open *fake_open;
8546 static const struct fake_open fakes[] = {
8547 { "maps", open_self_maps, is_proc_myself },
8548 { "smaps", open_self_smaps, is_proc_myself },
8549 { "stat", open_self_stat, is_proc_myself },
8550 { "auxv", open_self_auxv, is_proc_myself },
8551 { "cmdline", open_self_cmdline, is_proc_myself },
8552 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8553 { "/proc/net/route", open_net_route, is_proc },
8554 #endif
8555 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
8556 defined(TARGET_RISCV) || defined(TARGET_S390X)
8557 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8558 #endif
8559 #if defined(TARGET_M68K)
8560 { "/proc/hardware", open_hardware, is_proc },
8561 #endif
8562 { NULL, NULL, NULL }
8563 };
8564
8565 if (is_proc_myself(pathname, "exe")) {
8566 if (safe) {
8567 return safe_openat(dirfd, exec_path, flags, mode);
8568 } else {
8569 return openat(dirfd, exec_path, flags, mode);
8570 }
8571 }
8572
8573 for (fake_open = fakes; fake_open->filename; fake_open++) {
8574 if (fake_open->cmp(pathname, fake_open->filename)) {
8575 break;
8576 }
8577 }
8578
8579 if (fake_open->filename) {
8580 const char *tmpdir;
8581 char filename[PATH_MAX];
8582 int fd, r;
8583
8584 fd = memfd_create("qemu-open", 0);
8585 if (fd < 0) {
8586 if (errno != ENOSYS) {
8587 return fd;
8588 }
8589 /* create temporary file to map stat to */
8590 tmpdir = getenv("TMPDIR");
8591 if (!tmpdir)
8592 tmpdir = "/tmp";
8593 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8594 fd = mkstemp(filename);
8595 if (fd < 0) {
8596 return fd;
8597 }
8598 unlink(filename);
8599 }
8600
8601 if ((r = fake_open->fill(cpu_env, fd))) {
8602 int e = errno;
8603 close(fd);
8604 errno = e;
8605 return r;
8606 }
8607 lseek(fd, 0, SEEK_SET);
8608
8609 return fd;
8610 }
8611
8612 if (safe) {
8613 return safe_openat(dirfd, path(pathname), flags, mode);
8614 } else {
8615 return openat(dirfd, path(pathname), flags, mode);
8616 }
8617 }
8618
8619 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8620 {
8621 ssize_t ret;
8622
8623 if (!pathname || !buf) {
8624 errno = EFAULT;
8625 return -1;
8626 }
8627
8628 if (!bufsiz) {
8629 /* Short circuit this for the magic exe check. */
8630 errno = EINVAL;
8631 return -1;
8632 }
8633
8634 if (is_proc_myself((const char *)pathname, "exe")) {
8635 /*
8636 * Don't worry about sign mismatch as earlier mapping
8637 * logic would have thrown a bad address error.
8638 */
8639 ret = MIN(strlen(exec_path), bufsiz);
8640 /* We cannot NUL terminate the string. */
8641 memcpy(buf, exec_path, ret);
8642 } else {
8643 ret = readlink(path(pathname), buf, bufsiz);
8644 }
8645
8646 return ret;
8647 }
8648
8649 static int do_execv(CPUArchState *cpu_env, int dirfd,
8650 abi_long pathname, abi_long guest_argp,
8651 abi_long guest_envp, int flags, bool is_execveat)
8652 {
8653 int ret;
8654 char **argp, **envp;
8655 int argc, envc;
8656 abi_ulong gp;
8657 abi_ulong addr;
8658 char **q;
8659 void *p;
8660
8661 argc = 0;
8662
8663 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8664 if (get_user_ual(addr, gp)) {
8665 return -TARGET_EFAULT;
8666 }
8667 if (!addr) {
8668 break;
8669 }
8670 argc++;
8671 }
8672 envc = 0;
8673 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8674 if (get_user_ual(addr, gp)) {
8675 return -TARGET_EFAULT;
8676 }
8677 if (!addr) {
8678 break;
8679 }
8680 envc++;
8681 }
8682
8683 argp = g_new0(char *, argc + 1);
8684 envp = g_new0(char *, envc + 1);
8685
8686 for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8687 if (get_user_ual(addr, gp)) {
8688 goto execve_efault;
8689 }
8690 if (!addr) {
8691 break;
8692 }
8693 *q = lock_user_string(addr);
8694 if (!*q) {
8695 goto execve_efault;
8696 }
8697 }
8698 *q = NULL;
8699
8700 for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8701 if (get_user_ual(addr, gp)) {
8702 goto execve_efault;
8703 }
8704 if (!addr) {
8705 break;
8706 }
8707 *q = lock_user_string(addr);
8708 if (!*q) {
8709 goto execve_efault;
8710 }
8711 }
8712 *q = NULL;
8713
8714 /*
8715 * Although execve() is not an interruptible syscall it is
8716 * a special case where we must use the safe_syscall wrapper:
8717 * if we allow a signal to happen before we make the host
8718 * syscall then we will 'lose' it, because at the point of
8719 * execve the process leaves QEMU's control. So we use the
8720 * safe syscall wrapper to ensure that we either take the
8721 * signal as a guest signal, or else it does not happen
8722 * before the execve completes and makes it the other
8723 * program's problem.
8724 */
8725 p = lock_user_string(pathname);
8726 if (!p) {
8727 goto execve_efault;
8728 }
8729
8730 const char *exe = p;
8731 if (is_proc_myself(p, "exe")) {
8732 exe = exec_path;
8733 }
8734 ret = is_execveat
8735 ? safe_execveat(dirfd, exe, argp, envp, flags)
8736 : safe_execve(exe, argp, envp);
8737 ret = get_errno(ret);
8738
8739 unlock_user(p, pathname, 0);
8740
8741 goto execve_end;
8742
8743 execve_efault:
8744 ret = -TARGET_EFAULT;
8745
8746 execve_end:
8747 for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8748 if (get_user_ual(addr, gp) || !addr) {
8749 break;
8750 }
8751 unlock_user(*q, addr, 0);
8752 }
8753 for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8754 if (get_user_ual(addr, gp) || !addr) {
8755 break;
8756 }
8757 unlock_user(*q, addr, 0);
8758 }
8759
8760 g_free(argp);
8761 g_free(envp);
8762 return ret;
8763 }
8764
8765 #define TIMER_MAGIC 0x0caf0000
8766 #define TIMER_MAGIC_MASK 0xffff0000
8767
8768 /* Convert QEMU provided timer ID back to internal 16bit index format */
8769 static target_timer_t get_timer_id(abi_long arg)
8770 {
8771 target_timer_t timerid = arg;
8772
8773 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8774 return -TARGET_EINVAL;
8775 }
8776
8777 timerid &= 0xffff;
8778
8779 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8780 return -TARGET_EINVAL;
8781 }
8782
8783 return timerid;
8784 }
8785
8786 static int target_to_host_cpu_mask(unsigned long *host_mask,
8787 size_t host_size,
8788 abi_ulong target_addr,
8789 size_t target_size)
8790 {
8791 unsigned target_bits = sizeof(abi_ulong) * 8;
8792 unsigned host_bits = sizeof(*host_mask) * 8;
8793 abi_ulong *target_mask;
8794 unsigned i, j;
8795
8796 assert(host_size >= target_size);
8797
8798 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8799 if (!target_mask) {
8800 return -TARGET_EFAULT;
8801 }
8802 memset(host_mask, 0, host_size);
8803
8804 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8805 unsigned bit = i * target_bits;
8806 abi_ulong val;
8807
8808 __get_user(val, &target_mask[i]);
8809 for (j = 0; j < target_bits; j++, bit++) {
8810 if (val & (1UL << j)) {
8811 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8812 }
8813 }
8814 }
8815
8816 unlock_user(target_mask, target_addr, 0);
8817 return 0;
8818 }
8819
8820 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8821 size_t host_size,
8822 abi_ulong target_addr,
8823 size_t target_size)
8824 {
8825 unsigned target_bits = sizeof(abi_ulong) * 8;
8826 unsigned host_bits = sizeof(*host_mask) * 8;
8827 abi_ulong *target_mask;
8828 unsigned i, j;
8829
8830 assert(host_size >= target_size);
8831
8832 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8833 if (!target_mask) {
8834 return -TARGET_EFAULT;
8835 }
8836
8837 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8838 unsigned bit = i * target_bits;
8839 abi_ulong val = 0;
8840
8841 for (j = 0; j < target_bits; j++, bit++) {
8842 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8843 val |= 1UL << j;
8844 }
8845 }
8846 __put_user(val, &target_mask[i]);
8847 }
8848
8849 unlock_user(target_mask, target_addr, target_size);
8850 return 0;
8851 }
8852
8853 #ifdef TARGET_NR_getdents
8854 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8855 {
8856 g_autofree void *hdirp = NULL;
8857 void *tdirp;
8858 int hlen, hoff, toff;
8859 int hreclen, treclen;
8860 off64_t prev_diroff = 0;
8861
8862 hdirp = g_try_malloc(count);
8863 if (!hdirp) {
8864 return -TARGET_ENOMEM;
8865 }
8866
8867 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8868 hlen = sys_getdents(dirfd, hdirp, count);
8869 #else
8870 hlen = sys_getdents64(dirfd, hdirp, count);
8871 #endif
8872
8873 hlen = get_errno(hlen);
8874 if (is_error(hlen)) {
8875 return hlen;
8876 }
8877
8878 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8879 if (!tdirp) {
8880 return -TARGET_EFAULT;
8881 }
8882
8883 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8884 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8885 struct linux_dirent *hde = hdirp + hoff;
8886 #else
8887 struct linux_dirent64 *hde = hdirp + hoff;
8888 #endif
8889 struct target_dirent *tde = tdirp + toff;
8890 int namelen;
8891 uint8_t type;
8892
8893 namelen = strlen(hde->d_name);
8894 hreclen = hde->d_reclen;
8895 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8896 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8897
8898 if (toff + treclen > count) {
8899 /*
8900 * If the host struct is smaller than the target struct, or
8901 * requires less alignment and thus packs into less space,
8902 * then the host can return more entries than we can pass
8903 * on to the guest.
8904 */
8905 if (toff == 0) {
8906 toff = -TARGET_EINVAL; /* result buffer is too small */
8907 break;
8908 }
8909 /*
8910 * Return what we have, resetting the file pointer to the
8911 * location of the first record not returned.
8912 */
8913 lseek64(dirfd, prev_diroff, SEEK_SET);
8914 break;
8915 }
8916
8917 prev_diroff = hde->d_off;
8918 tde->d_ino = tswapal(hde->d_ino);
8919 tde->d_off = tswapal(hde->d_off);
8920 tde->d_reclen = tswap16(treclen);
8921 memcpy(tde->d_name, hde->d_name, namelen + 1);
8922
8923 /*
8924 * The getdents type is in what was formerly a padding byte at the
8925 * end of the structure.
8926 */
8927 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8928 type = *((uint8_t *)hde + hreclen - 1);
8929 #else
8930 type = hde->d_type;
8931 #endif
8932 *((uint8_t *)tde + treclen - 1) = type;
8933 }
8934
8935 unlock_user(tdirp, arg2, toff);
8936 return toff;
8937 }
8938 #endif /* TARGET_NR_getdents */
8939
8940 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8941 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8942 {
8943 g_autofree void *hdirp = NULL;
8944 void *tdirp;
8945 int hlen, hoff, toff;
8946 int hreclen, treclen;
8947 off64_t prev_diroff = 0;
8948
8949 hdirp = g_try_malloc(count);
8950 if (!hdirp) {
8951 return -TARGET_ENOMEM;
8952 }
8953
8954 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8955 if (is_error(hlen)) {
8956 return hlen;
8957 }
8958
8959 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8960 if (!tdirp) {
8961 return -TARGET_EFAULT;
8962 }
8963
8964 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8965 struct linux_dirent64 *hde = hdirp + hoff;
8966 struct target_dirent64 *tde = tdirp + toff;
8967 int namelen;
8968
8969 namelen = strlen(hde->d_name) + 1;
8970 hreclen = hde->d_reclen;
8971 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8972 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8973
8974 if (toff + treclen > count) {
8975 /*
8976 * If the host struct is smaller than the target struct, or
8977 * requires less alignment and thus packs into less space,
8978 * then the host can return more entries than we can pass
8979 * on to the guest.
8980 */
8981 if (toff == 0) {
8982 toff = -TARGET_EINVAL; /* result buffer is too small */
8983 break;
8984 }
8985 /*
8986 * Return what we have, resetting the file pointer to the
8987 * location of the first record not returned.
8988 */
8989 lseek64(dirfd, prev_diroff, SEEK_SET);
8990 break;
8991 }
8992
8993 prev_diroff = hde->d_off;
8994 tde->d_ino = tswap64(hde->d_ino);
8995 tde->d_off = tswap64(hde->d_off);
8996 tde->d_reclen = tswap16(treclen);
8997 tde->d_type = hde->d_type;
8998 memcpy(tde->d_name, hde->d_name, namelen);
8999 }
9000
9001 unlock_user(tdirp, arg2, toff);
9002 return toff;
9003 }
9004 #endif /* TARGET_NR_getdents64 */
9005
9006 #if defined(TARGET_NR_riscv_hwprobe)
9007
9008 #define RISCV_HWPROBE_KEY_MVENDORID 0
9009 #define RISCV_HWPROBE_KEY_MARCHID 1
9010 #define RISCV_HWPROBE_KEY_MIMPID 2
9011
9012 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
9013 #define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
9014
9015 #define RISCV_HWPROBE_KEY_IMA_EXT_0 4
9016 #define RISCV_HWPROBE_IMA_FD (1 << 0)
9017 #define RISCV_HWPROBE_IMA_C (1 << 1)
9018
9019 #define RISCV_HWPROBE_KEY_CPUPERF_0 5
9020 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
9021 #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
9022 #define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
9023 #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
9024 #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9025 #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
9026
9027 struct riscv_hwprobe {
9028 abi_llong key;
9029 abi_ullong value;
9030 };
9031
9032 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9033 struct riscv_hwprobe *pair,
9034 size_t pair_count)
9035 {
9036 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9037
9038 for (; pair_count > 0; pair_count--, pair++) {
9039 abi_llong key;
9040 abi_ullong value;
9041 __put_user(0, &pair->value);
9042 __get_user(key, &pair->key);
9043 switch (key) {
9044 case RISCV_HWPROBE_KEY_MVENDORID:
9045 __put_user(cfg->mvendorid, &pair->value);
9046 break;
9047 case RISCV_HWPROBE_KEY_MARCHID:
9048 __put_user(cfg->marchid, &pair->value);
9049 break;
9050 case RISCV_HWPROBE_KEY_MIMPID:
9051 __put_user(cfg->mimpid, &pair->value);
9052 break;
9053 case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9054 value = riscv_has_ext(env, RVI) &&
9055 riscv_has_ext(env, RVM) &&
9056 riscv_has_ext(env, RVA) ?
9057 RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9058 __put_user(value, &pair->value);
9059 break;
9060 case RISCV_HWPROBE_KEY_IMA_EXT_0:
9061 value = riscv_has_ext(env, RVF) &&
9062 riscv_has_ext(env, RVD) ?
9063 RISCV_HWPROBE_IMA_FD : 0;
9064 value |= riscv_has_ext(env, RVC) ?
9065 RISCV_HWPROBE_IMA_C : pair->value;
9066 __put_user(value, &pair->value);
9067 break;
9068 case RISCV_HWPROBE_KEY_CPUPERF_0:
9069 __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9070 break;
9071 default:
9072 __put_user(-1, &pair->key);
9073 break;
9074 }
9075 }
9076 }
9077
9078 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9079 {
9080 int ret, i, tmp;
9081 size_t host_mask_size, target_mask_size;
9082 unsigned long *host_mask;
9083
9084 /*
9085 * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9086 * arg3 contains the cpu count.
9087 */
9088 tmp = (8 * sizeof(abi_ulong));
9089 target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9090 host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9091 ~(sizeof(*host_mask) - 1);
9092
9093 host_mask = alloca(host_mask_size);
9094
9095 ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9096 arg4, target_mask_size);
9097 if (ret != 0) {
9098 return ret;
9099 }
9100
9101 for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9102 if (host_mask[i] != 0) {
9103 return 0;
9104 }
9105 }
9106 return -TARGET_EINVAL;
9107 }
9108
9109 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9110 abi_long arg2, abi_long arg3,
9111 abi_long arg4, abi_long arg5)
9112 {
9113 int ret;
9114 struct riscv_hwprobe *host_pairs;
9115
9116 /* flags must be 0 */
9117 if (arg5 != 0) {
9118 return -TARGET_EINVAL;
9119 }
9120
9121 /* check cpu_set */
9122 if (arg3 != 0) {
9123 ret = cpu_set_valid(arg3, arg4);
9124 if (ret != 0) {
9125 return ret;
9126 }
9127 } else if (arg4 != 0) {
9128 return -TARGET_EINVAL;
9129 }
9130
9131 /* no pairs */
9132 if (arg2 == 0) {
9133 return 0;
9134 }
9135
9136 host_pairs = lock_user(VERIFY_WRITE, arg1,
9137 sizeof(*host_pairs) * (size_t)arg2, 0);
9138 if (host_pairs == NULL) {
9139 return -TARGET_EFAULT;
9140 }
9141 risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9142 unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9143 return 0;
9144 }
9145 #endif /* TARGET_NR_riscv_hwprobe */
9146
9147 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9148 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9149 #endif
9150
9151 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9152 #define __NR_sys_open_tree __NR_open_tree
9153 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9154 unsigned int, __flags)
9155 #endif
9156
9157 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9158 #define __NR_sys_move_mount __NR_move_mount
9159 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9160 int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9161 #endif
9162
9163 /* This is an internal helper for do_syscall so that it is easier
9164 * to have a single return point, so that actions, such as logging
9165 * of syscall results, can be performed.
9166 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9167 */
9168 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9169 abi_long arg2, abi_long arg3, abi_long arg4,
9170 abi_long arg5, abi_long arg6, abi_long arg7,
9171 abi_long arg8)
9172 {
9173 CPUState *cpu = env_cpu(cpu_env);
9174 abi_long ret;
9175 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9176 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9177 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9178 || defined(TARGET_NR_statx)
9179 struct stat st;
9180 #endif
9181 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9182 || defined(TARGET_NR_fstatfs)
9183 struct statfs stfs;
9184 #endif
9185 void *p;
9186
9187 switch(num) {
9188 case TARGET_NR_exit:
9189 /* In old applications this may be used to implement _exit(2).
9190 However in threaded applications it is used for thread termination,
9191 and _exit_group is used for application termination.
9192 Do thread termination if we have more then one thread. */
9193
9194 if (block_signals()) {
9195 return -QEMU_ERESTARTSYS;
9196 }
9197
9198 pthread_mutex_lock(&clone_lock);
9199
9200 if (CPU_NEXT(first_cpu)) {
9201 TaskState *ts = cpu->opaque;
9202
9203 if (ts->child_tidptr) {
9204 put_user_u32(0, ts->child_tidptr);
9205 do_sys_futex(g2h(cpu, ts->child_tidptr),
9206 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9207 }
9208
9209 object_unparent(OBJECT(cpu));
9210 object_unref(OBJECT(cpu));
9211 /*
9212 * At this point the CPU should be unrealized and removed
9213 * from cpu lists. We can clean-up the rest of the thread
9214 * data without the lock held.
9215 */
9216
9217 pthread_mutex_unlock(&clone_lock);
9218
9219 thread_cpu = NULL;
9220 g_free(ts);
9221 rcu_unregister_thread();
9222 pthread_exit(NULL);
9223 }
9224
9225 pthread_mutex_unlock(&clone_lock);
9226 preexit_cleanup(cpu_env, arg1);
9227 _exit(arg1);
9228 return 0; /* avoid warning */
9229 case TARGET_NR_read:
9230 if (arg2 == 0 && arg3 == 0) {
9231 return get_errno(safe_read(arg1, 0, 0));
9232 } else {
9233 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9234 return -TARGET_EFAULT;
9235 ret = get_errno(safe_read(arg1, p, arg3));
9236 if (ret >= 0 &&
9237 fd_trans_host_to_target_data(arg1)) {
9238 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9239 }
9240 unlock_user(p, arg2, ret);
9241 }
9242 return ret;
9243 case TARGET_NR_write:
9244 if (arg2 == 0 && arg3 == 0) {
9245 return get_errno(safe_write(arg1, 0, 0));
9246 }
9247 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9248 return -TARGET_EFAULT;
9249 if (fd_trans_target_to_host_data(arg1)) {
9250 void *copy = g_malloc(arg3);
9251 memcpy(copy, p, arg3);
9252 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9253 if (ret >= 0) {
9254 ret = get_errno(safe_write(arg1, copy, ret));
9255 }
9256 g_free(copy);
9257 } else {
9258 ret = get_errno(safe_write(arg1, p, arg3));
9259 }
9260 unlock_user(p, arg2, 0);
9261 return ret;
9262
9263 #ifdef TARGET_NR_open
9264 case TARGET_NR_open:
9265 if (!(p = lock_user_string(arg1)))
9266 return -TARGET_EFAULT;
9267 ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9268 target_to_host_bitmask(arg2, fcntl_flags_tbl),
9269 arg3, true));
9270 fd_trans_unregister(ret);
9271 unlock_user(p, arg1, 0);
9272 return ret;
9273 #endif
9274 case TARGET_NR_openat:
9275 if (!(p = lock_user_string(arg2)))
9276 return -TARGET_EFAULT;
9277 ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9278 target_to_host_bitmask(arg3, fcntl_flags_tbl),
9279 arg4, true));
9280 fd_trans_unregister(ret);
9281 unlock_user(p, arg2, 0);
9282 return ret;
9283 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9284 case TARGET_NR_name_to_handle_at:
9285 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9286 return ret;
9287 #endif
9288 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9289 case TARGET_NR_open_by_handle_at:
9290 ret = do_open_by_handle_at(arg1, arg2, arg3);
9291 fd_trans_unregister(ret);
9292 return ret;
9293 #endif
9294 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9295 case TARGET_NR_pidfd_open:
9296 return get_errno(pidfd_open(arg1, arg2));
9297 #endif
9298 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9299 case TARGET_NR_pidfd_send_signal:
9300 {
9301 siginfo_t uinfo, *puinfo;
9302
9303 if (arg3) {
9304 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9305 if (!p) {
9306 return -TARGET_EFAULT;
9307 }
9308 target_to_host_siginfo(&uinfo, p);
9309 unlock_user(p, arg3, 0);
9310 puinfo = &uinfo;
9311 } else {
9312 puinfo = NULL;
9313 }
9314 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9315 puinfo, arg4));
9316 }
9317 return ret;
9318 #endif
9319 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9320 case TARGET_NR_pidfd_getfd:
9321 return get_errno(pidfd_getfd(arg1, arg2, arg3));
9322 #endif
9323 case TARGET_NR_close:
9324 fd_trans_unregister(arg1);
9325 return get_errno(close(arg1));
9326 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9327 case TARGET_NR_close_range:
9328 ret = get_errno(sys_close_range(arg1, arg2, arg3));
9329 if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9330 abi_long fd, maxfd;
9331 maxfd = MIN(arg2, target_fd_max);
9332 for (fd = arg1; fd < maxfd; fd++) {
9333 fd_trans_unregister(fd);
9334 }
9335 }
9336 return ret;
9337 #endif
9338
9339 case TARGET_NR_brk:
9340 return do_brk(arg1);
9341 #ifdef TARGET_NR_fork
9342 case TARGET_NR_fork:
9343 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9344 #endif
9345 #ifdef TARGET_NR_waitpid
9346 case TARGET_NR_waitpid:
9347 {
9348 int status;
9349 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9350 if (!is_error(ret) && arg2 && ret
9351 && put_user_s32(host_to_target_waitstatus(status), arg2))
9352 return -TARGET_EFAULT;
9353 }
9354 return ret;
9355 #endif
9356 #ifdef TARGET_NR_waitid
9357 case TARGET_NR_waitid:
9358 {
9359 siginfo_t info;
9360 info.si_pid = 0;
9361 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9362 if (!is_error(ret) && arg3 && info.si_pid != 0) {
9363 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9364 return -TARGET_EFAULT;
9365 host_to_target_siginfo(p, &info);
9366 unlock_user(p, arg3, sizeof(target_siginfo_t));
9367 }
9368 }
9369 return ret;
9370 #endif
9371 #ifdef TARGET_NR_creat /* not on alpha */
9372 case TARGET_NR_creat:
9373 if (!(p = lock_user_string(arg1)))
9374 return -TARGET_EFAULT;
9375 ret = get_errno(creat(p, arg2));
9376 fd_trans_unregister(ret);
9377 unlock_user(p, arg1, 0);
9378 return ret;
9379 #endif
9380 #ifdef TARGET_NR_link
9381 case TARGET_NR_link:
9382 {
9383 void * p2;
9384 p = lock_user_string(arg1);
9385 p2 = lock_user_string(arg2);
9386 if (!p || !p2)
9387 ret = -TARGET_EFAULT;
9388 else
9389 ret = get_errno(link(p, p2));
9390 unlock_user(p2, arg2, 0);
9391 unlock_user(p, arg1, 0);
9392 }
9393 return ret;
9394 #endif
9395 #if defined(TARGET_NR_linkat)
9396 case TARGET_NR_linkat:
9397 {
9398 void * p2 = NULL;
9399 if (!arg2 || !arg4)
9400 return -TARGET_EFAULT;
9401 p = lock_user_string(arg2);
9402 p2 = lock_user_string(arg4);
9403 if (!p || !p2)
9404 ret = -TARGET_EFAULT;
9405 else
9406 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9407 unlock_user(p, arg2, 0);
9408 unlock_user(p2, arg4, 0);
9409 }
9410 return ret;
9411 #endif
9412 #ifdef TARGET_NR_unlink
9413 case TARGET_NR_unlink:
9414 if (!(p = lock_user_string(arg1)))
9415 return -TARGET_EFAULT;
9416 ret = get_errno(unlink(p));
9417 unlock_user(p, arg1, 0);
9418 return ret;
9419 #endif
9420 #if defined(TARGET_NR_unlinkat)
9421 case TARGET_NR_unlinkat:
9422 if (!(p = lock_user_string(arg2)))
9423 return -TARGET_EFAULT;
9424 ret = get_errno(unlinkat(arg1, p, arg3));
9425 unlock_user(p, arg2, 0);
9426 return ret;
9427 #endif
9428 case TARGET_NR_execveat:
9429 return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9430 case TARGET_NR_execve:
9431 return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9432 case TARGET_NR_chdir:
9433 if (!(p = lock_user_string(arg1)))
9434 return -TARGET_EFAULT;
9435 ret = get_errno(chdir(p));
9436 unlock_user(p, arg1, 0);
9437 return ret;
9438 #ifdef TARGET_NR_time
9439 case TARGET_NR_time:
9440 {
9441 time_t host_time;
9442 ret = get_errno(time(&host_time));
9443 if (!is_error(ret)
9444 && arg1
9445 && put_user_sal(host_time, arg1))
9446 return -TARGET_EFAULT;
9447 }
9448 return ret;
9449 #endif
9450 #ifdef TARGET_NR_mknod
9451 case TARGET_NR_mknod:
9452 if (!(p = lock_user_string(arg1)))
9453 return -TARGET_EFAULT;
9454 ret = get_errno(mknod(p, arg2, arg3));
9455 unlock_user(p, arg1, 0);
9456 return ret;
9457 #endif
9458 #if defined(TARGET_NR_mknodat)
9459 case TARGET_NR_mknodat:
9460 if (!(p = lock_user_string(arg2)))
9461 return -TARGET_EFAULT;
9462 ret = get_errno(mknodat(arg1, p, arg3, arg4));
9463 unlock_user(p, arg2, 0);
9464 return ret;
9465 #endif
9466 #ifdef TARGET_NR_chmod
9467 case TARGET_NR_chmod:
9468 if (!(p = lock_user_string(arg1)))
9469 return -TARGET_EFAULT;
9470 ret = get_errno(chmod(p, arg2));
9471 unlock_user(p, arg1, 0);
9472 return ret;
9473 #endif
9474 #ifdef TARGET_NR_lseek
9475 case TARGET_NR_lseek:
9476 return get_errno(lseek(arg1, arg2, arg3));
9477 #endif
9478 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9479 /* Alpha specific */
9480 case TARGET_NR_getxpid:
9481 cpu_env->ir[IR_A4] = getppid();
9482 return get_errno(getpid());
9483 #endif
9484 #ifdef TARGET_NR_getpid
9485 case TARGET_NR_getpid:
9486 return get_errno(getpid());
9487 #endif
9488 case TARGET_NR_mount:
9489 {
9490 /* need to look at the data field */
9491 void *p2, *p3;
9492
9493 if (arg1) {
9494 p = lock_user_string(arg1);
9495 if (!p) {
9496 return -TARGET_EFAULT;
9497 }
9498 } else {
9499 p = NULL;
9500 }
9501
9502 p2 = lock_user_string(arg2);
9503 if (!p2) {
9504 if (arg1) {
9505 unlock_user(p, arg1, 0);
9506 }
9507 return -TARGET_EFAULT;
9508 }
9509
9510 if (arg3) {
9511 p3 = lock_user_string(arg3);
9512 if (!p3) {
9513 if (arg1) {
9514 unlock_user(p, arg1, 0);
9515 }
9516 unlock_user(p2, arg2, 0);
9517 return -TARGET_EFAULT;
9518 }
9519 } else {
9520 p3 = NULL;
9521 }
9522
9523 /* FIXME - arg5 should be locked, but it isn't clear how to
9524 * do that since it's not guaranteed to be a NULL-terminated
9525 * string.
9526 */
9527 if (!arg5) {
9528 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9529 } else {
9530 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9531 }
9532 ret = get_errno(ret);
9533
9534 if (arg1) {
9535 unlock_user(p, arg1, 0);
9536 }
9537 unlock_user(p2, arg2, 0);
9538 if (arg3) {
9539 unlock_user(p3, arg3, 0);
9540 }
9541 }
9542 return ret;
9543 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9544 #if defined(TARGET_NR_umount)
9545 case TARGET_NR_umount:
9546 #endif
9547 #if defined(TARGET_NR_oldumount)
9548 case TARGET_NR_oldumount:
9549 #endif
9550 if (!(p = lock_user_string(arg1)))
9551 return -TARGET_EFAULT;
9552 ret = get_errno(umount(p));
9553 unlock_user(p, arg1, 0);
9554 return ret;
9555 #endif
9556 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9557 case TARGET_NR_move_mount:
9558 {
9559 void *p2, *p4;
9560
9561 if (!arg2 || !arg4) {
9562 return -TARGET_EFAULT;
9563 }
9564
9565 p2 = lock_user_string(arg2);
9566 if (!p2) {
9567 return -TARGET_EFAULT;
9568 }
9569
9570 p4 = lock_user_string(arg4);
9571 if (!p4) {
9572 unlock_user(p2, arg2, 0);
9573 return -TARGET_EFAULT;
9574 }
9575 ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9576
9577 unlock_user(p2, arg2, 0);
9578 unlock_user(p4, arg4, 0);
9579
9580 return ret;
9581 }
9582 #endif
9583 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9584 case TARGET_NR_open_tree:
9585 {
9586 void *p2;
9587 int host_flags;
9588
9589 if (!arg2) {
9590 return -TARGET_EFAULT;
9591 }
9592
9593 p2 = lock_user_string(arg2);
9594 if (!p2) {
9595 return -TARGET_EFAULT;
9596 }
9597
9598 host_flags = arg3 & ~TARGET_O_CLOEXEC;
9599 if (arg3 & TARGET_O_CLOEXEC) {
9600 host_flags |= O_CLOEXEC;
9601 }
9602
9603 ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9604
9605 unlock_user(p2, arg2, 0);
9606
9607 return ret;
9608 }
9609 #endif
9610 #ifdef TARGET_NR_stime /* not on alpha */
9611 case TARGET_NR_stime:
9612 {
9613 struct timespec ts;
9614 ts.tv_nsec = 0;
9615 if (get_user_sal(ts.tv_sec, arg1)) {
9616 return -TARGET_EFAULT;
9617 }
9618 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9619 }
9620 #endif
9621 #ifdef TARGET_NR_alarm /* not on alpha */
9622 case TARGET_NR_alarm:
9623 return alarm(arg1);
9624 #endif
9625 #ifdef TARGET_NR_pause /* not on alpha */
9626 case TARGET_NR_pause:
9627 if (!block_signals()) {
9628 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9629 }
9630 return -TARGET_EINTR;
9631 #endif
9632 #ifdef TARGET_NR_utime
9633 case TARGET_NR_utime:
9634 {
9635 struct utimbuf tbuf, *host_tbuf;
9636 struct target_utimbuf *target_tbuf;
9637 if (arg2) {
9638 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9639 return -TARGET_EFAULT;
9640 tbuf.actime = tswapal(target_tbuf->actime);
9641 tbuf.modtime = tswapal(target_tbuf->modtime);
9642 unlock_user_struct(target_tbuf, arg2, 0);
9643 host_tbuf = &tbuf;
9644 } else {
9645 host_tbuf = NULL;
9646 }
9647 if (!(p = lock_user_string(arg1)))
9648 return -TARGET_EFAULT;
9649 ret = get_errno(utime(p, host_tbuf));
9650 unlock_user(p, arg1, 0);
9651 }
9652 return ret;
9653 #endif
9654 #ifdef TARGET_NR_utimes
9655 case TARGET_NR_utimes:
9656 {
9657 struct timeval *tvp, tv[2];
9658 if (arg2) {
9659 if (copy_from_user_timeval(&tv[0], arg2)
9660 || copy_from_user_timeval(&tv[1],
9661 arg2 + sizeof(struct target_timeval)))
9662 return -TARGET_EFAULT;
9663 tvp = tv;
9664 } else {
9665 tvp = NULL;
9666 }
9667 if (!(p = lock_user_string(arg1)))
9668 return -TARGET_EFAULT;
9669 ret = get_errno(utimes(p, tvp));
9670 unlock_user(p, arg1, 0);
9671 }
9672 return ret;
9673 #endif
9674 #if defined(TARGET_NR_futimesat)
9675 case TARGET_NR_futimesat:
9676 {
9677 struct timeval *tvp, tv[2];
9678 if (arg3) {
9679 if (copy_from_user_timeval(&tv[0], arg3)
9680 || copy_from_user_timeval(&tv[1],
9681 arg3 + sizeof(struct target_timeval)))
9682 return -TARGET_EFAULT;
9683 tvp = tv;
9684 } else {
9685 tvp = NULL;
9686 }
9687 if (!(p = lock_user_string(arg2))) {
9688 return -TARGET_EFAULT;
9689 }
9690 ret = get_errno(futimesat(arg1, path(p), tvp));
9691 unlock_user(p, arg2, 0);
9692 }
9693 return ret;
9694 #endif
9695 #ifdef TARGET_NR_access
9696 case TARGET_NR_access:
9697 if (!(p = lock_user_string(arg1))) {
9698 return -TARGET_EFAULT;
9699 }
9700 ret = get_errno(access(path(p), arg2));
9701 unlock_user(p, arg1, 0);
9702 return ret;
9703 #endif
9704 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9705 case TARGET_NR_faccessat:
9706 if (!(p = lock_user_string(arg2))) {
9707 return -TARGET_EFAULT;
9708 }
9709 ret = get_errno(faccessat(arg1, p, arg3, 0));
9710 unlock_user(p, arg2, 0);
9711 return ret;
9712 #endif
9713 #if defined(TARGET_NR_faccessat2)
9714 case TARGET_NR_faccessat2:
9715 if (!(p = lock_user_string(arg2))) {
9716 return -TARGET_EFAULT;
9717 }
9718 ret = get_errno(faccessat(arg1, p, arg3, arg4));
9719 unlock_user(p, arg2, 0);
9720 return ret;
9721 #endif
9722 #ifdef TARGET_NR_nice /* not on alpha */
9723 case TARGET_NR_nice:
9724 return get_errno(nice(arg1));
9725 #endif
9726 case TARGET_NR_sync:
9727 sync();
9728 return 0;
9729 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9730 case TARGET_NR_syncfs:
9731 return get_errno(syncfs(arg1));
9732 #endif
9733 case TARGET_NR_kill:
9734 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9735 #ifdef TARGET_NR_rename
9736 case TARGET_NR_rename:
9737 {
9738 void *p2;
9739 p = lock_user_string(arg1);
9740 p2 = lock_user_string(arg2);
9741 if (!p || !p2)
9742 ret = -TARGET_EFAULT;
9743 else
9744 ret = get_errno(rename(p, p2));
9745 unlock_user(p2, arg2, 0);
9746 unlock_user(p, arg1, 0);
9747 }
9748 return ret;
9749 #endif
9750 #if defined(TARGET_NR_renameat)
9751 case TARGET_NR_renameat:
9752 {
9753 void *p2;
9754 p = lock_user_string(arg2);
9755 p2 = lock_user_string(arg4);
9756 if (!p || !p2)
9757 ret = -TARGET_EFAULT;
9758 else
9759 ret = get_errno(renameat(arg1, p, arg3, p2));
9760 unlock_user(p2, arg4, 0);
9761 unlock_user(p, arg2, 0);
9762 }
9763 return ret;
9764 #endif
9765 #if defined(TARGET_NR_renameat2)
9766 case TARGET_NR_renameat2:
9767 {
9768 void *p2;
9769 p = lock_user_string(arg2);
9770 p2 = lock_user_string(arg4);
9771 if (!p || !p2) {
9772 ret = -TARGET_EFAULT;
9773 } else {
9774 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9775 }
9776 unlock_user(p2, arg4, 0);
9777 unlock_user(p, arg2, 0);
9778 }
9779 return ret;
9780 #endif
9781 #ifdef TARGET_NR_mkdir
9782 case TARGET_NR_mkdir:
9783 if (!(p = lock_user_string(arg1)))
9784 return -TARGET_EFAULT;
9785 ret = get_errno(mkdir(p, arg2));
9786 unlock_user(p, arg1, 0);
9787 return ret;
9788 #endif
9789 #if defined(TARGET_NR_mkdirat)
9790 case TARGET_NR_mkdirat:
9791 if (!(p = lock_user_string(arg2)))
9792 return -TARGET_EFAULT;
9793 ret = get_errno(mkdirat(arg1, p, arg3));
9794 unlock_user(p, arg2, 0);
9795 return ret;
9796 #endif
9797 #ifdef TARGET_NR_rmdir
9798 case TARGET_NR_rmdir:
9799 if (!(p = lock_user_string(arg1)))
9800 return -TARGET_EFAULT;
9801 ret = get_errno(rmdir(p));
9802 unlock_user(p, arg1, 0);
9803 return ret;
9804 #endif
9805 case TARGET_NR_dup:
9806 ret = get_errno(dup(arg1));
9807 if (ret >= 0) {
9808 fd_trans_dup(arg1, ret);
9809 }
9810 return ret;
9811 #ifdef TARGET_NR_pipe
9812 case TARGET_NR_pipe:
9813 return do_pipe(cpu_env, arg1, 0, 0);
9814 #endif
9815 #ifdef TARGET_NR_pipe2
9816 case TARGET_NR_pipe2:
9817 return do_pipe(cpu_env, arg1,
9818 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9819 #endif
9820 case TARGET_NR_times:
9821 {
9822 struct target_tms *tmsp;
9823 struct tms tms;
9824 ret = get_errno(times(&tms));
9825 if (arg1) {
9826 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9827 if (!tmsp)
9828 return -TARGET_EFAULT;
9829 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9830 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9831 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9832 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9833 }
9834 if (!is_error(ret))
9835 ret = host_to_target_clock_t(ret);
9836 }
9837 return ret;
9838 case TARGET_NR_acct:
9839 if (arg1 == 0) {
9840 ret = get_errno(acct(NULL));
9841 } else {
9842 if (!(p = lock_user_string(arg1))) {
9843 return -TARGET_EFAULT;
9844 }
9845 ret = get_errno(acct(path(p)));
9846 unlock_user(p, arg1, 0);
9847 }
9848 return ret;
9849 #ifdef TARGET_NR_umount2
9850 case TARGET_NR_umount2:
9851 if (!(p = lock_user_string(arg1)))
9852 return -TARGET_EFAULT;
9853 ret = get_errno(umount2(p, arg2));
9854 unlock_user(p, arg1, 0);
9855 return ret;
9856 #endif
9857 case TARGET_NR_ioctl:
9858 return do_ioctl(arg1, arg2, arg3);
9859 #ifdef TARGET_NR_fcntl
9860 case TARGET_NR_fcntl:
9861 return do_fcntl(arg1, arg2, arg3);
9862 #endif
9863 case TARGET_NR_setpgid:
9864 return get_errno(setpgid(arg1, arg2));
9865 case TARGET_NR_umask:
9866 return get_errno(umask(arg1));
9867 case TARGET_NR_chroot:
9868 if (!(p = lock_user_string(arg1)))
9869 return -TARGET_EFAULT;
9870 ret = get_errno(chroot(p));
9871 unlock_user(p, arg1, 0);
9872 return ret;
9873 #ifdef TARGET_NR_dup2
9874 case TARGET_NR_dup2:
9875 ret = get_errno(dup2(arg1, arg2));
9876 if (ret >= 0) {
9877 fd_trans_dup(arg1, arg2);
9878 }
9879 return ret;
9880 #endif
9881 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9882 case TARGET_NR_dup3:
9883 {
9884 int host_flags;
9885
9886 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9887 return -EINVAL;
9888 }
9889 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9890 ret = get_errno(dup3(arg1, arg2, host_flags));
9891 if (ret >= 0) {
9892 fd_trans_dup(arg1, arg2);
9893 }
9894 return ret;
9895 }
9896 #endif
9897 #ifdef TARGET_NR_getppid /* not on alpha */
9898 case TARGET_NR_getppid:
9899 return get_errno(getppid());
9900 #endif
9901 #ifdef TARGET_NR_getpgrp
9902 case TARGET_NR_getpgrp:
9903 return get_errno(getpgrp());
9904 #endif
9905 case TARGET_NR_setsid:
9906 return get_errno(setsid());
9907 #ifdef TARGET_NR_sigaction
9908 case TARGET_NR_sigaction:
9909 {
9910 #if defined(TARGET_MIPS)
9911 struct target_sigaction act, oact, *pact, *old_act;
9912
9913 if (arg2) {
9914 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9915 return -TARGET_EFAULT;
9916 act._sa_handler = old_act->_sa_handler;
9917 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9918 act.sa_flags = old_act->sa_flags;
9919 unlock_user_struct(old_act, arg2, 0);
9920 pact = &act;
9921 } else {
9922 pact = NULL;
9923 }
9924
9925 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9926
9927 if (!is_error(ret) && arg3) {
9928 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9929 return -TARGET_EFAULT;
9930 old_act->_sa_handler = oact._sa_handler;
9931 old_act->sa_flags = oact.sa_flags;
9932 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9933 old_act->sa_mask.sig[1] = 0;
9934 old_act->sa_mask.sig[2] = 0;
9935 old_act->sa_mask.sig[3] = 0;
9936 unlock_user_struct(old_act, arg3, 1);
9937 }
9938 #else
9939 struct target_old_sigaction *old_act;
9940 struct target_sigaction act, oact, *pact;
9941 if (arg2) {
9942 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9943 return -TARGET_EFAULT;
9944 act._sa_handler = old_act->_sa_handler;
9945 target_siginitset(&act.sa_mask, old_act->sa_mask);
9946 act.sa_flags = old_act->sa_flags;
9947 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9948 act.sa_restorer = old_act->sa_restorer;
9949 #endif
9950 unlock_user_struct(old_act, arg2, 0);
9951 pact = &act;
9952 } else {
9953 pact = NULL;
9954 }
9955 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9956 if (!is_error(ret) && arg3) {
9957 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9958 return -TARGET_EFAULT;
9959 old_act->_sa_handler = oact._sa_handler;
9960 old_act->sa_mask = oact.sa_mask.sig[0];
9961 old_act->sa_flags = oact.sa_flags;
9962 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9963 old_act->sa_restorer = oact.sa_restorer;
9964 #endif
9965 unlock_user_struct(old_act, arg3, 1);
9966 }
9967 #endif
9968 }
9969 return ret;
9970 #endif
9971 case TARGET_NR_rt_sigaction:
9972 {
9973 /*
9974 * For Alpha and SPARC this is a 5 argument syscall, with
9975 * a 'restorer' parameter which must be copied into the
9976 * sa_restorer field of the sigaction struct.
9977 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9978 * and arg5 is the sigsetsize.
9979 */
9980 #if defined(TARGET_ALPHA)
9981 target_ulong sigsetsize = arg4;
9982 target_ulong restorer = arg5;
9983 #elif defined(TARGET_SPARC)
9984 target_ulong restorer = arg4;
9985 target_ulong sigsetsize = arg5;
9986 #else
9987 target_ulong sigsetsize = arg4;
9988 target_ulong restorer = 0;
9989 #endif
9990 struct target_sigaction *act = NULL;
9991 struct target_sigaction *oact = NULL;
9992
9993 if (sigsetsize != sizeof(target_sigset_t)) {
9994 return -TARGET_EINVAL;
9995 }
9996 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9997 return -TARGET_EFAULT;
9998 }
9999 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10000 ret = -TARGET_EFAULT;
10001 } else {
10002 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10003 if (oact) {
10004 unlock_user_struct(oact, arg3, 1);
10005 }
10006 }
10007 if (act) {
10008 unlock_user_struct(act, arg2, 0);
10009 }
10010 }
10011 return ret;
10012 #ifdef TARGET_NR_sgetmask /* not on alpha */
10013 case TARGET_NR_sgetmask:
10014 {
10015 sigset_t cur_set;
10016 abi_ulong target_set;
10017 ret = do_sigprocmask(0, NULL, &cur_set);
10018 if (!ret) {
10019 host_to_target_old_sigset(&target_set, &cur_set);
10020 ret = target_set;
10021 }
10022 }
10023 return ret;
10024 #endif
10025 #ifdef TARGET_NR_ssetmask /* not on alpha */
10026 case TARGET_NR_ssetmask:
10027 {
10028 sigset_t set, oset;
10029 abi_ulong target_set = arg1;
10030 target_to_host_old_sigset(&set, &target_set);
10031 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10032 if (!ret) {
10033 host_to_target_old_sigset(&target_set, &oset);
10034 ret = target_set;
10035 }
10036 }
10037 return ret;
10038 #endif
10039 #ifdef TARGET_NR_sigprocmask
10040 case TARGET_NR_sigprocmask:
10041 {
10042 #if defined(TARGET_ALPHA)
10043 sigset_t set, oldset;
10044 abi_ulong mask;
10045 int how;
10046
10047 switch (arg1) {
10048 case TARGET_SIG_BLOCK:
10049 how = SIG_BLOCK;
10050 break;
10051 case TARGET_SIG_UNBLOCK:
10052 how = SIG_UNBLOCK;
10053 break;
10054 case TARGET_SIG_SETMASK:
10055 how = SIG_SETMASK;
10056 break;
10057 default:
10058 return -TARGET_EINVAL;
10059 }
10060 mask = arg2;
10061 target_to_host_old_sigset(&set, &mask);
10062
10063 ret = do_sigprocmask(how, &set, &oldset);
10064 if (!is_error(ret)) {
10065 host_to_target_old_sigset(&mask, &oldset);
10066 ret = mask;
10067 cpu_env->ir[IR_V0] = 0; /* force no error */
10068 }
10069 #else
10070 sigset_t set, oldset, *set_ptr;
10071 int how;
10072
10073 if (arg2) {
10074 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10075 if (!p) {
10076 return -TARGET_EFAULT;
10077 }
10078 target_to_host_old_sigset(&set, p);
10079 unlock_user(p, arg2, 0);
10080 set_ptr = &set;
10081 switch (arg1) {
10082 case TARGET_SIG_BLOCK:
10083 how = SIG_BLOCK;
10084 break;
10085 case TARGET_SIG_UNBLOCK:
10086 how = SIG_UNBLOCK;
10087 break;
10088 case TARGET_SIG_SETMASK:
10089 how = SIG_SETMASK;
10090 break;
10091 default:
10092 return -TARGET_EINVAL;
10093 }
10094 } else {
10095 how = 0;
10096 set_ptr = NULL;
10097 }
10098 ret = do_sigprocmask(how, set_ptr, &oldset);
10099 if (!is_error(ret) && arg3) {
10100 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10101 return -TARGET_EFAULT;
10102 host_to_target_old_sigset(p, &oldset);
10103 unlock_user(p, arg3, sizeof(target_sigset_t));
10104 }
10105 #endif
10106 }
10107 return ret;
10108 #endif
10109 case TARGET_NR_rt_sigprocmask:
10110 {
10111 int how = arg1;
10112 sigset_t set, oldset, *set_ptr;
10113
10114 if (arg4 != sizeof(target_sigset_t)) {
10115 return -TARGET_EINVAL;
10116 }
10117
10118 if (arg2) {
10119 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10120 if (!p) {
10121 return -TARGET_EFAULT;
10122 }
10123 target_to_host_sigset(&set, p);
10124 unlock_user(p, arg2, 0);
10125 set_ptr = &set;
10126 switch(how) {
10127 case TARGET_SIG_BLOCK:
10128 how = SIG_BLOCK;
10129 break;
10130 case TARGET_SIG_UNBLOCK:
10131 how = SIG_UNBLOCK;
10132 break;
10133 case TARGET_SIG_SETMASK:
10134 how = SIG_SETMASK;
10135 break;
10136 default:
10137 return -TARGET_EINVAL;
10138 }
10139 } else {
10140 how = 0;
10141 set_ptr = NULL;
10142 }
10143 ret = do_sigprocmask(how, set_ptr, &oldset);
10144 if (!is_error(ret) && arg3) {
10145 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10146 return -TARGET_EFAULT;
10147 host_to_target_sigset(p, &oldset);
10148 unlock_user(p, arg3, sizeof(target_sigset_t));
10149 }
10150 }
10151 return ret;
10152 #ifdef TARGET_NR_sigpending
10153 case TARGET_NR_sigpending:
10154 {
10155 sigset_t set;
10156 ret = get_errno(sigpending(&set));
10157 if (!is_error(ret)) {
10158 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10159 return -TARGET_EFAULT;
10160 host_to_target_old_sigset(p, &set);
10161 unlock_user(p, arg1, sizeof(target_sigset_t));
10162 }
10163 }
10164 return ret;
10165 #endif
10166 case TARGET_NR_rt_sigpending:
10167 {
10168 sigset_t set;
10169
10170 /* Yes, this check is >, not != like most. We follow the kernel's
10171 * logic and it does it like this because it implements
10172 * NR_sigpending through the same code path, and in that case
10173 * the old_sigset_t is smaller in size.
10174 */
10175 if (arg2 > sizeof(target_sigset_t)) {
10176 return -TARGET_EINVAL;
10177 }
10178
10179 ret = get_errno(sigpending(&set));
10180 if (!is_error(ret)) {
10181 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10182 return -TARGET_EFAULT;
10183 host_to_target_sigset(p, &set);
10184 unlock_user(p, arg1, sizeof(target_sigset_t));
10185 }
10186 }
10187 return ret;
10188 #ifdef TARGET_NR_sigsuspend
10189 case TARGET_NR_sigsuspend:
10190 {
10191 sigset_t *set;
10192
10193 #if defined(TARGET_ALPHA)
10194 TaskState *ts = cpu->opaque;
10195 /* target_to_host_old_sigset will bswap back */
10196 abi_ulong mask = tswapal(arg1);
10197 set = &ts->sigsuspend_mask;
10198 target_to_host_old_sigset(set, &mask);
10199 #else
10200 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10201 if (ret != 0) {
10202 return ret;
10203 }
10204 #endif
10205 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10206 finish_sigsuspend_mask(ret);
10207 }
10208 return ret;
10209 #endif
10210 case TARGET_NR_rt_sigsuspend:
10211 {
10212 sigset_t *set;
10213
10214 ret = process_sigsuspend_mask(&set, arg1, arg2);
10215 if (ret != 0) {
10216 return ret;
10217 }
10218 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10219 finish_sigsuspend_mask(ret);
10220 }
10221 return ret;
10222 #ifdef TARGET_NR_rt_sigtimedwait
10223 case TARGET_NR_rt_sigtimedwait:
10224 {
10225 sigset_t set;
10226 struct timespec uts, *puts;
10227 siginfo_t uinfo;
10228
10229 if (arg4 != sizeof(target_sigset_t)) {
10230 return -TARGET_EINVAL;
10231 }
10232
10233 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10234 return -TARGET_EFAULT;
10235 target_to_host_sigset(&set, p);
10236 unlock_user(p, arg1, 0);
10237 if (arg3) {
10238 puts = &uts;
10239 if (target_to_host_timespec(puts, arg3)) {
10240 return -TARGET_EFAULT;
10241 }
10242 } else {
10243 puts = NULL;
10244 }
10245 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10246 SIGSET_T_SIZE));
10247 if (!is_error(ret)) {
10248 if (arg2) {
10249 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10250 0);
10251 if (!p) {
10252 return -TARGET_EFAULT;
10253 }
10254 host_to_target_siginfo(p, &uinfo);
10255 unlock_user(p, arg2, sizeof(target_siginfo_t));
10256 }
10257 ret = host_to_target_signal(ret);
10258 }
10259 }
10260 return ret;
10261 #endif
10262 #ifdef TARGET_NR_rt_sigtimedwait_time64
10263 case TARGET_NR_rt_sigtimedwait_time64:
10264 {
10265 sigset_t set;
10266 struct timespec uts, *puts;
10267 siginfo_t uinfo;
10268
10269 if (arg4 != sizeof(target_sigset_t)) {
10270 return -TARGET_EINVAL;
10271 }
10272
10273 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10274 if (!p) {
10275 return -TARGET_EFAULT;
10276 }
10277 target_to_host_sigset(&set, p);
10278 unlock_user(p, arg1, 0);
10279 if (arg3) {
10280 puts = &uts;
10281 if (target_to_host_timespec64(puts, arg3)) {
10282 return -TARGET_EFAULT;
10283 }
10284 } else {
10285 puts = NULL;
10286 }
10287 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10288 SIGSET_T_SIZE));
10289 if (!is_error(ret)) {
10290 if (arg2) {
10291 p = lock_user(VERIFY_WRITE, arg2,
10292 sizeof(target_siginfo_t), 0);
10293 if (!p) {
10294 return -TARGET_EFAULT;
10295 }
10296 host_to_target_siginfo(p, &uinfo);
10297 unlock_user(p, arg2, sizeof(target_siginfo_t));
10298 }
10299 ret = host_to_target_signal(ret);
10300 }
10301 }
10302 return ret;
10303 #endif
10304 case TARGET_NR_rt_sigqueueinfo:
10305 {
10306 siginfo_t uinfo;
10307
10308 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10309 if (!p) {
10310 return -TARGET_EFAULT;
10311 }
10312 target_to_host_siginfo(&uinfo, p);
10313 unlock_user(p, arg3, 0);
10314 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10315 }
10316 return ret;
10317 case TARGET_NR_rt_tgsigqueueinfo:
10318 {
10319 siginfo_t uinfo;
10320
10321 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10322 if (!p) {
10323 return -TARGET_EFAULT;
10324 }
10325 target_to_host_siginfo(&uinfo, p);
10326 unlock_user(p, arg4, 0);
10327 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10328 }
10329 return ret;
10330 #ifdef TARGET_NR_sigreturn
10331 case TARGET_NR_sigreturn:
10332 if (block_signals()) {
10333 return -QEMU_ERESTARTSYS;
10334 }
10335 return do_sigreturn(cpu_env);
10336 #endif
10337 case TARGET_NR_rt_sigreturn:
10338 if (block_signals()) {
10339 return -QEMU_ERESTARTSYS;
10340 }
10341 return do_rt_sigreturn(cpu_env);
10342 case TARGET_NR_sethostname:
10343 if (!(p = lock_user_string(arg1)))
10344 return -TARGET_EFAULT;
10345 ret = get_errno(sethostname(p, arg2));
10346 unlock_user(p, arg1, 0);
10347 return ret;
10348 #ifdef TARGET_NR_setrlimit
10349 case TARGET_NR_setrlimit:
10350 {
10351 int resource = target_to_host_resource(arg1);
10352 struct target_rlimit *target_rlim;
10353 struct rlimit rlim;
10354 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10355 return -TARGET_EFAULT;
10356 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10357 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10358 unlock_user_struct(target_rlim, arg2, 0);
10359 /*
10360 * If we just passed through resource limit settings for memory then
10361 * they would also apply to QEMU's own allocations, and QEMU will
10362 * crash or hang or die if its allocations fail. Ideally we would
10363 * track the guest allocations in QEMU and apply the limits ourselves.
10364 * For now, just tell the guest the call succeeded but don't actually
10365 * limit anything.
10366 */
10367 if (resource != RLIMIT_AS &&
10368 resource != RLIMIT_DATA &&
10369 resource != RLIMIT_STACK) {
10370 return get_errno(setrlimit(resource, &rlim));
10371 } else {
10372 return 0;
10373 }
10374 }
10375 #endif
10376 #ifdef TARGET_NR_getrlimit
10377 case TARGET_NR_getrlimit:
10378 {
10379 int resource = target_to_host_resource(arg1);
10380 struct target_rlimit *target_rlim;
10381 struct rlimit rlim;
10382
10383 ret = get_errno(getrlimit(resource, &rlim));
10384 if (!is_error(ret)) {
10385 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10386 return -TARGET_EFAULT;
10387 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10388 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10389 unlock_user_struct(target_rlim, arg2, 1);
10390 }
10391 }
10392 return ret;
10393 #endif
10394 case TARGET_NR_getrusage:
10395 {
10396 struct rusage rusage;
10397 ret = get_errno(getrusage(arg1, &rusage));
10398 if (!is_error(ret)) {
10399 ret = host_to_target_rusage(arg2, &rusage);
10400 }
10401 }
10402 return ret;
10403 #if defined(TARGET_NR_gettimeofday)
10404 case TARGET_NR_gettimeofday:
10405 {
10406 struct timeval tv;
10407 struct timezone tz;
10408
10409 ret = get_errno(gettimeofday(&tv, &tz));
10410 if (!is_error(ret)) {
10411 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10412 return -TARGET_EFAULT;
10413 }
10414 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10415 return -TARGET_EFAULT;
10416 }
10417 }
10418 }
10419 return ret;
10420 #endif
10421 #if defined(TARGET_NR_settimeofday)
10422 case TARGET_NR_settimeofday:
10423 {
10424 struct timeval tv, *ptv = NULL;
10425 struct timezone tz, *ptz = NULL;
10426
10427 if (arg1) {
10428 if (copy_from_user_timeval(&tv, arg1)) {
10429 return -TARGET_EFAULT;
10430 }
10431 ptv = &tv;
10432 }
10433
10434 if (arg2) {
10435 if (copy_from_user_timezone(&tz, arg2)) {
10436 return -TARGET_EFAULT;
10437 }
10438 ptz = &tz;
10439 }
10440
10441 return get_errno(settimeofday(ptv, ptz));
10442 }
10443 #endif
10444 #if defined(TARGET_NR_select)
10445 case TARGET_NR_select:
10446 #if defined(TARGET_WANT_NI_OLD_SELECT)
10447 /* some architectures used to have old_select here
10448 * but now ENOSYS it.
10449 */
10450 ret = -TARGET_ENOSYS;
10451 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10452 ret = do_old_select(arg1);
10453 #else
10454 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10455 #endif
10456 return ret;
10457 #endif
10458 #ifdef TARGET_NR_pselect6
10459 case TARGET_NR_pselect6:
10460 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10461 #endif
10462 #ifdef TARGET_NR_pselect6_time64
10463 case TARGET_NR_pselect6_time64:
10464 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10465 #endif
10466 #ifdef TARGET_NR_symlink
10467 case TARGET_NR_symlink:
10468 {
10469 void *p2;
10470 p = lock_user_string(arg1);
10471 p2 = lock_user_string(arg2);
10472 if (!p || !p2)
10473 ret = -TARGET_EFAULT;
10474 else
10475 ret = get_errno(symlink(p, p2));
10476 unlock_user(p2, arg2, 0);
10477 unlock_user(p, arg1, 0);
10478 }
10479 return ret;
10480 #endif
10481 #if defined(TARGET_NR_symlinkat)
10482 case TARGET_NR_symlinkat:
10483 {
10484 void *p2;
10485 p = lock_user_string(arg1);
10486 p2 = lock_user_string(arg3);
10487 if (!p || !p2)
10488 ret = -TARGET_EFAULT;
10489 else
10490 ret = get_errno(symlinkat(p, arg2, p2));
10491 unlock_user(p2, arg3, 0);
10492 unlock_user(p, arg1, 0);
10493 }
10494 return ret;
10495 #endif
10496 #ifdef TARGET_NR_readlink
10497 case TARGET_NR_readlink:
10498 {
10499 void *p2;
10500 p = lock_user_string(arg1);
10501 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10502 ret = get_errno(do_guest_readlink(p, p2, arg3));
10503 unlock_user(p2, arg2, ret);
10504 unlock_user(p, arg1, 0);
10505 }
10506 return ret;
10507 #endif
10508 #if defined(TARGET_NR_readlinkat)
10509 case TARGET_NR_readlinkat:
10510 {
10511 void *p2;
10512 p = lock_user_string(arg2);
10513 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10514 if (!p || !p2) {
10515 ret = -TARGET_EFAULT;
10516 } else if (!arg4) {
10517 /* Short circuit this for the magic exe check. */
10518 ret = -TARGET_EINVAL;
10519 } else if (is_proc_myself((const char *)p, "exe")) {
10520 /*
10521 * Don't worry about sign mismatch as earlier mapping
10522 * logic would have thrown a bad address error.
10523 */
10524 ret = MIN(strlen(exec_path), arg4);
10525 /* We cannot NUL terminate the string. */
10526 memcpy(p2, exec_path, ret);
10527 } else {
10528 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10529 }
10530 unlock_user(p2, arg3, ret);
10531 unlock_user(p, arg2, 0);
10532 }
10533 return ret;
10534 #endif
10535 #ifdef TARGET_NR_swapon
10536 case TARGET_NR_swapon:
10537 if (!(p = lock_user_string(arg1)))
10538 return -TARGET_EFAULT;
10539 ret = get_errno(swapon(p, arg2));
10540 unlock_user(p, arg1, 0);
10541 return ret;
10542 #endif
10543 case TARGET_NR_reboot:
10544 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10545 /* arg4 must be ignored in all other cases */
10546 p = lock_user_string(arg4);
10547 if (!p) {
10548 return -TARGET_EFAULT;
10549 }
10550 ret = get_errno(reboot(arg1, arg2, arg3, p));
10551 unlock_user(p, arg4, 0);
10552 } else {
10553 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10554 }
10555 return ret;
10556 #ifdef TARGET_NR_mmap
10557 case TARGET_NR_mmap:
10558 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10559 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10560 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10561 || defined(TARGET_S390X)
10562 {
10563 abi_ulong *v;
10564 abi_ulong v1, v2, v3, v4, v5, v6;
10565 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10566 return -TARGET_EFAULT;
10567 v1 = tswapal(v[0]);
10568 v2 = tswapal(v[1]);
10569 v3 = tswapal(v[2]);
10570 v4 = tswapal(v[3]);
10571 v5 = tswapal(v[4]);
10572 v6 = tswapal(v[5]);
10573 unlock_user(v, arg1, 0);
10574 ret = get_errno(target_mmap(v1, v2, v3,
10575 target_to_host_bitmask(v4, mmap_flags_tbl),
10576 v5, v6));
10577 }
10578 #else
10579 /* mmap pointers are always untagged */
10580 ret = get_errno(target_mmap(arg1, arg2, arg3,
10581 target_to_host_bitmask(arg4, mmap_flags_tbl),
10582 arg5,
10583 arg6));
10584 #endif
10585 return ret;
10586 #endif
10587 #ifdef TARGET_NR_mmap2
10588 case TARGET_NR_mmap2:
10589 #ifndef MMAP_SHIFT
10590 #define MMAP_SHIFT 12
10591 #endif
10592 ret = target_mmap(arg1, arg2, arg3,
10593 target_to_host_bitmask(arg4, mmap_flags_tbl),
10594 arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10595 return get_errno(ret);
10596 #endif
10597 case TARGET_NR_munmap:
10598 arg1 = cpu_untagged_addr(cpu, arg1);
10599 return get_errno(target_munmap(arg1, arg2));
10600 case TARGET_NR_mprotect:
10601 arg1 = cpu_untagged_addr(cpu, arg1);
10602 {
10603 TaskState *ts = cpu->opaque;
10604 /* Special hack to detect libc making the stack executable. */
10605 if ((arg3 & PROT_GROWSDOWN)
10606 && arg1 >= ts->info->stack_limit
10607 && arg1 <= ts->info->start_stack) {
10608 arg3 &= ~PROT_GROWSDOWN;
10609 arg2 = arg2 + arg1 - ts->info->stack_limit;
10610 arg1 = ts->info->stack_limit;
10611 }
10612 }
10613 return get_errno(target_mprotect(arg1, arg2, arg3));
10614 #ifdef TARGET_NR_mremap
10615 case TARGET_NR_mremap:
10616 arg1 = cpu_untagged_addr(cpu, arg1);
10617 /* mremap new_addr (arg5) is always untagged */
10618 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10619 #endif
10620 /* ??? msync/mlock/munlock are broken for softmmu. */
10621 #ifdef TARGET_NR_msync
10622 case TARGET_NR_msync:
10623 return get_errno(msync(g2h(cpu, arg1), arg2,
10624 target_to_host_msync_arg(arg3)));
10625 #endif
10626 #ifdef TARGET_NR_mlock
10627 case TARGET_NR_mlock:
10628 return get_errno(mlock(g2h(cpu, arg1), arg2));
10629 #endif
10630 #ifdef TARGET_NR_munlock
10631 case TARGET_NR_munlock:
10632 return get_errno(munlock(g2h(cpu, arg1), arg2));
10633 #endif
10634 #ifdef TARGET_NR_mlockall
10635 case TARGET_NR_mlockall:
10636 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10637 #endif
10638 #ifdef TARGET_NR_munlockall
10639 case TARGET_NR_munlockall:
10640 return get_errno(munlockall());
10641 #endif
10642 #ifdef TARGET_NR_truncate
10643 case TARGET_NR_truncate:
10644 if (!(p = lock_user_string(arg1)))
10645 return -TARGET_EFAULT;
10646 ret = get_errno(truncate(p, arg2));
10647 unlock_user(p, arg1, 0);
10648 return ret;
10649 #endif
10650 #ifdef TARGET_NR_ftruncate
10651 case TARGET_NR_ftruncate:
10652 return get_errno(ftruncate(arg1, arg2));
10653 #endif
10654 case TARGET_NR_fchmod:
10655 return get_errno(fchmod(arg1, arg2));
10656 #if defined(TARGET_NR_fchmodat)
10657 case TARGET_NR_fchmodat:
10658 if (!(p = lock_user_string(arg2)))
10659 return -TARGET_EFAULT;
10660 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10661 unlock_user(p, arg2, 0);
10662 return ret;
10663 #endif
10664 case TARGET_NR_getpriority:
10665 /* Note that negative values are valid for getpriority, so we must
10666 differentiate based on errno settings. */
10667 errno = 0;
10668 ret = getpriority(arg1, arg2);
10669 if (ret == -1 && errno != 0) {
10670 return -host_to_target_errno(errno);
10671 }
10672 #ifdef TARGET_ALPHA
10673 /* Return value is the unbiased priority. Signal no error. */
10674 cpu_env->ir[IR_V0] = 0;
10675 #else
10676 /* Return value is a biased priority to avoid negative numbers. */
10677 ret = 20 - ret;
10678 #endif
10679 return ret;
10680 case TARGET_NR_setpriority:
10681 return get_errno(setpriority(arg1, arg2, arg3));
10682 #ifdef TARGET_NR_statfs
10683 case TARGET_NR_statfs:
10684 if (!(p = lock_user_string(arg1))) {
10685 return -TARGET_EFAULT;
10686 }
10687 ret = get_errno(statfs(path(p), &stfs));
10688 unlock_user(p, arg1, 0);
10689 convert_statfs:
10690 if (!is_error(ret)) {
10691 struct target_statfs *target_stfs;
10692
10693 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10694 return -TARGET_EFAULT;
10695 __put_user(stfs.f_type, &target_stfs->f_type);
10696 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10697 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10698 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10699 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10700 __put_user(stfs.f_files, &target_stfs->f_files);
10701 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10702 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10703 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10704 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10705 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10706 #ifdef _STATFS_F_FLAGS
10707 __put_user(stfs.f_flags, &target_stfs->f_flags);
10708 #else
10709 __put_user(0, &target_stfs->f_flags);
10710 #endif
10711 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10712 unlock_user_struct(target_stfs, arg2, 1);
10713 }
10714 return ret;
10715 #endif
10716 #ifdef TARGET_NR_fstatfs
10717 case TARGET_NR_fstatfs:
10718 ret = get_errno(fstatfs(arg1, &stfs));
10719 goto convert_statfs;
10720 #endif
10721 #ifdef TARGET_NR_statfs64
10722 case TARGET_NR_statfs64:
10723 if (!(p = lock_user_string(arg1))) {
10724 return -TARGET_EFAULT;
10725 }
10726 ret = get_errno(statfs(path(p), &stfs));
10727 unlock_user(p, arg1, 0);
10728 convert_statfs64:
10729 if (!is_error(ret)) {
10730 struct target_statfs64 *target_stfs;
10731
10732 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10733 return -TARGET_EFAULT;
10734 __put_user(stfs.f_type, &target_stfs->f_type);
10735 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10736 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10737 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10738 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10739 __put_user(stfs.f_files, &target_stfs->f_files);
10740 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10741 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10742 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10743 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10744 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10745 #ifdef _STATFS_F_FLAGS
10746 __put_user(stfs.f_flags, &target_stfs->f_flags);
10747 #else
10748 __put_user(0, &target_stfs->f_flags);
10749 #endif
10750 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10751 unlock_user_struct(target_stfs, arg3, 1);
10752 }
10753 return ret;
10754 case TARGET_NR_fstatfs64:
10755 ret = get_errno(fstatfs(arg1, &stfs));
10756 goto convert_statfs64;
10757 #endif
10758 #ifdef TARGET_NR_socketcall
10759 case TARGET_NR_socketcall:
10760 return do_socketcall(arg1, arg2);
10761 #endif
10762 #ifdef TARGET_NR_accept
10763 case TARGET_NR_accept:
10764 return do_accept4(arg1, arg2, arg3, 0);
10765 #endif
10766 #ifdef TARGET_NR_accept4
10767 case TARGET_NR_accept4:
10768 return do_accept4(arg1, arg2, arg3, arg4);
10769 #endif
10770 #ifdef TARGET_NR_bind
10771 case TARGET_NR_bind:
10772 return do_bind(arg1, arg2, arg3);
10773 #endif
10774 #ifdef TARGET_NR_connect
10775 case TARGET_NR_connect:
10776 return do_connect(arg1, arg2, arg3);
10777 #endif
10778 #ifdef TARGET_NR_getpeername
10779 case TARGET_NR_getpeername:
10780 return do_getpeername(arg1, arg2, arg3);
10781 #endif
10782 #ifdef TARGET_NR_getsockname
10783 case TARGET_NR_getsockname:
10784 return do_getsockname(arg1, arg2, arg3);
10785 #endif
10786 #ifdef TARGET_NR_getsockopt
10787 case TARGET_NR_getsockopt:
10788 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10789 #endif
10790 #ifdef TARGET_NR_listen
10791 case TARGET_NR_listen:
10792 return get_errno(listen(arg1, arg2));
10793 #endif
10794 #ifdef TARGET_NR_recv
10795 case TARGET_NR_recv:
10796 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10797 #endif
10798 #ifdef TARGET_NR_recvfrom
10799 case TARGET_NR_recvfrom:
10800 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10801 #endif
10802 #ifdef TARGET_NR_recvmsg
10803 case TARGET_NR_recvmsg:
10804 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10805 #endif
10806 #ifdef TARGET_NR_send
10807 case TARGET_NR_send:
10808 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10809 #endif
10810 #ifdef TARGET_NR_sendmsg
10811 case TARGET_NR_sendmsg:
10812 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10813 #endif
10814 #ifdef TARGET_NR_sendmmsg
10815 case TARGET_NR_sendmmsg:
10816 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10817 #endif
10818 #ifdef TARGET_NR_recvmmsg
10819 case TARGET_NR_recvmmsg:
10820 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10821 #endif
10822 #ifdef TARGET_NR_sendto
10823 case TARGET_NR_sendto:
10824 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10825 #endif
10826 #ifdef TARGET_NR_shutdown
10827 case TARGET_NR_shutdown:
10828 return get_errno(shutdown(arg1, arg2));
10829 #endif
10830 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10831 case TARGET_NR_getrandom:
10832 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10833 if (!p) {
10834 return -TARGET_EFAULT;
10835 }
10836 ret = get_errno(getrandom(p, arg2, arg3));
10837 unlock_user(p, arg1, ret);
10838 return ret;
10839 #endif
10840 #ifdef TARGET_NR_socket
10841 case TARGET_NR_socket:
10842 return do_socket(arg1, arg2, arg3);
10843 #endif
10844 #ifdef TARGET_NR_socketpair
10845 case TARGET_NR_socketpair:
10846 return do_socketpair(arg1, arg2, arg3, arg4);
10847 #endif
10848 #ifdef TARGET_NR_setsockopt
10849 case TARGET_NR_setsockopt:
10850 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10851 #endif
10852 #if defined(TARGET_NR_syslog)
10853 case TARGET_NR_syslog:
10854 {
10855 int len = arg2;
10856
10857 switch (arg1) {
10858 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10859 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10860 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10861 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10862 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10863 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10864 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10865 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10866 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10867 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10868 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10869 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10870 {
10871 if (len < 0) {
10872 return -TARGET_EINVAL;
10873 }
10874 if (len == 0) {
10875 return 0;
10876 }
10877 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10878 if (!p) {
10879 return -TARGET_EFAULT;
10880 }
10881 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10882 unlock_user(p, arg2, arg3);
10883 }
10884 return ret;
10885 default:
10886 return -TARGET_EINVAL;
10887 }
10888 }
10889 break;
10890 #endif
10891 case TARGET_NR_setitimer:
10892 {
10893 struct itimerval value, ovalue, *pvalue;
10894
10895 if (arg2) {
10896 pvalue = &value;
10897 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10898 || copy_from_user_timeval(&pvalue->it_value,
10899 arg2 + sizeof(struct target_timeval)))
10900 return -TARGET_EFAULT;
10901 } else {
10902 pvalue = NULL;
10903 }
10904 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10905 if (!is_error(ret) && arg3) {
10906 if (copy_to_user_timeval(arg3,
10907 &ovalue.it_interval)
10908 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10909 &ovalue.it_value))
10910 return -TARGET_EFAULT;
10911 }
10912 }
10913 return ret;
10914 case TARGET_NR_getitimer:
10915 {
10916 struct itimerval value;
10917
10918 ret = get_errno(getitimer(arg1, &value));
10919 if (!is_error(ret) && arg2) {
10920 if (copy_to_user_timeval(arg2,
10921 &value.it_interval)
10922 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10923 &value.it_value))
10924 return -TARGET_EFAULT;
10925 }
10926 }
10927 return ret;
10928 #ifdef TARGET_NR_stat
10929 case TARGET_NR_stat:
10930 if (!(p = lock_user_string(arg1))) {
10931 return -TARGET_EFAULT;
10932 }
10933 ret = get_errno(stat(path(p), &st));
10934 unlock_user(p, arg1, 0);
10935 goto do_stat;
10936 #endif
10937 #ifdef TARGET_NR_lstat
10938 case TARGET_NR_lstat:
10939 if (!(p = lock_user_string(arg1))) {
10940 return -TARGET_EFAULT;
10941 }
10942 ret = get_errno(lstat(path(p), &st));
10943 unlock_user(p, arg1, 0);
10944 goto do_stat;
10945 #endif
10946 #ifdef TARGET_NR_fstat
10947 case TARGET_NR_fstat:
10948 {
10949 ret = get_errno(fstat(arg1, &st));
10950 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10951 do_stat:
10952 #endif
10953 if (!is_error(ret)) {
10954 struct target_stat *target_st;
10955
10956 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10957 return -TARGET_EFAULT;
10958 memset(target_st, 0, sizeof(*target_st));
10959 __put_user(st.st_dev, &target_st->st_dev);
10960 __put_user(st.st_ino, &target_st->st_ino);
10961 __put_user(st.st_mode, &target_st->st_mode);
10962 __put_user(st.st_uid, &target_st->st_uid);
10963 __put_user(st.st_gid, &target_st->st_gid);
10964 __put_user(st.st_nlink, &target_st->st_nlink);
10965 __put_user(st.st_rdev, &target_st->st_rdev);
10966 __put_user(st.st_size, &target_st->st_size);
10967 __put_user(st.st_blksize, &target_st->st_blksize);
10968 __put_user(st.st_blocks, &target_st->st_blocks);
10969 __put_user(st.st_atime, &target_st->target_st_atime);
10970 __put_user(st.st_mtime, &target_st->target_st_mtime);
10971 __put_user(st.st_ctime, &target_st->target_st_ctime);
10972 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10973 __put_user(st.st_atim.tv_nsec,
10974 &target_st->target_st_atime_nsec);
10975 __put_user(st.st_mtim.tv_nsec,
10976 &target_st->target_st_mtime_nsec);
10977 __put_user(st.st_ctim.tv_nsec,
10978 &target_st->target_st_ctime_nsec);
10979 #endif
10980 unlock_user_struct(target_st, arg2, 1);
10981 }
10982 }
10983 return ret;
10984 #endif
10985 case TARGET_NR_vhangup:
10986 return get_errno(vhangup());
10987 #ifdef TARGET_NR_syscall
10988 case TARGET_NR_syscall:
10989 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10990 arg6, arg7, arg8, 0);
10991 #endif
10992 #if defined(TARGET_NR_wait4)
10993 case TARGET_NR_wait4:
10994 {
10995 int status;
10996 abi_long status_ptr = arg2;
10997 struct rusage rusage, *rusage_ptr;
10998 abi_ulong target_rusage = arg4;
10999 abi_long rusage_err;
11000 if (target_rusage)
11001 rusage_ptr = &rusage;
11002 else
11003 rusage_ptr = NULL;
11004 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11005 if (!is_error(ret)) {
11006 if (status_ptr && ret) {
11007 status = host_to_target_waitstatus(status);
11008 if (put_user_s32(status, status_ptr))
11009 return -TARGET_EFAULT;
11010 }
11011 if (target_rusage) {
11012 rusage_err = host_to_target_rusage(target_rusage, &rusage);
11013 if (rusage_err) {
11014 ret = rusage_err;
11015 }
11016 }
11017 }
11018 }
11019 return ret;
11020 #endif
11021 #ifdef TARGET_NR_swapoff
11022 case TARGET_NR_swapoff:
11023 if (!(p = lock_user_string(arg1)))
11024 return -TARGET_EFAULT;
11025 ret = get_errno(swapoff(p));
11026 unlock_user(p, arg1, 0);
11027 return ret;
11028 #endif
11029 case TARGET_NR_sysinfo:
11030 {
11031 struct target_sysinfo *target_value;
11032 struct sysinfo value;
11033 ret = get_errno(sysinfo(&value));
11034 if (!is_error(ret) && arg1)
11035 {
11036 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11037 return -TARGET_EFAULT;
11038 __put_user(value.uptime, &target_value->uptime);
11039 __put_user(value.loads[0], &target_value->loads[0]);
11040 __put_user(value.loads[1], &target_value->loads[1]);
11041 __put_user(value.loads[2], &target_value->loads[2]);
11042 __put_user(value.totalram, &target_value->totalram);
11043 __put_user(value.freeram, &target_value->freeram);
11044 __put_user(value.sharedram, &target_value->sharedram);
11045 __put_user(value.bufferram, &target_value->bufferram);
11046 __put_user(value.totalswap, &target_value->totalswap);
11047 __put_user(value.freeswap, &target_value->freeswap);
11048 __put_user(value.procs, &target_value->procs);
11049 __put_user(value.totalhigh, &target_value->totalhigh);
11050 __put_user(value.freehigh, &target_value->freehigh);
11051 __put_user(value.mem_unit, &target_value->mem_unit);
11052 unlock_user_struct(target_value, arg1, 1);
11053 }
11054 }
11055 return ret;
11056 #ifdef TARGET_NR_ipc
11057 case TARGET_NR_ipc:
11058 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11059 #endif
11060 #ifdef TARGET_NR_semget
11061 case TARGET_NR_semget:
11062 return get_errno(semget(arg1, arg2, arg3));
11063 #endif
11064 #ifdef TARGET_NR_semop
11065 case TARGET_NR_semop:
11066 return do_semtimedop(arg1, arg2, arg3, 0, false);
11067 #endif
11068 #ifdef TARGET_NR_semtimedop
11069 case TARGET_NR_semtimedop:
11070 return do_semtimedop(arg1, arg2, arg3, arg4, false);
11071 #endif
11072 #ifdef TARGET_NR_semtimedop_time64
11073 case TARGET_NR_semtimedop_time64:
11074 return do_semtimedop(arg1, arg2, arg3, arg4, true);
11075 #endif
11076 #ifdef TARGET_NR_semctl
11077 case TARGET_NR_semctl:
11078 return do_semctl(arg1, arg2, arg3, arg4);
11079 #endif
11080 #ifdef TARGET_NR_msgctl
11081 case TARGET_NR_msgctl:
11082 return do_msgctl(arg1, arg2, arg3);
11083 #endif
11084 #ifdef TARGET_NR_msgget
11085 case TARGET_NR_msgget:
11086 return get_errno(msgget(arg1, arg2));
11087 #endif
11088 #ifdef TARGET_NR_msgrcv
11089 case TARGET_NR_msgrcv:
11090 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11091 #endif
11092 #ifdef TARGET_NR_msgsnd
11093 case TARGET_NR_msgsnd:
11094 return do_msgsnd(arg1, arg2, arg3, arg4);
11095 #endif
11096 #ifdef TARGET_NR_shmget
11097 case TARGET_NR_shmget:
11098 return get_errno(shmget(arg1, arg2, arg3));
11099 #endif
11100 #ifdef TARGET_NR_shmctl
11101 case TARGET_NR_shmctl:
11102 return do_shmctl(arg1, arg2, arg3);
11103 #endif
11104 #ifdef TARGET_NR_shmat
11105 case TARGET_NR_shmat:
11106 return do_shmat(cpu_env, arg1, arg2, arg3);
11107 #endif
11108 #ifdef TARGET_NR_shmdt
11109 case TARGET_NR_shmdt:
11110 return do_shmdt(arg1);
11111 #endif
11112 case TARGET_NR_fsync:
11113 return get_errno(fsync(arg1));
11114 case TARGET_NR_clone:
11115 /* Linux manages to have three different orderings for its
11116 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11117 * match the kernel's CONFIG_CLONE_* settings.
11118 * Microblaze is further special in that it uses a sixth
11119 * implicit argument to clone for the TLS pointer.
11120 */
11121 #if defined(TARGET_MICROBLAZE)
11122 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11123 #elif defined(TARGET_CLONE_BACKWARDS)
11124 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11125 #elif defined(TARGET_CLONE_BACKWARDS2)
11126 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11127 #else
11128 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11129 #endif
11130 return ret;
11131 #ifdef __NR_exit_group
11132 /* new thread calls */
11133 case TARGET_NR_exit_group:
11134 preexit_cleanup(cpu_env, arg1);
11135 return get_errno(exit_group(arg1));
11136 #endif
11137 case TARGET_NR_setdomainname:
11138 if (!(p = lock_user_string(arg1)))
11139 return -TARGET_EFAULT;
11140 ret = get_errno(setdomainname(p, arg2));
11141 unlock_user(p, arg1, 0);
11142 return ret;
11143 case TARGET_NR_uname:
11144 /* no need to transcode because we use the linux syscall */
11145 {
11146 struct new_utsname * buf;
11147
11148 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11149 return -TARGET_EFAULT;
11150 ret = get_errno(sys_uname(buf));
11151 if (!is_error(ret)) {
11152 /* Overwrite the native machine name with whatever is being
11153 emulated. */
11154 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11155 sizeof(buf->machine));
11156 /* Allow the user to override the reported release. */
11157 if (qemu_uname_release && *qemu_uname_release) {
11158 g_strlcpy(buf->release, qemu_uname_release,
11159 sizeof(buf->release));
11160 }
11161 }
11162 unlock_user_struct(buf, arg1, 1);
11163 }
11164 return ret;
11165 #ifdef TARGET_I386
11166 case TARGET_NR_modify_ldt:
11167 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11168 #if !defined(TARGET_X86_64)
11169 case TARGET_NR_vm86:
11170 return do_vm86(cpu_env, arg1, arg2);
11171 #endif
11172 #endif
11173 #if defined(TARGET_NR_adjtimex)
11174 case TARGET_NR_adjtimex:
11175 {
11176 struct timex host_buf;
11177
11178 if (target_to_host_timex(&host_buf, arg1) != 0) {
11179 return -TARGET_EFAULT;
11180 }
11181 ret = get_errno(adjtimex(&host_buf));
11182 if (!is_error(ret)) {
11183 if (host_to_target_timex(arg1, &host_buf) != 0) {
11184 return -TARGET_EFAULT;
11185 }
11186 }
11187 }
11188 return ret;
11189 #endif
11190 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11191 case TARGET_NR_clock_adjtime:
11192 {
11193 struct timex htx, *phtx = &htx;
11194
11195 if (target_to_host_timex(phtx, arg2) != 0) {
11196 return -TARGET_EFAULT;
11197 }
11198 ret = get_errno(clock_adjtime(arg1, phtx));
11199 if (!is_error(ret) && phtx) {
11200 if (host_to_target_timex(arg2, phtx) != 0) {
11201 return -TARGET_EFAULT;
11202 }
11203 }
11204 }
11205 return ret;
11206 #endif
11207 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11208 case TARGET_NR_clock_adjtime64:
11209 {
11210 struct timex htx;
11211
11212 if (target_to_host_timex64(&htx, arg2) != 0) {
11213 return -TARGET_EFAULT;
11214 }
11215 ret = get_errno(clock_adjtime(arg1, &htx));
11216 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11217 return -TARGET_EFAULT;
11218 }
11219 }
11220 return ret;
11221 #endif
11222 case TARGET_NR_getpgid:
11223 return get_errno(getpgid(arg1));
11224 case TARGET_NR_fchdir:
11225 return get_errno(fchdir(arg1));
11226 case TARGET_NR_personality:
11227 return get_errno(personality(arg1));
11228 #ifdef TARGET_NR__llseek /* Not on alpha */
11229 case TARGET_NR__llseek:
11230 {
11231 int64_t res;
11232 #if !defined(__NR_llseek)
11233 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11234 if (res == -1) {
11235 ret = get_errno(res);
11236 } else {
11237 ret = 0;
11238 }
11239 #else
11240 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11241 #endif
11242 if ((ret == 0) && put_user_s64(res, arg4)) {
11243 return -TARGET_EFAULT;
11244 }
11245 }
11246 return ret;
11247 #endif
11248 #ifdef TARGET_NR_getdents
11249 case TARGET_NR_getdents:
11250 return do_getdents(arg1, arg2, arg3);
11251 #endif /* TARGET_NR_getdents */
11252 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11253 case TARGET_NR_getdents64:
11254 return do_getdents64(arg1, arg2, arg3);
11255 #endif /* TARGET_NR_getdents64 */
11256 #if defined(TARGET_NR__newselect)
11257 case TARGET_NR__newselect:
11258 return do_select(arg1, arg2, arg3, arg4, arg5);
11259 #endif
11260 #ifdef TARGET_NR_poll
11261 case TARGET_NR_poll:
11262 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11263 #endif
11264 #ifdef TARGET_NR_ppoll
11265 case TARGET_NR_ppoll:
11266 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11267 #endif
11268 #ifdef TARGET_NR_ppoll_time64
11269 case TARGET_NR_ppoll_time64:
11270 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11271 #endif
11272 case TARGET_NR_flock:
11273 /* NOTE: the flock constant seems to be the same for every
11274 Linux platform */
11275 return get_errno(safe_flock(arg1, arg2));
11276 case TARGET_NR_readv:
11277 {
11278 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11279 if (vec != NULL) {
11280 ret = get_errno(safe_readv(arg1, vec, arg3));
11281 unlock_iovec(vec, arg2, arg3, 1);
11282 } else {
11283 ret = -host_to_target_errno(errno);
11284 }
11285 }
11286 return ret;
11287 case TARGET_NR_writev:
11288 {
11289 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11290 if (vec != NULL) {
11291 ret = get_errno(safe_writev(arg1, vec, arg3));
11292 unlock_iovec(vec, arg2, arg3, 0);
11293 } else {
11294 ret = -host_to_target_errno(errno);
11295 }
11296 }
11297 return ret;
11298 #if defined(TARGET_NR_preadv)
11299 case TARGET_NR_preadv:
11300 {
11301 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11302 if (vec != NULL) {
11303 unsigned long low, high;
11304
11305 target_to_host_low_high(arg4, arg5, &low, &high);
11306 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11307 unlock_iovec(vec, arg2, arg3, 1);
11308 } else {
11309 ret = -host_to_target_errno(errno);
11310 }
11311 }
11312 return ret;
11313 #endif
11314 #if defined(TARGET_NR_pwritev)
11315 case TARGET_NR_pwritev:
11316 {
11317 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11318 if (vec != NULL) {
11319 unsigned long low, high;
11320
11321 target_to_host_low_high(arg4, arg5, &low, &high);
11322 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11323 unlock_iovec(vec, arg2, arg3, 0);
11324 } else {
11325 ret = -host_to_target_errno(errno);
11326 }
11327 }
11328 return ret;
11329 #endif
11330 case TARGET_NR_getsid:
11331 return get_errno(getsid(arg1));
11332 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11333 case TARGET_NR_fdatasync:
11334 return get_errno(fdatasync(arg1));
11335 #endif
11336 case TARGET_NR_sched_getaffinity:
11337 {
11338 unsigned int mask_size;
11339 unsigned long *mask;
11340
11341 /*
11342 * sched_getaffinity needs multiples of ulong, so need to take
11343 * care of mismatches between target ulong and host ulong sizes.
11344 */
11345 if (arg2 & (sizeof(abi_ulong) - 1)) {
11346 return -TARGET_EINVAL;
11347 }
11348 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11349
11350 mask = alloca(mask_size);
11351 memset(mask, 0, mask_size);
11352 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11353
11354 if (!is_error(ret)) {
11355 if (ret > arg2) {
11356 /* More data returned than the caller's buffer will fit.
11357 * This only happens if sizeof(abi_long) < sizeof(long)
11358 * and the caller passed us a buffer holding an odd number
11359 * of abi_longs. If the host kernel is actually using the
11360 * extra 4 bytes then fail EINVAL; otherwise we can just
11361 * ignore them and only copy the interesting part.
11362 */
11363 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11364 if (numcpus > arg2 * 8) {
11365 return -TARGET_EINVAL;
11366 }
11367 ret = arg2;
11368 }
11369
11370 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11371 return -TARGET_EFAULT;
11372 }
11373 }
11374 }
11375 return ret;
11376 case TARGET_NR_sched_setaffinity:
11377 {
11378 unsigned int mask_size;
11379 unsigned long *mask;
11380
11381 /*
11382 * sched_setaffinity needs multiples of ulong, so need to take
11383 * care of mismatches between target ulong and host ulong sizes.
11384 */
11385 if (arg2 & (sizeof(abi_ulong) - 1)) {
11386 return -TARGET_EINVAL;
11387 }
11388 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11389 mask = alloca(mask_size);
11390
11391 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11392 if (ret) {
11393 return ret;
11394 }
11395
11396 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11397 }
11398 case TARGET_NR_getcpu:
11399 {
11400 unsigned cpu, node;
11401 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
11402 arg2 ? &node : NULL,
11403 NULL));
11404 if (is_error(ret)) {
11405 return ret;
11406 }
11407 if (arg1 && put_user_u32(cpu, arg1)) {
11408 return -TARGET_EFAULT;
11409 }
11410 if (arg2 && put_user_u32(node, arg2)) {
11411 return -TARGET_EFAULT;
11412 }
11413 }
11414 return ret;
11415 case TARGET_NR_sched_setparam:
11416 {
11417 struct target_sched_param *target_schp;
11418 struct sched_param schp;
11419
11420 if (arg2 == 0) {
11421 return -TARGET_EINVAL;
11422 }
11423 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11424 return -TARGET_EFAULT;
11425 }
11426 schp.sched_priority = tswap32(target_schp->sched_priority);
11427 unlock_user_struct(target_schp, arg2, 0);
11428 return get_errno(sys_sched_setparam(arg1, &schp));
11429 }
11430 case TARGET_NR_sched_getparam:
11431 {
11432 struct target_sched_param *target_schp;
11433 struct sched_param schp;
11434
11435 if (arg2 == 0) {
11436 return -TARGET_EINVAL;
11437 }
11438 ret = get_errno(sys_sched_getparam(arg1, &schp));
11439 if (!is_error(ret)) {
11440 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11441 return -TARGET_EFAULT;
11442 }
11443 target_schp->sched_priority = tswap32(schp.sched_priority);
11444 unlock_user_struct(target_schp, arg2, 1);
11445 }
11446 }
11447 return ret;
11448 case TARGET_NR_sched_setscheduler:
11449 {
11450 struct target_sched_param *target_schp;
11451 struct sched_param schp;
11452 if (arg3 == 0) {
11453 return -TARGET_EINVAL;
11454 }
11455 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11456 return -TARGET_EFAULT;
11457 }
11458 schp.sched_priority = tswap32(target_schp->sched_priority);
11459 unlock_user_struct(target_schp, arg3, 0);
11460 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11461 }
11462 case TARGET_NR_sched_getscheduler:
11463 return get_errno(sys_sched_getscheduler(arg1));
11464 case TARGET_NR_sched_getattr:
11465 {
11466 struct target_sched_attr *target_scha;
11467 struct sched_attr scha;
11468 if (arg2 == 0) {
11469 return -TARGET_EINVAL;
11470 }
11471 if (arg3 > sizeof(scha)) {
11472 arg3 = sizeof(scha);
11473 }
11474 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11475 if (!is_error(ret)) {
11476 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11477 if (!target_scha) {
11478 return -TARGET_EFAULT;
11479 }
11480 target_scha->size = tswap32(scha.size);
11481 target_scha->sched_policy = tswap32(scha.sched_policy);
11482 target_scha->sched_flags = tswap64(scha.sched_flags);
11483 target_scha->sched_nice = tswap32(scha.sched_nice);
11484 target_scha->sched_priority = tswap32(scha.sched_priority);
11485 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11486 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11487 target_scha->sched_period = tswap64(scha.sched_period);
11488 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11489 target_scha->sched_util_min = tswap32(scha.sched_util_min);
11490 target_scha->sched_util_max = tswap32(scha.sched_util_max);
11491 }
11492 unlock_user(target_scha, arg2, arg3);
11493 }
11494 return ret;
11495 }
11496 case TARGET_NR_sched_setattr:
11497 {
11498 struct target_sched_attr *target_scha;
11499 struct sched_attr scha;
11500 uint32_t size;
11501 int zeroed;
11502 if (arg2 == 0) {
11503 return -TARGET_EINVAL;
11504 }
11505 if (get_user_u32(size, arg2)) {
11506 return -TARGET_EFAULT;
11507 }
11508 if (!size) {
11509 size = offsetof(struct target_sched_attr, sched_util_min);
11510 }
11511 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11512 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11513 return -TARGET_EFAULT;
11514 }
11515 return -TARGET_E2BIG;
11516 }
11517
11518 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11519 if (zeroed < 0) {
11520 return zeroed;
11521 } else if (zeroed == 0) {
11522 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11523 return -TARGET_EFAULT;
11524 }
11525 return -TARGET_E2BIG;
11526 }
11527 if (size > sizeof(struct target_sched_attr)) {
11528 size = sizeof(struct target_sched_attr);
11529 }
11530
11531 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11532 if (!target_scha) {
11533 return -TARGET_EFAULT;
11534 }
11535 scha.size = size;
11536 scha.sched_policy = tswap32(target_scha->sched_policy);
11537 scha.sched_flags = tswap64(target_scha->sched_flags);
11538 scha.sched_nice = tswap32(target_scha->sched_nice);
11539 scha.sched_priority = tswap32(target_scha->sched_priority);
11540 scha.sched_runtime = tswap64(target_scha->sched_runtime);
11541 scha.sched_deadline = tswap64(target_scha->sched_deadline);
11542 scha.sched_period = tswap64(target_scha->sched_period);
11543 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11544 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11545 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11546 }
11547 unlock_user(target_scha, arg2, 0);
11548 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11549 }
11550 case TARGET_NR_sched_yield:
11551 return get_errno(sched_yield());
11552 case TARGET_NR_sched_get_priority_max:
11553 return get_errno(sched_get_priority_max(arg1));
11554 case TARGET_NR_sched_get_priority_min:
11555 return get_errno(sched_get_priority_min(arg1));
11556 #ifdef TARGET_NR_sched_rr_get_interval
11557 case TARGET_NR_sched_rr_get_interval:
11558 {
11559 struct timespec ts;
11560 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11561 if (!is_error(ret)) {
11562 ret = host_to_target_timespec(arg2, &ts);
11563 }
11564 }
11565 return ret;
11566 #endif
11567 #ifdef TARGET_NR_sched_rr_get_interval_time64
11568 case TARGET_NR_sched_rr_get_interval_time64:
11569 {
11570 struct timespec ts;
11571 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11572 if (!is_error(ret)) {
11573 ret = host_to_target_timespec64(arg2, &ts);
11574 }
11575 }
11576 return ret;
11577 #endif
11578 #if defined(TARGET_NR_nanosleep)
11579 case TARGET_NR_nanosleep:
11580 {
11581 struct timespec req, rem;
11582 target_to_host_timespec(&req, arg1);
11583 ret = get_errno(safe_nanosleep(&req, &rem));
11584 if (is_error(ret) && arg2) {
11585 host_to_target_timespec(arg2, &rem);
11586 }
11587 }
11588 return ret;
11589 #endif
11590 case TARGET_NR_prctl:
11591 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11592 break;
11593 #ifdef TARGET_NR_arch_prctl
11594 case TARGET_NR_arch_prctl:
11595 return do_arch_prctl(cpu_env, arg1, arg2);
11596 #endif
11597 #ifdef TARGET_NR_pread64
11598 case TARGET_NR_pread64:
11599 if (regpairs_aligned(cpu_env, num)) {
11600 arg4 = arg5;
11601 arg5 = arg6;
11602 }
11603 if (arg2 == 0 && arg3 == 0) {
11604 /* Special-case NULL buffer and zero length, which should succeed */
11605 p = 0;
11606 } else {
11607 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11608 if (!p) {
11609 return -TARGET_EFAULT;
11610 }
11611 }
11612 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11613 unlock_user(p, arg2, ret);
11614 return ret;
11615 case TARGET_NR_pwrite64:
11616 if (regpairs_aligned(cpu_env, num)) {
11617 arg4 = arg5;
11618 arg5 = arg6;
11619 }
11620 if (arg2 == 0 && arg3 == 0) {
11621 /* Special-case NULL buffer and zero length, which should succeed */
11622 p = 0;
11623 } else {
11624 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11625 if (!p) {
11626 return -TARGET_EFAULT;
11627 }
11628 }
11629 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11630 unlock_user(p, arg2, 0);
11631 return ret;
11632 #endif
11633 case TARGET_NR_getcwd:
11634 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11635 return -TARGET_EFAULT;
11636 ret = get_errno(sys_getcwd1(p, arg2));
11637 unlock_user(p, arg1, ret);
11638 return ret;
11639 case TARGET_NR_capget:
11640 case TARGET_NR_capset:
11641 {
11642 struct target_user_cap_header *target_header;
11643 struct target_user_cap_data *target_data = NULL;
11644 struct __user_cap_header_struct header;
11645 struct __user_cap_data_struct data[2];
11646 struct __user_cap_data_struct *dataptr = NULL;
11647 int i, target_datalen;
11648 int data_items = 1;
11649
11650 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11651 return -TARGET_EFAULT;
11652 }
11653 header.version = tswap32(target_header->version);
11654 header.pid = tswap32(target_header->pid);
11655
11656 if (header.version != _LINUX_CAPABILITY_VERSION) {
11657 /* Version 2 and up takes pointer to two user_data structs */
11658 data_items = 2;
11659 }
11660
11661 target_datalen = sizeof(*target_data) * data_items;
11662
11663 if (arg2) {
11664 if (num == TARGET_NR_capget) {
11665 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11666 } else {
11667 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11668 }
11669 if (!target_data) {
11670 unlock_user_struct(target_header, arg1, 0);
11671 return -TARGET_EFAULT;
11672 }
11673
11674 if (num == TARGET_NR_capset) {
11675 for (i = 0; i < data_items; i++) {
11676 data[i].effective = tswap32(target_data[i].effective);
11677 data[i].permitted = tswap32(target_data[i].permitted);
11678 data[i].inheritable = tswap32(target_data[i].inheritable);
11679 }
11680 }
11681
11682 dataptr = data;
11683 }
11684
11685 if (num == TARGET_NR_capget) {
11686 ret = get_errno(capget(&header, dataptr));
11687 } else {
11688 ret = get_errno(capset(&header, dataptr));
11689 }
11690
11691 /* The kernel always updates version for both capget and capset */
11692 target_header->version = tswap32(header.version);
11693 unlock_user_struct(target_header, arg1, 1);
11694
11695 if (arg2) {
11696 if (num == TARGET_NR_capget) {
11697 for (i = 0; i < data_items; i++) {
11698 target_data[i].effective = tswap32(data[i].effective);
11699 target_data[i].permitted = tswap32(data[i].permitted);
11700 target_data[i].inheritable = tswap32(data[i].inheritable);
11701 }
11702 unlock_user(target_data, arg2, target_datalen);
11703 } else {
11704 unlock_user(target_data, arg2, 0);
11705 }
11706 }
11707 return ret;
11708 }
11709 case TARGET_NR_sigaltstack:
11710 return do_sigaltstack(arg1, arg2, cpu_env);
11711
11712 #ifdef CONFIG_SENDFILE
11713 #ifdef TARGET_NR_sendfile
11714 case TARGET_NR_sendfile:
11715 {
11716 off_t *offp = NULL;
11717 off_t off;
11718 if (arg3) {
11719 ret = get_user_sal(off, arg3);
11720 if (is_error(ret)) {
11721 return ret;
11722 }
11723 offp = &off;
11724 }
11725 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11726 if (!is_error(ret) && arg3) {
11727 abi_long ret2 = put_user_sal(off, arg3);
11728 if (is_error(ret2)) {
11729 ret = ret2;
11730 }
11731 }
11732 return ret;
11733 }
11734 #endif
11735 #ifdef TARGET_NR_sendfile64
11736 case TARGET_NR_sendfile64:
11737 {
11738 off_t *offp = NULL;
11739 off_t off;
11740 if (arg3) {
11741 ret = get_user_s64(off, arg3);
11742 if (is_error(ret)) {
11743 return ret;
11744 }
11745 offp = &off;
11746 }
11747 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11748 if (!is_error(ret) && arg3) {
11749 abi_long ret2 = put_user_s64(off, arg3);
11750 if (is_error(ret2)) {
11751 ret = ret2;
11752 }
11753 }
11754 return ret;
11755 }
11756 #endif
11757 #endif
11758 #ifdef TARGET_NR_vfork
11759 case TARGET_NR_vfork:
11760 return get_errno(do_fork(cpu_env,
11761 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11762 0, 0, 0, 0));
11763 #endif
11764 #ifdef TARGET_NR_ugetrlimit
11765 case TARGET_NR_ugetrlimit:
11766 {
11767 struct rlimit rlim;
11768 int resource = target_to_host_resource(arg1);
11769 ret = get_errno(getrlimit(resource, &rlim));
11770 if (!is_error(ret)) {
11771 struct target_rlimit *target_rlim;
11772 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11773 return -TARGET_EFAULT;
11774 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11775 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11776 unlock_user_struct(target_rlim, arg2, 1);
11777 }
11778 return ret;
11779 }
11780 #endif
11781 #ifdef TARGET_NR_truncate64
11782 case TARGET_NR_truncate64:
11783 if (!(p = lock_user_string(arg1)))
11784 return -TARGET_EFAULT;
11785 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11786 unlock_user(p, arg1, 0);
11787 return ret;
11788 #endif
11789 #ifdef TARGET_NR_ftruncate64
11790 case TARGET_NR_ftruncate64:
11791 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11792 #endif
11793 #ifdef TARGET_NR_stat64
11794 case TARGET_NR_stat64:
11795 if (!(p = lock_user_string(arg1))) {
11796 return -TARGET_EFAULT;
11797 }
11798 ret = get_errno(stat(path(p), &st));
11799 unlock_user(p, arg1, 0);
11800 if (!is_error(ret))
11801 ret = host_to_target_stat64(cpu_env, arg2, &st);
11802 return ret;
11803 #endif
11804 #ifdef TARGET_NR_lstat64
11805 case TARGET_NR_lstat64:
11806 if (!(p = lock_user_string(arg1))) {
11807 return -TARGET_EFAULT;
11808 }
11809 ret = get_errno(lstat(path(p), &st));
11810 unlock_user(p, arg1, 0);
11811 if (!is_error(ret))
11812 ret = host_to_target_stat64(cpu_env, arg2, &st);
11813 return ret;
11814 #endif
11815 #ifdef TARGET_NR_fstat64
11816 case TARGET_NR_fstat64:
11817 ret = get_errno(fstat(arg1, &st));
11818 if (!is_error(ret))
11819 ret = host_to_target_stat64(cpu_env, arg2, &st);
11820 return ret;
11821 #endif
11822 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11823 #ifdef TARGET_NR_fstatat64
11824 case TARGET_NR_fstatat64:
11825 #endif
11826 #ifdef TARGET_NR_newfstatat
11827 case TARGET_NR_newfstatat:
11828 #endif
11829 if (!(p = lock_user_string(arg2))) {
11830 return -TARGET_EFAULT;
11831 }
11832 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11833 unlock_user(p, arg2, 0);
11834 if (!is_error(ret))
11835 ret = host_to_target_stat64(cpu_env, arg3, &st);
11836 return ret;
11837 #endif
11838 #if defined(TARGET_NR_statx)
11839 case TARGET_NR_statx:
11840 {
11841 struct target_statx *target_stx;
11842 int dirfd = arg1;
11843 int flags = arg3;
11844
11845 p = lock_user_string(arg2);
11846 if (p == NULL) {
11847 return -TARGET_EFAULT;
11848 }
11849 #if defined(__NR_statx)
11850 {
11851 /*
11852 * It is assumed that struct statx is architecture independent.
11853 */
11854 struct target_statx host_stx;
11855 int mask = arg4;
11856
11857 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11858 if (!is_error(ret)) {
11859 if (host_to_target_statx(&host_stx, arg5) != 0) {
11860 unlock_user(p, arg2, 0);
11861 return -TARGET_EFAULT;
11862 }
11863 }
11864
11865 if (ret != -TARGET_ENOSYS) {
11866 unlock_user(p, arg2, 0);
11867 return ret;
11868 }
11869 }
11870 #endif
11871 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11872 unlock_user(p, arg2, 0);
11873
11874 if (!is_error(ret)) {
11875 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11876 return -TARGET_EFAULT;
11877 }
11878 memset(target_stx, 0, sizeof(*target_stx));
11879 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11880 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11881 __put_user(st.st_ino, &target_stx->stx_ino);
11882 __put_user(st.st_mode, &target_stx->stx_mode);
11883 __put_user(st.st_uid, &target_stx->stx_uid);
11884 __put_user(st.st_gid, &target_stx->stx_gid);
11885 __put_user(st.st_nlink, &target_stx->stx_nlink);
11886 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11887 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11888 __put_user(st.st_size, &target_stx->stx_size);
11889 __put_user(st.st_blksize, &target_stx->stx_blksize);
11890 __put_user(st.st_blocks, &target_stx->stx_blocks);
11891 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11892 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11893 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11894 unlock_user_struct(target_stx, arg5, 1);
11895 }
11896 }
11897 return ret;
11898 #endif
11899 #ifdef TARGET_NR_lchown
11900 case TARGET_NR_lchown:
11901 if (!(p = lock_user_string(arg1)))
11902 return -TARGET_EFAULT;
11903 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11904 unlock_user(p, arg1, 0);
11905 return ret;
11906 #endif
11907 #ifdef TARGET_NR_getuid
11908 case TARGET_NR_getuid:
11909 return get_errno(high2lowuid(getuid()));
11910 #endif
11911 #ifdef TARGET_NR_getgid
11912 case TARGET_NR_getgid:
11913 return get_errno(high2lowgid(getgid()));
11914 #endif
11915 #ifdef TARGET_NR_geteuid
11916 case TARGET_NR_geteuid:
11917 return get_errno(high2lowuid(geteuid()));
11918 #endif
11919 #ifdef TARGET_NR_getegid
11920 case TARGET_NR_getegid:
11921 return get_errno(high2lowgid(getegid()));
11922 #endif
11923 case TARGET_NR_setreuid:
11924 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11925 case TARGET_NR_setregid:
11926 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11927 case TARGET_NR_getgroups:
11928 { /* the same code as for TARGET_NR_getgroups32 */
11929 int gidsetsize = arg1;
11930 target_id *target_grouplist;
11931 g_autofree gid_t *grouplist = NULL;
11932 int i;
11933
11934 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11935 return -TARGET_EINVAL;
11936 }
11937 if (gidsetsize > 0) {
11938 grouplist = g_try_new(gid_t, gidsetsize);
11939 if (!grouplist) {
11940 return -TARGET_ENOMEM;
11941 }
11942 }
11943 ret = get_errno(getgroups(gidsetsize, grouplist));
11944 if (!is_error(ret) && gidsetsize > 0) {
11945 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11946 gidsetsize * sizeof(target_id), 0);
11947 if (!target_grouplist) {
11948 return -TARGET_EFAULT;
11949 }
11950 for (i = 0; i < ret; i++) {
11951 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11952 }
11953 unlock_user(target_grouplist, arg2,
11954 gidsetsize * sizeof(target_id));
11955 }
11956 return ret;
11957 }
11958 case TARGET_NR_setgroups:
11959 { /* the same code as for TARGET_NR_setgroups32 */
11960 int gidsetsize = arg1;
11961 target_id *target_grouplist;
11962 g_autofree gid_t *grouplist = NULL;
11963 int i;
11964
11965 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11966 return -TARGET_EINVAL;
11967 }
11968 if (gidsetsize > 0) {
11969 grouplist = g_try_new(gid_t, gidsetsize);
11970 if (!grouplist) {
11971 return -TARGET_ENOMEM;
11972 }
11973 target_grouplist = lock_user(VERIFY_READ, arg2,
11974 gidsetsize * sizeof(target_id), 1);
11975 if (!target_grouplist) {
11976 return -TARGET_EFAULT;
11977 }
11978 for (i = 0; i < gidsetsize; i++) {
11979 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11980 }
11981 unlock_user(target_grouplist, arg2,
11982 gidsetsize * sizeof(target_id));
11983 }
11984 return get_errno(setgroups(gidsetsize, grouplist));
11985 }
11986 case TARGET_NR_fchown:
11987 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11988 #if defined(TARGET_NR_fchownat)
11989 case TARGET_NR_fchownat:
11990 if (!(p = lock_user_string(arg2)))
11991 return -TARGET_EFAULT;
11992 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11993 low2highgid(arg4), arg5));
11994 unlock_user(p, arg2, 0);
11995 return ret;
11996 #endif
11997 #ifdef TARGET_NR_setresuid
11998 case TARGET_NR_setresuid:
11999 return get_errno(sys_setresuid(low2highuid(arg1),
12000 low2highuid(arg2),
12001 low2highuid(arg3)));
12002 #endif
12003 #ifdef TARGET_NR_getresuid
12004 case TARGET_NR_getresuid:
12005 {
12006 uid_t ruid, euid, suid;
12007 ret = get_errno(getresuid(&ruid, &euid, &suid));
12008 if (!is_error(ret)) {
12009 if (put_user_id(high2lowuid(ruid), arg1)
12010 || put_user_id(high2lowuid(euid), arg2)
12011 || put_user_id(high2lowuid(suid), arg3))
12012 return -TARGET_EFAULT;
12013 }
12014 }
12015 return ret;
12016 #endif
12017 #ifdef TARGET_NR_getresgid
12018 case TARGET_NR_setresgid:
12019 return get_errno(sys_setresgid(low2highgid(arg1),
12020 low2highgid(arg2),
12021 low2highgid(arg3)));
12022 #endif
12023 #ifdef TARGET_NR_getresgid
12024 case TARGET_NR_getresgid:
12025 {
12026 gid_t rgid, egid, sgid;
12027 ret = get_errno(getresgid(&rgid, &egid, &sgid));
12028 if (!is_error(ret)) {
12029 if (put_user_id(high2lowgid(rgid), arg1)
12030 || put_user_id(high2lowgid(egid), arg2)
12031 || put_user_id(high2lowgid(sgid), arg3))
12032 return -TARGET_EFAULT;
12033 }
12034 }
12035 return ret;
12036 #endif
12037 #ifdef TARGET_NR_chown
12038 case TARGET_NR_chown:
12039 if (!(p = lock_user_string(arg1)))
12040 return -TARGET_EFAULT;
12041 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12042 unlock_user(p, arg1, 0);
12043 return ret;
12044 #endif
12045 case TARGET_NR_setuid:
12046 return get_errno(sys_setuid(low2highuid(arg1)));
12047 case TARGET_NR_setgid:
12048 return get_errno(sys_setgid(low2highgid(arg1)));
12049 case TARGET_NR_setfsuid:
12050 return get_errno(setfsuid(arg1));
12051 case TARGET_NR_setfsgid:
12052 return get_errno(setfsgid(arg1));
12053
12054 #ifdef TARGET_NR_lchown32
12055 case TARGET_NR_lchown32:
12056 if (!(p = lock_user_string(arg1)))
12057 return -TARGET_EFAULT;
12058 ret = get_errno(lchown(p, arg2, arg3));
12059 unlock_user(p, arg1, 0);
12060 return ret;
12061 #endif
12062 #ifdef TARGET_NR_getuid32
12063 case TARGET_NR_getuid32:
12064 return get_errno(getuid());
12065 #endif
12066
12067 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12068 /* Alpha specific */
12069 case TARGET_NR_getxuid:
12070 {
12071 uid_t euid;
12072 euid=geteuid();
12073 cpu_env->ir[IR_A4]=euid;
12074 }
12075 return get_errno(getuid());
12076 #endif
12077 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12078 /* Alpha specific */
12079 case TARGET_NR_getxgid:
12080 {
12081 uid_t egid;
12082 egid=getegid();
12083 cpu_env->ir[IR_A4]=egid;
12084 }
12085 return get_errno(getgid());
12086 #endif
12087 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12088 /* Alpha specific */
12089 case TARGET_NR_osf_getsysinfo:
12090 ret = -TARGET_EOPNOTSUPP;
12091 switch (arg1) {
12092 case TARGET_GSI_IEEE_FP_CONTROL:
12093 {
12094 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12095 uint64_t swcr = cpu_env->swcr;
12096
12097 swcr &= ~SWCR_STATUS_MASK;
12098 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12099
12100 if (put_user_u64 (swcr, arg2))
12101 return -TARGET_EFAULT;
12102 ret = 0;
12103 }
12104 break;
12105
12106 /* case GSI_IEEE_STATE_AT_SIGNAL:
12107 -- Not implemented in linux kernel.
12108 case GSI_UACPROC:
12109 -- Retrieves current unaligned access state; not much used.
12110 case GSI_PROC_TYPE:
12111 -- Retrieves implver information; surely not used.
12112 case GSI_GET_HWRPB:
12113 -- Grabs a copy of the HWRPB; surely not used.
12114 */
12115 }
12116 return ret;
12117 #endif
12118 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12119 /* Alpha specific */
12120 case TARGET_NR_osf_setsysinfo:
12121 ret = -TARGET_EOPNOTSUPP;
12122 switch (arg1) {
12123 case TARGET_SSI_IEEE_FP_CONTROL:
12124 {
12125 uint64_t swcr, fpcr;
12126
12127 if (get_user_u64 (swcr, arg2)) {
12128 return -TARGET_EFAULT;
12129 }
12130
12131 /*
12132 * The kernel calls swcr_update_status to update the
12133 * status bits from the fpcr at every point that it
12134 * could be queried. Therefore, we store the status
12135 * bits only in FPCR.
12136 */
12137 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12138
12139 fpcr = cpu_alpha_load_fpcr(cpu_env);
12140 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12141 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12142 cpu_alpha_store_fpcr(cpu_env, fpcr);
12143 ret = 0;
12144 }
12145 break;
12146
12147 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12148 {
12149 uint64_t exc, fpcr, fex;
12150
12151 if (get_user_u64(exc, arg2)) {
12152 return -TARGET_EFAULT;
12153 }
12154 exc &= SWCR_STATUS_MASK;
12155 fpcr = cpu_alpha_load_fpcr(cpu_env);
12156
12157 /* Old exceptions are not signaled. */
12158 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12159 fex = exc & ~fex;
12160 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12161 fex &= (cpu_env)->swcr;
12162
12163 /* Update the hardware fpcr. */
12164 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12165 cpu_alpha_store_fpcr(cpu_env, fpcr);
12166
12167 if (fex) {
12168 int si_code = TARGET_FPE_FLTUNK;
12169 target_siginfo_t info;
12170
12171 if (fex & SWCR_TRAP_ENABLE_DNO) {
12172 si_code = TARGET_FPE_FLTUND;
12173 }
12174 if (fex & SWCR_TRAP_ENABLE_INE) {
12175 si_code = TARGET_FPE_FLTRES;
12176 }
12177 if (fex & SWCR_TRAP_ENABLE_UNF) {
12178 si_code = TARGET_FPE_FLTUND;
12179 }
12180 if (fex & SWCR_TRAP_ENABLE_OVF) {
12181 si_code = TARGET_FPE_FLTOVF;
12182 }
12183 if (fex & SWCR_TRAP_ENABLE_DZE) {
12184 si_code = TARGET_FPE_FLTDIV;
12185 }
12186 if (fex & SWCR_TRAP_ENABLE_INV) {
12187 si_code = TARGET_FPE_FLTINV;
12188 }
12189
12190 info.si_signo = SIGFPE;
12191 info.si_errno = 0;
12192 info.si_code = si_code;
12193 info._sifields._sigfault._addr = (cpu_env)->pc;
12194 queue_signal(cpu_env, info.si_signo,
12195 QEMU_SI_FAULT, &info);
12196 }
12197 ret = 0;
12198 }
12199 break;
12200
12201 /* case SSI_NVPAIRS:
12202 -- Used with SSIN_UACPROC to enable unaligned accesses.
12203 case SSI_IEEE_STATE_AT_SIGNAL:
12204 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12205 -- Not implemented in linux kernel
12206 */
12207 }
12208 return ret;
12209 #endif
12210 #ifdef TARGET_NR_osf_sigprocmask
12211 /* Alpha specific. */
12212 case TARGET_NR_osf_sigprocmask:
12213 {
12214 abi_ulong mask;
12215 int how;
12216 sigset_t set, oldset;
12217
12218 switch(arg1) {
12219 case TARGET_SIG_BLOCK:
12220 how = SIG_BLOCK;
12221 break;
12222 case TARGET_SIG_UNBLOCK:
12223 how = SIG_UNBLOCK;
12224 break;
12225 case TARGET_SIG_SETMASK:
12226 how = SIG_SETMASK;
12227 break;
12228 default:
12229 return -TARGET_EINVAL;
12230 }
12231 mask = arg2;
12232 target_to_host_old_sigset(&set, &mask);
12233 ret = do_sigprocmask(how, &set, &oldset);
12234 if (!ret) {
12235 host_to_target_old_sigset(&mask, &oldset);
12236 ret = mask;
12237 }
12238 }
12239 return ret;
12240 #endif
12241
12242 #ifdef TARGET_NR_getgid32
12243 case TARGET_NR_getgid32:
12244 return get_errno(getgid());
12245 #endif
12246 #ifdef TARGET_NR_geteuid32
12247 case TARGET_NR_geteuid32:
12248 return get_errno(geteuid());
12249 #endif
12250 #ifdef TARGET_NR_getegid32
12251 case TARGET_NR_getegid32:
12252 return get_errno(getegid());
12253 #endif
12254 #ifdef TARGET_NR_setreuid32
12255 case TARGET_NR_setreuid32:
12256 return get_errno(setreuid(arg1, arg2));
12257 #endif
12258 #ifdef TARGET_NR_setregid32
12259 case TARGET_NR_setregid32:
12260 return get_errno(setregid(arg1, arg2));
12261 #endif
12262 #ifdef TARGET_NR_getgroups32
12263 case TARGET_NR_getgroups32:
12264 { /* the same code as for TARGET_NR_getgroups */
12265 int gidsetsize = arg1;
12266 uint32_t *target_grouplist;
12267 g_autofree gid_t *grouplist = NULL;
12268 int i;
12269
12270 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12271 return -TARGET_EINVAL;
12272 }
12273 if (gidsetsize > 0) {
12274 grouplist = g_try_new(gid_t, gidsetsize);
12275 if (!grouplist) {
12276 return -TARGET_ENOMEM;
12277 }
12278 }
12279 ret = get_errno(getgroups(gidsetsize, grouplist));
12280 if (!is_error(ret) && gidsetsize > 0) {
12281 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12282 gidsetsize * 4, 0);
12283 if (!target_grouplist) {
12284 return -TARGET_EFAULT;
12285 }
12286 for (i = 0; i < ret; i++) {
12287 target_grouplist[i] = tswap32(grouplist[i]);
12288 }
12289 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12290 }
12291 return ret;
12292 }
12293 #endif
12294 #ifdef TARGET_NR_setgroups32
12295 case TARGET_NR_setgroups32:
12296 { /* the same code as for TARGET_NR_setgroups */
12297 int gidsetsize = arg1;
12298 uint32_t *target_grouplist;
12299 g_autofree gid_t *grouplist = NULL;
12300 int i;
12301
12302 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12303 return -TARGET_EINVAL;
12304 }
12305 if (gidsetsize > 0) {
12306 grouplist = g_try_new(gid_t, gidsetsize);
12307 if (!grouplist) {
12308 return -TARGET_ENOMEM;
12309 }
12310 target_grouplist = lock_user(VERIFY_READ, arg2,
12311 gidsetsize * 4, 1);
12312 if (!target_grouplist) {
12313 return -TARGET_EFAULT;
12314 }
12315 for (i = 0; i < gidsetsize; i++) {
12316 grouplist[i] = tswap32(target_grouplist[i]);
12317 }
12318 unlock_user(target_grouplist, arg2, 0);
12319 }
12320 return get_errno(setgroups(gidsetsize, grouplist));
12321 }
12322 #endif
12323 #ifdef TARGET_NR_fchown32
12324 case TARGET_NR_fchown32:
12325 return get_errno(fchown(arg1, arg2, arg3));
12326 #endif
12327 #ifdef TARGET_NR_setresuid32
12328 case TARGET_NR_setresuid32:
12329 return get_errno(sys_setresuid(arg1, arg2, arg3));
12330 #endif
12331 #ifdef TARGET_NR_getresuid32
12332 case TARGET_NR_getresuid32:
12333 {
12334 uid_t ruid, euid, suid;
12335 ret = get_errno(getresuid(&ruid, &euid, &suid));
12336 if (!is_error(ret)) {
12337 if (put_user_u32(ruid, arg1)
12338 || put_user_u32(euid, arg2)
12339 || put_user_u32(suid, arg3))
12340 return -TARGET_EFAULT;
12341 }
12342 }
12343 return ret;
12344 #endif
12345 #ifdef TARGET_NR_setresgid32
12346 case TARGET_NR_setresgid32:
12347 return get_errno(sys_setresgid(arg1, arg2, arg3));
12348 #endif
12349 #ifdef TARGET_NR_getresgid32
12350 case TARGET_NR_getresgid32:
12351 {
12352 gid_t rgid, egid, sgid;
12353 ret = get_errno(getresgid(&rgid, &egid, &sgid));
12354 if (!is_error(ret)) {
12355 if (put_user_u32(rgid, arg1)
12356 || put_user_u32(egid, arg2)
12357 || put_user_u32(sgid, arg3))
12358 return -TARGET_EFAULT;
12359 }
12360 }
12361 return ret;
12362 #endif
12363 #ifdef TARGET_NR_chown32
12364 case TARGET_NR_chown32:
12365 if (!(p = lock_user_string(arg1)))
12366 return -TARGET_EFAULT;
12367 ret = get_errno(chown(p, arg2, arg3));
12368 unlock_user(p, arg1, 0);
12369 return ret;
12370 #endif
12371 #ifdef TARGET_NR_setuid32
12372 case TARGET_NR_setuid32:
12373 return get_errno(sys_setuid(arg1));
12374 #endif
12375 #ifdef TARGET_NR_setgid32
12376 case TARGET_NR_setgid32:
12377 return get_errno(sys_setgid(arg1));
12378 #endif
12379 #ifdef TARGET_NR_setfsuid32
12380 case TARGET_NR_setfsuid32:
12381 return get_errno(setfsuid(arg1));
12382 #endif
12383 #ifdef TARGET_NR_setfsgid32
12384 case TARGET_NR_setfsgid32:
12385 return get_errno(setfsgid(arg1));
12386 #endif
12387 #ifdef TARGET_NR_mincore
12388 case TARGET_NR_mincore:
12389 {
12390 void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12391 if (!a) {
12392 return -TARGET_ENOMEM;
12393 }
12394 p = lock_user_string(arg3);
12395 if (!p) {
12396 ret = -TARGET_EFAULT;
12397 } else {
12398 ret = get_errno(mincore(a, arg2, p));
12399 unlock_user(p, arg3, ret);
12400 }
12401 unlock_user(a, arg1, 0);
12402 }
12403 return ret;
12404 #endif
12405 #ifdef TARGET_NR_arm_fadvise64_64
12406 case TARGET_NR_arm_fadvise64_64:
12407 /* arm_fadvise64_64 looks like fadvise64_64 but
12408 * with different argument order: fd, advice, offset, len
12409 * rather than the usual fd, offset, len, advice.
12410 * Note that offset and len are both 64-bit so appear as
12411 * pairs of 32-bit registers.
12412 */
12413 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12414 target_offset64(arg5, arg6), arg2);
12415 return -host_to_target_errno(ret);
12416 #endif
12417
12418 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12419
12420 #ifdef TARGET_NR_fadvise64_64
12421 case TARGET_NR_fadvise64_64:
12422 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12423 /* 6 args: fd, advice, offset (high, low), len (high, low) */
12424 ret = arg2;
12425 arg2 = arg3;
12426 arg3 = arg4;
12427 arg4 = arg5;
12428 arg5 = arg6;
12429 arg6 = ret;
12430 #else
12431 /* 6 args: fd, offset (high, low), len (high, low), advice */
12432 if (regpairs_aligned(cpu_env, num)) {
12433 /* offset is in (3,4), len in (5,6) and advice in 7 */
12434 arg2 = arg3;
12435 arg3 = arg4;
12436 arg4 = arg5;
12437 arg5 = arg6;
12438 arg6 = arg7;
12439 }
12440 #endif
12441 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12442 target_offset64(arg4, arg5), arg6);
12443 return -host_to_target_errno(ret);
12444 #endif
12445
12446 #ifdef TARGET_NR_fadvise64
12447 case TARGET_NR_fadvise64:
12448 /* 5 args: fd, offset (high, low), len, advice */
12449 if (regpairs_aligned(cpu_env, num)) {
12450 /* offset is in (3,4), len in 5 and advice in 6 */
12451 arg2 = arg3;
12452 arg3 = arg4;
12453 arg4 = arg5;
12454 arg5 = arg6;
12455 }
12456 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12457 return -host_to_target_errno(ret);
12458 #endif
12459
12460 #else /* not a 32-bit ABI */
12461 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12462 #ifdef TARGET_NR_fadvise64_64
12463 case TARGET_NR_fadvise64_64:
12464 #endif
12465 #ifdef TARGET_NR_fadvise64
12466 case TARGET_NR_fadvise64:
12467 #endif
12468 #ifdef TARGET_S390X
12469 switch (arg4) {
12470 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12471 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12472 case 6: arg4 = POSIX_FADV_DONTNEED; break;
12473 case 7: arg4 = POSIX_FADV_NOREUSE; break;
12474 default: break;
12475 }
12476 #endif
12477 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12478 #endif
12479 #endif /* end of 64-bit ABI fadvise handling */
12480
12481 #ifdef TARGET_NR_madvise
12482 case TARGET_NR_madvise:
12483 return target_madvise(arg1, arg2, arg3);
12484 #endif
12485 #ifdef TARGET_NR_fcntl64
12486 case TARGET_NR_fcntl64:
12487 {
12488 int cmd;
12489 struct flock64 fl;
12490 from_flock64_fn *copyfrom = copy_from_user_flock64;
12491 to_flock64_fn *copyto = copy_to_user_flock64;
12492
12493 #ifdef TARGET_ARM
12494 if (!cpu_env->eabi) {
12495 copyfrom = copy_from_user_oabi_flock64;
12496 copyto = copy_to_user_oabi_flock64;
12497 }
12498 #endif
12499
12500 cmd = target_to_host_fcntl_cmd(arg2);
12501 if (cmd == -TARGET_EINVAL) {
12502 return cmd;
12503 }
12504
12505 switch(arg2) {
12506 case TARGET_F_GETLK64:
12507 ret = copyfrom(&fl, arg3);
12508 if (ret) {
12509 break;
12510 }
12511 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12512 if (ret == 0) {
12513 ret = copyto(arg3, &fl);
12514 }
12515 break;
12516
12517 case TARGET_F_SETLK64:
12518 case TARGET_F_SETLKW64:
12519 ret = copyfrom(&fl, arg3);
12520 if (ret) {
12521 break;
12522 }
12523 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12524 break;
12525 default:
12526 ret = do_fcntl(arg1, arg2, arg3);
12527 break;
12528 }
12529 return ret;
12530 }
12531 #endif
12532 #ifdef TARGET_NR_cacheflush
12533 case TARGET_NR_cacheflush:
12534 /* self-modifying code is handled automatically, so nothing needed */
12535 return 0;
12536 #endif
12537 #ifdef TARGET_NR_getpagesize
12538 case TARGET_NR_getpagesize:
12539 return TARGET_PAGE_SIZE;
12540 #endif
12541 case TARGET_NR_gettid:
12542 return get_errno(sys_gettid());
12543 #ifdef TARGET_NR_readahead
12544 case TARGET_NR_readahead:
12545 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12546 if (regpairs_aligned(cpu_env, num)) {
12547 arg2 = arg3;
12548 arg3 = arg4;
12549 arg4 = arg5;
12550 }
12551 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12552 #else
12553 ret = get_errno(readahead(arg1, arg2, arg3));
12554 #endif
12555 return ret;
12556 #endif
12557 #ifdef CONFIG_ATTR
12558 #ifdef TARGET_NR_setxattr
12559 case TARGET_NR_listxattr:
12560 case TARGET_NR_llistxattr:
12561 {
12562 void *p, *b = 0;
12563 if (arg2) {
12564 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12565 if (!b) {
12566 return -TARGET_EFAULT;
12567 }
12568 }
12569 p = lock_user_string(arg1);
12570 if (p) {
12571 if (num == TARGET_NR_listxattr) {
12572 ret = get_errno(listxattr(p, b, arg3));
12573 } else {
12574 ret = get_errno(llistxattr(p, b, arg3));
12575 }
12576 } else {
12577 ret = -TARGET_EFAULT;
12578 }
12579 unlock_user(p, arg1, 0);
12580 unlock_user(b, arg2, arg3);
12581 return ret;
12582 }
12583 case TARGET_NR_flistxattr:
12584 {
12585 void *b = 0;
12586 if (arg2) {
12587 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12588 if (!b) {
12589 return -TARGET_EFAULT;
12590 }
12591 }
12592 ret = get_errno(flistxattr(arg1, b, arg3));
12593 unlock_user(b, arg2, arg3);
12594 return ret;
12595 }
12596 case TARGET_NR_setxattr:
12597 case TARGET_NR_lsetxattr:
12598 {
12599 void *p, *n, *v = 0;
12600 if (arg3) {
12601 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12602 if (!v) {
12603 return -TARGET_EFAULT;
12604 }
12605 }
12606 p = lock_user_string(arg1);
12607 n = lock_user_string(arg2);
12608 if (p && n) {
12609 if (num == TARGET_NR_setxattr) {
12610 ret = get_errno(setxattr(p, n, v, arg4, arg5));
12611 } else {
12612 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12613 }
12614 } else {
12615 ret = -TARGET_EFAULT;
12616 }
12617 unlock_user(p, arg1, 0);
12618 unlock_user(n, arg2, 0);
12619 unlock_user(v, arg3, 0);
12620 }
12621 return ret;
12622 case TARGET_NR_fsetxattr:
12623 {
12624 void *n, *v = 0;
12625 if (arg3) {
12626 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12627 if (!v) {
12628 return -TARGET_EFAULT;
12629 }
12630 }
12631 n = lock_user_string(arg2);
12632 if (n) {
12633 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12634 } else {
12635 ret = -TARGET_EFAULT;
12636 }
12637 unlock_user(n, arg2, 0);
12638 unlock_user(v, arg3, 0);
12639 }
12640 return ret;
12641 case TARGET_NR_getxattr:
12642 case TARGET_NR_lgetxattr:
12643 {
12644 void *p, *n, *v = 0;
12645 if (arg3) {
12646 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12647 if (!v) {
12648 return -TARGET_EFAULT;
12649 }
12650 }
12651 p = lock_user_string(arg1);
12652 n = lock_user_string(arg2);
12653 if (p && n) {
12654 if (num == TARGET_NR_getxattr) {
12655 ret = get_errno(getxattr(p, n, v, arg4));
12656 } else {
12657 ret = get_errno(lgetxattr(p, n, v, arg4));
12658 }
12659 } else {
12660 ret = -TARGET_EFAULT;
12661 }
12662 unlock_user(p, arg1, 0);
12663 unlock_user(n, arg2, 0);
12664 unlock_user(v, arg3, arg4);
12665 }
12666 return ret;
12667 case TARGET_NR_fgetxattr:
12668 {
12669 void *n, *v = 0;
12670 if (arg3) {
12671 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12672 if (!v) {
12673 return -TARGET_EFAULT;
12674 }
12675 }
12676 n = lock_user_string(arg2);
12677 if (n) {
12678 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12679 } else {
12680 ret = -TARGET_EFAULT;
12681 }
12682 unlock_user(n, arg2, 0);
12683 unlock_user(v, arg3, arg4);
12684 }
12685 return ret;
12686 case TARGET_NR_removexattr:
12687 case TARGET_NR_lremovexattr:
12688 {
12689 void *p, *n;
12690 p = lock_user_string(arg1);
12691 n = lock_user_string(arg2);
12692 if (p && n) {
12693 if (num == TARGET_NR_removexattr) {
12694 ret = get_errno(removexattr(p, n));
12695 } else {
12696 ret = get_errno(lremovexattr(p, n));
12697 }
12698 } else {
12699 ret = -TARGET_EFAULT;
12700 }
12701 unlock_user(p, arg1, 0);
12702 unlock_user(n, arg2, 0);
12703 }
12704 return ret;
12705 case TARGET_NR_fremovexattr:
12706 {
12707 void *n;
12708 n = lock_user_string(arg2);
12709 if (n) {
12710 ret = get_errno(fremovexattr(arg1, n));
12711 } else {
12712 ret = -TARGET_EFAULT;
12713 }
12714 unlock_user(n, arg2, 0);
12715 }
12716 return ret;
12717 #endif
12718 #endif /* CONFIG_ATTR */
12719 #ifdef TARGET_NR_set_thread_area
12720 case TARGET_NR_set_thread_area:
12721 #if defined(TARGET_MIPS)
12722 cpu_env->active_tc.CP0_UserLocal = arg1;
12723 return 0;
12724 #elif defined(TARGET_CRIS)
12725 if (arg1 & 0xff)
12726 ret = -TARGET_EINVAL;
12727 else {
12728 cpu_env->pregs[PR_PID] = arg1;
12729 ret = 0;
12730 }
12731 return ret;
12732 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12733 return do_set_thread_area(cpu_env, arg1);
12734 #elif defined(TARGET_M68K)
12735 {
12736 TaskState *ts = cpu->opaque;
12737 ts->tp_value = arg1;
12738 return 0;
12739 }
12740 #else
12741 return -TARGET_ENOSYS;
12742 #endif
12743 #endif
12744 #ifdef TARGET_NR_get_thread_area
12745 case TARGET_NR_get_thread_area:
12746 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12747 return do_get_thread_area(cpu_env, arg1);
12748 #elif defined(TARGET_M68K)
12749 {
12750 TaskState *ts = cpu->opaque;
12751 return ts->tp_value;
12752 }
12753 #else
12754 return -TARGET_ENOSYS;
12755 #endif
12756 #endif
12757 #ifdef TARGET_NR_getdomainname
12758 case TARGET_NR_getdomainname:
12759 return -TARGET_ENOSYS;
12760 #endif
12761
12762 #ifdef TARGET_NR_clock_settime
12763 case TARGET_NR_clock_settime:
12764 {
12765 struct timespec ts;
12766
12767 ret = target_to_host_timespec(&ts, arg2);
12768 if (!is_error(ret)) {
12769 ret = get_errno(clock_settime(arg1, &ts));
12770 }
12771 return ret;
12772 }
12773 #endif
12774 #ifdef TARGET_NR_clock_settime64
12775 case TARGET_NR_clock_settime64:
12776 {
12777 struct timespec ts;
12778
12779 ret = target_to_host_timespec64(&ts, arg2);
12780 if (!is_error(ret)) {
12781 ret = get_errno(clock_settime(arg1, &ts));
12782 }
12783 return ret;
12784 }
12785 #endif
12786 #ifdef TARGET_NR_clock_gettime
12787 case TARGET_NR_clock_gettime:
12788 {
12789 struct timespec ts;
12790 ret = get_errno(clock_gettime(arg1, &ts));
12791 if (!is_error(ret)) {
12792 ret = host_to_target_timespec(arg2, &ts);
12793 }
12794 return ret;
12795 }
12796 #endif
12797 #ifdef TARGET_NR_clock_gettime64
12798 case TARGET_NR_clock_gettime64:
12799 {
12800 struct timespec ts;
12801 ret = get_errno(clock_gettime(arg1, &ts));
12802 if (!is_error(ret)) {
12803 ret = host_to_target_timespec64(arg2, &ts);
12804 }
12805 return ret;
12806 }
12807 #endif
12808 #ifdef TARGET_NR_clock_getres
12809 case TARGET_NR_clock_getres:
12810 {
12811 struct timespec ts;
12812 ret = get_errno(clock_getres(arg1, &ts));
12813 if (!is_error(ret)) {
12814 host_to_target_timespec(arg2, &ts);
12815 }
12816 return ret;
12817 }
12818 #endif
12819 #ifdef TARGET_NR_clock_getres_time64
12820 case TARGET_NR_clock_getres_time64:
12821 {
12822 struct timespec ts;
12823 ret = get_errno(clock_getres(arg1, &ts));
12824 if (!is_error(ret)) {
12825 host_to_target_timespec64(arg2, &ts);
12826 }
12827 return ret;
12828 }
12829 #endif
12830 #ifdef TARGET_NR_clock_nanosleep
12831 case TARGET_NR_clock_nanosleep:
12832 {
12833 struct timespec ts;
12834 if (target_to_host_timespec(&ts, arg3)) {
12835 return -TARGET_EFAULT;
12836 }
12837 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12838 &ts, arg4 ? &ts : NULL));
12839 /*
12840 * if the call is interrupted by a signal handler, it fails
12841 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12842 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12843 */
12844 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12845 host_to_target_timespec(arg4, &ts)) {
12846 return -TARGET_EFAULT;
12847 }
12848
12849 return ret;
12850 }
12851 #endif
12852 #ifdef TARGET_NR_clock_nanosleep_time64
12853 case TARGET_NR_clock_nanosleep_time64:
12854 {
12855 struct timespec ts;
12856
12857 if (target_to_host_timespec64(&ts, arg3)) {
12858 return -TARGET_EFAULT;
12859 }
12860
12861 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12862 &ts, arg4 ? &ts : NULL));
12863
12864 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12865 host_to_target_timespec64(arg4, &ts)) {
12866 return -TARGET_EFAULT;
12867 }
12868 return ret;
12869 }
12870 #endif
12871
12872 #if defined(TARGET_NR_set_tid_address)
12873 case TARGET_NR_set_tid_address:
12874 {
12875 TaskState *ts = cpu->opaque;
12876 ts->child_tidptr = arg1;
12877 /* do not call host set_tid_address() syscall, instead return tid() */
12878 return get_errno(sys_gettid());
12879 }
12880 #endif
12881
12882 case TARGET_NR_tkill:
12883 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12884
12885 case TARGET_NR_tgkill:
12886 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12887 target_to_host_signal(arg3)));
12888
12889 #ifdef TARGET_NR_set_robust_list
12890 case TARGET_NR_set_robust_list:
12891 case TARGET_NR_get_robust_list:
12892 /* The ABI for supporting robust futexes has userspace pass
12893 * the kernel a pointer to a linked list which is updated by
12894 * userspace after the syscall; the list is walked by the kernel
12895 * when the thread exits. Since the linked list in QEMU guest
12896 * memory isn't a valid linked list for the host and we have
12897 * no way to reliably intercept the thread-death event, we can't
12898 * support these. Silently return ENOSYS so that guest userspace
12899 * falls back to a non-robust futex implementation (which should
12900 * be OK except in the corner case of the guest crashing while
12901 * holding a mutex that is shared with another process via
12902 * shared memory).
12903 */
12904 return -TARGET_ENOSYS;
12905 #endif
12906
12907 #if defined(TARGET_NR_utimensat)
12908 case TARGET_NR_utimensat:
12909 {
12910 struct timespec *tsp, ts[2];
12911 if (!arg3) {
12912 tsp = NULL;
12913 } else {
12914 if (target_to_host_timespec(ts, arg3)) {
12915 return -TARGET_EFAULT;
12916 }
12917 if (target_to_host_timespec(ts + 1, arg3 +
12918 sizeof(struct target_timespec))) {
12919 return -TARGET_EFAULT;
12920 }
12921 tsp = ts;
12922 }
12923 if (!arg2)
12924 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12925 else {
12926 if (!(p = lock_user_string(arg2))) {
12927 return -TARGET_EFAULT;
12928 }
12929 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12930 unlock_user(p, arg2, 0);
12931 }
12932 }
12933 return ret;
12934 #endif
12935 #ifdef TARGET_NR_utimensat_time64
12936 case TARGET_NR_utimensat_time64:
12937 {
12938 struct timespec *tsp, ts[2];
12939 if (!arg3) {
12940 tsp = NULL;
12941 } else {
12942 if (target_to_host_timespec64(ts, arg3)) {
12943 return -TARGET_EFAULT;
12944 }
12945 if (target_to_host_timespec64(ts + 1, arg3 +
12946 sizeof(struct target__kernel_timespec))) {
12947 return -TARGET_EFAULT;
12948 }
12949 tsp = ts;
12950 }
12951 if (!arg2)
12952 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12953 else {
12954 p = lock_user_string(arg2);
12955 if (!p) {
12956 return -TARGET_EFAULT;
12957 }
12958 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12959 unlock_user(p, arg2, 0);
12960 }
12961 }
12962 return ret;
12963 #endif
12964 #ifdef TARGET_NR_futex
12965 case TARGET_NR_futex:
12966 return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12967 #endif
12968 #ifdef TARGET_NR_futex_time64
12969 case TARGET_NR_futex_time64:
12970 return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12971 #endif
12972 #ifdef CONFIG_INOTIFY
12973 #if defined(TARGET_NR_inotify_init)
12974 case TARGET_NR_inotify_init:
12975 ret = get_errno(inotify_init());
12976 if (ret >= 0) {
12977 fd_trans_register(ret, &target_inotify_trans);
12978 }
12979 return ret;
12980 #endif
12981 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12982 case TARGET_NR_inotify_init1:
12983 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12984 fcntl_flags_tbl)));
12985 if (ret >= 0) {
12986 fd_trans_register(ret, &target_inotify_trans);
12987 }
12988 return ret;
12989 #endif
12990 #if defined(TARGET_NR_inotify_add_watch)
12991 case TARGET_NR_inotify_add_watch:
12992 p = lock_user_string(arg2);
12993 ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12994 unlock_user(p, arg2, 0);
12995 return ret;
12996 #endif
12997 #if defined(TARGET_NR_inotify_rm_watch)
12998 case TARGET_NR_inotify_rm_watch:
12999 return get_errno(inotify_rm_watch(arg1, arg2));
13000 #endif
13001 #endif
13002
13003 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13004 case TARGET_NR_mq_open:
13005 {
13006 struct mq_attr posix_mq_attr;
13007 struct mq_attr *pposix_mq_attr;
13008 int host_flags;
13009
13010 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13011 pposix_mq_attr = NULL;
13012 if (arg4) {
13013 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13014 return -TARGET_EFAULT;
13015 }
13016 pposix_mq_attr = &posix_mq_attr;
13017 }
13018 p = lock_user_string(arg1 - 1);
13019 if (!p) {
13020 return -TARGET_EFAULT;
13021 }
13022 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13023 unlock_user (p, arg1, 0);
13024 }
13025 return ret;
13026
13027 case TARGET_NR_mq_unlink:
13028 p = lock_user_string(arg1 - 1);
13029 if (!p) {
13030 return -TARGET_EFAULT;
13031 }
13032 ret = get_errno(mq_unlink(p));
13033 unlock_user (p, arg1, 0);
13034 return ret;
13035
13036 #ifdef TARGET_NR_mq_timedsend
13037 case TARGET_NR_mq_timedsend:
13038 {
13039 struct timespec ts;
13040
13041 p = lock_user (VERIFY_READ, arg2, arg3, 1);
13042 if (arg5 != 0) {
13043 if (target_to_host_timespec(&ts, arg5)) {
13044 return -TARGET_EFAULT;
13045 }
13046 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13047 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13048 return -TARGET_EFAULT;
13049 }
13050 } else {
13051 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13052 }
13053 unlock_user (p, arg2, arg3);
13054 }
13055 return ret;
13056 #endif
13057 #ifdef TARGET_NR_mq_timedsend_time64
13058 case TARGET_NR_mq_timedsend_time64:
13059 {
13060 struct timespec ts;
13061
13062 p = lock_user(VERIFY_READ, arg2, arg3, 1);
13063 if (arg5 != 0) {
13064 if (target_to_host_timespec64(&ts, arg5)) {
13065 return -TARGET_EFAULT;
13066 }
13067 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13068 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13069 return -TARGET_EFAULT;
13070 }
13071 } else {
13072 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13073 }
13074 unlock_user(p, arg2, arg3);
13075 }
13076 return ret;
13077 #endif
13078
13079 #ifdef TARGET_NR_mq_timedreceive
13080 case TARGET_NR_mq_timedreceive:
13081 {
13082 struct timespec ts;
13083 unsigned int prio;
13084
13085 p = lock_user (VERIFY_READ, arg2, arg3, 1);
13086 if (arg5 != 0) {
13087 if (target_to_host_timespec(&ts, arg5)) {
13088 return -TARGET_EFAULT;
13089 }
13090 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13091 &prio, &ts));
13092 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13093 return -TARGET_EFAULT;
13094 }
13095 } else {
13096 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13097 &prio, NULL));
13098 }
13099 unlock_user (p, arg2, arg3);
13100 if (arg4 != 0)
13101 put_user_u32(prio, arg4);
13102 }
13103 return ret;
13104 #endif
13105 #ifdef TARGET_NR_mq_timedreceive_time64
13106 case TARGET_NR_mq_timedreceive_time64:
13107 {
13108 struct timespec ts;
13109 unsigned int prio;
13110
13111 p = lock_user(VERIFY_READ, arg2, arg3, 1);
13112 if (arg5 != 0) {
13113 if (target_to_host_timespec64(&ts, arg5)) {
13114 return -TARGET_EFAULT;
13115 }
13116 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13117 &prio, &ts));
13118 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13119 return -TARGET_EFAULT;
13120 }
13121 } else {
13122 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13123 &prio, NULL));
13124 }
13125 unlock_user(p, arg2, arg3);
13126 if (arg4 != 0) {
13127 put_user_u32(prio, arg4);
13128 }
13129 }
13130 return ret;
13131 #endif
13132
13133 /* Not implemented for now... */
13134 /* case TARGET_NR_mq_notify: */
13135 /* break; */
13136
13137 case TARGET_NR_mq_getsetattr:
13138 {
13139 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13140 ret = 0;
13141 if (arg2 != 0) {
13142 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13143 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13144 &posix_mq_attr_out));
13145 } else if (arg3 != 0) {
13146 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13147 }
13148 if (ret == 0 && arg3 != 0) {
13149 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13150 }
13151 }
13152 return ret;
13153 #endif
13154
13155 #ifdef CONFIG_SPLICE
13156 #ifdef TARGET_NR_tee
13157 case TARGET_NR_tee:
13158 {
13159 ret = get_errno(tee(arg1,arg2,arg3,arg4));
13160 }
13161 return ret;
13162 #endif
13163 #ifdef TARGET_NR_splice
13164 case TARGET_NR_splice:
13165 {
13166 loff_t loff_in, loff_out;
13167 loff_t *ploff_in = NULL, *ploff_out = NULL;
13168 if (arg2) {
13169 if (get_user_u64(loff_in, arg2)) {
13170 return -TARGET_EFAULT;
13171 }
13172 ploff_in = &loff_in;
13173 }
13174 if (arg4) {
13175 if (get_user_u64(loff_out, arg4)) {
13176 return -TARGET_EFAULT;
13177 }
13178 ploff_out = &loff_out;
13179 }
13180 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13181 if (arg2) {
13182 if (put_user_u64(loff_in, arg2)) {
13183 return -TARGET_EFAULT;
13184 }
13185 }
13186 if (arg4) {
13187 if (put_user_u64(loff_out, arg4)) {
13188 return -TARGET_EFAULT;
13189 }
13190 }
13191 }
13192 return ret;
13193 #endif
13194 #ifdef TARGET_NR_vmsplice
13195 case TARGET_NR_vmsplice:
13196 {
13197 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13198 if (vec != NULL) {
13199 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13200 unlock_iovec(vec, arg2, arg3, 0);
13201 } else {
13202 ret = -host_to_target_errno(errno);
13203 }
13204 }
13205 return ret;
13206 #endif
13207 #endif /* CONFIG_SPLICE */
13208 #ifdef CONFIG_EVENTFD
13209 #if defined(TARGET_NR_eventfd)
13210 case TARGET_NR_eventfd:
13211 ret = get_errno(eventfd(arg1, 0));
13212 if (ret >= 0) {
13213 fd_trans_register(ret, &target_eventfd_trans);
13214 }
13215 return ret;
13216 #endif
13217 #if defined(TARGET_NR_eventfd2)
13218 case TARGET_NR_eventfd2:
13219 {
13220 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13221 if (arg2 & TARGET_O_NONBLOCK) {
13222 host_flags |= O_NONBLOCK;
13223 }
13224 if (arg2 & TARGET_O_CLOEXEC) {
13225 host_flags |= O_CLOEXEC;
13226 }
13227 ret = get_errno(eventfd(arg1, host_flags));
13228 if (ret >= 0) {
13229 fd_trans_register(ret, &target_eventfd_trans);
13230 }
13231 return ret;
13232 }
13233 #endif
13234 #endif /* CONFIG_EVENTFD */
13235 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13236 case TARGET_NR_fallocate:
13237 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13238 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13239 target_offset64(arg5, arg6)));
13240 #else
13241 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13242 #endif
13243 return ret;
13244 #endif
13245 #if defined(CONFIG_SYNC_FILE_RANGE)
13246 #if defined(TARGET_NR_sync_file_range)
13247 case TARGET_NR_sync_file_range:
13248 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13249 #if defined(TARGET_MIPS)
13250 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13251 target_offset64(arg5, arg6), arg7));
13252 #else
13253 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13254 target_offset64(arg4, arg5), arg6));
13255 #endif /* !TARGET_MIPS */
13256 #else
13257 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13258 #endif
13259 return ret;
13260 #endif
13261 #if defined(TARGET_NR_sync_file_range2) || \
13262 defined(TARGET_NR_arm_sync_file_range)
13263 #if defined(TARGET_NR_sync_file_range2)
13264 case TARGET_NR_sync_file_range2:
13265 #endif
13266 #if defined(TARGET_NR_arm_sync_file_range)
13267 case TARGET_NR_arm_sync_file_range:
13268 #endif
13269 /* This is like sync_file_range but the arguments are reordered */
13270 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13271 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13272 target_offset64(arg5, arg6), arg2));
13273 #else
13274 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13275 #endif
13276 return ret;
13277 #endif
13278 #endif
13279 #if defined(TARGET_NR_signalfd4)
13280 case TARGET_NR_signalfd4:
13281 return do_signalfd4(arg1, arg2, arg4);
13282 #endif
13283 #if defined(TARGET_NR_signalfd)
13284 case TARGET_NR_signalfd:
13285 return do_signalfd4(arg1, arg2, 0);
13286 #endif
13287 #if defined(CONFIG_EPOLL)
13288 #if defined(TARGET_NR_epoll_create)
13289 case TARGET_NR_epoll_create:
13290 return get_errno(epoll_create(arg1));
13291 #endif
13292 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13293 case TARGET_NR_epoll_create1:
13294 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13295 #endif
13296 #if defined(TARGET_NR_epoll_ctl)
13297 case TARGET_NR_epoll_ctl:
13298 {
13299 struct epoll_event ep;
13300 struct epoll_event *epp = 0;
13301 if (arg4) {
13302 if (arg2 != EPOLL_CTL_DEL) {
13303 struct target_epoll_event *target_ep;
13304 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13305 return -TARGET_EFAULT;
13306 }
13307 ep.events = tswap32(target_ep->events);
13308 /*
13309 * The epoll_data_t union is just opaque data to the kernel,
13310 * so we transfer all 64 bits across and need not worry what
13311 * actual data type it is.
13312 */
13313 ep.data.u64 = tswap64(target_ep->data.u64);
13314 unlock_user_struct(target_ep, arg4, 0);
13315 }
13316 /*
13317 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13318 * non-null pointer, even though this argument is ignored.
13319 *
13320 */
13321 epp = &ep;
13322 }
13323 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13324 }
13325 #endif
13326
13327 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13328 #if defined(TARGET_NR_epoll_wait)
13329 case TARGET_NR_epoll_wait:
13330 #endif
13331 #if defined(TARGET_NR_epoll_pwait)
13332 case TARGET_NR_epoll_pwait:
13333 #endif
13334 {
13335 struct target_epoll_event *target_ep;
13336 struct epoll_event *ep;
13337 int epfd = arg1;
13338 int maxevents = arg3;
13339 int timeout = arg4;
13340
13341 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13342 return -TARGET_EINVAL;
13343 }
13344
13345 target_ep = lock_user(VERIFY_WRITE, arg2,
13346 maxevents * sizeof(struct target_epoll_event), 1);
13347 if (!target_ep) {
13348 return -TARGET_EFAULT;
13349 }
13350
13351 ep = g_try_new(struct epoll_event, maxevents);
13352 if (!ep) {
13353 unlock_user(target_ep, arg2, 0);
13354 return -TARGET_ENOMEM;
13355 }
13356
13357 switch (num) {
13358 #if defined(TARGET_NR_epoll_pwait)
13359 case TARGET_NR_epoll_pwait:
13360 {
13361 sigset_t *set = NULL;
13362
13363 if (arg5) {
13364 ret = process_sigsuspend_mask(&set, arg5, arg6);
13365 if (ret != 0) {
13366 break;
13367 }
13368 }
13369
13370 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13371 set, SIGSET_T_SIZE));
13372
13373 if (set) {
13374 finish_sigsuspend_mask(ret);
13375 }
13376 break;
13377 }
13378 #endif
13379 #if defined(TARGET_NR_epoll_wait)
13380 case TARGET_NR_epoll_wait:
13381 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13382 NULL, 0));
13383 break;
13384 #endif
13385 default:
13386 ret = -TARGET_ENOSYS;
13387 }
13388 if (!is_error(ret)) {
13389 int i;
13390 for (i = 0; i < ret; i++) {
13391 target_ep[i].events = tswap32(ep[i].events);
13392 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13393 }
13394 unlock_user(target_ep, arg2,
13395 ret * sizeof(struct target_epoll_event));
13396 } else {
13397 unlock_user(target_ep, arg2, 0);
13398 }
13399 g_free(ep);
13400 return ret;
13401 }
13402 #endif
13403 #endif
13404 #ifdef TARGET_NR_prlimit64
13405 case TARGET_NR_prlimit64:
13406 {
13407 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13408 struct target_rlimit64 *target_rnew, *target_rold;
13409 struct host_rlimit64 rnew, rold, *rnewp = 0;
13410 int resource = target_to_host_resource(arg2);
13411
13412 if (arg3 && (resource != RLIMIT_AS &&
13413 resource != RLIMIT_DATA &&
13414 resource != RLIMIT_STACK)) {
13415 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13416 return -TARGET_EFAULT;
13417 }
13418 __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13419 __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13420 unlock_user_struct(target_rnew, arg3, 0);
13421 rnewp = &rnew;
13422 }
13423
13424 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13425 if (!is_error(ret) && arg4) {
13426 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13427 return -TARGET_EFAULT;
13428 }
13429 __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13430 __put_user(rold.rlim_max, &target_rold->rlim_max);
13431 unlock_user_struct(target_rold, arg4, 1);
13432 }
13433 return ret;
13434 }
13435 #endif
13436 #ifdef TARGET_NR_gethostname
13437 case TARGET_NR_gethostname:
13438 {
13439 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13440 if (name) {
13441 ret = get_errno(gethostname(name, arg2));
13442 unlock_user(name, arg1, arg2);
13443 } else {
13444 ret = -TARGET_EFAULT;
13445 }
13446 return ret;
13447 }
13448 #endif
13449 #ifdef TARGET_NR_atomic_cmpxchg_32
13450 case TARGET_NR_atomic_cmpxchg_32:
13451 {
13452 /* should use start_exclusive from main.c */
13453 abi_ulong mem_value;
13454 if (get_user_u32(mem_value, arg6)) {
13455 target_siginfo_t info;
13456 info.si_signo = SIGSEGV;
13457 info.si_errno = 0;
13458 info.si_code = TARGET_SEGV_MAPERR;
13459 info._sifields._sigfault._addr = arg6;
13460 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13461 ret = 0xdeadbeef;
13462
13463 }
13464 if (mem_value == arg2)
13465 put_user_u32(arg1, arg6);
13466 return mem_value;
13467 }
13468 #endif
13469 #ifdef TARGET_NR_atomic_barrier
13470 case TARGET_NR_atomic_barrier:
13471 /* Like the kernel implementation and the
13472 qemu arm barrier, no-op this? */
13473 return 0;
13474 #endif
13475
13476 #ifdef TARGET_NR_timer_create
13477 case TARGET_NR_timer_create:
13478 {
13479 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13480
13481 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13482
13483 int clkid = arg1;
13484 int timer_index = next_free_host_timer();
13485
13486 if (timer_index < 0) {
13487 ret = -TARGET_EAGAIN;
13488 } else {
13489 timer_t *phtimer = g_posix_timers + timer_index;
13490
13491 if (arg2) {
13492 phost_sevp = &host_sevp;
13493 ret = target_to_host_sigevent(phost_sevp, arg2);
13494 if (ret != 0) {
13495 free_host_timer_slot(timer_index);
13496 return ret;
13497 }
13498 }
13499
13500 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13501 if (ret) {
13502 free_host_timer_slot(timer_index);
13503 } else {
13504 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13505 timer_delete(*phtimer);
13506 free_host_timer_slot(timer_index);
13507 return -TARGET_EFAULT;
13508 }
13509 }
13510 }
13511 return ret;
13512 }
13513 #endif
13514
13515 #ifdef TARGET_NR_timer_settime
13516 case TARGET_NR_timer_settime:
13517 {
13518 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13519 * struct itimerspec * old_value */
13520 target_timer_t timerid = get_timer_id(arg1);
13521
13522 if (timerid < 0) {
13523 ret = timerid;
13524 } else if (arg3 == 0) {
13525 ret = -TARGET_EINVAL;
13526 } else {
13527 timer_t htimer = g_posix_timers[timerid];
13528 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13529
13530 if (target_to_host_itimerspec(&hspec_new, arg3)) {
13531 return -TARGET_EFAULT;
13532 }
13533 ret = get_errno(
13534 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13535 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13536 return -TARGET_EFAULT;
13537 }
13538 }
13539 return ret;
13540 }
13541 #endif
13542
13543 #ifdef TARGET_NR_timer_settime64
13544 case TARGET_NR_timer_settime64:
13545 {
13546 target_timer_t timerid = get_timer_id(arg1);
13547
13548 if (timerid < 0) {
13549 ret = timerid;
13550 } else if (arg3 == 0) {
13551 ret = -TARGET_EINVAL;
13552 } else {
13553 timer_t htimer = g_posix_timers[timerid];
13554 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13555
13556 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13557 return -TARGET_EFAULT;
13558 }
13559 ret = get_errno(
13560 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13561 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13562 return -TARGET_EFAULT;
13563 }
13564 }
13565 return ret;
13566 }
13567 #endif
13568
13569 #ifdef TARGET_NR_timer_gettime
13570 case TARGET_NR_timer_gettime:
13571 {
13572 /* args: timer_t timerid, struct itimerspec *curr_value */
13573 target_timer_t timerid = get_timer_id(arg1);
13574
13575 if (timerid < 0) {
13576 ret = timerid;
13577 } else if (!arg2) {
13578 ret = -TARGET_EFAULT;
13579 } else {
13580 timer_t htimer = g_posix_timers[timerid];
13581 struct itimerspec hspec;
13582 ret = get_errno(timer_gettime(htimer, &hspec));
13583
13584 if (host_to_target_itimerspec(arg2, &hspec)) {
13585 ret = -TARGET_EFAULT;
13586 }
13587 }
13588 return ret;
13589 }
13590 #endif
13591
13592 #ifdef TARGET_NR_timer_gettime64
13593 case TARGET_NR_timer_gettime64:
13594 {
13595 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13596 target_timer_t timerid = get_timer_id(arg1);
13597
13598 if (timerid < 0) {
13599 ret = timerid;
13600 } else if (!arg2) {
13601 ret = -TARGET_EFAULT;
13602 } else {
13603 timer_t htimer = g_posix_timers[timerid];
13604 struct itimerspec hspec;
13605 ret = get_errno(timer_gettime(htimer, &hspec));
13606
13607 if (host_to_target_itimerspec64(arg2, &hspec)) {
13608 ret = -TARGET_EFAULT;
13609 }
13610 }
13611 return ret;
13612 }
13613 #endif
13614
13615 #ifdef TARGET_NR_timer_getoverrun
13616 case TARGET_NR_timer_getoverrun:
13617 {
13618 /* args: timer_t timerid */
13619 target_timer_t timerid = get_timer_id(arg1);
13620
13621 if (timerid < 0) {
13622 ret = timerid;
13623 } else {
13624 timer_t htimer = g_posix_timers[timerid];
13625 ret = get_errno(timer_getoverrun(htimer));
13626 }
13627 return ret;
13628 }
13629 #endif
13630
13631 #ifdef TARGET_NR_timer_delete
13632 case TARGET_NR_timer_delete:
13633 {
13634 /* args: timer_t timerid */
13635 target_timer_t timerid = get_timer_id(arg1);
13636
13637 if (timerid < 0) {
13638 ret = timerid;
13639 } else {
13640 timer_t htimer = g_posix_timers[timerid];
13641 ret = get_errno(timer_delete(htimer));
13642 free_host_timer_slot(timerid);
13643 }
13644 return ret;
13645 }
13646 #endif
13647
13648 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13649 case TARGET_NR_timerfd_create:
13650 ret = get_errno(timerfd_create(arg1,
13651 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13652 if (ret >= 0) {
13653 fd_trans_register(ret, &target_timerfd_trans);
13654 }
13655 return ret;
13656 #endif
13657
13658 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13659 case TARGET_NR_timerfd_gettime:
13660 {
13661 struct itimerspec its_curr;
13662
13663 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13664
13665 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13666 return -TARGET_EFAULT;
13667 }
13668 }
13669 return ret;
13670 #endif
13671
13672 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13673 case TARGET_NR_timerfd_gettime64:
13674 {
13675 struct itimerspec its_curr;
13676
13677 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13678
13679 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13680 return -TARGET_EFAULT;
13681 }
13682 }
13683 return ret;
13684 #endif
13685
13686 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13687 case TARGET_NR_timerfd_settime:
13688 {
13689 struct itimerspec its_new, its_old, *p_new;
13690
13691 if (arg3) {
13692 if (target_to_host_itimerspec(&its_new, arg3)) {
13693 return -TARGET_EFAULT;
13694 }
13695 p_new = &its_new;
13696 } else {
13697 p_new = NULL;
13698 }
13699
13700 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13701
13702 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13703 return -TARGET_EFAULT;
13704 }
13705 }
13706 return ret;
13707 #endif
13708
13709 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13710 case TARGET_NR_timerfd_settime64:
13711 {
13712 struct itimerspec its_new, its_old, *p_new;
13713
13714 if (arg3) {
13715 if (target_to_host_itimerspec64(&its_new, arg3)) {
13716 return -TARGET_EFAULT;
13717 }
13718 p_new = &its_new;
13719 } else {
13720 p_new = NULL;
13721 }
13722
13723 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13724
13725 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13726 return -TARGET_EFAULT;
13727 }
13728 }
13729 return ret;
13730 #endif
13731
13732 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13733 case TARGET_NR_ioprio_get:
13734 return get_errno(ioprio_get(arg1, arg2));
13735 #endif
13736
13737 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13738 case TARGET_NR_ioprio_set:
13739 return get_errno(ioprio_set(arg1, arg2, arg3));
13740 #endif
13741
13742 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13743 case TARGET_NR_setns:
13744 return get_errno(setns(arg1, arg2));
13745 #endif
13746 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13747 case TARGET_NR_unshare:
13748 return get_errno(unshare(arg1));
13749 #endif
13750 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13751 case TARGET_NR_kcmp:
13752 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13753 #endif
13754 #ifdef TARGET_NR_swapcontext
13755 case TARGET_NR_swapcontext:
13756 /* PowerPC specific. */
13757 return do_swapcontext(cpu_env, arg1, arg2, arg3);
13758 #endif
13759 #ifdef TARGET_NR_memfd_create
13760 case TARGET_NR_memfd_create:
13761 p = lock_user_string(arg1);
13762 if (!p) {
13763 return -TARGET_EFAULT;
13764 }
13765 ret = get_errno(memfd_create(p, arg2));
13766 fd_trans_unregister(ret);
13767 unlock_user(p, arg1, 0);
13768 return ret;
13769 #endif
13770 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13771 case TARGET_NR_membarrier:
13772 return get_errno(membarrier(arg1, arg2));
13773 #endif
13774
13775 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13776 case TARGET_NR_copy_file_range:
13777 {
13778 loff_t inoff, outoff;
13779 loff_t *pinoff = NULL, *poutoff = NULL;
13780
13781 if (arg2) {
13782 if (get_user_u64(inoff, arg2)) {
13783 return -TARGET_EFAULT;
13784 }
13785 pinoff = &inoff;
13786 }
13787 if (arg4) {
13788 if (get_user_u64(outoff, arg4)) {
13789 return -TARGET_EFAULT;
13790 }
13791 poutoff = &outoff;
13792 }
13793 /* Do not sign-extend the count parameter. */
13794 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13795 (abi_ulong)arg5, arg6));
13796 if (!is_error(ret) && ret > 0) {
13797 if (arg2) {
13798 if (put_user_u64(inoff, arg2)) {
13799 return -TARGET_EFAULT;
13800 }
13801 }
13802 if (arg4) {
13803 if (put_user_u64(outoff, arg4)) {
13804 return -TARGET_EFAULT;
13805 }
13806 }
13807 }
13808 }
13809 return ret;
13810 #endif
13811
13812 #if defined(TARGET_NR_pivot_root)
13813 case TARGET_NR_pivot_root:
13814 {
13815 void *p2;
13816 p = lock_user_string(arg1); /* new_root */
13817 p2 = lock_user_string(arg2); /* put_old */
13818 if (!p || !p2) {
13819 ret = -TARGET_EFAULT;
13820 } else {
13821 ret = get_errno(pivot_root(p, p2));
13822 }
13823 unlock_user(p2, arg2, 0);
13824 unlock_user(p, arg1, 0);
13825 }
13826 return ret;
13827 #endif
13828
13829 #if defined(TARGET_NR_riscv_hwprobe)
13830 case TARGET_NR_riscv_hwprobe:
13831 return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13832 #endif
13833
13834 default:
13835 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13836 return -TARGET_ENOSYS;
13837 }
13838 return ret;
13839 }
13840
13841 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13842 abi_long arg2, abi_long arg3, abi_long arg4,
13843 abi_long arg5, abi_long arg6, abi_long arg7,
13844 abi_long arg8)
13845 {
13846 CPUState *cpu = env_cpu(cpu_env);
13847 abi_long ret;
13848
13849 #ifdef DEBUG_ERESTARTSYS
13850 /* Debug-only code for exercising the syscall-restart code paths
13851 * in the per-architecture cpu main loops: restart every syscall
13852 * the guest makes once before letting it through.
13853 */
13854 {
13855 static bool flag;
13856 flag = !flag;
13857 if (flag) {
13858 return -QEMU_ERESTARTSYS;
13859 }
13860 }
13861 #endif
13862
13863 record_syscall_start(cpu, num, arg1,
13864 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13865
13866 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13867 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13868 }
13869
13870 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13871 arg5, arg6, arg7, arg8);
13872
13873 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13874 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13875 arg3, arg4, arg5, arg6);
13876 }
13877
13878 record_syscall_return(cpu, num, ret);
13879 return ret;
13880 }