/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"
#include "cpu_loop-common.h"

#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
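
/*
 * For orientation, a sketch based on glibc's NPTL (not taken from this
 * file): pthread_create() typically issues clone() with roughly
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * i.e. all of CLONE_THREAD_FLAGS plus only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it is classified as thread creation.
 */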

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

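/*
 * For example, the expansion of
 *     _syscall0(int, sys_gettid)
 * is
 *     static int sys_gettid(void) { return syscall(__NR_sys_gettid); }
 * which is why each wrapper named sys_xxx needs a matching
 * "#define __NR_sys_xxx __NR_xxx" alias like the ones below.
 */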
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC (1U << 2)
#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
          unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static const bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};
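
/*
 * Usage sketch (relying on QEMU's generic bitmask helpers declared in
 * qemu.h): each row is { target_mask, target_bits, host_mask, host_bits },
 * so open()-style handlers can translate guest flags with
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * and map results back with host_to_target_bitmask().
 */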

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not match the one used by the
 * underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif
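
/*
 * The slot table is managed lock-free: qatomic_xchg() claims a slot (a
 * previous value of 0 means it was free) and qatomic_store_release()
 * publishes it as free again.  A sketch of the intended caller pattern
 * in the timer_create path (hypothetical locals; the exact error code
 * may differ):
 *
 *     int timer_index = next_free_host_timer();
 *     if (timer_index < 0) {
 *         return -TARGET_EAGAIN;    // all GUEST_TIMER_MAX slots in use
 *     }
 *     // ... create the host timer in g_posix_timers[timer_index] ...
 *     // on failure: free_host_timer_slot(timer_index);
 */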

static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X) case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X) case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
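
/*
 * For example, a syscall wrapper typically does
 *     ret = get_errno(open(path, host_flags));
 * so a host failure such as ENOENT comes back as -TARGET_ENOENT, while a
 * successful result is passed through unchanged.
 */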

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}

static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;
    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}
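
/*
 * check_zeroed_user() supports extensible structs (sched_setattr-style):
 * if the guest passes a struct larger than the one QEMU knows (usize >
 * ksize), the extra tail must be all zero bytes.  A sketch of the caller
 * pattern, mirroring the kernel's copy_struct_from_user() convention:
 *
 *     ret = check_zeroed_user(addr, sizeof(struct sched_attr), usize);
 *     if (ret < 0) {
 *         return ret;               // -TARGET_EFAULT
 *     }
 *     if (ret == 0) {
 *         return -TARGET_E2BIG;     // non-zero bytes beyond known fields
 *     }
 */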

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

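/*
 * For example, safe_syscall3(ssize_t, read, ...) below defines safe_read(),
 * which behaves like read() except that it goes through safe_syscall():
 * if a guest signal arrives before the host syscall has committed, it
 * fails with errno set to QEMU_ERESTARTSYS so that the guest syscall can
 * be restarted after the signal is handled (see DEBUG_ERESTARTSYS above).
 */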
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to previous heap usage (grown
         * then shrunk). */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
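
/*
 * Worked example (illustrative numbers, 4k pages): with target_brk ==
 * brk_page == 0x401000, a guest brk(0x404000) does not fit in the
 * reserved page, so we target_mmap() 0x3000 bytes at 0x401000.  If the
 * mapping lands exactly there, the break becomes 0x404000; if it lands
 * elsewhere, we unmap it again and return the old break (or ENOMEM on
 * Alpha/OSF/1).
 */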

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
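
/*
 * Layout example (assuming TARGET_ABI_BITS == 32): in the guest bitmap,
 * fd 33 is bit 1 of abi_ulong word 1, stored in guest byte order, while
 * the host fd_set keeps it wherever FD_SET() puts it.  Converting one
 * bit at a time through __get_user()/__put_user() stays correct for any
 * combination of host and guest endianness and word size.
 */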

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
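
/*
 * E.g. on an Alpha host (HOST_HZ 1024) with a 100 Hz target, 2048 host
 * ticks become 2048 * 100 / 1024 = 200 target ticks; the intermediate
 * product is computed in 64 bits to avoid overflow.
 */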

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif

#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif

static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
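
/*
 * Example of the sun_path fixup above: for "/tmp/sock" a guest may pass
 * len == offsetof(struct sockaddr_un, sun_path) + 9, omitting the
 * trailing NUL; since the byte just past the path is 0 and there is
 * room, len is bumped by one so the host kernel sees a properly
 * terminated path.
 */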
1721
1722 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1723 struct sockaddr *addr,
1724 socklen_t len)
1725 {
1726 struct target_sockaddr *target_saddr;
1727
1728 if (len == 0) {
1729 return 0;
1730 }
1731 assert(addr);
1732
1733 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1734 if (!target_saddr)
1735 return -TARGET_EFAULT;
1736 memcpy(target_saddr, addr, len);
1737 if (len >= offsetof(struct target_sockaddr, sa_family) +
1738 sizeof(target_saddr->sa_family)) {
1739 target_saddr->sa_family = tswap16(addr->sa_family);
1740 }
1741 if (addr->sa_family == AF_NETLINK &&
1742 len >= sizeof(struct target_sockaddr_nl)) {
1743 struct target_sockaddr_nl *target_nl =
1744 (struct target_sockaddr_nl *)target_saddr;
1745 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1746 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1747 } else if (addr->sa_family == AF_PACKET) {
1748 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1749 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1750 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1751 } else if (addr->sa_family == AF_INET6 &&
1752 len >= sizeof(struct target_sockaddr_in6)) {
1753 struct target_sockaddr_in6 *target_in6 =
1754 (struct target_sockaddr_in6 *)target_saddr;
1755 target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1756 }
1757 unlock_user(target_saddr, target_addr, len);
1758
1759 return 0;
1760 }
1761
1762 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1763 struct target_msghdr *target_msgh)
1764 {
1765 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1766 abi_long msg_controllen;
1767 abi_ulong target_cmsg_addr;
1768 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1769 socklen_t space = 0;
1770
1771 msg_controllen = tswapal(target_msgh->msg_controllen);
1772 if (msg_controllen < sizeof (struct target_cmsghdr))
1773 goto the_end;
1774 target_cmsg_addr = tswapal(target_msgh->msg_control);
1775 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1776 target_cmsg_start = target_cmsg;
1777 if (!target_cmsg)
1778 return -TARGET_EFAULT;
1779
1780 while (cmsg && target_cmsg) {
1781 void *data = CMSG_DATA(cmsg);
1782 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1783
1784 int len = tswapal(target_cmsg->cmsg_len)
1785 - sizeof(struct target_cmsghdr);
1786
1787 space += CMSG_SPACE(len);
1788 if (space > msgh->msg_controllen) {
1789 space -= CMSG_SPACE(len);
1790 /* This is a QEMU bug, since we allocated the payload
1791 * area ourselves (unlike overflow in host-to-target
1792 * conversion, which is just the guest giving us a buffer
1793 * that's too small). It can't happen for the payload types
1794 * we currently support; if it becomes an issue in future
1795 * we would need to improve our allocation strategy to
1796 * something more intelligent than "twice the size of the
1797 * target buffer we're reading from".
1798 */
1799 qemu_log_mask(LOG_UNIMP,
1800 ("Unsupported ancillary data %d/%d: "
1801 "unhandled msg size\n"),
1802 tswap32(target_cmsg->cmsg_level),
1803 tswap32(target_cmsg->cmsg_type));
1804 break;
1805 }
1806
1807 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1808 cmsg->cmsg_level = SOL_SOCKET;
1809 } else {
1810 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1811 }
1812 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1813 cmsg->cmsg_len = CMSG_LEN(len);
1814
1815 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1816 int *fd = (int *)data;
1817 int *target_fd = (int *)target_data;
1818 int i, numfds = len / sizeof(int);
1819
1820 for (i = 0; i < numfds; i++) {
1821 __get_user(fd[i], target_fd + i);
1822 }
1823 } else if (cmsg->cmsg_level == SOL_SOCKET
1824 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1825 struct ucred *cred = (struct ucred *)data;
1826 struct target_ucred *target_cred =
1827 (struct target_ucred *)target_data;
1828
1829 __get_user(cred->pid, &target_cred->pid);
1830 __get_user(cred->uid, &target_cred->uid);
1831 __get_user(cred->gid, &target_cred->gid);
1832 } else if (cmsg->cmsg_level == SOL_ALG) {
1833 uint32_t *dst = (uint32_t *)data;
1834
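/* Both ALG_SET_OP (a single u32) and ALG_SET_IV (a struct af_alg_iv,
 * which starts with a u32 ivlen) lead with one 32-bit word, so
 * swapping only that word is enough for the SOL_ALG payloads seen
 * here.
 */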
1835 memcpy(dst, target_data, len);
1836 /* fix endianness of the first 32-bit word */
1837 if (len >= sizeof(uint32_t)) {
1838 *dst = tswap32(*dst);
1839 }
1840 } else {
1841 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1842 cmsg->cmsg_level, cmsg->cmsg_type);
1843 memcpy(data, target_data, len);
1844 }
1845
1846 cmsg = CMSG_NXTHDR(msgh, cmsg);
1847 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1848 target_cmsg_start);
1849 }
1850 unlock_user(target_cmsg, target_cmsg_addr, 0);
1851 the_end:
1852 msgh->msg_controllen = space;
1853 return 0;
1854 }
1855
1856 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1857 struct msghdr *msgh)
1858 {
1859 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1860 abi_long msg_controllen;
1861 abi_ulong target_cmsg_addr;
1862 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1863 socklen_t space = 0;
1864
1865 msg_controllen = tswapal(target_msgh->msg_controllen);
1866 if (msg_controllen < sizeof (struct target_cmsghdr))
1867 goto the_end;
1868 target_cmsg_addr = tswapal(target_msgh->msg_control);
1869 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1870 target_cmsg_start = target_cmsg;
1871 if (!target_cmsg)
1872 return -TARGET_EFAULT;
1873
1874 while (cmsg && target_cmsg) {
1875 void *data = CMSG_DATA(cmsg);
1876 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1877
1878 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1879 int tgt_len, tgt_space;
1880
1881 /* We never copy a half-header but may copy half-data;
1882 * this is Linux's behaviour in put_cmsg(). Note that
1883 * truncation here is a guest problem (which we report
1884 * to the guest via the CTRUNC bit), unlike truncation
1885 * in target_to_host_cmsg, which is a QEMU bug.
1886 */
1887 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1888 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1889 break;
1890 }
1891
1892 if (cmsg->cmsg_level == SOL_SOCKET) {
1893 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1894 } else {
1895 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1896 }
1897 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1898
1899 /* Payload types which need a different size of payload on
1900 * the target must adjust tgt_len here.
1901 */
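/* (E.g. SO_TIMESTAMP below: the host hands us a struct timeval, but
 * the guest expects a struct target_timeval, whose size can differ on
 * 32-bit targets.)
 */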
1902 tgt_len = len;
1903 switch (cmsg->cmsg_level) {
1904 case SOL_SOCKET:
1905 switch (cmsg->cmsg_type) {
1906 case SO_TIMESTAMP:
1907 tgt_len = sizeof(struct target_timeval);
1908 break;
1909 default:
1910 break;
1911 }
1912 break;
1913 default:
1914 break;
1915 }
1916
1917 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1918 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1919 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1920 }
1921
1922 /* We must now copy-and-convert len bytes of payload
1923 * into tgt_len bytes of destination space. Bear in mind
1924 * that in both source and destination we may be dealing
1925 * with a truncated value!
1926 */
1927 switch (cmsg->cmsg_level) {
1928 case SOL_SOCKET:
1929 switch (cmsg->cmsg_type) {
1930 case SCM_RIGHTS:
1931 {
1932 int *fd = (int *)data;
1933 int *target_fd = (int *)target_data;
1934 int i, numfds = tgt_len / sizeof(int);
1935
1936 for (i = 0; i < numfds; i++) {
1937 __put_user(fd[i], target_fd + i);
1938 }
1939 break;
1940 }
1941 case SO_TIMESTAMP:
1942 {
1943 struct timeval *tv = (struct timeval *)data;
1944 struct target_timeval *target_tv =
1945 (struct target_timeval *)target_data;
1946
1947 if (len != sizeof(struct timeval) ||
1948 tgt_len != sizeof(struct target_timeval)) {
1949 goto unimplemented;
1950 }
1951
1952 /* copy struct timeval to target */
1953 __put_user(tv->tv_sec, &target_tv->tv_sec);
1954 __put_user(tv->tv_usec, &target_tv->tv_usec);
1955 break;
1956 }
1957 case SCM_CREDENTIALS:
1958 {
1959 struct ucred *cred = (struct ucred *)data;
1960 struct target_ucred *target_cred =
1961 (struct target_ucred *)target_data;
1962
1963 __put_user(cred->pid, &target_cred->pid);
1964 __put_user(cred->uid, &target_cred->uid);
1965 __put_user(cred->gid, &target_cred->gid);
1966 break;
1967 }
1968 default:
1969 goto unimplemented;
1970 }
1971 break;
1972
1973 case SOL_IP:
1974 switch (cmsg->cmsg_type) {
1975 case IP_TTL:
1976 {
1977 uint32_t *v = (uint32_t *)data;
1978 uint32_t *t_int = (uint32_t *)target_data;
1979
1980 if (len != sizeof(uint32_t) ||
1981 tgt_len != sizeof(uint32_t)) {
1982 goto unimplemented;
1983 }
1984 __put_user(*v, t_int);
1985 break;
1986 }
1987 case IP_RECVERR:
1988 {
1989 struct errhdr_t {
1990 struct sock_extended_err ee;
1991 struct sockaddr_in offender;
1992 };
1993 struct errhdr_t *errh = (struct errhdr_t *)data;
1994 struct errhdr_t *target_errh =
1995 (struct errhdr_t *)target_data;
1996
1997 if (len != sizeof(struct errhdr_t) ||
1998 tgt_len != sizeof(struct errhdr_t)) {
1999 goto unimplemented;
2000 }
2001 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2002 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2003 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2004 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2005 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2006 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2007 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2008 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2009 (void *) &errh->offender, sizeof(errh->offender));
2010 break;
2011 }
2012 default:
2013 goto unimplemented;
2014 }
2015 break;
2016
2017 case SOL_IPV6:
2018 switch (cmsg->cmsg_type) {
2019 case IPV6_HOPLIMIT:
2020 {
2021 uint32_t *v = (uint32_t *)data;
2022 uint32_t *t_int = (uint32_t *)target_data;
2023
2024 if (len != sizeof(uint32_t) ||
2025 tgt_len != sizeof(uint32_t)) {
2026 goto unimplemented;
2027 }
2028 __put_user(*v, t_int);
2029 break;
2030 }
2031 case IPV6_RECVERR:
2032 {
2033 struct errhdr6_t {
2034 struct sock_extended_err ee;
2035 struct sockaddr_in6 offender;
2036 };
2037 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2038 struct errhdr6_t *target_errh =
2039 (struct errhdr6_t *)target_data;
2040
2041 if (len != sizeof(struct errhdr6_t) ||
2042 tgt_len != sizeof(struct errhdr6_t)) {
2043 goto unimplemented;
2044 }
2045 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2046 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2047 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2048 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2049 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2050 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2051 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2052 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2053 (void *) &errh->offender, sizeof(errh->offender));
2054 break;
2055 }
2056 default:
2057 goto unimplemented;
2058 }
2059 break;
2060
2061 default:
2062 unimplemented:
2063 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2064 cmsg->cmsg_level, cmsg->cmsg_type);
2065 memcpy(target_data, data, MIN(len, tgt_len));
2066 if (tgt_len > len) {
2067 memset(target_data + len, 0, tgt_len - len);
2068 }
2069 }
2070
2071 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2072 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2073 if (msg_controllen < tgt_space) {
2074 tgt_space = msg_controllen;
2075 }
2076 msg_controllen -= tgt_space;
2077 space += tgt_space;
2078 cmsg = CMSG_NXTHDR(msgh, cmsg);
2079 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2080 target_cmsg_start);
2081 }
2082 unlock_user(target_cmsg, target_cmsg_addr, space);
2083 the_end:
2084 target_msgh->msg_controllen = tswapal(space);
2085 return 0;
2086 }
2087
2088 /* do_setsockopt() Must return target values and target errnos. */
2089 static abi_long do_setsockopt(int sockfd, int level, int optname,
2090 abi_ulong optval_addr, socklen_t optlen)
2091 {
2092 abi_long ret;
2093 int val;
2094 struct ip_mreqn *ip_mreq;
2095 struct ip_mreq_source *ip_mreq_source;
2096
2097 switch(level) {
2098 case SOL_TCP:
2099 case SOL_UDP:
2100 /* TCP and UDP options all take an 'int' value. */
2101 if (optlen < sizeof(uint32_t))
2102 return -TARGET_EINVAL;
2103
2104 if (get_user_u32(val, optval_addr))
2105 return -TARGET_EFAULT;
2106 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2107 break;
2108 case SOL_IP:
2109 switch(optname) {
2110 case IP_TOS:
2111 case IP_TTL:
2112 case IP_HDRINCL:
2113 case IP_ROUTER_ALERT:
2114 case IP_RECVOPTS:
2115 case IP_RETOPTS:
2116 case IP_PKTINFO:
2117 case IP_MTU_DISCOVER:
2118 case IP_RECVERR:
2119 case IP_RECVTTL:
2120 case IP_RECVTOS:
2121 #ifdef IP_FREEBIND
2122 case IP_FREEBIND:
2123 #endif
2124 case IP_MULTICAST_TTL:
2125 case IP_MULTICAST_LOOP:
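/* Like the kernel, accept either a full int or a single byte for
 * these options (a one-byte optlen is common for e.g.
 * IP_MULTICAST_TTL).
 */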
2126 val = 0;
2127 if (optlen >= sizeof(uint32_t)) {
2128 if (get_user_u32(val, optval_addr))
2129 return -TARGET_EFAULT;
2130 } else if (optlen >= 1) {
2131 if (get_user_u8(val, optval_addr))
2132 return -TARGET_EFAULT;
2133 }
2134 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2135 break;
2136 case IP_ADD_MEMBERSHIP:
2137 case IP_DROP_MEMBERSHIP:
2138 if (optlen < sizeof (struct target_ip_mreq) ||
2139 optlen > sizeof (struct target_ip_mreqn))
2140 return -TARGET_EINVAL;
2141
2142 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2143 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2144 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2145 break;
2146
2147 case IP_BLOCK_SOURCE:
2148 case IP_UNBLOCK_SOURCE:
2149 case IP_ADD_SOURCE_MEMBERSHIP:
2150 case IP_DROP_SOURCE_MEMBERSHIP:
2151 if (optlen != sizeof (struct target_ip_mreq_source))
2152 return -TARGET_EINVAL;
2153
2154 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2155 if (!ip_mreq_source) {
2156 return -TARGET_EFAULT;
2157 }
2158 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2159 unlock_user(ip_mreq_source, optval_addr, 0);
2160 break;
2161
2162 default:
2163 goto unimplemented;
2164 }
2165 break;
2166 case SOL_IPV6:
2167 switch (optname) {
2168 case IPV6_MTU_DISCOVER:
2169 case IPV6_MTU:
2170 case IPV6_V6ONLY:
2171 case IPV6_RECVPKTINFO:
2172 case IPV6_UNICAST_HOPS:
2173 case IPV6_MULTICAST_HOPS:
2174 case IPV6_MULTICAST_LOOP:
2175 case IPV6_RECVERR:
2176 case IPV6_RECVHOPLIMIT:
2177 case IPV6_2292HOPLIMIT:
2178 case IPV6_CHECKSUM:
2179 case IPV6_ADDRFORM:
2180 case IPV6_2292PKTINFO:
2181 case IPV6_RECVTCLASS:
2182 case IPV6_RECVRTHDR:
2183 case IPV6_2292RTHDR:
2184 case IPV6_RECVHOPOPTS:
2185 case IPV6_2292HOPOPTS:
2186 case IPV6_RECVDSTOPTS:
2187 case IPV6_2292DSTOPTS:
2188 case IPV6_TCLASS:
2189 case IPV6_ADDR_PREFERENCES:
2190 #ifdef IPV6_RECVPATHMTU
2191 case IPV6_RECVPATHMTU:
2192 #endif
2193 #ifdef IPV6_TRANSPARENT
2194 case IPV6_TRANSPARENT:
2195 #endif
2196 #ifdef IPV6_FREEBIND
2197 case IPV6_FREEBIND:
2198 #endif
2199 #ifdef IPV6_RECVORIGDSTADDR
2200 case IPV6_RECVORIGDSTADDR:
2201 #endif
2202 val = 0;
2203 if (optlen < sizeof(uint32_t)) {
2204 return -TARGET_EINVAL;
2205 }
2206 if (get_user_u32(val, optval_addr)) {
2207 return -TARGET_EFAULT;
2208 }
2209 ret = get_errno(setsockopt(sockfd, level, optname,
2210 &val, sizeof(val)));
2211 break;
2212 case IPV6_PKTINFO:
2213 {
2214 struct in6_pktinfo pki;
2215
2216 if (optlen < sizeof(pki)) {
2217 return -TARGET_EINVAL;
2218 }
2219
2220 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2221 return -TARGET_EFAULT;
2222 }
2223
2224 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2225
2226 ret = get_errno(setsockopt(sockfd, level, optname,
2227 &pki, sizeof(pki)));
2228 break;
2229 }
2230 case IPV6_ADD_MEMBERSHIP:
2231 case IPV6_DROP_MEMBERSHIP:
2232 {
2233 struct ipv6_mreq ipv6mreq;
2234
2235 if (optlen < sizeof(ipv6mreq)) {
2236 return -TARGET_EINVAL;
2237 }
2238
2239 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2240 return -TARGET_EFAULT;
2241 }
2242
2243 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2244
2245 ret = get_errno(setsockopt(sockfd, level, optname,
2246 &ipv6mreq, sizeof(ipv6mreq)));
2247 break;
2248 }
2249 default:
2250 goto unimplemented;
2251 }
2252 break;
2253 case SOL_ICMPV6:
2254 switch (optname) {
2255 case ICMPV6_FILTER:
2256 {
2257 struct icmp6_filter icmp6f;
2258
2259 if (optlen > sizeof(icmp6f)) {
2260 optlen = sizeof(icmp6f);
2261 }
2262
2263 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2264 return -TARGET_EFAULT;
2265 }
2266
2267 for (val = 0; val < 8; val++) {
2268 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2269 }
2270
2271 ret = get_errno(setsockopt(sockfd, level, optname,
2272 &icmp6f, optlen));
2273 break;
2274 }
2275 default:
2276 goto unimplemented;
2277 }
2278 break;
2279 case SOL_RAW:
2280 switch (optname) {
2281 case ICMP_FILTER:
2282 case IPV6_CHECKSUM:
2283 /* these take a u32 value */
2284 if (optlen < sizeof(uint32_t)) {
2285 return -TARGET_EINVAL;
2286 }
2287
2288 if (get_user_u32(val, optval_addr)) {
2289 return -TARGET_EFAULT;
2290 }
2291 ret = get_errno(setsockopt(sockfd, level, optname,
2292 &val, sizeof(val)));
2293 break;
2294
2295 default:
2296 goto unimplemented;
2297 }
2298 break;
2299 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2300 case SOL_ALG:
2301 switch (optname) {
2302 case ALG_SET_KEY:
2303 {
2304 char *alg_key = g_try_malloc(optlen);
2305
2306 if (!alg_key) {
2307 return -TARGET_ENOMEM;
2308 }
2309 if (copy_from_user(alg_key, optval_addr, optlen)) {
2310 g_free(alg_key);
2311 return -TARGET_EFAULT;
2312 }
2313 ret = get_errno(setsockopt(sockfd, level, optname,
2314 alg_key, optlen));
2315 g_free(alg_key);
2316 break;
2317 }
2318 case ALG_SET_AEAD_AUTHSIZE:
2319 {
2320 ret = get_errno(setsockopt(sockfd, level, optname,
2321 NULL, optlen));
2322 break;
2323 }
2324 default:
2325 goto unimplemented;
2326 }
2327 break;
2328 #endif
2329 case TARGET_SOL_SOCKET:
2330 switch (optname) {
2331 case TARGET_SO_RCVTIMEO:
2332 {
2333 struct timeval tv;
2334
2335 optname = SO_RCVTIMEO;
2336
2337 set_timeout:
2338 if (optlen != sizeof(struct target_timeval)) {
2339 return -TARGET_EINVAL;
2340 }
2341
2342 if (copy_from_user_timeval(&tv, optval_addr)) {
2343 return -TARGET_EFAULT;
2344 }
2345
2346 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2347 &tv, sizeof(tv)));
2348 return ret;
2349 }
2350 case TARGET_SO_SNDTIMEO:
2351 optname = SO_SNDTIMEO;
2352 goto set_timeout;
2353 case TARGET_SO_ATTACH_FILTER:
2354 {
2355 struct target_sock_fprog *tfprog;
2356 struct target_sock_filter *tfilter;
2357 struct sock_fprog fprog;
2358 struct sock_filter *filter;
2359 int i;
2360
2361 if (optlen != sizeof(*tfprog)) {
2362 return -TARGET_EINVAL;
2363 }
2364 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2365 return -TARGET_EFAULT;
2366 }
2367 if (!lock_user_struct(VERIFY_READ, tfilter,
2368 tswapal(tfprog->filter), 0)) {
2369 unlock_user_struct(tfprog, optval_addr, 1);
2370 return -TARGET_EFAULT;
2371 }
2372
2373 fprog.len = tswap16(tfprog->len);
2374 filter = g_try_new(struct sock_filter, fprog.len);
2375 if (filter == NULL) {
2376 unlock_user_struct(tfilter, tfprog->filter, 1);
2377 unlock_user_struct(tfprog, optval_addr, 1);
2378 return -TARGET_ENOMEM;
2379 }
2380 for (i = 0; i < fprog.len; i++) {
2381 filter[i].code = tswap16(tfilter[i].code);
2382 filter[i].jt = tfilter[i].jt;
2383 filter[i].jf = tfilter[i].jf;
2384 filter[i].k = tswap32(tfilter[i].k);
2385 }
2386 fprog.filter = filter;
2387
2388 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2389 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2390 g_free(filter);
2391
2392 unlock_user_struct(tfilter, tfprog->filter, 1);
2393 unlock_user_struct(tfprog, optval_addr, 1);
2394 return ret;
2395 }
2396 case TARGET_SO_BINDTODEVICE:
2397 {
2398 char *dev_ifname, *addr_ifname;
2399
2400 if (optlen > IFNAMSIZ - 1) {
2401 optlen = IFNAMSIZ - 1;
2402 }
2403 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2404 if (!dev_ifname) {
2405 return -TARGET_EFAULT;
2406 }
2407 optname = SO_BINDTODEVICE;
2408 addr_ifname = alloca(IFNAMSIZ);
2409 memcpy(addr_ifname, dev_ifname, optlen);
2410 addr_ifname[optlen] = 0;
2411 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2412 addr_ifname, optlen));
2413 unlock_user(dev_ifname, optval_addr, 0);
2414 return ret;
2415 }
2416 case TARGET_SO_LINGER:
2417 {
2418 struct linger lg;
2419 struct target_linger *tlg;
2420
2421 if (optlen != sizeof(struct target_linger)) {
2422 return -TARGET_EINVAL;
2423 }
2424 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2425 return -TARGET_EFAULT;
2426 }
2427 __get_user(lg.l_onoff, &tlg->l_onoff);
2428 __get_user(lg.l_linger, &tlg->l_linger);
2429 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2430 &lg, sizeof(lg)));
2431 unlock_user_struct(tlg, optval_addr, 0);
2432 return ret;
2433 }
2434 /* Options with 'int' argument. */
2435 case TARGET_SO_DEBUG:
2436 optname = SO_DEBUG;
2437 break;
2438 case TARGET_SO_REUSEADDR:
2439 optname = SO_REUSEADDR;
2440 break;
2441 #ifdef SO_REUSEPORT
2442 case TARGET_SO_REUSEPORT:
2443 optname = SO_REUSEPORT;
2444 break;
2445 #endif
2446 case TARGET_SO_TYPE:
2447 optname = SO_TYPE;
2448 break;
2449 case TARGET_SO_ERROR:
2450 optname = SO_ERROR;
2451 break;
2452 case TARGET_SO_DONTROUTE:
2453 optname = SO_DONTROUTE;
2454 break;
2455 case TARGET_SO_BROADCAST:
2456 optname = SO_BROADCAST;
2457 break;
2458 case TARGET_SO_SNDBUF:
2459 optname = SO_SNDBUF;
2460 break;
2461 case TARGET_SO_SNDBUFFORCE:
2462 optname = SO_SNDBUFFORCE;
2463 break;
2464 case TARGET_SO_RCVBUF:
2465 optname = SO_RCVBUF;
2466 break;
2467 case TARGET_SO_RCVBUFFORCE:
2468 optname = SO_RCVBUFFORCE;
2469 break;
2470 case TARGET_SO_KEEPALIVE:
2471 optname = SO_KEEPALIVE;
2472 break;
2473 case TARGET_SO_OOBINLINE:
2474 optname = SO_OOBINLINE;
2475 break;
2476 case TARGET_SO_NO_CHECK:
2477 optname = SO_NO_CHECK;
2478 break;
2479 case TARGET_SO_PRIORITY:
2480 optname = SO_PRIORITY;
2481 break;
2482 #ifdef SO_BSDCOMPAT
2483 case TARGET_SO_BSDCOMPAT:
2484 optname = SO_BSDCOMPAT;
2485 break;
2486 #endif
2487 case TARGET_SO_PASSCRED:
2488 optname = SO_PASSCRED;
2489 break;
2490 case TARGET_SO_PASSSEC:
2491 optname = SO_PASSSEC;
2492 break;
2493 case TARGET_SO_TIMESTAMP:
2494 optname = SO_TIMESTAMP;
2495 break;
2496 case TARGET_SO_RCVLOWAT:
2497 optname = SO_RCVLOWAT;
2498 break;
2499 default:
2500 goto unimplemented;
2501 }
2502 if (optlen < sizeof(uint32_t))
2503 return -TARGET_EINVAL;
2504
2505 if (get_user_u32(val, optval_addr))
2506 return -TARGET_EFAULT;
2507 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2508 break;
2509 #ifdef SOL_NETLINK
2510 case SOL_NETLINK:
2511 switch (optname) {
2512 case NETLINK_PKTINFO:
2513 case NETLINK_ADD_MEMBERSHIP:
2514 case NETLINK_DROP_MEMBERSHIP:
2515 case NETLINK_BROADCAST_ERROR:
2516 case NETLINK_NO_ENOBUFS:
2517 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2518 case NETLINK_LISTEN_ALL_NSID:
2519 case NETLINK_CAP_ACK:
2520 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2521 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2522 case NETLINK_EXT_ACK:
2523 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2524 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2525 case NETLINK_GET_STRICT_CHK:
2526 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2527 break;
2528 default:
2529 goto unimplemented;
2530 }
2531 val = 0;
2532 if (optlen < sizeof(uint32_t)) {
2533 return -TARGET_EINVAL;
2534 }
2535 if (get_user_u32(val, optval_addr)) {
2536 return -TARGET_EFAULT;
2537 }
2538 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2539 sizeof(val)));
2540 break;
2541 #endif /* SOL_NETLINK */
2542 default:
2543 unimplemented:
2544 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2545 level, optname);
2546 ret = -TARGET_ENOPROTOOPT;
2547 }
2548 return ret;
2549 }
2550
2551 /* do_getsockopt() Must return target values and target errnos. */
2552 static abi_long do_getsockopt(int sockfd, int level, int optname,
2553 abi_ulong optval_addr, abi_ulong optlen)
2554 {
2555 abi_long ret;
2556 int len, val;
2557 socklen_t lv;
2558
2559 switch(level) {
2560 case TARGET_SOL_SOCKET:
2561 level = SOL_SOCKET;
2562 switch (optname) {
2563 /* These don't just return a single integer */
2564 case TARGET_SO_PEERNAME:
2565 goto unimplemented;
2566 case TARGET_SO_RCVTIMEO: {
2567 struct timeval tv;
2568 socklen_t tvlen;
2569
2570 optname = SO_RCVTIMEO;
2571
2572 get_timeout:
2573 if (get_user_u32(len, optlen)) {
2574 return -TARGET_EFAULT;
2575 }
2576 if (len < 0) {
2577 return -TARGET_EINVAL;
2578 }
2579
2580 tvlen = sizeof(tv);
2581 ret = get_errno(getsockopt(sockfd, level, optname,
2582 &tv, &tvlen));
2583 if (ret < 0) {
2584 return ret;
2585 }
2586 if (len > sizeof(struct target_timeval)) {
2587 len = sizeof(struct target_timeval);
2588 }
2589 if (copy_to_user_timeval(optval_addr, &tv)) {
2590 return -TARGET_EFAULT;
2591 }
2592 if (put_user_u32(len, optlen)) {
2593 return -TARGET_EFAULT;
2594 }
2595 break;
2596 }
2597 case TARGET_SO_SNDTIMEO:
2598 optname = SO_SNDTIMEO;
2599 goto get_timeout;
2600 case TARGET_SO_PEERCRED: {
2601 struct ucred cr;
2602 socklen_t crlen;
2603 struct target_ucred *tcr;
2604
2605 if (get_user_u32(len, optlen)) {
2606 return -TARGET_EFAULT;
2607 }
2608 if (len < 0) {
2609 return -TARGET_EINVAL;
2610 }
2611
2612 crlen = sizeof(cr);
2613 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2614 &cr, &crlen));
2615 if (ret < 0) {
2616 return ret;
2617 }
2618 if (len > crlen) {
2619 len = crlen;
2620 }
2621 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2622 return -TARGET_EFAULT;
2623 }
2624 __put_user(cr.pid, &tcr->pid);
2625 __put_user(cr.uid, &tcr->uid);
2626 __put_user(cr.gid, &tcr->gid);
2627 unlock_user_struct(tcr, optval_addr, 1);
2628 if (put_user_u32(len, optlen)) {
2629 return -TARGET_EFAULT;
2630 }
2631 break;
2632 }
2633 case TARGET_SO_PEERSEC: {
2634 char *name;
2635
2636 if (get_user_u32(len, optlen)) {
2637 return -TARGET_EFAULT;
2638 }
2639 if (len < 0) {
2640 return -TARGET_EINVAL;
2641 }
2642 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2643 if (!name) {
2644 return -TARGET_EFAULT;
2645 }
2646 lv = len;
2647 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2648 name, &lv));
2649 if (put_user_u32(lv, optlen)) {
2650 ret = -TARGET_EFAULT;
2651 }
2652 unlock_user(name, optval_addr, lv);
2653 break;
2654 }
2655 case TARGET_SO_LINGER:
2656 {
2657 struct linger lg;
2658 socklen_t lglen;
2659 struct target_linger *tlg;
2660
2661 if (get_user_u32(len, optlen)) {
2662 return -TARGET_EFAULT;
2663 }
2664 if (len < 0) {
2665 return -TARGET_EINVAL;
2666 }
2667
2668 lglen = sizeof(lg);
2669 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2670 &lg, &lglen));
2671 if (ret < 0) {
2672 return ret;
2673 }
2674 if (len > lglen) {
2675 len = lglen;
2676 }
2677 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2678 return -TARGET_EFAULT;
2679 }
2680 __put_user(lg.l_onoff, &tlg->l_onoff);
2681 __put_user(lg.l_linger, &tlg->l_linger);
2682 unlock_user_struct(tlg, optval_addr, 1);
2683 if (put_user_u32(len, optlen)) {
2684 return -TARGET_EFAULT;
2685 }
2686 break;
2687 }
2688 /* Options with 'int' argument. */
2689 case TARGET_SO_DEBUG:
2690 optname = SO_DEBUG;
2691 goto int_case;
2692 case TARGET_SO_REUSEADDR:
2693 optname = SO_REUSEADDR;
2694 goto int_case;
2695 #ifdef SO_REUSEPORT
2696 case TARGET_SO_REUSEPORT:
2697 optname = SO_REUSEPORT;
2698 goto int_case;
2699 #endif
2700 case TARGET_SO_TYPE:
2701 optname = SO_TYPE;
2702 goto int_case;
2703 case TARGET_SO_ERROR:
2704 optname = SO_ERROR;
2705 goto int_case;
2706 case TARGET_SO_DONTROUTE:
2707 optname = SO_DONTROUTE;
2708 goto int_case;
2709 case TARGET_SO_BROADCAST:
2710 optname = SO_BROADCAST;
2711 goto int_case;
2712 case TARGET_SO_SNDBUF:
2713 optname = SO_SNDBUF;
2714 goto int_case;
2715 case TARGET_SO_RCVBUF:
2716 optname = SO_RCVBUF;
2717 goto int_case;
2718 case TARGET_SO_KEEPALIVE:
2719 optname = SO_KEEPALIVE;
2720 goto int_case;
2721 case TARGET_SO_OOBINLINE:
2722 optname = SO_OOBINLINE;
2723 goto int_case;
2724 case TARGET_SO_NO_CHECK:
2725 optname = SO_NO_CHECK;
2726 goto int_case;
2727 case TARGET_SO_PRIORITY:
2728 optname = SO_PRIORITY;
2729 goto int_case;
2730 #ifdef SO_BSDCOMPAT
2731 case TARGET_SO_BSDCOMPAT:
2732 optname = SO_BSDCOMPAT;
2733 goto int_case;
2734 #endif
2735 case TARGET_SO_PASSCRED:
2736 optname = SO_PASSCRED;
2737 goto int_case;
2738 case TARGET_SO_TIMESTAMP:
2739 optname = SO_TIMESTAMP;
2740 goto int_case;
2741 case TARGET_SO_RCVLOWAT:
2742 optname = SO_RCVLOWAT;
2743 goto int_case;
2744 case TARGET_SO_ACCEPTCONN:
2745 optname = SO_ACCEPTCONN;
2746 goto int_case;
2747 case TARGET_SO_PROTOCOL:
2748 optname = SO_PROTOCOL;
2749 goto int_case;
2750 case TARGET_SO_DOMAIN:
2751 optname = SO_DOMAIN;
2752 goto int_case;
2753 default:
2754 goto int_case;
2755 }
2756 break;
2757 case SOL_TCP:
2758 case SOL_UDP:
2759 /* TCP and UDP options all take an 'int' value. */
2760 int_case:
2761 if (get_user_u32(len, optlen))
2762 return -TARGET_EFAULT;
2763 if (len < 0)
2764 return -TARGET_EINVAL;
2765 lv = sizeof(lv);
2766 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2767 if (ret < 0)
2768 return ret;
2769 switch (optname) {
2770 case SO_TYPE:
2771 val = host_to_target_sock_type(val);
2772 break;
2773 case SO_ERROR:
2774 val = host_to_target_errno(val);
2775 break;
2776 }
2777 if (len > lv)
2778 len = lv;
2779 if (len == 4) {
2780 if (put_user_u32(val, optval_addr))
2781 return -TARGET_EFAULT;
2782 } else {
2783 if (put_user_u8(val, optval_addr))
2784 return -TARGET_EFAULT;
2785 }
2786 if (put_user_u32(len, optlen))
2787 return -TARGET_EFAULT;
2788 break;
2789 case SOL_IP:
2790 switch(optname) {
2791 case IP_TOS:
2792 case IP_TTL:
2793 case IP_HDRINCL:
2794 case IP_ROUTER_ALERT:
2795 case IP_RECVOPTS:
2796 case IP_RETOPTS:
2797 case IP_PKTINFO:
2798 case IP_MTU_DISCOVER:
2799 case IP_RECVERR:
2800 case IP_RECVTOS:
2801 #ifdef IP_FREEBIND
2802 case IP_FREEBIND:
2803 #endif
2804 case IP_MULTICAST_TTL:
2805 case IP_MULTICAST_LOOP:
2806 if (get_user_u32(len, optlen))
2807 return -TARGET_EFAULT;
2808 if (len < 0)
2809 return -TARGET_EINVAL;
2810 lv = sizeof(lv);
2811 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2812 if (ret < 0)
2813 return ret;
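/* Mirror the kernel: if the guest asked for less than an int, these
 * IP options may be read back as a single byte when the value fits.
 */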
2814 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2815 len = 1;
2816 if (put_user_u32(len, optlen)
2817 || put_user_u8(val, optval_addr))
2818 return -TARGET_EFAULT;
2819 } else {
2820 if (len > sizeof(int))
2821 len = sizeof(int);
2822 if (put_user_u32(len, optlen)
2823 || put_user_u32(val, optval_addr))
2824 return -TARGET_EFAULT;
2825 }
2826 break;
2827 default:
2828 ret = -TARGET_ENOPROTOOPT;
2829 break;
2830 }
2831 break;
2832 case SOL_IPV6:
2833 switch (optname) {
2834 case IPV6_MTU_DISCOVER:
2835 case IPV6_MTU:
2836 case IPV6_V6ONLY:
2837 case IPV6_RECVPKTINFO:
2838 case IPV6_UNICAST_HOPS:
2839 case IPV6_MULTICAST_HOPS:
2840 case IPV6_MULTICAST_LOOP:
2841 case IPV6_RECVERR:
2842 case IPV6_RECVHOPLIMIT:
2843 case IPV6_2292HOPLIMIT:
2844 case IPV6_CHECKSUM:
2845 case IPV6_ADDRFORM:
2846 case IPV6_2292PKTINFO:
2847 case IPV6_RECVTCLASS:
2848 case IPV6_RECVRTHDR:
2849 case IPV6_2292RTHDR:
2850 case IPV6_RECVHOPOPTS:
2851 case IPV6_2292HOPOPTS:
2852 case IPV6_RECVDSTOPTS:
2853 case IPV6_2292DSTOPTS:
2854 case IPV6_TCLASS:
2855 case IPV6_ADDR_PREFERENCES:
2856 #ifdef IPV6_RECVPATHMTU
2857 case IPV6_RECVPATHMTU:
2858 #endif
2859 #ifdef IPV6_TRANSPARENT
2860 case IPV6_TRANSPARENT:
2861 #endif
2862 #ifdef IPV6_FREEBIND
2863 case IPV6_FREEBIND:
2864 #endif
2865 #ifdef IPV6_RECVORIGDSTADDR
2866 case IPV6_RECVORIGDSTADDR:
2867 #endif
2868 if (get_user_u32(len, optlen))
2869 return -TARGET_EFAULT;
2870 if (len < 0)
2871 return -TARGET_EINVAL;
2872 lv = sizeof(lv);
2873 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2874 if (ret < 0)
2875 return ret;
2876 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2877 len = 1;
2878 if (put_user_u32(len, optlen)
2879 || put_user_u8(val, optval_addr))
2880 return -TARGET_EFAULT;
2881 } else {
2882 if (len > sizeof(int))
2883 len = sizeof(int);
2884 if (put_user_u32(len, optlen)
2885 || put_user_u32(val, optval_addr))
2886 return -TARGET_EFAULT;
2887 }
2888 break;
2889 default:
2890 ret = -TARGET_ENOPROTOOPT;
2891 break;
2892 }
2893 break;
2894 #ifdef SOL_NETLINK
2895 case SOL_NETLINK:
2896 switch (optname) {
2897 case NETLINK_PKTINFO:
2898 case NETLINK_BROADCAST_ERROR:
2899 case NETLINK_NO_ENOBUFS:
2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2901 case NETLINK_LISTEN_ALL_NSID:
2902 case NETLINK_CAP_ACK:
2903 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2904 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2905 case NETLINK_EXT_ACK:
2906 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2907 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2908 case NETLINK_GET_STRICT_CHK:
2909 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2910 if (get_user_u32(len, optlen)) {
2911 return -TARGET_EFAULT;
2912 }
2913 if (len != sizeof(val)) {
2914 return -TARGET_EINVAL;
2915 }
2916 lv = len;
2917 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2918 if (ret < 0) {
2919 return ret;
2920 }
2921 if (put_user_u32(lv, optlen)
2922 || put_user_u32(val, optval_addr)) {
2923 return -TARGET_EFAULT;
2924 }
2925 break;
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2927 case NETLINK_LIST_MEMBERSHIPS:
2928 {
2929 uint32_t *results;
2930 int i;
2931 if (get_user_u32(len, optlen)) {
2932 return -TARGET_EFAULT;
2933 }
2934 if (len < 0) {
2935 return -TARGET_EINVAL;
2936 }
2937 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2938 if (!results && len > 0) {
2939 return -TARGET_EFAULT;
2940 }
2941 lv = len;
2942 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2943 if (ret < 0) {
2944 unlock_user(results, optval_addr, 0);
2945 return ret;
2946 }
2947 /* swap host endianness to target endianness. */
2948 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2949 results[i] = tswap32(results[i]);
2950 }
2951 if (put_user_u32(lv, optlen)) {
2952 return -TARGET_EFAULT;
2953 }
2954 unlock_user(results, optval_addr, 0);
2955 break;
2956 }
2957 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2958 default:
2959 goto unimplemented;
2960 }
2961 break;
2962 #endif /* SOL_NETLINK */
2963 default:
2964 unimplemented:
2965 qemu_log_mask(LOG_UNIMP,
2966 "getsockopt level=%d optname=%d not yet supported\n",
2967 level, optname);
2968 ret = -TARGET_EOPNOTSUPP;
2969 break;
2970 }
2971 return ret;
2972 }
2973
2974 /* Convert target low/high pair representing file offset into the host
2975 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2976 * as the kernel doesn't handle them either.
2977 */
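/* The half-width shifts below are applied twice because a single shift
 * by the full width of the type (e.g. by 64 when TARGET_LONG_BITS is
 * 64) would be undefined behaviour in C. Illustrative example: a
 * 32-bit guest passing tlow = 0x89abcdef, thigh = 0x01234567 yields
 * off = 0x0123456789abcdef; a 64-bit host then gets the whole value in
 * *hlow and 0 in *hhigh.
 */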
2978 static void target_to_host_low_high(abi_ulong tlow,
2979 abi_ulong thigh,
2980 unsigned long *hlow,
2981 unsigned long *hhigh)
2982 {
2983 uint64_t off = tlow |
2984 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2985 TARGET_LONG_BITS / 2;
2986
2987 *hlow = off;
2988 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2989 }
2990
2991 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2992 abi_ulong count, int copy)
2993 {
2994 struct target_iovec *target_vec;
2995 struct iovec *vec;
2996 abi_ulong total_len, max_len;
2997 int i;
2998 int err = 0;
2999 bool bad_address = false;
3000
3001 if (count == 0) {
3002 errno = 0;
3003 return NULL;
3004 }
3005 if (count > IOV_MAX) {
3006 errno = EINVAL;
3007 return NULL;
3008 }
3009
3010 vec = g_try_new0(struct iovec, count);
3011 if (vec == NULL) {
3012 errno = ENOMEM;
3013 return NULL;
3014 }
3015
3016 target_vec = lock_user(VERIFY_READ, target_addr,
3017 count * sizeof(struct target_iovec), 1);
3018 if (target_vec == NULL) {
3019 err = EFAULT;
3020 goto fail2;
3021 }
3022
3023 /* ??? If host page size > target page size, this will result in a
3024 value larger than what we can actually support. */
3025 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3026 total_len = 0;
3027
3028 for (i = 0; i < count; i++) {
3029 abi_ulong base = tswapal(target_vec[i].iov_base);
3030 abi_long len = tswapal(target_vec[i].iov_len);
3031
3032 if (len < 0) {
3033 err = EINVAL;
3034 goto fail;
3035 } else if (len == 0) {
3036 /* Zero length pointer is ignored. */
3037 vec[i].iov_base = 0;
3038 } else {
3039 vec[i].iov_base = lock_user(type, base, len, copy);
3040 /* If the first buffer pointer is bad, this is a fault. But
3041 * subsequent bad buffers will result in a partial write; this
3042 * is realized by filling the vector with null pointers and
3043 * zero lengths. */
3044 if (!vec[i].iov_base) {
3045 if (i == 0) {
3046 err = EFAULT;
3047 goto fail;
3048 } else {
3049 bad_address = true;
3050 }
3051 }
3052 if (bad_address) {
3053 len = 0;
3054 }
3055 if (len > max_len - total_len) {
3056 len = max_len - total_len;
3057 }
3058 }
3059 vec[i].iov_len = len;
3060 total_len += len;
3061 }
3062
3063 unlock_user(target_vec, target_addr, 0);
3064 return vec;
3065
3066 fail:
3067 while (--i >= 0) {
3068 if (tswapal(target_vec[i].iov_len) > 0) {
3069 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3070 }
3071 }
3072 unlock_user(target_vec, target_addr, 0);
3073 fail2:
3074 g_free(vec);
3075 errno = err;
3076 return NULL;
3077 }
3078
3079 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3080 abi_ulong count, int copy)
3081 {
3082 struct target_iovec *target_vec;
3083 int i;
3084
3085 target_vec = lock_user(VERIFY_READ, target_addr,
3086 count * sizeof(struct target_iovec), 1);
3087 if (target_vec) {
3088 for (i = 0; i < count; i++) {
3089 abi_ulong base = tswapal(target_vec[i].iov_base);
3090 abi_long len = tswapal(target_vec[i].iov_len);
3091 if (len < 0) {
3092 break;
3093 }
3094 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3095 }
3096 unlock_user(target_vec, target_addr, 0);
3097 }
3098
3099 g_free(vec);
3100 }
3101
3102 static inline int target_to_host_sock_type(int *type)
3103 {
3104 int host_type = 0;
3105 int target_type = *type;
3106
3107 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3108 case TARGET_SOCK_DGRAM:
3109 host_type = SOCK_DGRAM;
3110 break;
3111 case TARGET_SOCK_STREAM:
3112 host_type = SOCK_STREAM;
3113 break;
3114 default:
3115 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3116 break;
3117 }
3118 if (target_type & TARGET_SOCK_CLOEXEC) {
3119 #if defined(SOCK_CLOEXEC)
3120 host_type |= SOCK_CLOEXEC;
3121 #else
3122 return -TARGET_EINVAL;
3123 #endif
3124 }
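/* If SOCK_NONBLOCK is unavailable but O_NONBLOCK exists, fall through
 * here; sock_flags_fixup() then emulates the flag with fcntl(F_SETFL)
 * once the socket has been created.
 */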
3125 if (target_type & TARGET_SOCK_NONBLOCK) {
3126 #if defined(SOCK_NONBLOCK)
3127 host_type |= SOCK_NONBLOCK;
3128 #elif !defined(O_NONBLOCK)
3129 return -TARGET_EINVAL;
3130 #endif
3131 }
3132 *type = host_type;
3133 return 0;
3134 }
3135
3136 /* Try to emulate socket type flags after socket creation. */
3137 static int sock_flags_fixup(int fd, int target_type)
3138 {
3139 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3140 if (target_type & TARGET_SOCK_NONBLOCK) {
3141 int flags = fcntl(fd, F_GETFL);
3142 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3143 close(fd);
3144 return -TARGET_EINVAL;
3145 }
3146 }
3147 #endif
3148 return fd;
3149 }
3150
3151 /* do_socket() Must return target values and target errnos. */
3152 static abi_long do_socket(int domain, int type, int protocol)
3153 {
3154 int target_type = type;
3155 int ret;
3156
3157 ret = target_to_host_sock_type(&type);
3158 if (ret) {
3159 return ret;
3160 }
3161
3162 if (domain == PF_NETLINK && !(
3163 #ifdef CONFIG_RTNETLINK
3164 protocol == NETLINK_ROUTE ||
3165 #endif
3166 protocol == NETLINK_KOBJECT_UEVENT ||
3167 protocol == NETLINK_AUDIT)) {
3168 return -TARGET_EPROTONOSUPPORT;
3169 }
3170
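/* For packet sockets the protocol is a 16-bit ethertype that the
 * guest already produced with its own htons(); tswap16() turns that
 * guest-endian value into the host's representation of the same
 * network-byte-order quantity.
 */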
3171 if (domain == AF_PACKET ||
3172 (domain == AF_INET && type == SOCK_PACKET)) {
3173 protocol = tswap16(protocol);
3174 }
3175
3176 ret = get_errno(socket(domain, type, protocol));
3177 if (ret >= 0) {
3178 ret = sock_flags_fixup(ret, target_type);
3179 if (type == SOCK_PACKET) {
3180 /* Handle an obsolete case:
3181 * if the socket type is SOCK_PACKET, bind by name
3182 */
3183 fd_trans_register(ret, &target_packet_trans);
3184 } else if (domain == PF_NETLINK) {
3185 switch (protocol) {
3186 #ifdef CONFIG_RTNETLINK
3187 case NETLINK_ROUTE:
3188 fd_trans_register(ret, &target_netlink_route_trans);
3189 break;
3190 #endif
3191 case NETLINK_KOBJECT_UEVENT:
3192 /* nothing to do: messages are strings */
3193 break;
3194 case NETLINK_AUDIT:
3195 fd_trans_register(ret, &target_netlink_audit_trans);
3196 break;
3197 default:
3198 g_assert_not_reached();
3199 }
3200 }
3201 }
3202 return ret;
3203 }
3204
3205 /* do_bind() Must return target values and target errnos. */
3206 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3207 socklen_t addrlen)
3208 {
3209 void *addr;
3210 abi_long ret;
3211
3212 if ((int)addrlen < 0) {
3213 return -TARGET_EINVAL;
3214 }
3215
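/* +1 because target_to_host_sockaddr() may grow an AF_UNIX address by
 * one byte to NUL-terminate sun_path.
 */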
3216 addr = alloca(addrlen+1);
3217
3218 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3219 if (ret)
3220 return ret;
3221
3222 return get_errno(bind(sockfd, addr, addrlen));
3223 }
3224
3225 /* do_connect() Must return target values and target errnos. */
3226 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3227 socklen_t addrlen)
3228 {
3229 void *addr;
3230 abi_long ret;
3231
3232 if ((int)addrlen < 0) {
3233 return -TARGET_EINVAL;
3234 }
3235
3236 addr = alloca(addrlen+1);
3237
3238 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3239 if (ret)
3240 return ret;
3241
3242 return get_errno(safe_connect(sockfd, addr, addrlen));
3243 }
3244
3245 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3246 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3247 int flags, int send)
3248 {
3249 abi_long ret, len;
3250 struct msghdr msg;
3251 abi_ulong count;
3252 struct iovec *vec;
3253 abi_ulong target_vec;
3254
3255 if (msgp->msg_name) {
3256 msg.msg_namelen = tswap32(msgp->msg_namelen);
3257 msg.msg_name = alloca(msg.msg_namelen+1);
3258 ret = target_to_host_sockaddr(fd, msg.msg_name,
3259 tswapal(msgp->msg_name),
3260 msg.msg_namelen);
3261 if (ret == -TARGET_EFAULT) {
3262 /* For connected sockets msg_name and msg_namelen must
3263 * be ignored, so returning EFAULT immediately is wrong.
3264 * Instead, pass a bad msg_name to the host kernel, and
3265 * let it decide whether to return EFAULT or not.
3266 */
3267 msg.msg_name = (void *)-1;
3268 } else if (ret) {
3269 goto out2;
3270 }
3271 } else {
3272 msg.msg_name = NULL;
3273 msg.msg_namelen = 0;
3274 }
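/* Allocate twice the guest's control buffer: host cmsgs can be larger
 * than their target counterparts (see the allocation-strategy note in
 * target_to_host_cmsg()).
 */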
3275 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3276 msg.msg_control = alloca(msg.msg_controllen);
3277 memset(msg.msg_control, 0, msg.msg_controllen);
3278
3279 msg.msg_flags = tswap32(msgp->msg_flags);
3280
3281 count = tswapal(msgp->msg_iovlen);
3282 target_vec = tswapal(msgp->msg_iov);
3283
3284 if (count > IOV_MAX) {
3285 /* sendmsg/recvmsg return a different errno for this condition than
3286 * readv/writev, so we must catch it here before lock_iovec() does.
3287 */
3288 ret = -TARGET_EMSGSIZE;
3289 goto out2;
3290 }
3291
3292 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3293 target_vec, count, send);
3294 if (vec == NULL) {
3295 ret = -host_to_target_errno(errno);
3296 /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3297 if (!send || ret) {
3298 goto out2;
3299 }
3300 }
3301 msg.msg_iovlen = count;
3302 msg.msg_iov = vec;
3303
3304 if (send) {
3305 if (fd_trans_target_to_host_data(fd)) {
3306 void *host_msg;
3307
3308 host_msg = g_malloc(msg.msg_iov->iov_len);
3309 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3310 ret = fd_trans_target_to_host_data(fd)(host_msg,
3311 msg.msg_iov->iov_len);
3312 if (ret >= 0) {
3313 msg.msg_iov->iov_base = host_msg;
3314 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3315 }
3316 g_free(host_msg);
3317 } else {
3318 ret = target_to_host_cmsg(&msg, msgp);
3319 if (ret == 0) {
3320 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3321 }
3322 }
3323 } else {
3324 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3325 if (!is_error(ret)) {
3326 len = ret;
3327 if (fd_trans_host_to_target_data(fd)) {
3328 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3329 MIN(msg.msg_iov->iov_len, len));
3330 }
3331 if (!is_error(ret)) {
3332 ret = host_to_target_cmsg(msgp, &msg);
3333 }
3334 if (!is_error(ret)) {
3335 msgp->msg_namelen = tswap32(msg.msg_namelen);
3336 msgp->msg_flags = tswap32(msg.msg_flags);
3337 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3338 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3339 msg.msg_name, msg.msg_namelen);
3340 if (ret) {
3341 goto out;
3342 }
3343 }
3344
3345 ret = len;
3346 }
3347 }
3348 }
3349
3350 out:
3351 if (vec) {
3352 unlock_iovec(vec, target_vec, count, !send);
3353 }
3354 out2:
3355 return ret;
3356 }
3357
3358 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3359 int flags, int send)
3360 {
3361 abi_long ret;
3362 struct target_msghdr *msgp;
3363
3364 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3365 msgp,
3366 target_msg,
3367 send ? 1 : 0)) {
3368 return -TARGET_EFAULT;
3369 }
3370 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3371 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3372 return ret;
3373 }
3374
3375 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3376 * so it might not have this *mmsg-specific flag either.
3377 */
3378 #ifndef MSG_WAITFORONE
3379 #define MSG_WAITFORONE 0x10000
3380 #endif
3381
3382 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3383 unsigned int vlen, unsigned int flags,
3384 int send)
3385 {
3386 struct target_mmsghdr *mmsgp;
3387 abi_long ret = 0;
3388 int i;
3389
3390 if (vlen > UIO_MAXIOV) {
3391 vlen = UIO_MAXIOV;
3392 }
3393
3394 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3395 if (!mmsgp) {
3396 return -TARGET_EFAULT;
3397 }
3398
3399 for (i = 0; i < vlen; i++) {
3400 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3401 if (is_error(ret)) {
3402 break;
3403 }
3404 mmsgp[i].msg_len = tswap32(ret);
3405 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3406 if (flags & MSG_WAITFORONE) {
3407 flags |= MSG_DONTWAIT;
3408 }
3409 }
3410
3411 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3412
3413 /* Return number of datagrams sent if we sent any at all;
3414 * otherwise return the error.
3415 */
3416 if (i) {
3417 return i;
3418 }
3419 return ret;
3420 }
3421
3422 /* do_accept4() Must return target values and target errnos. */
3423 static abi_long do_accept4(int fd, abi_ulong target_addr,
3424 abi_ulong target_addrlen_addr, int flags)
3425 {
3426 socklen_t addrlen, ret_addrlen;
3427 void *addr;
3428 abi_long ret;
3429 int host_flags;
3430
3431 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3432
3433 if (target_addr == 0) {
3434 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3435 }
3436
3437 /* Linux returns EFAULT if the addrlen pointer is invalid */
3438 if (get_user_u32(addrlen, target_addrlen_addr))
3439 return -TARGET_EFAULT;
3440
3441 if ((int)addrlen < 0) {
3442 return -TARGET_EINVAL;
3443 }
3444
3445 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3446 return -TARGET_EFAULT;
3447 }
3448
3449 addr = alloca(addrlen);
3450
3451 ret_addrlen = addrlen;
3452 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3453 if (!is_error(ret)) {
3454 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3455 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3456 ret = -TARGET_EFAULT;
3457 }
3458 }
3459 return ret;
3460 }
3461
3462 /* do_getpeername() Must return target values and target errnos. */
3463 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3464 abi_ulong target_addrlen_addr)
3465 {
3466 socklen_t addrlen, ret_addrlen;
3467 void *addr;
3468 abi_long ret;
3469
3470 if (get_user_u32(addrlen, target_addrlen_addr))
3471 return -TARGET_EFAULT;
3472
3473 if ((int)addrlen < 0) {
3474 return -TARGET_EINVAL;
3475 }
3476
3477 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3478 return -TARGET_EFAULT;
3479 }
3480
3481 addr = alloca(addrlen);
3482
3483 ret_addrlen = addrlen;
3484 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3485 if (!is_error(ret)) {
3486 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3487 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3488 ret = -TARGET_EFAULT;
3489 }
3490 }
3491 return ret;
3492 }
3493
3494 /* do_getsockname() Must return target values and target errnos. */
3495 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3496 abi_ulong target_addrlen_addr)
3497 {
3498 socklen_t addrlen, ret_addrlen;
3499 void *addr;
3500 abi_long ret;
3501
3502 if (get_user_u32(addrlen, target_addrlen_addr))
3503 return -TARGET_EFAULT;
3504
3505 if ((int)addrlen < 0) {
3506 return -TARGET_EINVAL;
3507 }
3508
3509 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3510 return -TARGET_EFAULT;
3511 }
3512
3513 addr = alloca(addrlen);
3514
3515 ret_addrlen = addrlen;
3516 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3517 if (!is_error(ret)) {
3518 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3519 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3520 ret = -TARGET_EFAULT;
3521 }
3522 }
3523 return ret;
3524 }
3525
3526 /* do_socketpair() Must return target values and target errnos. */
3527 static abi_long do_socketpair(int domain, int type, int protocol,
3528 abi_ulong target_tab_addr)
3529 {
3530 int tab[2];
3531 abi_long ret;
3532
3533 ret = target_to_host_sock_type(&type);
     if (ret) {
         return ret;
     }
3534
3535 ret = get_errno(socketpair(domain, type, protocol, tab));
3536 if (!is_error(ret)) {
3537 if (put_user_s32(tab[0], target_tab_addr)
3538 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3539 ret = -TARGET_EFAULT;
3540 }
3541 return ret;
3542 }
3543
3544 /* do_sendto() Must return target values and target errnos. */
3545 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3546 abi_ulong target_addr, socklen_t addrlen)
3547 {
3548 void *addr;
3549 void *host_msg;
3550 void *copy_msg = NULL;
3551 abi_long ret;
3552
3553 if ((int)addrlen < 0) {
3554 return -TARGET_EINVAL;
3555 }
3556
3557 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3558 if (!host_msg)
3559 return -TARGET_EFAULT;
3560 if (fd_trans_target_to_host_data(fd)) {
3561 copy_msg = host_msg;
3562 host_msg = g_malloc(len);
3563 memcpy(host_msg, copy_msg, len);
3564 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3565 if (ret < 0) {
3566 goto fail;
3567 }
3568 }
3569 if (target_addr) {
3570 addr = alloca(addrlen+1);
3571 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3572 if (ret) {
3573 goto fail;
3574 }
3575 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3576 } else {
3577 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3578 }
3579 fail:
3580 if (copy_msg) {
3581 g_free(host_msg);
3582 host_msg = copy_msg;
3583 }
3584 unlock_user(host_msg, msg, 0);
3585 return ret;
3586 }
3587
3588 /* do_recvfrom() Must return target values and target errnos. */
3589 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3590 abi_ulong target_addr,
3591 abi_ulong target_addrlen)
3592 {
3593 socklen_t addrlen, ret_addrlen;
3594 void *addr;
3595 void *host_msg;
3596 abi_long ret;
3597
3598 if (!msg) {
3599 host_msg = NULL;
3600 } else {
3601 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3602 if (!host_msg) {
3603 return -TARGET_EFAULT;
3604 }
3605 }
3606 if (target_addr) {
3607 if (get_user_u32(addrlen, target_addrlen)) {
3608 ret = -TARGET_EFAULT;
3609 goto fail;
3610 }
3611 if ((int)addrlen < 0) {
3612 ret = -TARGET_EINVAL;
3613 goto fail;
3614 }
3615 addr = alloca(addrlen);
3616 ret_addrlen = addrlen;
3617 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3618 addr, &ret_addrlen));
3619 } else {
3620 addr = NULL; /* To keep compiler quiet. */
3621 addrlen = 0; /* To keep compiler quiet. */
3622 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3623 }
3624 if (!is_error(ret)) {
3625 if (fd_trans_host_to_target_data(fd)) {
3626 abi_long trans;
3627 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3628 if (is_error(trans)) {
3629 ret = trans;
3630 goto fail;
3631 }
3632 }
3633 if (target_addr) {
3634 host_to_target_sockaddr(target_addr, addr,
3635 MIN(addrlen, ret_addrlen));
3636 if (put_user_u32(ret_addrlen, target_addrlen)) {
3637 ret = -TARGET_EFAULT;
3638 goto fail;
3639 }
3640 }
3641 unlock_user(host_msg, msg, len);
3642 } else {
3643 fail:
3644 unlock_user(host_msg, msg, 0);
3645 }
3646 return ret;
3647 }
3648
3649 #ifdef TARGET_NR_socketcall
3650 /* do_socketcall() must return target values and target errnos. */
3651 static abi_long do_socketcall(int num, abi_ulong vptr)
3652 {
3653 static const unsigned nargs[] = { /* number of arguments per operation */
3654 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3655 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3656 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3657 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3658 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3659 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3660 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3661 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3662 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3663 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3664 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3665 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3666 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3667 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3668 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3669 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3670 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3671 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3672 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3673 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3674 };
3675 abi_long a[6]; /* max 6 args */
3676 unsigned i;
3677
3678 /* check the range of the first argument num */
3679 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3680 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3681 return -TARGET_EINVAL;
3682 }
3683 /* ensure we have space for args */
3684 if (nargs[num] > ARRAY_SIZE(a)) {
3685 return -TARGET_EINVAL;
3686 }
3687 /* collect the arguments in a[] according to nargs[] */
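/* E.g. for TARGET_SYS_BIND, vptr points at three consecutive
 * abi_longs in guest memory: fd, addr and addrlen.
 */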
3688 for (i = 0; i < nargs[num]; ++i) {
3689 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3690 return -TARGET_EFAULT;
3691 }
3692 }
3693 /* now when we have the args, invoke the appropriate underlying function */
3694 switch (num) {
3695 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3696 return do_socket(a[0], a[1], a[2]);
3697 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3698 return do_bind(a[0], a[1], a[2]);
3699 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3700 return do_connect(a[0], a[1], a[2]);
3701 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3702 return get_errno(listen(a[0], a[1]));
3703 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3704 return do_accept4(a[0], a[1], a[2], 0);
3705 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3706 return do_getsockname(a[0], a[1], a[2]);
3707 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3708 return do_getpeername(a[0], a[1], a[2]);
3709 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3710 return do_socketpair(a[0], a[1], a[2], a[3]);
3711 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3712 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3713 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3714 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3715 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3716 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3717 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3718 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3719 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3720 return get_errno(shutdown(a[0], a[1]));
3721 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3722 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3723 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3724 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3725 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3726 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3727 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3728 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3729 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3730 return do_accept4(a[0], a[1], a[2], a[3]);
3731 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3732 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3733 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3734 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3735 default:
3736 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3737 return -TARGET_EINVAL;
3738 }
3739 }
3740 #endif
3741
3742 #define N_SHM_REGIONS 32
3743
3744 static struct shm_region {
3745 abi_ulong start;
3746 abi_ulong size;
3747 bool in_use;
3748 } shm_regions[N_SHM_REGIONS];
3749
3750 #ifndef TARGET_SEMID64_DS
3751 /* asm-generic version of this struct */
3752 struct target_semid64_ds
3753 {
3754 struct target_ipc_perm sem_perm;
3755 abi_ulong sem_otime;
3756 #if TARGET_ABI_BITS == 32
3757 abi_ulong __unused1;
3758 #endif
3759 abi_ulong sem_ctime;
3760 #if TARGET_ABI_BITS == 32
3761 abi_ulong __unused2;
3762 #endif
3763 abi_ulong sem_nsems;
3764 abi_ulong __unused3;
3765 abi_ulong __unused4;
3766 };
3767 #endif
3768
3769 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3770 abi_ulong target_addr)
3771 {
3772 struct target_ipc_perm *target_ip;
3773 struct target_semid64_ds *target_sd;
3774
3775 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3776 return -TARGET_EFAULT;
3777 target_ip = &(target_sd->sem_perm);
3778 host_ip->__key = tswap32(target_ip->__key);
3779 host_ip->uid = tswap32(target_ip->uid);
3780 host_ip->gid = tswap32(target_ip->gid);
3781 host_ip->cuid = tswap32(target_ip->cuid);
3782 host_ip->cgid = tswap32(target_ip->cgid);
3783 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3784 host_ip->mode = tswap32(target_ip->mode);
3785 #else
3786 host_ip->mode = tswap16(target_ip->mode);
3787 #endif
3788 #if defined(TARGET_PPC)
3789 host_ip->__seq = tswap32(target_ip->__seq);
3790 #else
3791 host_ip->__seq = tswap16(target_ip->__seq);
3792 #endif
3793 unlock_user_struct(target_sd, target_addr, 0);
3794 return 0;
3795 }
3796
3797 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3798 struct ipc_perm *host_ip)
3799 {
3800 struct target_ipc_perm *target_ip;
3801 struct target_semid64_ds *target_sd;
3802
3803 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3804 return -TARGET_EFAULT;
3805 target_ip = &(target_sd->sem_perm);
3806 target_ip->__key = tswap32(host_ip->__key);
3807 target_ip->uid = tswap32(host_ip->uid);
3808 target_ip->gid = tswap32(host_ip->gid);
3809 target_ip->cuid = tswap32(host_ip->cuid);
3810 target_ip->cgid = tswap32(host_ip->cgid);
3811 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3812 target_ip->mode = tswap32(host_ip->mode);
3813 #else
3814 target_ip->mode = tswap16(host_ip->mode);
3815 #endif
3816 #if defined(TARGET_PPC)
3817 target_ip->__seq = tswap32(host_ip->__seq);
3818 #else
3819 target_ip->__seq = tswap16(host_ip->__seq);
3820 #endif
3821 unlock_user_struct(target_sd, target_addr, 1);
3822 return 0;
3823 }
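/*
 * Sketch of the byte swapping used by the two converters above (the real
 * tswap*() definitions live in the common headers and compile to no-ops
 * when host and target endianness match).  In the cross-endian case,
 * tswap32() behaves like a plain 32-bit byte reversal:
 *
 *     static inline uint32_t example_bswap32(uint32_t x)
 *     {
 *         return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
 *                ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
 *     }
 *
 * (example_bswap32 is an illustrative name, not part of this file.)  The
 * mode and __seq fields need separate 16- and 32-bit variants because
 * Alpha, MIPS and PPC declare them as 32-bit quantities in their ABIs.
 */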
3824
3825 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3826 abi_ulong target_addr)
3827 {
3828 struct target_semid64_ds *target_sd;
3829
3830 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3831 return -TARGET_EFAULT;
3832 if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
3833 return -TARGET_EFAULT;
3834 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3835 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3836 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3837 unlock_user_struct(target_sd, target_addr, 0);
3838 return 0;
3839 }
3840
3841 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3842 struct semid_ds *host_sd)
3843 {
3844 struct target_semid64_ds *target_sd;
3845
3846 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3847 return -TARGET_EFAULT;
3848 if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
3849 return -TARGET_EFAULT;
3850 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3851 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3852 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3853 unlock_user_struct(target_sd, target_addr, 1);
3854 return 0;
3855 }
3856
3857 struct target_seminfo {
3858 int semmap;
3859 int semmni;
3860 int semmns;
3861 int semmnu;
3862 int semmsl;
3863 int semopm;
3864 int semume;
3865 int semusz;
3866 int semvmx;
3867 int semaem;
3868 };
3869
3870 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3871 struct seminfo *host_seminfo)
3872 {
3873 struct target_seminfo *target_seminfo;
3874 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3875 return -TARGET_EFAULT;
3876 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3877 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3878 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3879 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3880 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3881 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3882 __put_user(host_seminfo->semume, &target_seminfo->semume);
3883 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3884 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3885 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3886 unlock_user_struct(target_seminfo, target_addr, 1);
3887 return 0;
3888 }
3889
3890 union semun {
3891 int val;
3892 struct semid_ds *buf;
3893 unsigned short *array;
3894 struct seminfo *__buf;
3895 };
3896
3897 union target_semun {
3898 int val;
3899 abi_ulong buf;
3900 abi_ulong array;
3901 abi_ulong __buf;
3902 };
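/*
 * Note: every pointer member of the target union is stored as an abi_ulong
 * guest address, so a guest call such as the following (assuming the usual
 * glibc-style semun definition) reaches do_semctl() below as a single
 * abi_ulong that is reinterpreted according to the command:
 *
 *     union semun su = { .val = 1 };
 *     semctl(semid, 0, SETVAL, su);
 */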
3903
3904 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3905 abi_ulong target_addr)
3906 {
3907 int nsems;
3908 unsigned short *array;
3909 union semun semun;
3910 struct semid_ds semid_ds;
3911 int i, ret;
3912
3913 semun.buf = &semid_ds;
3914
3915 ret = semctl(semid, 0, IPC_STAT, semun);
3916 if (ret == -1)
3917 return get_errno(ret);
3918
3919 nsems = semid_ds.sem_nsems;
3920
3921 *host_array = g_try_new(unsigned short, nsems);
3922 if (!*host_array) {
3923 return -TARGET_ENOMEM;
3924 }
3925 array = lock_user(VERIFY_READ, target_addr,
3926 nsems*sizeof(unsigned short), 1);
3927 if (!array) {
3928 g_free(*host_array);
3929 return -TARGET_EFAULT;
3930 }
3931
3932 for(i=0; i<nsems; i++) {
3933 __get_user((*host_array)[i], &array[i]);
3934 }
3935 unlock_user(array, target_addr, 0);
3936
3937 return 0;
3938 }
3939
3940 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3941 unsigned short **host_array)
3942 {
3943 int nsems;
3944 unsigned short *array;
3945 union semun semun;
3946 struct semid_ds semid_ds;
3947 int i, ret;
3948
3949 semun.buf = &semid_ds;
3950
3951 ret = semctl(semid, 0, IPC_STAT, semun);
3952 if (ret == -1)
3953 return get_errno(ret);
3954
3955 nsems = semid_ds.sem_nsems;
3956
3957 array = lock_user(VERIFY_WRITE, target_addr,
3958 nsems*sizeof(unsigned short), 0);
3959 if (!array)
3960 return -TARGET_EFAULT;
3961
3962 for(i=0; i<nsems; i++) {
3963 __put_user((*host_array)[i], &array[i]);
3964 }
3965 g_free(*host_array);
3966 unlock_user(array, target_addr, 1);
3967
3968 return 0;
3969 }
3970
3971 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3972 abi_ulong target_arg)
3973 {
3974 union target_semun target_su = { .buf = target_arg };
3975 union semun arg;
3976 struct semid_ds dsarg;
3977 unsigned short *array = NULL;
3978 struct seminfo seminfo;
3979 abi_long ret = -TARGET_EINVAL;
3980 abi_long err;
3981 cmd &= 0xff;
3982
3983 switch( cmd ) {
3984 case GETVAL:
3985 case SETVAL:
3986 /* In 64-bit cross-endian situations, we will erroneously pick up
3987 * the wrong half of the union for the "val" element. To rectify
3988 * this, the entire 8-byte structure is byteswapped, followed by
3989 * a swap of the 4 byte val field. In other cases, the data is
3990 * already in proper host byte order. */
3991 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3992 target_su.buf = tswapal(target_su.buf);
3993 arg.val = tswap32(target_su.val);
3994 } else {
3995 arg.val = target_su.val;
3996 }
3997 ret = get_errno(semctl(semid, semnum, cmd, arg));
3998 break;
3999 case GETALL:
4000 case SETALL:
4001 err = target_to_host_semarray(semid, &array, target_su.array);
4002 if (err)
4003 return err;
4004 arg.array = array;
4005 ret = get_errno(semctl(semid, semnum, cmd, arg));
4006 err = host_to_target_semarray(semid, target_su.array, &array);
4007 if (err)
4008 return err;
4009 break;
4010 case IPC_STAT:
4011 case IPC_SET:
4012 case SEM_STAT:
4013 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4014 if (err)
4015 return err;
4016 arg.buf = &dsarg;
4017 ret = get_errno(semctl(semid, semnum, cmd, arg));
4018 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4019 if (err)
4020 return err;
4021 break;
4022 case IPC_INFO:
4023 case SEM_INFO:
4024 arg.__buf = &seminfo;
4025 ret = get_errno(semctl(semid, semnum, cmd, arg));
4026 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4027 if (err)
4028 return err;
4029 break;
4030 case IPC_RMID:
4031 case GETPID:
4032 case GETNCNT:
4033 case GETZCNT:
4034 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4035 break;
4036 }
4037
4038 return ret;
4039 }
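/*
 * Note on the "cmd &= 0xff" above: 32-bit guests OR flag bits such as
 * IPC_64 (0x100) into the command to request the 64-bit structure layout;
 * since the emulation always converts via the semid64_ds layout, only the
 * base command byte is relevant here.
 */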
4040
4041 struct target_sembuf {
4042 unsigned short sem_num;
4043 short sem_op;
4044 short sem_flg;
4045 };
4046
4047 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4048 abi_ulong target_addr,
4049 unsigned nsops)
4050 {
4051 struct target_sembuf *target_sembuf;
4052 int i;
4053
4054 target_sembuf = lock_user(VERIFY_READ, target_addr,
4055 nsops*sizeof(struct target_sembuf), 1);
4056 if (!target_sembuf)
4057 return -TARGET_EFAULT;
4058
4059 for(i=0; i<nsops; i++) {
4060 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4061 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4062 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4063 }
4064
4065 unlock_user(target_sembuf, target_addr, 0);
4066
4067 return 0;
4068 }
4069
4070 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4071 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4072
4073 /*
4074 * This macro is required to handle the s390 variants, which pass the
4075 * arguments in a different order than the default.
4076 */
4077 #ifdef __s390x__
4078 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4079 (__nsops), (__timeout), (__sops)
4080 #else
4081 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4082 (__nsops), 0, (__sops), (__timeout)
4083 #endif
4084
4085 static inline abi_long do_semtimedop(int semid,
4086 abi_long ptr,
4087 unsigned nsops,
4088 abi_long timeout, bool time64)
4089 {
4090 struct sembuf *sops;
4091 struct timespec ts, *pts = NULL;
4092 abi_long ret;
4093
4094 if (timeout) {
4095 pts = &ts;
4096 if (time64) {
4097 if (target_to_host_timespec64(pts, timeout)) {
4098 return -TARGET_EFAULT;
4099 }
4100 } else {
4101 if (target_to_host_timespec(pts, timeout)) {
4102 return -TARGET_EFAULT;
4103 }
4104 }
4105 }
4106
4107 if (nsops > TARGET_SEMOPM) {
4108 return -TARGET_E2BIG;
4109 }
4110
4111 sops = g_new(struct sembuf, nsops);
4112
4113 if (target_to_host_sembuf(sops, ptr, nsops)) {
4114 g_free(sops);
4115 return -TARGET_EFAULT;
4116 }
4117
4118 ret = -TARGET_ENOSYS;
4119 #ifdef __NR_semtimedop
4120 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4121 #endif
4122 #ifdef __NR_ipc
4123 if (ret == -TARGET_ENOSYS) {
4124 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4125 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4126 }
4127 #endif
4128 g_free(sops);
4129 return ret;
4130 }
4131 #endif
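/*
 * Guest-side sketch of a blocking semop() that do_semtimedop() services
 * with a NULL timeout:
 *
 *     struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *     semop(semid, &op, 1);   // "P" operation on semaphore 0, may block
 *
 * The nsops > TARGET_SEMOPM check mirrors the kernel's SEMOPM limit and
 * fails with E2BIG before any sembuf is copied from guest memory.
 */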
4132
4133 struct target_msqid_ds
4134 {
4135 struct target_ipc_perm msg_perm;
4136 abi_ulong msg_stime;
4137 #if TARGET_ABI_BITS == 32
4138 abi_ulong __unused1;
4139 #endif
4140 abi_ulong msg_rtime;
4141 #if TARGET_ABI_BITS == 32
4142 abi_ulong __unused2;
4143 #endif
4144 abi_ulong msg_ctime;
4145 #if TARGET_ABI_BITS == 32
4146 abi_ulong __unused3;
4147 #endif
4148 abi_ulong __msg_cbytes;
4149 abi_ulong msg_qnum;
4150 abi_ulong msg_qbytes;
4151 abi_ulong msg_lspid;
4152 abi_ulong msg_lrpid;
4153 abi_ulong __unused4;
4154 abi_ulong __unused5;
4155 };
4156
4157 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4158 abi_ulong target_addr)
4159 {
4160 struct target_msqid_ds *target_md;
4161
4162 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4163 return -TARGET_EFAULT;
4164 if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
4165 return -TARGET_EFAULT;
4166 host_md->msg_stime = tswapal(target_md->msg_stime);
4167 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4168 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4169 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4170 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4171 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4172 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4173 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4174 unlock_user_struct(target_md, target_addr, 0);
4175 return 0;
4176 }
4177
4178 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4179 struct msqid_ds *host_md)
4180 {
4181 struct target_msqid_ds *target_md;
4182
4183 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4184 return -TARGET_EFAULT;
4185 if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
4186 return -TARGET_EFAULT;
4187 target_md->msg_stime = tswapal(host_md->msg_stime);
4188 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4189 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4190 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4191 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4192 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4193 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4194 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4195 unlock_user_struct(target_md, target_addr, 1);
4196 return 0;
4197 }
4198
4199 struct target_msginfo {
4200 int msgpool;
4201 int msgmap;
4202 int msgmax;
4203 int msgmnb;
4204 int msgmni;
4205 int msgssz;
4206 int msgtql;
4207 unsigned short int msgseg;
4208 };
4209
4210 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4211 struct msginfo *host_msginfo)
4212 {
4213 struct target_msginfo *target_msginfo;
4214 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4215 return -TARGET_EFAULT;
4216 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4217 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4218 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4219 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4220 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4221 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4222 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4223 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4224 unlock_user_struct(target_msginfo, target_addr, 1);
4225 return 0;
4226 }
4227
4228 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4229 {
4230 struct msqid_ds dsarg;
4231 struct msginfo msginfo;
4232 abi_long ret = -TARGET_EINVAL;
4233
4234 cmd &= 0xff;
4235
4236 switch (cmd) {
4237 case IPC_STAT:
4238 case IPC_SET:
4239 case MSG_STAT:
4240 if (target_to_host_msqid_ds(&dsarg, ptr))
4241 return -TARGET_EFAULT;
4242 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4243 if (host_to_target_msqid_ds(ptr, &dsarg))
4244 return -TARGET_EFAULT;
4245 break;
4246 case IPC_RMID:
4247 ret = get_errno(msgctl(msgid, cmd, NULL));
4248 break;
4249 case IPC_INFO:
4250 case MSG_INFO:
4251 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4252 if (host_to_target_msginfo(ptr, &msginfo))
4253 return -TARGET_EFAULT;
4254 break;
4255 }
4256
4257 return ret;
4258 }
4259
4260 struct target_msgbuf {
4261 abi_long mtype;
4262 char mtext[1];
4263 };
4264
4265 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4266 ssize_t msgsz, int msgflg)
4267 {
4268 struct target_msgbuf *target_mb;
4269 struct msgbuf *host_mb;
4270 abi_long ret = 0;
4271
4272 if (msgsz < 0) {
4273 return -TARGET_EINVAL;
4274 }
4275
4276 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4277 return -TARGET_EFAULT;
4278 host_mb = g_try_malloc(msgsz + sizeof(long));
4279 if (!host_mb) {
4280 unlock_user_struct(target_mb, msgp, 0);
4281 return -TARGET_ENOMEM;
4282 }
4283 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4284 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4285 ret = -TARGET_ENOSYS;
4286 #ifdef __NR_msgsnd
4287 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4288 #endif
4289 #ifdef __NR_ipc
4290 if (ret == -TARGET_ENOSYS) {
4291 #ifdef __s390x__
4292 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4293 host_mb));
4294 #else
4295 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4296 host_mb, 0));
4297 #endif
4298 }
4299 #endif
4300 g_free(host_mb);
4301 unlock_user_struct(target_mb, msgp, 0);
4302
4303 return ret;
4304 }
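/*
 * Guest-side sketch: target_msgbuf mirrors the classic SysV message buffer,
 * so a guest doing
 *
 *     struct { long mtype; char mtext[16]; } msg = { 1, "hello" };
 *     msgsnd(q, &msg, sizeof(msg.mtext), 0);
 *
 * arrives in do_msgsnd() with msgp pointing at that structure; only mtype
 * needs byte swapping, the mtext payload is copied verbatim.
 */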
4305
4306 #ifdef __NR_ipc
4307 #if defined(__sparc__)
4308 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
4309 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4310 #elif defined(__s390x__)
4311 /* The s390 sys_ipc variant has only five parameters. */
4312 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4313 ((long int[]){(long int)__msgp, __msgtyp})
4314 #else
4315 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4316 ((long int[]){(long int)__msgp, __msgtyp}), 0
4317 #endif
4318 #endif
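/*
 * Background: the two-element compound-literal array built by the generic
 * MSGRCV_ARGS variant has the same layout as the kernel's historical
 *
 *     struct ipc_kludge { struct msgbuf *msgp; long msgtyp; };
 *
 * which is how sys_ipc() receives the extra msgrcv arguments when they do
 * not fit into its register-sized parameters; SPARC instead passes the two
 * values directly.
 */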
4319
4320 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4321 ssize_t msgsz, abi_long msgtyp,
4322 int msgflg)
4323 {
4324 struct target_msgbuf *target_mb;
4325 char *target_mtext;
4326 struct msgbuf *host_mb;
4327 abi_long ret = 0;
4328
4329 if (msgsz < 0) {
4330 return -TARGET_EINVAL;
4331 }
4332
4333 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4334 return -TARGET_EFAULT;
4335
4336 host_mb = g_try_malloc(msgsz + sizeof(long));
4337 if (!host_mb) {
4338 ret = -TARGET_ENOMEM;
4339 goto end;
4340 }
4341 ret = -TARGET_ENOSYS;
4342 #ifdef __NR_msgrcv
4343 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4344 #endif
4345 #ifdef __NR_ipc
4346 if (ret == -TARGET_ENOSYS) {
4347 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4348 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4349 }
4350 #endif
4351
4352 if (ret > 0) {
4353 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4354 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4355 if (!target_mtext) {
4356 ret = -TARGET_EFAULT;
4357 goto end;
4358 }
4359 memcpy(target_mb->mtext, host_mb->mtext, ret);
4360 unlock_user(target_mtext, target_mtext_addr, ret);
4361 }
4362
4363 target_mb->mtype = tswapal(host_mb->mtype);
4364
4365 end:
4366 if (target_mb)
4367 unlock_user_struct(target_mb, msgp, 1);
4368 g_free(host_mb);
4369 return ret;
4370 }
4371
4372 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4373 abi_ulong target_addr)
4374 {
4375 struct target_shmid_ds *target_sd;
4376
4377 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4378 return -TARGET_EFAULT;
4379 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4380 return -TARGET_EFAULT;
4381 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4382 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4383 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4384 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4385 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4386 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4387 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4388 unlock_user_struct(target_sd, target_addr, 0);
4389 return 0;
4390 }
4391
4392 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4393 struct shmid_ds *host_sd)
4394 {
4395 struct target_shmid_ds *target_sd;
4396
4397 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4398 return -TARGET_EFAULT;
4399 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4400 return -TARGET_EFAULT;
4401 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4402 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4403 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4404 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4405 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4406 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4407 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4408 unlock_user_struct(target_sd, target_addr, 1);
4409 return 0;
4410 }
4411
4412 struct target_shminfo {
4413 abi_ulong shmmax;
4414 abi_ulong shmmin;
4415 abi_ulong shmmni;
4416 abi_ulong shmseg;
4417 abi_ulong shmall;
4418 };
4419
4420 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4421 struct shminfo *host_shminfo)
4422 {
4423 struct target_shminfo *target_shminfo;
4424 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4425 return -TARGET_EFAULT;
4426 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4427 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4428 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4429 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4430 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4431 unlock_user_struct(target_shminfo, target_addr, 1);
4432 return 0;
4433 }
4434
4435 struct target_shm_info {
4436 int used_ids;
4437 abi_ulong shm_tot;
4438 abi_ulong shm_rss;
4439 abi_ulong shm_swp;
4440 abi_ulong swap_attempts;
4441 abi_ulong swap_successes;
4442 };
4443
4444 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4445 struct shm_info *host_shm_info)
4446 {
4447 struct target_shm_info *target_shm_info;
4448 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4449 return -TARGET_EFAULT;
4450 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4451 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4452 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4453 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4454 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4455 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4456 unlock_user_struct(target_shm_info, target_addr, 1);
4457 return 0;
4458 }
4459
4460 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4461 {
4462 struct shmid_ds dsarg;
4463 struct shminfo shminfo;
4464 struct shm_info shm_info;
4465 abi_long ret = -TARGET_EINVAL;
4466
4467 cmd &= 0xff;
4468
4469 switch(cmd) {
4470 case IPC_STAT:
4471 case IPC_SET:
4472 case SHM_STAT:
4473 if (target_to_host_shmid_ds(&dsarg, buf))
4474 return -TARGET_EFAULT;
4475 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4476 if (host_to_target_shmid_ds(buf, &dsarg))
4477 return -TARGET_EFAULT;
4478 break;
4479 case IPC_INFO:
4480 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4481 if (host_to_target_shminfo(buf, &shminfo))
4482 return -TARGET_EFAULT;
4483 break;
4484 case SHM_INFO:
4485 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4486 if (host_to_target_shm_info(buf, &shm_info))
4487 return -TARGET_EFAULT;
4488 break;
4489 case IPC_RMID:
4490 case SHM_LOCK:
4491 case SHM_UNLOCK:
4492 ret = get_errno(shmctl(shmid, cmd, NULL));
4493 break;
4494 }
4495
4496 return ret;
4497 }
4498
4499 #ifndef TARGET_FORCE_SHMLBA
4500 /* For most architectures, SHMLBA is the same as the page size;
4501 * some architectures have larger values, in which case they should
4502 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4503 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4504 * and defining its own value for SHMLBA.
4505 *
4506 * The kernel also permits SHMLBA to be set by the architecture to a
4507 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4508 * this means that addresses are rounded to the large size if
4509 * SHM_RND is set but addresses not aligned to that size are not rejected
4510 * as long as they are at least page-aligned. Since the only architecture
4511 * which uses this is ia64, this code doesn't provide for that oddity.
4512 */
4513 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4514 {
4515 return TARGET_PAGE_SIZE;
4516 }
4517 #endif
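/*
 * Worked example for the alignment handling in do_shmat() below: if
 * target_shmlba() returned 0x4000 (a hypothetical 16 KiB SHMLBA), an
 * attach address of 0x12345 fails the (shmaddr & (shmlba - 1)) test and
 * yields EINVAL; with SHM_RND set it is instead rounded down to
 * 0x12345 & ~0x3fff == 0x10000.
 */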
4518
4519 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4520 int shmid, abi_ulong shmaddr, int shmflg)
4521 {
4522 CPUState *cpu = env_cpu(cpu_env);
4523 abi_long raddr;
4524 void *host_raddr;
4525 struct shmid_ds shm_info;
4526 int i, ret;
4527 abi_ulong shmlba;
4528
4529 /* shmat pointers are always untagged */
4530
4531 /* find out the length of the shared memory segment */
4532 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4533 if (is_error(ret)) {
4534 /* can't get length, bail out */
4535 return ret;
4536 }
4537
4538 shmlba = target_shmlba(cpu_env);
4539
4540 if (shmaddr & (shmlba - 1)) {
4541 if (shmflg & SHM_RND) {
4542 shmaddr &= ~(shmlba - 1);
4543 } else {
4544 return -TARGET_EINVAL;
4545 }
4546 }
4547 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4548 return -TARGET_EINVAL;
4549 }
4550
4551 mmap_lock();
4552
4553 /*
4554 * We're mapping shared memory, so ensure we generate code for parallel
4555 * execution and flush old translations. This will work up to the level
4556 * supported by the host -- anything that requires EXCP_ATOMIC will not
4557 * be atomic with respect to an external process.
4558 */
4559 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4560 cpu->tcg_cflags |= CF_PARALLEL;
4561 tb_flush(cpu);
4562 }
4563
4564 if (shmaddr) {
4565 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4566 } else {
4567 abi_ulong mmap_start;
4568
4569 /* In order to use the host shmat, we need to honor host SHMLBA. */
4570 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4571
4572 if (mmap_start == -1) {
4573 errno = ENOMEM;
4574 host_raddr = (void *)-1;
4575 } else {
4576 host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4577 shmflg | SHM_REMAP);
4578 }
}
4579
4580 if (host_raddr == (void *)-1) {
4581 mmap_unlock();
4582 return get_errno((long)host_raddr);
4583 }
4584 raddr = h2g((unsigned long)host_raddr);
4585
4586 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4587 PAGE_VALID | PAGE_RESET | PAGE_READ |
4588 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4589
4590 for (i = 0; i < N_SHM_REGIONS; i++) {
4591 if (!shm_regions[i].in_use) {
4592 shm_regions[i].in_use = true;
4593 shm_regions[i].start = raddr;
4594 shm_regions[i].size = shm_info.shm_segsz;
4595 break;
4596 }
4597 }
4598
4599 mmap_unlock();
4600 return raddr;
4601
4602 }
4603
4604 static inline abi_long do_shmdt(abi_ulong shmaddr)
4605 {
4606 int i;
4607 abi_long rv;
4608
4609 /* shmdt pointers are always untagged */
4610
4611 mmap_lock();
4612
4613 for (i = 0; i < N_SHM_REGIONS; ++i) {
4614 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4615 shm_regions[i].in_use = false;
4616 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4617 break;
4618 }
4619 }
4620 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4621
4622 mmap_unlock();
4623
4624 return rv;
4625 }
4626
4627 #ifdef TARGET_NR_ipc
4628 /* ??? This only works with linear mappings. */
4629 /* do_ipc() must return target values and target errnos. */
4630 static abi_long do_ipc(CPUArchState *cpu_env,
4631 unsigned int call, abi_long first,
4632 abi_long second, abi_long third,
4633 abi_long ptr, abi_long fifth)
4634 {
4635 int version;
4636 abi_long ret = 0;
4637
4638 version = call >> 16;
4639 call &= 0xffff;
4640
4641 switch (call) {
4642 case IPCOP_semop:
4643 ret = do_semtimedop(first, ptr, second, 0, false);
4644 break;
4645 case IPCOP_semtimedop:
4646 /*
4647 * The s390 sys_ipc variant has only five parameters instead of six
4648 * (as in the default variant); the only difference is the handling of
4649 * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4650 * to a struct timespec, whereas the generic variant uses the fifth one.
4651 */
4652 #if defined(TARGET_S390X)
4653 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4654 #else
4655 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4656 #endif
4657 break;
4658
4659 case IPCOP_semget:
4660 ret = get_errno(semget(first, second, third));
4661 break;
4662
4663 case IPCOP_semctl: {
4664 /* The semun argument to semctl is passed by value, so dereference the
4665 * ptr argument. */
4666 abi_ulong atptr;
4667 get_user_ual(atptr, ptr);
4668 ret = do_semctl(first, second, third, atptr);
4669 break;
4670 }
4671
4672 case IPCOP_msgget:
4673 ret = get_errno(msgget(first, second));
4674 break;
4675
4676 case IPCOP_msgsnd:
4677 ret = do_msgsnd(first, ptr, second, third);
4678 break;
4679
4680 case IPCOP_msgctl:
4681 ret = do_msgctl(first, second, ptr);
4682 break;
4683
4684 case IPCOP_msgrcv:
4685 switch (version) {
4686 case 0:
4687 {
4688 struct target_ipc_kludge {
4689 abi_long msgp;
4690 abi_long msgtyp;
4691 } *tmp;
4692
4693 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4694 ret = -TARGET_EFAULT;
4695 break;
4696 }
4697
4698 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4699
4700 unlock_user_struct(tmp, ptr, 0);
4701 break;
4702 }
4703 default:
4704 ret = do_msgrcv(first, ptr, second, fifth, third);
4705 }
4706 break;
4707
4708 case IPCOP_shmat:
4709 switch (version) {
4710 default:
4711 {
4712 abi_ulong raddr;
4713 raddr = do_shmat(cpu_env, first, ptr, second);
4714 if (is_error(raddr))
4715 return get_errno(raddr);
4716 if (put_user_ual(raddr, third))
4717 return -TARGET_EFAULT;
4718 break;
4719 }
4720 case 1:
4721 ret = -TARGET_EINVAL;
4722 break;
4723 }
4724 break;
4725 case IPCOP_shmdt:
4726 ret = do_shmdt(ptr);
4727 break;
4728
4729 case IPCOP_shmget:
4730 /* IPC_* flag values are the same on all Linux platforms */
4731 ret = get_errno(shmget(first, second, third));
4732 break;
4733
4734 /* IPC_* and SHM_* command values are the same on all Linux platforms */
4735 case IPCOP_shmctl:
4736 ret = do_shmctl(first, second, ptr);
4737 break;
4738 default:
4739 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4740 call, version);
4741 ret = -TARGET_ENOSYS;
4742 break;
4743 }
4744 return ret;
4745 }
4746 #endif
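/*
 * Illustration of the multiplexing handled by do_ipc() above: the ipc(2)
 * "call" argument carries a calling-convention version in its upper 16
 * bits.  An old-style guest msgrcv, for example
 *
 *     struct { long msgp; long msgtyp; } kludge = { (long)&msg, type };
 *     syscall(__NR_ipc, MSGRCV, id, len, flags, &kludge);   // version 0
 *
 * arrives with version == 0 and takes the target_ipc_kludge unpacking
 * path, while version 1 callers pass msgp and msgtyp separately.
 * (Guest-side sketch only.)
 */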
4747
4748 /* kernel structure types definitions */
4749
4750 #define STRUCT(name, ...) STRUCT_ ## name,
4751 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4752 enum {
4753 #include "syscall_types.h"
4754 STRUCT_MAX
4755 };
4756 #undef STRUCT
4757 #undef STRUCT_SPECIAL
4758
4759 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4760 #define STRUCT_SPECIAL(name)
4761 #include "syscall_types.h"
4762 #undef STRUCT
4763 #undef STRUCT_SPECIAL
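/*
 * Illustrative expansion of the two-pass X-macro include above: an entry
 * such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * in syscall_types.h yields both an enum constant STRUCT_winsize and a
 * field descriptor
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 *
 * which the thunk layer walks when converting between guest and host
 * layouts.
 */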
4764
4765 #define MAX_STRUCT_SIZE 4096
4766
4767 #ifdef CONFIG_FIEMAP
4768 /* So fiemap access checks don't overflow on 32-bit systems.
4769 * This is very slightly smaller than the limit imposed by
4770 * the underlying kernel.
4771 */
4772 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4773 / sizeof(struct fiemap_extent))
4774
4775 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4776 int fd, int cmd, abi_long arg)
4777 {
4778 /* The parameter for this ioctl is a struct fiemap followed
4779 * by an array of struct fiemap_extent whose size is set
4780 * in fiemap->fm_extent_count. The array is filled in by the
4781 * ioctl.
4782 */
4783 int target_size_in, target_size_out;
4784 struct fiemap *fm;
4785 const argtype *arg_type = ie->arg_type;
4786 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4787 void *argptr, *p;
4788 abi_long ret;
4789 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4790 uint32_t outbufsz;
4791 int free_fm = 0;
4792
4793 assert(arg_type[0] == TYPE_PTR);
4794 assert(ie->access == IOC_RW);
4795 arg_type++;
4796 target_size_in = thunk_type_size(arg_type, 0);
4797 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4798 if (!argptr) {
4799 return -TARGET_EFAULT;
4800 }
4801 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4802 unlock_user(argptr, arg, 0);
4803 fm = (struct fiemap *)buf_temp;
4804 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4805 return -TARGET_EINVAL;
4806 }
4807
4808 outbufsz = sizeof (*fm) +
4809 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4810
4811 if (outbufsz > MAX_STRUCT_SIZE) {
4812 /* We can't fit all the extents into the fixed size buffer.
4813 * Allocate one that is large enough and use it instead.
4814 */
4815 fm = g_try_malloc(outbufsz);
4816 if (!fm) {
4817 return -TARGET_ENOMEM;
4818 }
4819 memcpy(fm, buf_temp, sizeof(struct fiemap));
4820 free_fm = 1;
4821 }
4822 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4823 if (!is_error(ret)) {
4824 target_size_out = target_size_in;
4825 /* An extent_count of 0 means we were only counting the extents
4826 * so there are no structs to copy
4827 */
4828 if (fm->fm_extent_count != 0) {
4829 target_size_out += fm->fm_mapped_extents * extent_size;
4830 }
4831 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4832 if (!argptr) {
4833 ret = -TARGET_EFAULT;
4834 } else {
4835 /* Convert the struct fiemap */
4836 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4837 if (fm->fm_extent_count != 0) {
4838 p = argptr + target_size_in;
4839 /* ...and then all the struct fiemap_extents */
4840 for (i = 0; i < fm->fm_mapped_extents; i++) {
4841 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4842 THUNK_TARGET);
4843 p += extent_size;
4844 }
4845 }
4846 unlock_user(argptr, arg, target_size_out);
4847 }
4848 }
4849 if (free_fm) {
4850 g_free(fm);
4851 }
4852 return ret;
4853 }
4854 #endif
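/*
 * Guest-side usage sketch: FS_IOC_FIEMAP is typically issued twice, which
 * the handler above supports.  First with fm_extent_count == 0 so the
 * kernel only reports how many extents exist, then again with a buffer
 * sized from that count:
 *
 *     struct fiemap fm = { .fm_length = FIEMAP_MAX_OFFSET };
 *     ioctl(fd, FS_IOC_FIEMAP, &fm);   // pass 1: fm_mapped_extents = count
 *     // pass 2: allocate fm_mapped_extents fiemap_extent slots, set
 *     // fm_extent_count accordingly, and call again to fetch the extents
 */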
4855
4856 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4857 int fd, int cmd, abi_long arg)
4858 {
4859 const argtype *arg_type = ie->arg_type;
4860 int target_size;
4861 void *argptr;
4862 int ret;
4863 struct ifconf *host_ifconf;
4864 uint32_t outbufsz;
4865 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4866 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4867 int target_ifreq_size;
4868 int nb_ifreq;
4869 int free_buf = 0;
4870 int i;
4871 int target_ifc_len;
4872 abi_long target_ifc_buf;
4873 int host_ifc_len;
4874 char *host_ifc_buf;
4875
4876 assert(arg_type[0] == TYPE_PTR);
4877 assert(ie->access == IOC_RW);
4878
4879 arg_type++;
4880 target_size = thunk_type_size(arg_type, 0);
4881
4882 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4883 if (!argptr)
4884 return -TARGET_EFAULT;
4885 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4886 unlock_user(argptr, arg, 0);
4887
4888 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4889 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4890 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4891
4892 if (target_ifc_buf != 0) {
4893 target_ifc_len = host_ifconf->ifc_len;
4894 nb_ifreq = target_ifc_len / target_ifreq_size;
4895 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4896
4897 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4898 if (outbufsz > MAX_STRUCT_SIZE) {
4899 /*
4900 * We can't fit all the extents into the fixed size buffer.
4901 * Allocate one that is large enough and use it instead.
4902 */
4903 host_ifconf = g_try_malloc(outbufsz);
4904 if (!host_ifconf) {
4905 return -TARGET_ENOMEM;
4906 }
4907 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4908 free_buf = 1;
4909 }
4910 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4911
4912 host_ifconf->ifc_len = host_ifc_len;
4913 } else {
4914 host_ifc_buf = NULL;
4915 }
4916 host_ifconf->ifc_buf = host_ifc_buf;
4917
4918 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4919 if (!is_error(ret)) {
4920 /* convert host ifc_len to target ifc_len */
4921
4922 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4923 target_ifc_len = nb_ifreq * target_ifreq_size;
4924 host_ifconf->ifc_len = target_ifc_len;
4925
4926 /* restore target ifc_buf */
4927
4928 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4929
4930 /* copy struct ifconf to target user */
4931
4932 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4933 if (!argptr)
4934 return -TARGET_EFAULT;
4935 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4936 unlock_user(argptr, arg, target_size);
4937
4938 if (target_ifc_buf != 0) {
4939 /* copy ifreq[] to target user */
4940 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4941 for (i = 0; i < nb_ifreq; i++) {
4942 thunk_convert(argptr + i * target_ifreq_size,
4943 host_ifc_buf + i * sizeof(struct ifreq),
4944 ifreq_arg_type, THUNK_TARGET);
4945 }
4946 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4947 }
4948 }
4949
4950 if (free_buf) {
4951 g_free(host_ifconf);
4952 }
4953
4954 return ret;
4955 }
4956
4957 #if defined(CONFIG_USBFS)
4958 #if HOST_LONG_BITS > 64
4959 #error USBDEVFS thunks do not support >64 bit hosts yet.
4960 #endif
4961 struct live_urb {
4962 uint64_t target_urb_adr;
4963 uint64_t target_buf_adr;
4964 char *target_buf_ptr;
4965 struct usbdevfs_urb host_urb;
4966 };
4967
4968 static GHashTable *usbdevfs_urb_hashtable(void)
4969 {
4970 static GHashTable *urb_hashtable;
4971
4972 if (!urb_hashtable) {
4973 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4974 }
4975 return urb_hashtable;
4976 }
4977
4978 static void urb_hashtable_insert(struct live_urb *urb)
4979 {
4980 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4981 g_hash_table_insert(urb_hashtable, urb, urb);
4982 }
4983
4984 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4985 {
4986 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4987 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4988 }
4989
4990 static void urb_hashtable_remove(struct live_urb *urb)
4991 {
4992 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4993 g_hash_table_remove(urb_hashtable, urb);
4994 }
4995
4996 static abi_long
4997 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4998 int fd, int cmd, abi_long arg)
4999 {
5000 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5001 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5002 struct live_urb *lurb;
5003 void *argptr;
5004 uint64_t hurb;
5005 int target_size;
5006 uintptr_t target_urb_adr;
5007 abi_long ret;
5008
5009 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5010
5011 memset(buf_temp, 0, sizeof(uint64_t));
5012 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5013 if (is_error(ret)) {
5014 return ret;
5015 }
5016
5017 memcpy(&hurb, buf_temp, sizeof(uint64_t));
5018 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5019 if (!lurb->target_urb_adr) {
5020 return -TARGET_EFAULT;
5021 }
5022 urb_hashtable_remove(lurb);
5023 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5024 lurb->host_urb.buffer_length);
5025 lurb->target_buf_ptr = NULL;
5026
5027 /* restore the guest buffer pointer */
5028 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5029
5030 /* update the guest urb struct */
5031 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5032 if (!argptr) {
5033 g_free(lurb);
5034 return -TARGET_EFAULT;
5035 }
5036 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5037 unlock_user(argptr, lurb->target_urb_adr, target_size);
5038
5039 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5040 /* write back the urb handle */
5041 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5042 if (!argptr) {
5043 g_free(lurb);
5044 return -TARGET_EFAULT;
5045 }
5046
5047 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5048 target_urb_adr = lurb->target_urb_adr;
5049 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5050 unlock_user(argptr, arg, target_size);
5051
5052 g_free(lurb);
5053 return ret;
5054 }
5055
5056 static abi_long
5057 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5058 uint8_t *buf_temp __attribute__((unused)),
5059 int fd, int cmd, abi_long arg)
5060 {
5061 struct live_urb *lurb;
5062
5063 /* Map the target address back to the host URB with its metadata. */
5064 lurb = urb_hashtable_lookup(arg);
5065 if (!lurb) {
5066 return -TARGET_EFAULT;
5067 }
5068 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5069 }
5070
5071 static abi_long
5072 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5073 int fd, int cmd, abi_long arg)
5074 {
5075 const argtype *arg_type = ie->arg_type;
5076 int target_size;
5077 abi_long ret;
5078 void *argptr;
5079 int rw_dir;
5080 struct live_urb *lurb;
5081
5082 /*
5083 * Each submitted URB needs to map to a unique ID for the
5084 * kernel, and that unique ID needs to be a pointer to
5085 * host memory. Hence, we need to malloc for each URB.
5086 * Isochronous transfers have a variable-length struct.
5087 */
5088 arg_type++;
5089 target_size = thunk_type_size(arg_type, THUNK_TARGET);
5090
5091 /* construct host copy of urb and metadata */
5092 lurb = g_try_new0(struct live_urb, 1);
5093 if (!lurb) {
5094 return -TARGET_ENOMEM;
5095 }
5096
5097 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5098 if (!argptr) {
5099 g_free(lurb);
5100 return -TARGET_EFAULT;
5101 }
5102 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5103 unlock_user(argptr, arg, 0);
5104
5105 lurb->target_urb_adr = arg;
5106 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5107
5108 /* buffer space used depends on endpoint type so lock the entire buffer */
5109 /* control type urbs should check the buffer contents for true direction */
5110 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5111 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5112 lurb->host_urb.buffer_length, 1);
5113 if (lurb->target_buf_ptr == NULL) {
5114 g_free(lurb);
5115 return -TARGET_EFAULT;
5116 }
5117
5118 /* update buffer pointer in host copy */
5119 lurb->host_urb.buffer = lurb->target_buf_ptr;
5120
5121 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5122 if (is_error(ret)) {
5123 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5124 g_free(lurb);
5125 } else {
5126 urb_hashtable_insert(lurb);
5127 }
5128
5129 return ret;
5130 }
5131 #endif /* CONFIG_USBFS */
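/*
 * Note on the reapurb handler above: the pointer arithmetic on "hurb" is a
 * hand-rolled container_of().  The kernel returns the address of the
 * embedded host_urb, so subtracting its offset recovers the wrapping
 * live_urb:
 *
 *     lurb = (struct live_urb *)((uintptr_t)hurb -
 *                                offsetof(struct live_urb, host_urb));
 *
 * This is why every submitted URB gets its own heap allocation in
 * do_ioctl_usbdevfs_submiturb().
 */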
5132
5133 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5134 int cmd, abi_long arg)
5135 {
5136 void *argptr;
5137 struct dm_ioctl *host_dm;
5138 abi_long guest_data;
5139 uint32_t guest_data_size;
5140 int target_size;
5141 const argtype *arg_type = ie->arg_type;
5142 abi_long ret;
5143 void *big_buf = NULL;
5144 char *host_data;
5145
5146 arg_type++;
5147 target_size = thunk_type_size(arg_type, 0);
5148 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5149 if (!argptr) {
5150 ret = -TARGET_EFAULT;
5151 goto out;
5152 }
5153 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5154 unlock_user(argptr, arg, 0);
5155
5156 /* buf_temp is too small, so fetch things into a bigger buffer */
5157 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5158 memcpy(big_buf, buf_temp, target_size);
5159 buf_temp = big_buf;
5160 host_dm = big_buf;
5161
5162 guest_data = arg + host_dm->data_start;
5163 if ((guest_data - arg) < 0) {
5164 ret = -TARGET_EINVAL;
5165 goto out;
5166 }
5167 guest_data_size = host_dm->data_size - host_dm->data_start;
5168 host_data = (char*)host_dm + host_dm->data_start;
5169
5170 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5171 if (!argptr) {
5172 ret = -TARGET_EFAULT;
5173 goto out;
5174 }
5175
5176 switch (ie->host_cmd) {
5177 case DM_REMOVE_ALL:
5178 case DM_LIST_DEVICES:
5179 case DM_DEV_CREATE:
5180 case DM_DEV_REMOVE:
5181 case DM_DEV_SUSPEND:
5182 case DM_DEV_STATUS:
5183 case DM_DEV_WAIT:
5184 case DM_TABLE_STATUS:
5185 case DM_TABLE_CLEAR:
5186 case DM_TABLE_DEPS:
5187 case DM_LIST_VERSIONS:
5188 /* no input data */
5189 break;
5190 case DM_DEV_RENAME:
5191 case DM_DEV_SET_GEOMETRY:
5192 /* data contains only strings */
5193 memcpy(host_data, argptr, guest_data_size);
5194 break;
5195 case DM_TARGET_MSG:
5196 memcpy(host_data, argptr, guest_data_size);
5197 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5198 break;
5199 case DM_TABLE_LOAD:
5200 {
5201 void *gspec = argptr;
5202 void *cur_data = host_data;
5203 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5204 int spec_size = thunk_type_size(arg_type, 0);
5205 int i;
5206
5207 for (i = 0; i < host_dm->target_count; i++) {
5208 struct dm_target_spec *spec = cur_data;
5209 uint32_t next;
5210 int slen;
5211
5212 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5213 slen = strlen((char*)gspec + spec_size) + 1;
5214 next = spec->next;
5215 spec->next = sizeof(*spec) + slen;
5216 strcpy((char*)&spec[1], gspec + spec_size);
5217 gspec += next;
5218 cur_data += spec->next;
5219 }
5220 break;
5221 }
5222 default:
5223 ret = -TARGET_EINVAL;
5224 unlock_user(argptr, guest_data, 0);
5225 goto out;
5226 }
5227 unlock_user(argptr, guest_data, 0);
5228
5229 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5230 if (!is_error(ret)) {
5231 guest_data = arg + host_dm->data_start;
5232 guest_data_size = host_dm->data_size - host_dm->data_start;
5233 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5234 switch (ie->host_cmd) {
5235 case DM_REMOVE_ALL:
5236 case DM_DEV_CREATE:
5237 case DM_DEV_REMOVE:
5238 case DM_DEV_RENAME:
5239 case DM_DEV_SUSPEND:
5240 case DM_DEV_STATUS:
5241 case DM_TABLE_LOAD:
5242 case DM_TABLE_CLEAR:
5243 case DM_TARGET_MSG:
5244 case DM_DEV_SET_GEOMETRY:
5245 /* no return data */
5246 break;
5247 case DM_LIST_DEVICES:
5248 {
5249 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5250 uint32_t remaining_data = guest_data_size;
5251 void *cur_data = argptr;
5252 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5253 int nl_size = 12; /* can't use thunk_size due to alignment */
5254
5255 while (1) {
5256 uint32_t next = nl->next;
5257 if (next) {
5258 nl->next = nl_size + (strlen(nl->name) + 1);
5259 }
5260 if (remaining_data < nl->next) {
5261 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5262 break;
5263 }
5264 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5265 strcpy(cur_data + nl_size, nl->name);
5266 cur_data += nl->next;
5267 remaining_data -= nl->next;
5268 if (!next) {
5269 break;
5270 }
5271 nl = (void*)nl + next;
5272 }
5273 break;
5274 }
5275 case DM_DEV_WAIT:
5276 case DM_TABLE_STATUS:
5277 {
5278 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5279 void *cur_data = argptr;
5280 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5281 int spec_size = thunk_type_size(arg_type, 0);
5282 int i;
5283
5284 for (i = 0; i < host_dm->target_count; i++) {
5285 uint32_t next = spec->next;
5286 int slen = strlen((char*)&spec[1]) + 1;
5287 spec->next = (cur_data - argptr) + spec_size + slen;
5288 if (guest_data_size < spec->next) {
5289 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5290 break;
5291 }
5292 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5293 strcpy(cur_data + spec_size, (char*)&spec[1]);
5294 cur_data = argptr + spec->next;
5295 spec = (void*)host_dm + host_dm->data_start + next;
5296 }
5297 break;
5298 }
5299 case DM_TABLE_DEPS:
5300 {
5301 void *hdata = (void*)host_dm + host_dm->data_start;
5302 int count = *(uint32_t*)hdata;
5303 uint64_t *hdev = hdata + 8;
5304 uint64_t *gdev = argptr + 8;
5305 int i;
5306
5307 *(uint32_t*)argptr = tswap32(count);
5308 for (i = 0; i < count; i++) {
5309 *gdev = tswap64(*hdev);
5310 gdev++;
5311 hdev++;
5312 }
5313 break;
5314 }
5315 case DM_LIST_VERSIONS:
5316 {
5317 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5318 uint32_t remaining_data = guest_data_size;
5319 void *cur_data = argptr;
5320 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5321 int vers_size = thunk_type_size(arg_type, 0);
5322
5323 while (1) {
5324 uint32_t next = vers->next;
5325 if (next) {
5326 vers->next = vers_size + (strlen(vers->name) + 1);
5327 }
5328 if (remaining_data < vers->next) {
5329 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5330 break;
5331 }
5332 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5333 strcpy(cur_data + vers_size, vers->name);
5334 cur_data += vers->next;
5335 remaining_data -= vers->next;
5336 if (!next) {
5337 break;
5338 }
5339 vers = (void*)vers + next;
5340 }
5341 break;
5342 }
5343 default:
5344 unlock_user(argptr, guest_data, 0);
5345 ret = -TARGET_EINVAL;
5346 goto out;
5347 }
5348 unlock_user(argptr, guest_data, guest_data_size);
5349
5350 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5351 if (!argptr) {
5352 ret = -TARGET_EFAULT;
5353 goto out;
5354 }
5355 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5356 unlock_user(argptr, arg, target_size);
5357 }
5358 out:
5359 g_free(big_buf);
5360 return ret;
5361 }
5362
5363 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5364 int cmd, abi_long arg)
5365 {
5366 void *argptr;
5367 int target_size;
5368 const argtype *arg_type = ie->arg_type;
5369 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5370 abi_long ret;
5371
5372 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5373 struct blkpg_partition host_part;
5374
5375 /* Read and convert blkpg */
5376 arg_type++;
5377 target_size = thunk_type_size(arg_type, 0);
5378 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5379 if (!argptr) {
5380 ret = -TARGET_EFAULT;
5381 goto out;
5382 }
5383 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5384 unlock_user(argptr, arg, 0);
5385
5386 switch (host_blkpg->op) {
5387 case BLKPG_ADD_PARTITION:
5388 case BLKPG_DEL_PARTITION:
5389 /* payload is struct blkpg_partition */
5390 break;
5391 default:
5392 /* Unknown opcode */
5393 ret = -TARGET_EINVAL;
5394 goto out;
5395 }
5396
5397 /* Read and convert blkpg->data */
5398 arg = (abi_long)(uintptr_t)host_blkpg->data;
5399 target_size = thunk_type_size(part_arg_type, 0);
5400 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5401 if (!argptr) {
5402 ret = -TARGET_EFAULT;
5403 goto out;
5404 }
5405 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5406 unlock_user(argptr, arg, 0);
5407
5408 /* Swizzle the data pointer to our local copy and call! */
5409 host_blkpg->data = &host_part;
5410 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5411
5412 out:
5413 return ret;
5414 }
5415
5416 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5417 int fd, int cmd, abi_long arg)
5418 {
5419 const argtype *arg_type = ie->arg_type;
5420 const StructEntry *se;
5421 const argtype *field_types;
5422 const int *dst_offsets, *src_offsets;
5423 int target_size;
5424 void *argptr;
5425 abi_ulong *target_rt_dev_ptr = NULL;
5426 unsigned long *host_rt_dev_ptr = NULL;
5427 abi_long ret;
5428 int i;
5429
5430 assert(ie->access == IOC_W);
5431 assert(*arg_type == TYPE_PTR);
5432 arg_type++;
5433 assert(*arg_type == TYPE_STRUCT);
5434 target_size = thunk_type_size(arg_type, 0);
5435 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5436 if (!argptr) {
5437 return -TARGET_EFAULT;
5438 }
5439 arg_type++;
5440 assert(*arg_type == (int)STRUCT_rtentry);
5441 se = struct_entries + *arg_type++;
5442 assert(se->convert[0] == NULL);
5443 /* convert struct here to be able to catch rt_dev string */
5444 field_types = se->field_types;
5445 dst_offsets = se->field_offsets[THUNK_HOST];
5446 src_offsets = se->field_offsets[THUNK_TARGET];
5447 for (i = 0; i < se->nb_fields; i++) {
5448 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5449 assert(*field_types == TYPE_PTRVOID);
5450 target_rt_dev_ptr = argptr + src_offsets[i];
5451 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5452 if (*target_rt_dev_ptr != 0) {
5453 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5454 tswapal(*target_rt_dev_ptr));
5455 if (!*host_rt_dev_ptr) {
5456 unlock_user(argptr, arg, 0);
5457 return -TARGET_EFAULT;
5458 }
5459 } else {
5460 *host_rt_dev_ptr = 0;
5461 }
5462 field_types++;
5463 continue;
5464 }
5465 field_types = thunk_convert(buf_temp + dst_offsets[i],
5466 argptr + src_offsets[i],
5467 field_types, THUNK_HOST);
5468 }
5469 unlock_user(argptr, arg, 0);
5470
5471 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5472
5473 assert(host_rt_dev_ptr != NULL);
5474 assert(target_rt_dev_ptr != NULL);
5475 if (*host_rt_dev_ptr != 0) {
5476 unlock_user((void *)*host_rt_dev_ptr,
5477 *target_rt_dev_ptr, 0);
5478 }
5479 return ret;
5480 }
5481
5482 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5483 int fd, int cmd, abi_long arg)
5484 {
5485 int sig = target_to_host_signal(arg);
5486 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5487 }
5488
5489 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5490 int fd, int cmd, abi_long arg)
5491 {
5492 struct timeval tv;
5493 abi_long ret;
5494
5495 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5496 if (is_error(ret)) {
5497 return ret;
5498 }
5499
5500 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5501 if (copy_to_user_timeval(arg, &tv)) {
5502 return -TARGET_EFAULT;
5503 }
5504 } else {
5505 if (copy_to_user_timeval64(arg, &tv)) {
5506 return -TARGET_EFAULT;
5507 }
5508 }
5509
5510 return ret;
5511 }
5512
5513 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5514 int fd, int cmd, abi_long arg)
5515 {
5516 struct timespec ts;
5517 abi_long ret;
5518
5519 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5520 if (is_error(ret)) {
5521 return ret;
5522 }
5523
5524 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5525 if (host_to_target_timespec(arg, &ts)) {
5526 return -TARGET_EFAULT;
5527 }
5528 } else {
5529 if (host_to_target_timespec64(arg, &ts)) {
5530 return -TARGET_EFAULT;
5531 }
5532 }
5533
5534 return ret;
5535 }
5536
5537 #ifdef TIOCGPTPEER
5538 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5539 int fd, int cmd, abi_long arg)
5540 {
5541 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5542 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5543 }
5544 #endif
5545
5546 #ifdef HAVE_DRM_H
5547
5548 static void unlock_drm_version(struct drm_version *host_ver,
5549 struct target_drm_version *target_ver,
5550 bool copy)
5551 {
5552 unlock_user(host_ver->name, target_ver->name,
5553 copy ? host_ver->name_len : 0);
5554 unlock_user(host_ver->date, target_ver->date,
5555 copy ? host_ver->date_len : 0);
5556 unlock_user(host_ver->desc, target_ver->desc,
5557 copy ? host_ver->desc_len : 0);
5558 }
5559
5560 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5561 struct target_drm_version *target_ver)
5562 {
5563 memset(host_ver, 0, sizeof(*host_ver));
5564
5565 __get_user(host_ver->name_len, &target_ver->name_len);
5566 if (host_ver->name_len) {
5567 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5568 target_ver->name_len, 0);
5569 if (!host_ver->name) {
5570 return -EFAULT;
5571 }
5572 }
5573
5574 __get_user(host_ver->date_len, &target_ver->date_len);
5575 if (host_ver->date_len) {
5576 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5577 target_ver->date_len, 0);
5578 if (!host_ver->date) {
5579 goto err;
5580 }
5581 }
5582
5583 __get_user(host_ver->desc_len, &target_ver->desc_len);
5584 if (host_ver->desc_len) {
5585 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5586 target_ver->desc_len, 0);
5587 if (!host_ver->desc) {
5588 goto err;
5589 }
5590 }
5591
5592 return 0;
5593 err:
5594 unlock_drm_version(host_ver, target_ver, false);
5595 return -EFAULT;
5596 }
5597
5598 static inline void host_to_target_drmversion(
5599 struct target_drm_version *target_ver,
5600 struct drm_version *host_ver)
5601 {
5602 __put_user(host_ver->version_major, &target_ver->version_major);
5603 __put_user(host_ver->version_minor, &target_ver->version_minor);
5604 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5605 __put_user(host_ver->name_len, &target_ver->name_len);
5606 __put_user(host_ver->date_len, &target_ver->date_len);
5607 __put_user(host_ver->desc_len, &target_ver->desc_len);
5608 unlock_drm_version(host_ver, target_ver, true);
5609 }
5610
5611 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5612 int fd, int cmd, abi_long arg)
5613 {
5614 struct drm_version *ver;
5615 struct target_drm_version *target_ver;
5616 abi_long ret;
5617
5618 switch (ie->host_cmd) {
5619 case DRM_IOCTL_VERSION:
5620 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5621 return -TARGET_EFAULT;
5622 }
5623 ver = (struct drm_version *)buf_temp;
5624 ret = target_to_host_drmversion(ver, target_ver);
5625 if (!is_error(ret)) {
5626 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5627 if (is_error(ret)) {
5628 unlock_drm_version(ver, target_ver, false);
5629 } else {
5630 host_to_target_drmversion(target_ver, ver);
5631 }
5632 }
5633 unlock_user_struct(target_ver, arg, 0);
5634 return ret;
5635 }
5636 return -TARGET_ENOSYS;
5637 }
5638
5639 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5640 struct drm_i915_getparam *gparam,
5641 int fd, abi_long arg)
5642 {
5643 abi_long ret;
5644 int value;
5645 struct target_drm_i915_getparam *target_gparam;
5646
5647 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5648 return -TARGET_EFAULT;
5649 }
5650
5651 __get_user(gparam->param, &target_gparam->param);
5652 gparam->value = &value;
5653 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5654 put_user_s32(value, target_gparam->value);
5655
5656 unlock_user_struct(target_gparam, arg, 0);
5657 return ret;
5658 }
5659
5660 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5661 int fd, int cmd, abi_long arg)
5662 {
5663 switch (ie->host_cmd) {
5664 case DRM_IOCTL_I915_GETPARAM:
5665 return do_ioctl_drm_i915_getparam(ie,
5666 (struct drm_i915_getparam *)buf_temp,
5667 fd, arg);
5668 default:
5669 return -TARGET_ENOSYS;
5670 }
5671 }
5672
5673 #endif
5674
5675 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5676 int fd, int cmd, abi_long arg)
5677 {
5678 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5679 struct tun_filter *target_filter;
5680 char *target_addr;
5681
5682 assert(ie->access == IOC_W);
5683
5684 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5685 if (!target_filter) {
5686 return -TARGET_EFAULT;
5687 }
5688 filter->flags = tswap16(target_filter->flags);
5689 filter->count = tswap16(target_filter->count);
5690 unlock_user(target_filter, arg, 0);
5691
5692 if (filter->count) {
5693 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5694 MAX_STRUCT_SIZE) {
5695 return -TARGET_EFAULT;
5696 }
5697
5698 target_addr = lock_user(VERIFY_READ,
5699 arg + offsetof(struct tun_filter, addr),
5700 filter->count * ETH_ALEN, 1);
5701 if (!target_addr) {
5702 return -TARGET_EFAULT;
5703 }
5704 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5705 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5706 }
5707
5708 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5709 }
5710
5711 IOCTLEntry ioctl_entries[] = {
5712 #define IOCTL(cmd, access, ...) \
5713 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5714 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5715 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5716 #define IOCTL_IGNORE(cmd) \
5717 { TARGET_ ## cmd, 0, #cmd },
5718 #include "ioctls.h"
5719 { 0, 0, },
5720 };
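/*
 * Illustrative sketch, not from the original sources: what one row of the
 * table above looks like after macro expansion. The example assumes the
 * ioctls.h entry IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG)); the exact
 * field layout of IOCTLEntry is inferred from the macros above.
 */
#if 0
static const IOCTLEntry example_ioctl_row = {
    TARGET_BLKGETSIZE,       /* target_cmd: guest-side command number */
    BLKGETSIZE,              /* host_cmd: what is actually issued     */
    "BLKGETSIZE",            /* name, for logging                     */
    IOC_R,                   /* access: kernel writes, guest reads    */
    0,                       /* do_ioctl: no special handler          */
    { MK_PTR(TYPE_ULONG) }   /* arg_type: thunk description           */
};
#endif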
5721
5722 /* ??? Implement proper locking for ioctls. */
5723 /* do_ioctl() must return target values and target errnos. */
5724 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5725 {
5726 const IOCTLEntry *ie;
5727 const argtype *arg_type;
5728 abi_long ret;
5729 uint8_t buf_temp[MAX_STRUCT_SIZE];
5730 int target_size;
5731 void *argptr;
5732
5733 ie = ioctl_entries;
5734 for(;;) {
5735 if (ie->target_cmd == 0) {
5736 qemu_log_mask(
5737 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5738 return -TARGET_ENOSYS;
5739 }
5740 if (ie->target_cmd == cmd)
5741 break;
5742 ie++;
5743 }
5744 arg_type = ie->arg_type;
5745 if (ie->do_ioctl) {
5746 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5747 } else if (!ie->host_cmd) {
5748 /* Some architectures define BSD ioctls in their headers
5749 that are not implemented in Linux. */
5750 return -TARGET_ENOSYS;
5751 }
5752
5753 switch(arg_type[0]) {
5754 case TYPE_NULL:
5755 /* no argument */
5756 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5757 break;
5758 case TYPE_PTRVOID:
5759 case TYPE_INT:
5760 case TYPE_LONG:
5761 case TYPE_ULONG:
5762 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5763 break;
5764 case TYPE_PTR:
5765 arg_type++;
5766 target_size = thunk_type_size(arg_type, 0);
5767 switch(ie->access) {
5768 case IOC_R:
5769 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5770 if (!is_error(ret)) {
5771 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5772 if (!argptr)
5773 return -TARGET_EFAULT;
5774 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5775 unlock_user(argptr, arg, target_size);
5776 }
5777 break;
5778 case IOC_W:
5779 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5780 if (!argptr)
5781 return -TARGET_EFAULT;
5782 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5783 unlock_user(argptr, arg, 0);
5784 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5785 break;
5786 default:
5787 case IOC_RW:
5788 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5789 if (!argptr)
5790 return -TARGET_EFAULT;
5791 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5792 unlock_user(argptr, arg, 0);
5793 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5794 if (!is_error(ret)) {
5795 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5796 if (!argptr)
5797 return -TARGET_EFAULT;
5798 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5799 unlock_user(argptr, arg, target_size);
5800 }
5801 break;
5802 }
5803 break;
5804 default:
5805 qemu_log_mask(LOG_UNIMP,
5806 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5807 (long)cmd, arg_type[0]);
5808 ret = -TARGET_ENOSYS;
5809 break;
5810 }
5811 return ret;
5812 }
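/*
 * Illustrative sketch, not from the original sources (hypothetical guest
 * code): a plain read-style ioctl such as TIOCGWINSZ takes the IOC_R
 * branch above -- the host ioctl fills buf_temp, and thunk_convert()
 * then rewrites the result into the guest's layout and byte order.
 */
#if 0
#include <sys/ioctl.h>

static int example_guest_winsize(int fd, struct winsize *ws)
{
    /* Under qemu-user this request is dispatched through do_ioctl(). */
    return ioctl(fd, TIOCGWINSZ, ws);
}
#endif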
5813
5814 static const bitmask_transtbl iflag_tbl[] = {
5815 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5816 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5817 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5818 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5819 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5820 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5821 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5822 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5823 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5824 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5825 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5826 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5827 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5828 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5829 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5830 { 0, 0, 0, 0 }
5831 };
5832
5833 static const bitmask_transtbl oflag_tbl[] = {
5834 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5835 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5836 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5837 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5838 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5839 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5840 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5841 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5842 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5843 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5844 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5845 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5846 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5847 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5848 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5849 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5850 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5851 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5852 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5853 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5854 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5855 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5856 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5857 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5858 { 0, 0, 0, 0 }
5859 };
5860
5861 static const bitmask_transtbl cflag_tbl[] = {
5862 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5863 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5864 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5865 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5866 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5867 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5868 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5869 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5870 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5871 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5872 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5873 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5874 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5875 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5876 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5877 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5878 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5879 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5880 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5881 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5882 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5883 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5884 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5885 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5886 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5887 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5888 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5889 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5890 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5891 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5892 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5893 { 0, 0, 0, 0 }
5894 };
5895
5896 static const bitmask_transtbl lflag_tbl[] = {
5897 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5898 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5899 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5900 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5901 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5902 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5903 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5904 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5905 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5906 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5907 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5908 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5909 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5910 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5911 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5912 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5913 { 0, 0, 0, 0 }
5914 };
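/*
 * Illustrative sketch, not from the original sources: each row of the
 * tables above is a { target_mask, target_bits, host_mask, host_bits }
 * quadruple, and a row with zero masks terminates the scan. The loop
 * below mirrors the assumed semantics of target_to_host_bitmask();
 * the example_ name is hypothetical.
 */
#if 0
static unsigned int example_to_host_bitmask(unsigned int target_val,
                                            const bitmask_transtbl *tbl)
{
    unsigned int host_val = 0;

    for (; tbl->target_mask && tbl->host_mask; tbl++) {
        /* Rows whose masked target bits match exactly ... */
        if ((target_val & tbl->target_mask) == tbl->target_bits) {
            /* ... contribute the corresponding host bits. */
            host_val |= tbl->host_bits;
        }
    }
    return host_val;
}
#endif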
5915
5916 static void target_to_host_termios (void *dst, const void *src)
5917 {
5918 struct host_termios *host = dst;
5919 const struct target_termios *target = src;
5920
5921 host->c_iflag =
5922 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5923 host->c_oflag =
5924 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5925 host->c_cflag =
5926 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5927 host->c_lflag =
5928 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5929 host->c_line = target->c_line;
5930
5931 memset(host->c_cc, 0, sizeof(host->c_cc));
5932 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5933 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5934 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5935 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5936 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5937 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5938 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5939 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5940 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5941 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5942 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5943 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5944 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5945 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5946 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5947 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5948 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5949 }
5950
5951 static void host_to_target_termios (void *dst, const void *src)
5952 {
5953 struct target_termios *target = dst;
5954 const struct host_termios *host = src;
5955
5956 target->c_iflag =
5957 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5958 target->c_oflag =
5959 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5960 target->c_cflag =
5961 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5962 target->c_lflag =
5963 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5964 target->c_line = host->c_line;
5965
5966 memset(target->c_cc, 0, sizeof(target->c_cc));
5967 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5968 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5969 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5970 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5971 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5972 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5973 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5974 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5975 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5976 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5977 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5978 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5979 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5980 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5981 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5982 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5983 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5984 }
5985
5986 static const StructEntry struct_termios_def = {
5987 .convert = { host_to_target_termios, target_to_host_termios },
5988 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5989 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5990 .print = print_termios,
5991 };
5992
5993 static const bitmask_transtbl mmap_flags_tbl[] = {
5994 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5995 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5996 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5997 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5998 MAP_ANONYMOUS, MAP_ANONYMOUS },
5999 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6000 MAP_GROWSDOWN, MAP_GROWSDOWN },
6001 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6002 MAP_DENYWRITE, MAP_DENYWRITE },
6003 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6004 MAP_EXECUTABLE, MAP_EXECUTABLE },
6005 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6006 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6007 MAP_NORESERVE, MAP_NORESERVE },
6008 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6009 /* MAP_STACK has been ignored by the kernel for quite some time.
6010 Recognize it for the target insofar as we do not want to pass
6011 it through to the host. */
6012 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6013 { 0, 0, 0, 0 }
6014 };
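/*
 * Illustrative example, not from the original sources (hypothetical
 * function name): the TARGET_MAP_STACK row above carries zero host
 * mask/bits, so the flag is accepted from the guest but contributes
 * nothing to the host value.
 */
#if 0
static void example_mmap_flag_translation(void)
{
    abi_ulong target_flags = TARGET_MAP_PRIVATE | TARGET_MAP_ANONYMOUS
                             | TARGET_MAP_STACK;
    unsigned int host_flags =
        target_to_host_bitmask(target_flags, mmap_flags_tbl);

    /* host_flags == (MAP_PRIVATE | MAP_ANONYMOUS); MAP_STACK dropped. */
    (void)host_flags;
}
#endif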
6015
6016 /*
6017 * NOTE: TARGET_ABI32 is defined for TARGET_I386 but not for TARGET_X86_64;
6018 * TARGET_I386 is also defined whenever TARGET_X86_64 is defined.
6019 */
6020 #if defined(TARGET_I386)
6021
6022 /* NOTE: there is really only one LDT shared by all threads */
6023 static uint8_t *ldt_table;
6024
6025 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6026 {
6027 int size;
6028 void *p;
6029
6030 if (!ldt_table)
6031 return 0;
6032 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6033 if (size > bytecount)
6034 size = bytecount;
6035 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6036 if (!p)
6037 return -TARGET_EFAULT;
6038 /* ??? Should this be byteswapped? */
6039 memcpy(p, ldt_table, size);
6040 unlock_user(p, ptr, size);
6041 return size;
6042 }
6043
6044 /* XXX: add locking support */
6045 static abi_long write_ldt(CPUX86State *env,
6046 abi_ulong ptr, unsigned long bytecount, int oldmode)
6047 {
6048 struct target_modify_ldt_ldt_s ldt_info;
6049 struct target_modify_ldt_ldt_s *target_ldt_info;
6050 int seg_32bit, contents, read_exec_only, limit_in_pages;
6051 int seg_not_present, useable, lm;
6052 uint32_t *lp, entry_1, entry_2;
6053
6054 if (bytecount != sizeof(ldt_info))
6055 return -TARGET_EINVAL;
6056 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6057 return -TARGET_EFAULT;
6058 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6059 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6060 ldt_info.limit = tswap32(target_ldt_info->limit);
6061 ldt_info.flags = tswap32(target_ldt_info->flags);
6062 unlock_user_struct(target_ldt_info, ptr, 0);
6063
6064 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6065 return -TARGET_EINVAL;
6066 seg_32bit = ldt_info.flags & 1;
6067 contents = (ldt_info.flags >> 1) & 3;
6068 read_exec_only = (ldt_info.flags >> 3) & 1;
6069 limit_in_pages = (ldt_info.flags >> 4) & 1;
6070 seg_not_present = (ldt_info.flags >> 5) & 1;
6071 useable = (ldt_info.flags >> 6) & 1;
6072 #ifdef TARGET_ABI32
6073 lm = 0;
6074 #else
6075 lm = (ldt_info.flags >> 7) & 1;
6076 #endif
6077 if (contents == 3) {
6078 if (oldmode)
6079 return -TARGET_EINVAL;
6080 if (seg_not_present == 0)
6081 return -TARGET_EINVAL;
6082 }
6083 /* allocate the LDT */
6084 if (!ldt_table) {
6085 env->ldt.base = target_mmap(0,
6086 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6087 PROT_READ|PROT_WRITE,
6088 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6089 if (env->ldt.base == -1)
6090 return -TARGET_ENOMEM;
6091 memset(g2h_untagged(env->ldt.base), 0,
6092 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6093 env->ldt.limit = 0xffff;
6094 ldt_table = g2h_untagged(env->ldt.base);
6095 }
6096
6097 /* NOTE: same code as Linux kernel */
6098 /* Allow LDTs to be cleared by the user. */
6099 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6100 if (oldmode ||
6101 (contents == 0 &&
6102 read_exec_only == 1 &&
6103 seg_32bit == 0 &&
6104 limit_in_pages == 0 &&
6105 seg_not_present == 1 &&
6106 useable == 0 )) {
6107 entry_1 = 0;
6108 entry_2 = 0;
6109 goto install;
6110 }
6111 }
6112
6113 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6114 (ldt_info.limit & 0x0ffff);
6115 entry_2 = (ldt_info.base_addr & 0xff000000) |
6116 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6117 (ldt_info.limit & 0xf0000) |
6118 ((read_exec_only ^ 1) << 9) |
6119 (contents << 10) |
6120 ((seg_not_present ^ 1) << 15) |
6121 (seg_32bit << 22) |
6122 (limit_in_pages << 23) |
6123 (lm << 21) |
6124 0x7000;
6125 if (!oldmode)
6126 entry_2 |= (useable << 20);
6127
6128 /* Install the new entry ... */
6129 install:
6130 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6131 lp[0] = tswap32(entry_1);
6132 lp[1] = tswap32(entry_2);
6133 return 0;
6134 }
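/*
 * Worked example for the descriptor packing above (input values are
 * hypothetical): base_addr = 0x12345678, limit = 0xfffff, seg_32bit = 1,
 * limit_in_pages = 1, all other flag bits 0.
 *
 *   entry_1 = (0x5678 << 16) | 0xffff = 0x5678ffff
 *   entry_2 = 0x12000000    base 31..24
 *           | 0x00000034    base 23..16
 *           | 0x000f0000    limit 19..16
 *           | 0x00000200    (read_exec_only ^ 1) << 9: writable
 *           | 0x00008000    (seg_not_present ^ 1) << 15: present
 *           | 0x00400000    seg_32bit << 22
 *           | 0x00800000    limit_in_pages << 23: 4K granularity
 *           | 0x00007000    S = 1 (code/data segment), DPL = 3
 *           = 0x12cff234
 *
 * i.e. exactly the two words of an x86 segment descriptor.
 */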
6135
6136 /* specific and weird i386 syscalls */
6137 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6138 unsigned long bytecount)
6139 {
6140 abi_long ret;
6141
6142 switch (func) {
6143 case 0:
6144 ret = read_ldt(ptr, bytecount);
6145 break;
6146 case 1:
6147 ret = write_ldt(env, ptr, bytecount, 1);
6148 break;
6149 case 0x11:
6150 ret = write_ldt(env, ptr, bytecount, 0);
6151 break;
6152 default:
6153 ret = -TARGET_ENOSYS;
6154 break;
6155 }
6156 return ret;
6157 }
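/*
 * Illustrative sketch, not from the original sources (hypothetical guest
 * code): how the func values above map onto modify_ldt(2) -- 0 reads the
 * LDT, 1 writes an entry in the legacy format, 0x11 writes in the modern
 * format.
 */
#if 0
#include <asm/ldt.h>        /* struct user_desc */
#include <sys/syscall.h>
#include <unistd.h>

static int example_write_ldt_entry(const struct user_desc *desc)
{
    return syscall(SYS_modify_ldt, 0x11, desc, sizeof(*desc));
}
#endif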
6158
6159 #if defined(TARGET_ABI32)
6160 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6161 {
6162 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6163 struct target_modify_ldt_ldt_s ldt_info;
6164 struct target_modify_ldt_ldt_s *target_ldt_info;
6165 int seg_32bit, contents, read_exec_only, limit_in_pages;
6166 int seg_not_present, useable, lm;
6167 uint32_t *lp, entry_1, entry_2;
6168 int i;
6169
6170 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6171 if (!target_ldt_info)
6172 return -TARGET_EFAULT;
6173 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6174 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6175 ldt_info.limit = tswap32(target_ldt_info->limit);
6176 ldt_info.flags = tswap32(target_ldt_info->flags);
6177 if (ldt_info.entry_number == -1) {
6178 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6179 if (gdt_table[i] == 0) {
6180 ldt_info.entry_number = i;
6181 target_ldt_info->entry_number = tswap32(i);
6182 break;
6183 }
6184 }
6185 }
6186 unlock_user_struct(target_ldt_info, ptr, 1);
6187
6188 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6189 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6190 return -TARGET_EINVAL;
6191 seg_32bit = ldt_info.flags & 1;
6192 contents = (ldt_info.flags >> 1) & 3;
6193 read_exec_only = (ldt_info.flags >> 3) & 1;
6194 limit_in_pages = (ldt_info.flags >> 4) & 1;
6195 seg_not_present = (ldt_info.flags >> 5) & 1;
6196 useable = (ldt_info.flags >> 6) & 1;
6197 #ifdef TARGET_ABI32
6198 lm = 0;
6199 #else
6200 lm = (ldt_info.flags >> 7) & 1;
6201 #endif
6202
6203 if (contents == 3) {
6204 if (seg_not_present == 0)
6205 return -TARGET_EINVAL;
6206 }
6207
6208 /* NOTE: same code as Linux kernel */
6209 /* Allow LDTs to be cleared by the user. */
6210 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6211 if ((contents == 0 &&
6212 read_exec_only == 1 &&
6213 seg_32bit == 0 &&
6214 limit_in_pages == 0 &&
6215 seg_not_present == 1 &&
6216 useable == 0 )) {
6217 entry_1 = 0;
6218 entry_2 = 0;
6219 goto install;
6220 }
6221 }
6222
6223 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6224 (ldt_info.limit & 0x0ffff);
6225 entry_2 = (ldt_info.base_addr & 0xff000000) |
6226 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6227 (ldt_info.limit & 0xf0000) |
6228 ((read_exec_only ^ 1) << 9) |
6229 (contents << 10) |
6230 ((seg_not_present ^ 1) << 15) |
6231 (seg_32bit << 22) |
6232 (limit_in_pages << 23) |
6233 (useable << 20) |
6234 (lm << 21) |
6235 0x7000;
6236
6237 /* Install the new entry ... */
6238 install:
6239 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6240 lp[0] = tswap32(entry_1);
6241 lp[1] = tswap32(entry_2);
6242 return 0;
6243 }
6244
6245 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6246 {
6247 struct target_modify_ldt_ldt_s *target_ldt_info;
6248 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6249 uint32_t base_addr, limit, flags;
6250 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6251 int seg_not_present, useable, lm;
6252 uint32_t *lp, entry_1, entry_2;
6253
6254 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6255 if (!target_ldt_info)
6256 return -TARGET_EFAULT;
6257 idx = tswap32(target_ldt_info->entry_number);
6258 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6259 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6260 unlock_user_struct(target_ldt_info, ptr, 1);
6261 return -TARGET_EINVAL;
6262 }
6263 lp = (uint32_t *)(gdt_table + idx);
6264 entry_1 = tswap32(lp[0]);
6265 entry_2 = tswap32(lp[1]);
6266
6267 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6268 contents = (entry_2 >> 10) & 3;
6269 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6270 seg_32bit = (entry_2 >> 22) & 1;
6271 limit_in_pages = (entry_2 >> 23) & 1;
6272 useable = (entry_2 >> 20) & 1;
6273 #ifdef TARGET_ABI32
6274 lm = 0;
6275 #else
6276 lm = (entry_2 >> 21) & 1;
6277 #endif
6278 flags = (seg_32bit << 0) | (contents << 1) |
6279 (read_exec_only << 3) | (limit_in_pages << 4) |
6280 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6281 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6282 base_addr = (entry_1 >> 16) |
6283 (entry_2 & 0xff000000) |
6284 ((entry_2 & 0xff) << 16);
6285 target_ldt_info->base_addr = tswapal(base_addr);
6286 target_ldt_info->limit = tswap32(limit);
6287 target_ldt_info->flags = tswap32(flags);
6288 unlock_user_struct(target_ldt_info, ptr, 1);
6289 return 0;
6290 }
6291
6292 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6293 {
6294 return -TARGET_ENOSYS;
6295 }
6296 #else
6297 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6298 {
6299 abi_long ret = 0;
6300 abi_ulong val;
6301 int idx;
6302
6303 switch(code) {
6304 case TARGET_ARCH_SET_GS:
6305 case TARGET_ARCH_SET_FS:
6306 if (code == TARGET_ARCH_SET_GS)
6307 idx = R_GS;
6308 else
6309 idx = R_FS;
6310 cpu_x86_load_seg(env, idx, 0);
6311 env->segs[idx].base = addr;
6312 break;
6313 case TARGET_ARCH_GET_GS:
6314 case TARGET_ARCH_GET_FS:
6315 if (code == TARGET_ARCH_GET_GS)
6316 idx = R_GS;
6317 else
6318 idx = R_FS;
6319 val = env->segs[idx].base;
6320 if (put_user(val, addr, abi_ulong))
6321 ret = -TARGET_EFAULT;
6322 break;
6323 default:
6324 ret = -TARGET_EINVAL;
6325 break;
6326 }
6327 return ret;
6328 }
6329 #endif /* defined(TARGET_ABI32) */
6330 #endif /* defined(TARGET_I386) */
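/*
 * Illustrative sketch, not from the original sources (hypothetical guest
 * code, 64-bit x86 guests only): the ARCH_SET_FS/ARCH_GET_FS requests
 * handled by do_arch_prctl() above are what guest libcs use to install
 * the TLS base register.
 */
#if 0
#include <asm/prctl.h>      /* ARCH_SET_FS, ARCH_GET_FS */
#include <sys/syscall.h>
#include <unistd.h>

static int example_set_fs_base(unsigned long base)
{
    return syscall(SYS_arch_prctl, ARCH_SET_FS, base);
}
#endif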
6331
6332 /*
6333 * These constants are generic. Supply any that are missing from the host.
6334 */
6335 #ifndef PR_SET_NAME
6336 # define PR_SET_NAME 15
6337 # define PR_GET_NAME 16
6338 #endif
6339 #ifndef PR_SET_FP_MODE
6340 # define PR_SET_FP_MODE 45
6341 # define PR_GET_FP_MODE 46
6342 # define PR_FP_MODE_FR (1 << 0)
6343 # define PR_FP_MODE_FRE (1 << 1)
6344 #endif
6345 #ifndef PR_SVE_SET_VL
6346 # define PR_SVE_SET_VL 50
6347 # define PR_SVE_GET_VL 51
6348 # define PR_SVE_VL_LEN_MASK 0xffff
6349 # define PR_SVE_VL_INHERIT (1 << 17)
6350 #endif
6351 #ifndef PR_PAC_RESET_KEYS
6352 # define PR_PAC_RESET_KEYS 54
6353 # define PR_PAC_APIAKEY (1 << 0)
6354 # define PR_PAC_APIBKEY (1 << 1)
6355 # define PR_PAC_APDAKEY (1 << 2)
6356 # define PR_PAC_APDBKEY (1 << 3)
6357 # define PR_PAC_APGAKEY (1 << 4)
6358 #endif
6359 #ifndef PR_SET_TAGGED_ADDR_CTRL
6360 # define PR_SET_TAGGED_ADDR_CTRL 55
6361 # define PR_GET_TAGGED_ADDR_CTRL 56
6362 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6363 #endif
6364 #ifndef PR_MTE_TCF_SHIFT
6365 # define PR_MTE_TCF_SHIFT 1
6366 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6367 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6368 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6369 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6370 # define PR_MTE_TAG_SHIFT 3
6371 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6372 #endif
6373 #ifndef PR_SET_IO_FLUSHER
6374 # define PR_SET_IO_FLUSHER 57
6375 # define PR_GET_IO_FLUSHER 58
6376 #endif
6377 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6378 # define PR_SET_SYSCALL_USER_DISPATCH 59
6379 #endif
6380 #ifndef PR_SME_SET_VL
6381 # define PR_SME_SET_VL 63
6382 # define PR_SME_GET_VL 64
6383 # define PR_SME_VL_LEN_MASK 0xffff
6384 # define PR_SME_VL_INHERIT (1 << 17)
6385 #endif
6386
6387 #include "target_prctl.h"
6388
6389 static abi_long do_prctl_inval0(CPUArchState *env)
6390 {
6391 return -TARGET_EINVAL;
6392 }
6393
6394 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6395 {
6396 return -TARGET_EINVAL;
6397 }
6398
6399 #ifndef do_prctl_get_fp_mode
6400 #define do_prctl_get_fp_mode do_prctl_inval0
6401 #endif
6402 #ifndef do_prctl_set_fp_mode
6403 #define do_prctl_set_fp_mode do_prctl_inval1
6404 #endif
6405 #ifndef do_prctl_sve_get_vl
6406 #define do_prctl_sve_get_vl do_prctl_inval0
6407 #endif
6408 #ifndef do_prctl_sve_set_vl
6409 #define do_prctl_sve_set_vl do_prctl_inval1
6410 #endif
6411 #ifndef do_prctl_reset_keys
6412 #define do_prctl_reset_keys do_prctl_inval1
6413 #endif
6414 #ifndef do_prctl_set_tagged_addr_ctrl
6415 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6416 #endif
6417 #ifndef do_prctl_get_tagged_addr_ctrl
6418 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6419 #endif
6420 #ifndef do_prctl_get_unalign
6421 #define do_prctl_get_unalign do_prctl_inval1
6422 #endif
6423 #ifndef do_prctl_set_unalign
6424 #define do_prctl_set_unalign do_prctl_inval1
6425 #endif
6426 #ifndef do_prctl_sme_get_vl
6427 #define do_prctl_sme_get_vl do_prctl_inval0
6428 #endif
6429 #ifndef do_prctl_sme_set_vl
6430 #define do_prctl_sme_set_vl do_prctl_inval1
6431 #endif
6432
6433 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6434 abi_long arg3, abi_long arg4, abi_long arg5)
6435 {
6436 abi_long ret;
6437
6438 switch (option) {
6439 case PR_GET_PDEATHSIG:
6440 {
6441 int deathsig;
6442 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6443 arg3, arg4, arg5));
6444 if (!is_error(ret) &&
6445 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6446 return -TARGET_EFAULT;
6447 }
6448 return ret;
6449 }
6450 case PR_SET_PDEATHSIG:
6451 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6452 arg3, arg4, arg5));
6453 case PR_GET_NAME:
6454 {
6455 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6456 if (!name) {
6457 return -TARGET_EFAULT;
6458 }
6459 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6460 arg3, arg4, arg5));
6461 unlock_user(name, arg2, 16);
6462 return ret;
6463 }
6464 case PR_SET_NAME:
6465 {
6466 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6467 if (!name) {
6468 return -TARGET_EFAULT;
6469 }
6470 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6471 arg3, arg4, arg5));
6472 unlock_user(name, arg2, 0);
6473 return ret;
6474 }
6475 case PR_GET_FP_MODE:
6476 return do_prctl_get_fp_mode(env);
6477 case PR_SET_FP_MODE:
6478 return do_prctl_set_fp_mode(env, arg2);
6479 case PR_SVE_GET_VL:
6480 return do_prctl_sve_get_vl(env);
6481 case PR_SVE_SET_VL:
6482 return do_prctl_sve_set_vl(env, arg2);
6483 case PR_SME_GET_VL:
6484 return do_prctl_sme_get_vl(env);
6485 case PR_SME_SET_VL:
6486 return do_prctl_sme_set_vl(env, arg2);
6487 case PR_PAC_RESET_KEYS:
6488 if (arg3 || arg4 || arg5) {
6489 return -TARGET_EINVAL;
6490 }
6491 return do_prctl_reset_keys(env, arg2);
6492 case PR_SET_TAGGED_ADDR_CTRL:
6493 if (arg3 || arg4 || arg5) {
6494 return -TARGET_EINVAL;
6495 }
6496 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6497 case PR_GET_TAGGED_ADDR_CTRL:
6498 if (arg2 || arg3 || arg4 || arg5) {
6499 return -TARGET_EINVAL;
6500 }
6501 return do_prctl_get_tagged_addr_ctrl(env);
6502
6503 case PR_GET_UNALIGN:
6504 return do_prctl_get_unalign(env, arg2);
6505 case PR_SET_UNALIGN:
6506 return do_prctl_set_unalign(env, arg2);
6507
6508 case PR_CAP_AMBIENT:
6509 case PR_CAPBSET_READ:
6510 case PR_CAPBSET_DROP:
6511 case PR_GET_DUMPABLE:
6512 case PR_SET_DUMPABLE:
6513 case PR_GET_KEEPCAPS:
6514 case PR_SET_KEEPCAPS:
6515 case PR_GET_SECUREBITS:
6516 case PR_SET_SECUREBITS:
6517 case PR_GET_TIMING:
6518 case PR_SET_TIMING:
6519 case PR_GET_TIMERSLACK:
6520 case PR_SET_TIMERSLACK:
6521 case PR_MCE_KILL:
6522 case PR_MCE_KILL_GET:
6523 case PR_GET_NO_NEW_PRIVS:
6524 case PR_SET_NO_NEW_PRIVS:
6525 case PR_GET_IO_FLUSHER:
6526 case PR_SET_IO_FLUSHER:
6527 /* Some prctl options have no pointer arguments and we can pass them on. */
6528 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6529
6530 case PR_GET_CHILD_SUBREAPER:
6531 case PR_SET_CHILD_SUBREAPER:
6532 case PR_GET_SPECULATION_CTRL:
6533 case PR_SET_SPECULATION_CTRL:
6534 case PR_GET_TID_ADDRESS:
6535 /* TODO */
6536 return -TARGET_EINVAL;
6537
6538 case PR_GET_FPEXC:
6539 case PR_SET_FPEXC:
6540 /* Was used for SPE on PowerPC. */
6541 return -TARGET_EINVAL;
6542
6543 case PR_GET_ENDIAN:
6544 case PR_SET_ENDIAN:
6545 case PR_GET_FPEMU:
6546 case PR_SET_FPEMU:
6547 case PR_SET_MM:
6548 case PR_GET_SECCOMP:
6549 case PR_SET_SECCOMP:
6550 case PR_SET_SYSCALL_USER_DISPATCH:
6551 case PR_GET_THP_DISABLE:
6552 case PR_SET_THP_DISABLE:
6553 case PR_GET_TSC:
6554 case PR_SET_TSC:
6555 /* Refuse these to prevent the guest disabling features QEMU needs. */
6556 return -TARGET_EINVAL;
6557
6558 default:
6559 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6560 option);
6561 return -TARGET_EINVAL;
6562 }
6563 }
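/*
 * Illustrative sketch, not from the original sources (hypothetical guest
 * code): PR_SET_NAME/PR_GET_NAME operate on a fixed 16-byte comm buffer,
 * which is why the handler above locks exactly 16 bytes of guest memory
 * in both directions.
 */
#if 0
#include <sys/prctl.h>

static void example_thread_name(void)
{
    char name[16];

    prctl(PR_SET_NAME, "worker-0");   /* truncated to 15 chars + NUL */
    prctl(PR_GET_NAME, name);         /* name now holds "worker-0"   */
}
#endif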
6564
6565 #define NEW_STACK_SIZE 0x40000
6566
6567
6568 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6569 typedef struct {
6570 CPUArchState *env;
6571 pthread_mutex_t mutex;
6572 pthread_cond_t cond;
6573 pthread_t thread;
6574 uint32_t tid;
6575 abi_ulong child_tidptr;
6576 abi_ulong parent_tidptr;
6577 sigset_t sigmask;
6578 } new_thread_info;
6579
6580 static void *clone_func(void *arg)
6581 {
6582 new_thread_info *info = arg;
6583 CPUArchState *env;
6584 CPUState *cpu;
6585 TaskState *ts;
6586
6587 rcu_register_thread();
6588 tcg_register_thread();
6589 env = info->env;
6590 cpu = env_cpu(env);
6591 thread_cpu = cpu;
6592 ts = (TaskState *)cpu->opaque;
6593 info->tid = sys_gettid();
6594 task_settid(ts);
6595 if (info->child_tidptr)
6596 put_user_u32(info->tid, info->child_tidptr);
6597 if (info->parent_tidptr)
6598 put_user_u32(info->tid, info->parent_tidptr);
6599 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6600 /* Enable signals. */
6601 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6602 /* Signal to the parent that we're ready. */
6603 pthread_mutex_lock(&info->mutex);
6604 pthread_cond_broadcast(&info->cond);
6605 pthread_mutex_unlock(&info->mutex);
6606 /* Wait until the parent has finished initializing the tls state. */
6607 pthread_mutex_lock(&clone_lock);
6608 pthread_mutex_unlock(&clone_lock);
6609 cpu_loop(env);
6610 /* never exits */
6611 return NULL;
6612 }
6613
6614 /* do_fork() must return host values and target errnos (unlike most
6615 do_*() functions). */
6616 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6617 abi_ulong parent_tidptr, target_ulong newtls,
6618 abi_ulong child_tidptr)
6619 {
6620 CPUState *cpu = env_cpu(env);
6621 int ret;
6622 TaskState *ts;
6623 CPUState *new_cpu;
6624 CPUArchState *new_env;
6625 sigset_t sigmask;
6626
6627 flags &= ~CLONE_IGNORED_FLAGS;
6628
6629 /* Emulate vfork() with fork() */
6630 if (flags & CLONE_VFORK)
6631 flags &= ~(CLONE_VFORK | CLONE_VM);
6632
6633 if (flags & CLONE_VM) {
6634 TaskState *parent_ts = (TaskState *)cpu->opaque;
6635 new_thread_info info;
6636 pthread_attr_t attr;
6637
6638 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6639 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6640 return -TARGET_EINVAL;
6641 }
6642
6643 ts = g_new0(TaskState, 1);
6644 init_task_state(ts);
6645
6646 /* Grab a mutex so that thread setup appears atomic. */
6647 pthread_mutex_lock(&clone_lock);
6648
6649 /*
6650 * If this is our first additional thread, we need to ensure we
6651 * generate code for parallel execution and flush old translations.
6652 * Do this now so that the copy gets CF_PARALLEL too.
6653 */
6654 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6655 cpu->tcg_cflags |= CF_PARALLEL;
6656 tb_flush(cpu);
6657 }
6658
6659 /* we create a new CPU instance. */
6660 new_env = cpu_copy(env);
6661 /* Init regs that differ from the parent. */
6662 cpu_clone_regs_child(new_env, newsp, flags);
6663 cpu_clone_regs_parent(env, flags);
6664 new_cpu = env_cpu(new_env);
6665 new_cpu->opaque = ts;
6666 ts->bprm = parent_ts->bprm;
6667 ts->info = parent_ts->info;
6668 ts->signal_mask = parent_ts->signal_mask;
6669
6670 if (flags & CLONE_CHILD_CLEARTID) {
6671 ts->child_tidptr = child_tidptr;
6672 }
6673
6674 if (flags & CLONE_SETTLS) {
6675 cpu_set_tls (new_env, newtls);
6676 }
6677
6678 memset(&info, 0, sizeof(info));
6679 pthread_mutex_init(&info.mutex, NULL);
6680 pthread_mutex_lock(&info.mutex);
6681 pthread_cond_init(&info.cond, NULL);
6682 info.env = new_env;
6683 if (flags & CLONE_CHILD_SETTID) {
6684 info.child_tidptr = child_tidptr;
6685 }
6686 if (flags & CLONE_PARENT_SETTID) {
6687 info.parent_tidptr = parent_tidptr;
6688 }
6689
6690 ret = pthread_attr_init(&attr);
6691 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6692 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6693 /* It is not safe to deliver signals until the child has finished
6694 initializing, so temporarily block all signals. */
6695 sigfillset(&sigmask);
6696 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6697 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6698
6699 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6700 /* TODO: Free new CPU state if thread creation failed. */
6701
6702 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6703 pthread_attr_destroy(&attr);
6704 if (ret == 0) {
6705 /* Wait for the child to initialize. */
6706 pthread_cond_wait(&info.cond, &info.mutex);
6707 ret = info.tid;
6708 } else {
6709 ret = -1;
6710 }
6711 pthread_mutex_unlock(&info.mutex);
6712 pthread_cond_destroy(&info.cond);
6713 pthread_mutex_destroy(&info.mutex);
6714 pthread_mutex_unlock(&clone_lock);
6715 } else {
6716 /* Without CLONE_VM, we treat the clone as a fork. */
6717 if (flags & CLONE_INVALID_FORK_FLAGS) {
6718 return -TARGET_EINVAL;
6719 }
6720
6721 /* We can't support custom termination signals */
6722 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6723 return -TARGET_EINVAL;
6724 }
6725
6726 if (block_signals()) {
6727 return -QEMU_ERESTARTSYS;
6728 }
6729
6730 fork_start();
6731 ret = fork();
6732 if (ret == 0) {
6733 /* Child Process. */
6734 cpu_clone_regs_child(env, newsp, flags);
6735 fork_end(1);
6736 /* There is a race condition here. The parent process could
6737 theoretically read the TID in the child process before the child
6738 tid is set. This would require using either ptrace
6739 (not implemented) or having *_tidptr point at a shared memory
6740 mapping. We can't repeat the spinlock hack used above because
6741 the child process gets its own copy of the lock. */
6742 if (flags & CLONE_CHILD_SETTID)
6743 put_user_u32(sys_gettid(), child_tidptr);
6744 if (flags & CLONE_PARENT_SETTID)
6745 put_user_u32(sys_gettid(), parent_tidptr);
6746 ts = (TaskState *)cpu->opaque;
6747 if (flags & CLONE_SETTLS)
6748 cpu_set_tls (env, newtls);
6749 if (flags & CLONE_CHILD_CLEARTID)
6750 ts->child_tidptr = child_tidptr;
6751 } else {
6752 cpu_clone_regs_parent(env, flags);
6753 fork_end(0);
6754 }
6755 g_assert(!cpu_in_exclusive_context(cpu));
6756 }
6757 return ret;
6758 }
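/*
 * Minimal self-contained sketch, not from the original sources, of the
 * startup handshake that do_fork()/clone_func() perform above
 * (hypothetical example_ names; error handling and thread join omitted).
 * The parent holds a global setup lock across the whole clone, so the
 * child, after announcing its tid, blocks until the parent has finished
 * initializing shared state.
 */
#if 0
#include <pthread.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int tid;
} example_thread_info;

static pthread_mutex_t example_setup_lock = PTHREAD_MUTEX_INITIALIZER;

static void *example_child(void *arg)
{
    example_thread_info *info = arg;

    pthread_mutex_lock(&info->mutex);
    info->tid = 42;                       /* report identity to parent */
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);

    /* Stall here until the parent drops the setup lock. */
    pthread_mutex_lock(&example_setup_lock);
    pthread_mutex_unlock(&example_setup_lock);
    return NULL;
}

static int example_spawn(void)
{
    example_thread_info info = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };
    pthread_t thread;

    pthread_mutex_lock(&example_setup_lock);
    pthread_mutex_lock(&info.mutex);
    pthread_create(&thread, NULL, example_child, &info);
    pthread_cond_wait(&info.cond, &info.mutex);   /* wait for the tid */
    pthread_mutex_unlock(&info.mutex);
    /* ... finish initializing state shared with the child ... */
    pthread_mutex_unlock(&example_setup_lock);
    return info.tid;
}
#endif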
6759
6760 /* Warning: does not handle Linux-specific flags. */
6761 static int target_to_host_fcntl_cmd(int cmd)
6762 {
6763 int ret;
6764
6765 switch(cmd) {
6766 case TARGET_F_DUPFD:
6767 case TARGET_F_GETFD:
6768 case TARGET_F_SETFD:
6769 case TARGET_F_GETFL:
6770 case TARGET_F_SETFL:
6771 case TARGET_F_OFD_GETLK:
6772 case TARGET_F_OFD_SETLK:
6773 case TARGET_F_OFD_SETLKW:
6774 ret = cmd;
6775 break;
6776 case TARGET_F_GETLK:
6777 ret = F_GETLK64;
6778 break;
6779 case TARGET_F_SETLK:
6780 ret = F_SETLK64;
6781 break;
6782 case TARGET_F_SETLKW:
6783 ret = F_SETLKW64;
6784 break;
6785 case TARGET_F_GETOWN:
6786 ret = F_GETOWN;
6787 break;
6788 case TARGET_F_SETOWN:
6789 ret = F_SETOWN;
6790 break;
6791 case TARGET_F_GETSIG:
6792 ret = F_GETSIG;
6793 break;
6794 case TARGET_F_SETSIG:
6795 ret = F_SETSIG;
6796 break;
6797 #if TARGET_ABI_BITS == 32
6798 case TARGET_F_GETLK64:
6799 ret = F_GETLK64;
6800 break;
6801 case TARGET_F_SETLK64:
6802 ret = F_SETLK64;
6803 break;
6804 case TARGET_F_SETLKW64:
6805 ret = F_SETLKW64;
6806 break;
6807 #endif
6808 case TARGET_F_SETLEASE:
6809 ret = F_SETLEASE;
6810 break;
6811 case TARGET_F_GETLEASE:
6812 ret = F_GETLEASE;
6813 break;
6814 #ifdef F_DUPFD_CLOEXEC
6815 case TARGET_F_DUPFD_CLOEXEC:
6816 ret = F_DUPFD_CLOEXEC;
6817 break;
6818 #endif
6819 case TARGET_F_NOTIFY:
6820 ret = F_NOTIFY;
6821 break;
6822 #ifdef F_GETOWN_EX
6823 case TARGET_F_GETOWN_EX:
6824 ret = F_GETOWN_EX;
6825 break;
6826 #endif
6827 #ifdef F_SETOWN_EX
6828 case TARGET_F_SETOWN_EX:
6829 ret = F_SETOWN_EX;
6830 break;
6831 #endif
6832 #ifdef F_SETPIPE_SZ
6833 case TARGET_F_SETPIPE_SZ:
6834 ret = F_SETPIPE_SZ;
6835 break;
6836 case TARGET_F_GETPIPE_SZ:
6837 ret = F_GETPIPE_SZ;
6838 break;
6839 #endif
6840 #ifdef F_ADD_SEALS
6841 case TARGET_F_ADD_SEALS:
6842 ret = F_ADD_SEALS;
6843 break;
6844 case TARGET_F_GET_SEALS:
6845 ret = F_GET_SEALS;
6846 break;
6847 #endif
6848 default:
6849 ret = -TARGET_EINVAL;
6850 break;
6851 }
6852
6853 #if defined(__powerpc64__)
6854 /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13
6855 * and 14, which the kernel does not support. The glibc fcntl wrapper
6856 * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6857 * the syscall directly, adjust to what the kernel supports.
6858 */
6859 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6860 ret -= F_GETLK64 - 5;
6861 }
6862 #endif
6863
6864 return ret;
6865 }
6866
6867 #define FLOCK_TRANSTBL \
6868 switch (type) { \
6869 TRANSTBL_CONVERT(F_RDLCK); \
6870 TRANSTBL_CONVERT(F_WRLCK); \
6871 TRANSTBL_CONVERT(F_UNLCK); \
6872 }
6873
6874 static int target_to_host_flock(int type)
6875 {
6876 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6877 FLOCK_TRANSTBL
6878 #undef TRANSTBL_CONVERT
6879 return -TARGET_EINVAL;
6880 }
6881
6882 static int host_to_target_flock(int type)
6883 {
6884 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6885 FLOCK_TRANSTBL
6886 #undef TRANSTBL_CONVERT
6887 /* If we don't know how to convert the value coming from the host,
6888 * copy it to the target field as-is.
6889 */
6890 return type;
6891 }
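/*
 * Illustrative expansion: with TRANSTBL_CONVERT defined as in
 * target_to_host_flock() above, FLOCK_TRANSTBL expands to
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 *
 * and host_to_target_flock() gets the same table with the case labels
 * and return values swapped.
 */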
6892
6893 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6894 abi_ulong target_flock_addr)
6895 {
6896 struct target_flock *target_fl;
6897 int l_type;
6898
6899 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6900 return -TARGET_EFAULT;
6901 }
6902
6903 __get_user(l_type, &target_fl->l_type);
6904 l_type = target_to_host_flock(l_type);
6905 if (l_type < 0) {
6906 return l_type;
6907 }
6908 fl->l_type = l_type;
6909 __get_user(fl->l_whence, &target_fl->l_whence);
6910 __get_user(fl->l_start, &target_fl->l_start);
6911 __get_user(fl->l_len, &target_fl->l_len);
6912 __get_user(fl->l_pid, &target_fl->l_pid);
6913 unlock_user_struct(target_fl, target_flock_addr, 0);
6914 return 0;
6915 }
6916
6917 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6918 const struct flock64 *fl)
6919 {
6920 struct target_flock *target_fl;
6921 short l_type;
6922
6923 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6924 return -TARGET_EFAULT;
6925 }
6926
6927 l_type = host_to_target_flock(fl->l_type);
6928 __put_user(l_type, &target_fl->l_type);
6929 __put_user(fl->l_whence, &target_fl->l_whence);
6930 __put_user(fl->l_start, &target_fl->l_start);
6931 __put_user(fl->l_len, &target_fl->l_len);
6932 __put_user(fl->l_pid, &target_fl->l_pid);
6933 unlock_user_struct(target_fl, target_flock_addr, 1);
6934 return 0;
6935 }
6936
6937 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6938 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6939
6940 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6941 struct target_oabi_flock64 {
6942 abi_short l_type;
6943 abi_short l_whence;
6944 abi_llong l_start;
6945 abi_llong l_len;
6946 abi_int l_pid;
6947 } QEMU_PACKED;
6948
6949 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6950 abi_ulong target_flock_addr)
6951 {
6952 struct target_oabi_flock64 *target_fl;
6953 int l_type;
6954
6955 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6956 return -TARGET_EFAULT;
6957 }
6958
6959 __get_user(l_type, &target_fl->l_type);
6960 l_type = target_to_host_flock(l_type);
6961 if (l_type < 0) {
6962 return l_type;
6963 }
6964 fl->l_type = l_type;
6965 __get_user(fl->l_whence, &target_fl->l_whence);
6966 __get_user(fl->l_start, &target_fl->l_start);
6967 __get_user(fl->l_len, &target_fl->l_len);
6968 __get_user(fl->l_pid, &target_fl->l_pid);
6969 unlock_user_struct(target_fl, target_flock_addr, 0);
6970 return 0;
6971 }
6972
6973 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6974 const struct flock64 *fl)
6975 {
6976 struct target_oabi_flock64 *target_fl;
6977 short l_type;
6978
6979 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6980 return -TARGET_EFAULT;
6981 }
6982
6983 l_type = host_to_target_flock(fl->l_type);
6984 __put_user(l_type, &target_fl->l_type);
6985 __put_user(fl->l_whence, &target_fl->l_whence);
6986 __put_user(fl->l_start, &target_fl->l_start);
6987 __put_user(fl->l_len, &target_fl->l_len);
6988 __put_user(fl->l_pid, &target_fl->l_pid);
6989 unlock_user_struct(target_fl, target_flock_addr, 1);
6990 return 0;
6991 }
6992 #endif
6993
6994 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6995 abi_ulong target_flock_addr)
6996 {
6997 struct target_flock64 *target_fl;
6998 int l_type;
6999
7000 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7001 return -TARGET_EFAULT;
7002 }
7003
7004 __get_user(l_type, &target_fl->l_type);
7005 l_type = target_to_host_flock(l_type);
7006 if (l_type < 0) {
7007 return l_type;
7008 }
7009 fl->l_type = l_type;
7010 __get_user(fl->l_whence, &target_fl->l_whence);
7011 __get_user(fl->l_start, &target_fl->l_start);
7012 __get_user(fl->l_len, &target_fl->l_len);
7013 __get_user(fl->l_pid, &target_fl->l_pid);
7014 unlock_user_struct(target_fl, target_flock_addr, 0);
7015 return 0;
7016 }
7017
7018 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7019 const struct flock64 *fl)
7020 {
7021 struct target_flock64 *target_fl;
7022 short l_type;
7023
7024 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7025 return -TARGET_EFAULT;
7026 }
7027
7028 l_type = host_to_target_flock(fl->l_type);
7029 __put_user(l_type, &target_fl->l_type);
7030 __put_user(fl->l_whence, &target_fl->l_whence);
7031 __put_user(fl->l_start, &target_fl->l_start);
7032 __put_user(fl->l_len, &target_fl->l_len);
7033 __put_user(fl->l_pid, &target_fl->l_pid);
7034 unlock_user_struct(target_fl, target_flock_addr, 1);
7035 return 0;
7036 }
7037
7038 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7039 {
7040 struct flock64 fl64;
7041 #ifdef F_GETOWN_EX
7042 struct f_owner_ex fox;
7043 struct target_f_owner_ex *target_fox;
7044 #endif
7045 abi_long ret;
7046 int host_cmd = target_to_host_fcntl_cmd(cmd);
7047
7048 if (host_cmd == -TARGET_EINVAL)
7049 return host_cmd;
7050
7051 switch(cmd) {
7052 case TARGET_F_GETLK:
7053 ret = copy_from_user_flock(&fl64, arg);
7054 if (ret) {
7055 return ret;
7056 }
7057 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7058 if (ret == 0) {
7059 ret = copy_to_user_flock(arg, &fl64);
7060 }
7061 break;
7062
7063 case TARGET_F_SETLK:
7064 case TARGET_F_SETLKW:
7065 ret = copy_from_user_flock(&fl64, arg);
7066 if (ret) {
7067 return ret;
7068 }
7069 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7070 break;
7071
7072 case TARGET_F_GETLK64:
7073 case TARGET_F_OFD_GETLK:
7074 ret = copy_from_user_flock64(&fl64, arg);
7075 if (ret) {
7076 return ret;
7077 }
7078 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7079 if (ret == 0) {
7080 ret = copy_to_user_flock64(arg, &fl64);
7081 }
7082 break;
7083 case TARGET_F_SETLK64:
7084 case TARGET_F_SETLKW64:
7085 case TARGET_F_OFD_SETLK:
7086 case TARGET_F_OFD_SETLKW:
7087 ret = copy_from_user_flock64(&fl64, arg);
7088 if (ret) {
7089 return ret;
7090 }
7091 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7092 break;
7093
7094 case TARGET_F_GETFL:
7095 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7096 if (ret >= 0) {
7097 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7098 }
7099 break;
7100
7101 case TARGET_F_SETFL:
7102 ret = get_errno(safe_fcntl(fd, host_cmd,
7103 target_to_host_bitmask(arg,
7104 fcntl_flags_tbl)));
7105 break;
7106
7107 #ifdef F_GETOWN_EX
7108 case TARGET_F_GETOWN_EX:
7109 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7110 if (ret >= 0) {
7111 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7112 return -TARGET_EFAULT;
7113 target_fox->type = tswap32(fox.type);
7114 target_fox->pid = tswap32(fox.pid);
7115 unlock_user_struct(target_fox, arg, 1);
7116 }
7117 break;
7118 #endif
7119
7120 #ifdef F_SETOWN_EX
7121 case TARGET_F_SETOWN_EX:
7122 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7123 return -TARGET_EFAULT;
7124 fox.type = tswap32(target_fox->type);
7125 fox.pid = tswap32(target_fox->pid);
7126 unlock_user_struct(target_fox, arg, 0);
7127 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7128 break;
7129 #endif
7130
7131 case TARGET_F_SETSIG:
7132 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7133 break;
7134
7135 case TARGET_F_GETSIG:
7136 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7137 break;
7138
7139 case TARGET_F_SETOWN:
7140 case TARGET_F_GETOWN:
7141 case TARGET_F_SETLEASE:
7142 case TARGET_F_GETLEASE:
7143 case TARGET_F_SETPIPE_SZ:
7144 case TARGET_F_GETPIPE_SZ:
7145 case TARGET_F_ADD_SEALS:
7146 case TARGET_F_GET_SEALS:
7147 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7148 break;
7149
7150 default:
7151 ret = get_errno(safe_fcntl(fd, cmd, arg));
7152 break;
7153 }
7154 return ret;
7155 }
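/*
 * Illustrative sketch, not from the original sources (hypothetical guest
 * code): an open-file-description lock request; do_fcntl() above routes
 * TARGET_F_OFD_SETLK through copy_from_user_flock64() and the host's
 * 64-bit flock path. F_OFD_SETLK assumes _GNU_SOURCE on the guest.
 */
#if 0
#include <fcntl.h>
#include <string.h>

static int example_ofd_write_lock(int fd)
{
    struct flock fl;

    memset(&fl, 0, sizeof(fl));   /* OFD locks require l_pid == 0    */
    fl.l_type = F_WRLCK;
    fl.l_whence = SEEK_SET;       /* l_start = l_len = 0: whole file */
    return fcntl(fd, F_OFD_SETLK, &fl);
}
#endif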
7156
7157 #ifdef USE_UID16
7158
7159 static inline int high2lowuid(int uid)
7160 {
7161 if (uid > 65535)
7162 return 65534;
7163 else
7164 return uid;
7165 }
7166
7167 static inline int high2lowgid(int gid)
7168 {
7169 if (gid > 65535)
7170 return 65534;
7171 else
7172 return gid;
7173 }
7174
7175 static inline int low2highuid(int uid)
7176 {
7177 if ((int16_t)uid == -1)
7178 return -1;
7179 else
7180 return uid;
7181 }
7182
7183 static inline int low2highgid(int gid)
7184 {
7185 if ((int16_t)gid == -1)
7186 return -1;
7187 else
7188 return gid;
7189 }
7190 static inline int tswapid(int id)
7191 {
7192 return tswap16(id);
7193 }
7194
7195 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7196
7197 #else /* !USE_UID16 */
7198 static inline int high2lowuid(int uid)
7199 {
7200 return uid;
7201 }
7202 static inline int high2lowgid(int gid)
7203 {
7204 return gid;
7205 }
7206 static inline int low2highuid(int uid)
7207 {
7208 return uid;
7209 }
7210 static inline int low2highgid(int gid)
7211 {
7212 return gid;
7213 }
7214 static inline int tswapid(int id)
7215 {
7216 return tswap32(id);
7217 }
7218
7219 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7220
7221 #endif /* USE_UID16 */
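/*
 * Illustration of the USE_UID16 helpers (hypothetical values): ids that
 * do not fit in 16 bits fall back to the kernel's traditional overflow
 * id, and 0xffff maps back to the special value -1.
 */
#if 0
static void example_uid16_mapping(void)
{
    int a = high2lowuid(1000);     /* == 1000: fits in 16 bits        */
    int b = high2lowuid(100000);   /* == 65534: overflowuid fallback  */
    int c = low2highuid(0xffff);   /* == -1: (int16_t)0xffff is -1    */

    (void)a; (void)b; (void)c;
}
#endif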
7222
7223 /* We must do direct syscalls for setting UID/GID, because we want to
7224 * implement the Linux system call semantics of "change only for this thread",
7225 * not the libc/POSIX semantics of "change for all threads in process".
7226 * (See http://ewontfix.com/17/ for more details.)
7227 * We use the 32-bit version of the syscalls if present; if it is not
7228 * then either the host architecture supports 32-bit UIDs natively with
7229 * the standard syscall, or the 16-bit UID is the best we can do.
7230 */
7231 #ifdef __NR_setuid32
7232 #define __NR_sys_setuid __NR_setuid32
7233 #else
7234 #define __NR_sys_setuid __NR_setuid
7235 #endif
7236 #ifdef __NR_setgid32
7237 #define __NR_sys_setgid __NR_setgid32
7238 #else
7239 #define __NR_sys_setgid __NR_setgid
7240 #endif
7241 #ifdef __NR_setresuid32
7242 #define __NR_sys_setresuid __NR_setresuid32
7243 #else
7244 #define __NR_sys_setresuid __NR_setresuid
7245 #endif
7246 #ifdef __NR_setresgid32
7247 #define __NR_sys_setresgid __NR_setresgid32
7248 #else
7249 #define __NR_sys_setresgid __NR_setresgid
7250 #endif
7251
7252 _syscall1(int, sys_setuid, uid_t, uid)
7253 _syscall1(int, sys_setgid, gid_t, gid)
7254 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7255 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7256
7257 void syscall_init(void)
7258 {
7259 IOCTLEntry *ie;
7260 const argtype *arg_type;
7261 int size;
7262
7263 thunk_init(STRUCT_MAX);
7264
7265 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7266 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7267 #include "syscall_types.h"
7268 #undef STRUCT
7269 #undef STRUCT_SPECIAL
7270
7271 /* Patch the ioctl size if necessary. We rely on the fact that
7272 no ioctl has all bits set to '1' in its size field. */
7273 ie = ioctl_entries;
7274 while (ie->target_cmd != 0) {
7275 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7276 TARGET_IOC_SIZEMASK) {
7277 arg_type = ie->arg_type;
7278 if (arg_type[0] != TYPE_PTR) {
7279 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7280 ie->target_cmd);
7281 exit(1);
7282 }
7283 arg_type++;
7284 size = thunk_type_size(arg_type, 0);
7285 ie->target_cmd = (ie->target_cmd &
7286 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7287 (size << TARGET_IOC_SIZESHIFT);
7288 }
7289
7290 /* automatic consistency check if same arch */
7291 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7292 (defined(__x86_64__) && defined(TARGET_X86_64))
7293 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7294 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7295 ie->name, ie->target_cmd, ie->host_cmd);
7296 }
7297 #endif
7298 ie++;
7299 }
7300 }
7301
7302 #ifdef TARGET_NR_truncate64
7303 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7304 abi_long arg2,
7305 abi_long arg3,
7306 abi_long arg4)
7307 {
7308 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7309 arg2 = arg3;
7310 arg3 = arg4;
7311 }
7312 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7313 }
7314 #endif
7315
7316 #ifdef TARGET_NR_ftruncate64
7317 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7318 abi_long arg2,
7319 abi_long arg3,
7320 abi_long arg4)
7321 {
7322 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7323 arg2 = arg3;
7324 arg3 = arg4;
7325 }
7326 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7327 }
7328 #endif
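/*
 * Worked example: on 32-bit ABIs the 64-bit offset arrives as two
 * registers, and on ABIs where register pairs must be aligned
 * (regpairs_aligned) the pair is shifted up one argument slot -- hence
 * the arg2/arg3 reshuffling above. Assuming target_offset64() combines
 * the halves according to guest endianness, a little-endian 32-bit
 * guest gives:
 *
 *   target_offset64(0x89abcdef, 0x01234567) == 0x0123456789abcdefULL
 */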
7329
7330 #if defined(TARGET_NR_timer_settime) || \
7331 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7332 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7333 abi_ulong target_addr)
7334 {
7335 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7336 offsetof(struct target_itimerspec,
7337 it_interval)) ||
7338 target_to_host_timespec(&host_its->it_value, target_addr +
7339 offsetof(struct target_itimerspec,
7340 it_value))) {
7341 return -TARGET_EFAULT;
7342 }
7343
7344 return 0;
7345 }
7346 #endif
7347
7348 #if defined(TARGET_NR_timer_settime64) || \
7349 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7350 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7351 abi_ulong target_addr)
7352 {
7353 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7354 offsetof(struct target__kernel_itimerspec,
7355 it_interval)) ||
7356 target_to_host_timespec64(&host_its->it_value, target_addr +
7357 offsetof(struct target__kernel_itimerspec,
7358 it_value))) {
7359 return -TARGET_EFAULT;
7360 }
7361
7362 return 0;
7363 }
7364 #endif
7365
7366 #if ((defined(TARGET_NR_timerfd_gettime) || \
7367 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7368 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7369 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7370 struct itimerspec *host_its)
7371 {
7372 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7373 it_interval),
7374 &host_its->it_interval) ||
7375 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7376 it_value),
7377 &host_its->it_value)) {
7378 return -TARGET_EFAULT;
7379 }
7380 return 0;
7381 }
7382 #endif
7383
7384 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7385 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7386 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7387 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7388 struct itimerspec *host_its)
7389 {
7390 if (host_to_target_timespec64(target_addr +
7391 offsetof(struct target__kernel_itimerspec,
7392 it_interval),
7393 &host_its->it_interval) ||
7394 host_to_target_timespec64(target_addr +
7395 offsetof(struct target__kernel_itimerspec,
7396 it_value),
7397 &host_its->it_value)) {
7398 return -TARGET_EFAULT;
7399 }
7400 return 0;
7401 }
7402 #endif
7403
7404 #if defined(TARGET_NR_adjtimex) || \
7405 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7406 static inline abi_long target_to_host_timex(struct timex *host_tx,
7407 abi_long target_addr)
7408 {
7409 struct target_timex *target_tx;
7410
7411 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7412 return -TARGET_EFAULT;
7413 }
7414
7415 __get_user(host_tx->modes, &target_tx->modes);
7416 __get_user(host_tx->offset, &target_tx->offset);
7417 __get_user(host_tx->freq, &target_tx->freq);
7418 __get_user(host_tx->maxerror, &target_tx->maxerror);
7419 __get_user(host_tx->esterror, &target_tx->esterror);
7420 __get_user(host_tx->status, &target_tx->status);
7421 __get_user(host_tx->constant, &target_tx->constant);
7422 __get_user(host_tx->precision, &target_tx->precision);
7423 __get_user(host_tx->tolerance, &target_tx->tolerance);
7424 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7425 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7426 __get_user(host_tx->tick, &target_tx->tick);
7427 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7428 __get_user(host_tx->jitter, &target_tx->jitter);
7429 __get_user(host_tx->shift, &target_tx->shift);
7430 __get_user(host_tx->stabil, &target_tx->stabil);
7431 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7432 __get_user(host_tx->calcnt, &target_tx->calcnt);
7433 __get_user(host_tx->errcnt, &target_tx->errcnt);
7434 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7435 __get_user(host_tx->tai, &target_tx->tai);
7436
7437 unlock_user_struct(target_tx, target_addr, 0);
7438 return 0;
7439 }
7440
7441 static inline abi_long host_to_target_timex(abi_long target_addr,
7442 struct timex *host_tx)
7443 {
7444 struct target_timex *target_tx;
7445
7446 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7447 return -TARGET_EFAULT;
7448 }
7449
7450 __put_user(host_tx->modes, &target_tx->modes);
7451 __put_user(host_tx->offset, &target_tx->offset);
7452 __put_user(host_tx->freq, &target_tx->freq);
7453 __put_user(host_tx->maxerror, &target_tx->maxerror);
7454 __put_user(host_tx->esterror, &target_tx->esterror);
7455 __put_user(host_tx->status, &target_tx->status);
7456 __put_user(host_tx->constant, &target_tx->constant);
7457 __put_user(host_tx->precision, &target_tx->precision);
7458 __put_user(host_tx->tolerance, &target_tx->tolerance);
7459 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7460 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7461 __put_user(host_tx->tick, &target_tx->tick);
7462 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7463 __put_user(host_tx->jitter, &target_tx->jitter);
7464 __put_user(host_tx->shift, &target_tx->shift);
7465 __put_user(host_tx->stabil, &target_tx->stabil);
7466 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7467 __put_user(host_tx->calcnt, &target_tx->calcnt);
7468 __put_user(host_tx->errcnt, &target_tx->errcnt);
7469 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7470 __put_user(host_tx->tai, &target_tx->tai);
7471
7472 unlock_user_struct(target_tx, target_addr, 1);
7473 return 0;
7474 }
7475 #endif
7476
7477
7478 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7479 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7480 abi_long target_addr)
7481 {
7482 struct target__kernel_timex *target_tx;
7483
7484 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7485 offsetof(struct target__kernel_timex,
7486 time))) {
7487 return -TARGET_EFAULT;
7488 }
7489
7490 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7491 return -TARGET_EFAULT;
7492 }
7493
7494 __get_user(host_tx->modes, &target_tx->modes);
7495 __get_user(host_tx->offset, &target_tx->offset);
7496 __get_user(host_tx->freq, &target_tx->freq);
7497 __get_user(host_tx->maxerror, &target_tx->maxerror);
7498 __get_user(host_tx->esterror, &target_tx->esterror);
7499 __get_user(host_tx->status, &target_tx->status);
7500 __get_user(host_tx->constant, &target_tx->constant);
7501 __get_user(host_tx->precision, &target_tx->precision);
7502 __get_user(host_tx->tolerance, &target_tx->tolerance);
7503 __get_user(host_tx->tick, &target_tx->tick);
7504 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7505 __get_user(host_tx->jitter, &target_tx->jitter);
7506 __get_user(host_tx->shift, &target_tx->shift);
7507 __get_user(host_tx->stabil, &target_tx->stabil);
7508 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7509 __get_user(host_tx->calcnt, &target_tx->calcnt);
7510 __get_user(host_tx->errcnt, &target_tx->errcnt);
7511 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7512 __get_user(host_tx->tai, &target_tx->tai);
7513
7514 unlock_user_struct(target_tx, target_addr, 0);
7515 return 0;
7516 }
7517
7518 static inline abi_long host_to_target_timex64(abi_long target_addr,
7519 struct timex *host_tx)
7520 {
7521 struct target__kernel_timex *target_tx;
7522
7523 if (copy_to_user_timeval64(target_addr +
7524 offsetof(struct target__kernel_timex, time),
7525 &host_tx->time)) {
7526 return -TARGET_EFAULT;
7527 }
7528
7529 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7530 return -TARGET_EFAULT;
7531 }
7532
7533 __put_user(host_tx->modes, &target_tx->modes);
7534 __put_user(host_tx->offset, &target_tx->offset);
7535 __put_user(host_tx->freq, &target_tx->freq);
7536 __put_user(host_tx->maxerror, &target_tx->maxerror);
7537 __put_user(host_tx->esterror, &target_tx->esterror);
7538 __put_user(host_tx->status, &target_tx->status);
7539 __put_user(host_tx->constant, &target_tx->constant);
7540 __put_user(host_tx->precision, &target_tx->precision);
7541 __put_user(host_tx->tolerance, &target_tx->tolerance);
7542 __put_user(host_tx->tick, &target_tx->tick);
7543 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7544 __put_user(host_tx->jitter, &target_tx->jitter);
7545 __put_user(host_tx->shift, &target_tx->shift);
7546 __put_user(host_tx->stabil, &target_tx->stabil);
7547 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7548 __put_user(host_tx->calcnt, &target_tx->calcnt);
7549 __put_user(host_tx->errcnt, &target_tx->errcnt);
7550 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7551 __put_user(host_tx->tai, &target_tx->tai);
7552
7553 unlock_user_struct(target_tx, target_addr, 1);
7554 return 0;
7555 }
7556 #endif
7557
7558 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7559 #define sigev_notify_thread_id _sigev_un._tid
7560 #endif
7561
7562 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7563 abi_ulong target_addr)
7564 {
7565 struct target_sigevent *target_sevp;
7566
7567 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7568 return -TARGET_EFAULT;
7569 }
7570
7571     /* This union is awkward on 64-bit systems because it holds both a
7572      * 32-bit integer and a pointer; we follow the conversion approach
7573      * used for handling sigval types in signal.c, so the guest gets
7574      * the correct value back even if we did a 64-bit byteswap and it is
7575      * using the 32-bit integer.
7576      */
7577 host_sevp->sigev_value.sival_ptr =
7578 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7579 host_sevp->sigev_signo =
7580 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7581 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7582 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7583
7584 unlock_user_struct(target_sevp, target_addr, 1);
7585 return 0;
7586 }
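
/*
 * Illustrative note (not authoritative): because the byteswap above is
 * applied to the full pointer-width value, applying the same tswapal()
 * when the value travels back restores the original bit pattern, so a
 * guest that stored a 32-bit sival_int still reads back exactly what it
 * wrote even though we handled the union at pointer width.
 */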
7587
7588 #if defined(TARGET_NR_mlockall)
7589 static inline int target_to_host_mlockall_arg(int arg)
7590 {
7591 int result = 0;
7592
7593 if (arg & TARGET_MCL_CURRENT) {
7594 result |= MCL_CURRENT;
7595 }
7596 if (arg & TARGET_MCL_FUTURE) {
7597 result |= MCL_FUTURE;
7598 }
7599 #ifdef MCL_ONFAULT
7600 if (arg & TARGET_MCL_ONFAULT) {
7601 result |= MCL_ONFAULT;
7602 }
7603 #endif
7604
7605 return result;
7606 }
7607 #endif
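
/*
 * Example (illustrative): a guest calling mlockall(MCL_CURRENT | MCL_FUTURE)
 * arrives here with TARGET_MCL_CURRENT | TARGET_MCL_FUTURE, which is
 * rebuilt flag by flag into the host's values.  The translation matters
 * because the numeric flag values differ between ABIs (e.g. SPARC and
 * PowerPC use 0x2000/0x4000 where most architectures use 1/2).
 */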
7608
7609 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7610 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7611 defined(TARGET_NR_newfstatat))
7612 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7613 abi_ulong target_addr,
7614 struct stat *host_st)
7615 {
7616 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7617 if (cpu_env->eabi) {
7618 struct target_eabi_stat64 *target_st;
7619
7620 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7621 return -TARGET_EFAULT;
7622 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7623 __put_user(host_st->st_dev, &target_st->st_dev);
7624 __put_user(host_st->st_ino, &target_st->st_ino);
7625 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7626 __put_user(host_st->st_ino, &target_st->__st_ino);
7627 #endif
7628 __put_user(host_st->st_mode, &target_st->st_mode);
7629 __put_user(host_st->st_nlink, &target_st->st_nlink);
7630 __put_user(host_st->st_uid, &target_st->st_uid);
7631 __put_user(host_st->st_gid, &target_st->st_gid);
7632 __put_user(host_st->st_rdev, &target_st->st_rdev);
7633 __put_user(host_st->st_size, &target_st->st_size);
7634 __put_user(host_st->st_blksize, &target_st->st_blksize);
7635 __put_user(host_st->st_blocks, &target_st->st_blocks);
7636 __put_user(host_st->st_atime, &target_st->target_st_atime);
7637 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7638 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7639 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7640 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7641 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7642 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7643 #endif
7644 unlock_user_struct(target_st, target_addr, 1);
7645 } else
7646 #endif
7647 {
7648 #if defined(TARGET_HAS_STRUCT_STAT64)
7649 struct target_stat64 *target_st;
7650 #else
7651 struct target_stat *target_st;
7652 #endif
7653
7654 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7655 return -TARGET_EFAULT;
7656 memset(target_st, 0, sizeof(*target_st));
7657 __put_user(host_st->st_dev, &target_st->st_dev);
7658 __put_user(host_st->st_ino, &target_st->st_ino);
7659 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7660 __put_user(host_st->st_ino, &target_st->__st_ino);
7661 #endif
7662 __put_user(host_st->st_mode, &target_st->st_mode);
7663 __put_user(host_st->st_nlink, &target_st->st_nlink);
7664 __put_user(host_st->st_uid, &target_st->st_uid);
7665 __put_user(host_st->st_gid, &target_st->st_gid);
7666 __put_user(host_st->st_rdev, &target_st->st_rdev);
7667 /* XXX: better use of kernel struct */
7668 __put_user(host_st->st_size, &target_st->st_size);
7669 __put_user(host_st->st_blksize, &target_st->st_blksize);
7670 __put_user(host_st->st_blocks, &target_st->st_blocks);
7671 __put_user(host_st->st_atime, &target_st->target_st_atime);
7672 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7673 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7674 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7675 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7676 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7677 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7678 #endif
7679 unlock_user_struct(target_st, target_addr, 1);
7680 }
7681
7682 return 0;
7683 }
7684 #endif
7685
7686 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7687 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7688 abi_ulong target_addr)
7689 {
7690 struct target_statx *target_stx;
7691
7692 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7693 return -TARGET_EFAULT;
7694 }
7695 memset(target_stx, 0, sizeof(*target_stx));
7696
7697 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7698 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7699 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7700 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7701 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7702 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7703 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7704 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7705 __put_user(host_stx->stx_size, &target_stx->stx_size);
7706 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7707 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7708 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7709 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7710 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7711 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7712 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7713 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7714 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7715 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7716 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7717 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7718 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7719 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7720
7721 unlock_user_struct(target_stx, target_addr, 1);
7722
7723 return 0;
7724 }
7725 #endif
7726
7727 static int do_sys_futex(int *uaddr, int op, int val,
7728 const struct timespec *timeout, int *uaddr2,
7729 int val3)
7730 {
7731 #if HOST_LONG_BITS == 64
7732 #if defined(__NR_futex)
7733     /* time_t is always 64-bit here, so no _time64 variant is defined */
7734 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7735
7736 #endif
7737 #else /* HOST_LONG_BITS == 64 */
7738 #if defined(__NR_futex_time64)
7739 if (sizeof(timeout->tv_sec) == 8) {
7740 /* _time64 function on 32bit arch */
7741 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7742 }
7743 #endif
7744 #if defined(__NR_futex)
7745 /* old function on 32bit arch */
7746 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7747 #endif
7748 #endif /* HOST_LONG_BITS == 64 */
7749 g_assert_not_reached();
7750 }
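
/*
 * Dispatch sketch (illustrative): on a 32-bit host whose libc has a
 * 64-bit time_t, sizeof(timeout->tv_sec) == 8 and __NR_futex_time64 is
 * used when available; otherwise the classic __NR_futex with a 32-bit
 * timespec is the fallback.  64-bit hosts only ever have the one
 * variant, with a 64-bit time_t built in.
 */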
7751
7752 static int do_safe_futex(int *uaddr, int op, int val,
7753 const struct timespec *timeout, int *uaddr2,
7754 int val3)
7755 {
7756 #if HOST_LONG_BITS == 64
7757 #if defined(__NR_futex)
7758     /* time_t is always 64-bit here, so no _time64 variant is defined */
7759 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7760 #endif
7761 #else /* HOST_LONG_BITS == 64 */
7762 #if defined(__NR_futex_time64)
7763 if (sizeof(timeout->tv_sec) == 8) {
7764 /* _time64 function on 32bit arch */
7765 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7766 val3));
7767 }
7768 #endif
7769 #if defined(__NR_futex)
7770 /* old function on 32bit arch */
7771 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7772 #endif
7773 #endif /* HOST_LONG_BITS == 64 */
7774 return -TARGET_ENOSYS;
7775 }
7776
7777 /* ??? Using host futex calls even when target atomic operations
7778    are not really atomic probably breaks things. However, implementing
7779    futexes locally would make futexes shared between multiple processes
7780    tricky. On the other hand, they would probably be useless anyway,
7781    because guest atomic operations won't work either. */
7782 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7783 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7784 int op, int val, target_ulong timeout,
7785 target_ulong uaddr2, int val3)
7786 {
7787 struct timespec ts, *pts = NULL;
7788 void *haddr2 = NULL;
7789 int base_op;
7790
7791 /* We assume FUTEX_* constants are the same on both host and target. */
7792 #ifdef FUTEX_CMD_MASK
7793 base_op = op & FUTEX_CMD_MASK;
7794 #else
7795 base_op = op;
7796 #endif
7797 switch (base_op) {
7798 case FUTEX_WAIT:
7799 case FUTEX_WAIT_BITSET:
7800 val = tswap32(val);
7801 break;
7802 case FUTEX_WAIT_REQUEUE_PI:
7803 val = tswap32(val);
7804 haddr2 = g2h(cpu, uaddr2);
7805 break;
7806 case FUTEX_LOCK_PI:
7807 case FUTEX_LOCK_PI2:
7808 break;
7809 case FUTEX_WAKE:
7810 case FUTEX_WAKE_BITSET:
7811 case FUTEX_TRYLOCK_PI:
7812 case FUTEX_UNLOCK_PI:
7813 timeout = 0;
7814 break;
7815 case FUTEX_FD:
7816 val = target_to_host_signal(val);
7817 timeout = 0;
7818 break;
7819 case FUTEX_CMP_REQUEUE:
7820 case FUTEX_CMP_REQUEUE_PI:
7821 val3 = tswap32(val3);
7822 /* fall through */
7823 case FUTEX_REQUEUE:
7824 case FUTEX_WAKE_OP:
7825 /*
7826 * For these, the 4th argument is not TIMEOUT, but VAL2.
7827 * But the prototype of do_safe_futex takes a pointer, so
7828 * insert casts to satisfy the compiler. We do not need
7829 * to tswap VAL2 since it's not compared to guest memory.
7830 */
7831 pts = (struct timespec *)(uintptr_t)timeout;
7832 timeout = 0;
7833 haddr2 = g2h(cpu, uaddr2);
7834 break;
7835 default:
7836 return -TARGET_ENOSYS;
7837 }
7838 if (timeout) {
7839 pts = &ts;
7840 if (time64
7841 ? target_to_host_timespec64(pts, timeout)
7842 : target_to_host_timespec(pts, timeout)) {
7843 return -TARGET_EFAULT;
7844 }
7845 }
7846 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7847 }
7848 #endif
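
/*
 * Worked example (illustrative): for FUTEX_WAIT from a cross-endian
 * guest, the futex word in guest memory is in guest byte order, so the
 * expected value 'val' is byteswapped with tswap32() before the host
 * kernel compares it against *uaddr.  A FUTEX_WAKE count, by contrast,
 * is never compared against guest memory and passes through unswapped.
 */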
7849
7850 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7851 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7852 abi_long handle, abi_long mount_id,
7853 abi_long flags)
7854 {
7855 struct file_handle *target_fh;
7856 struct file_handle *fh;
7857 int mid = 0;
7858 abi_long ret;
7859 char *name;
7860 unsigned int size, total_size;
7861
7862 if (get_user_s32(size, handle)) {
7863 return -TARGET_EFAULT;
7864 }
7865
7866 name = lock_user_string(pathname);
7867 if (!name) {
7868 return -TARGET_EFAULT;
7869 }
7870
7871 total_size = sizeof(struct file_handle) + size;
7872 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7873 if (!target_fh) {
7874 unlock_user(name, pathname, 0);
7875 return -TARGET_EFAULT;
7876 }
7877
7878 fh = g_malloc0(total_size);
7879 fh->handle_bytes = size;
7880
7881 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7882 unlock_user(name, pathname, 0);
7883
7884 /* man name_to_handle_at(2):
7885 * Other than the use of the handle_bytes field, the caller should treat
7886 * the file_handle structure as an opaque data type
7887 */
7888
7889 memcpy(target_fh, fh, total_size);
7890 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7891 target_fh->handle_type = tswap32(fh->handle_type);
7892 g_free(fh);
7893 unlock_user(target_fh, handle, total_size);
7894
7895 if (put_user_s32(mid, mount_id)) {
7896 return -TARGET_EFAULT;
7897 }
7898
7899 return ret;
7900
7901 }
7902 #endif
7903
7904 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7905 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7906 abi_long flags)
7907 {
7908 struct file_handle *target_fh;
7909 struct file_handle *fh;
7910 unsigned int size, total_size;
7911 abi_long ret;
7912
7913 if (get_user_s32(size, handle)) {
7914 return -TARGET_EFAULT;
7915 }
7916
7917 total_size = sizeof(struct file_handle) + size;
7918 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7919 if (!target_fh) {
7920 return -TARGET_EFAULT;
7921 }
7922
7923 fh = g_memdup(target_fh, total_size);
7924 fh->handle_bytes = size;
7925 fh->handle_type = tswap32(target_fh->handle_type);
7926
7927 ret = get_errno(open_by_handle_at(mount_fd, fh,
7928 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7929
7930 g_free(fh);
7931
7932 unlock_user(target_fh, handle, total_size);
7933
7934 return ret;
7935 }
7936 #endif
7937
7938 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7939
7940 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7941 {
7942 int host_flags;
7943 target_sigset_t *target_mask;
7944 sigset_t host_mask;
7945 abi_long ret;
7946
7947 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7948 return -TARGET_EINVAL;
7949 }
7950 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7951 return -TARGET_EFAULT;
7952 }
7953
7954 target_to_host_sigset(&host_mask, target_mask);
7955
7956 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7957
7958 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7959 if (ret >= 0) {
7960 fd_trans_register(ret, &target_signalfd_trans);
7961 }
7962
7963 unlock_user_struct(target_mask, mask, 0);
7964
7965 return ret;
7966 }
7967 #endif
7968
7969 /* Map host to target signal numbers for the wait family of syscalls.
7970 Assume all other status bits are the same. */
7971 int host_to_target_waitstatus(int status)
7972 {
7973 if (WIFSIGNALED(status)) {
7974 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7975 }
7976 if (WIFSTOPPED(status)) {
7977 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7978 | (status & 0xff);
7979 }
7980 return status;
7981 }
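
/*
 * Worked example (illustrative, with hypothetical signal numbers): if a
 * child died from host signal 10 and that maps to target signal 30, a
 * host status of 0x000a (WIFSIGNALED) becomes 30 | (0x000a & ~0x7f) == 30,
 * and a stop status of (10 << 8) | 0x7f becomes (30 << 8) | 0x7f.
 */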
7982
7983 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7984 {
7985 CPUState *cpu = env_cpu(cpu_env);
7986 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7987 int i;
7988
7989 for (i = 0; i < bprm->argc; i++) {
7990 size_t len = strlen(bprm->argv[i]) + 1;
7991
7992 if (write(fd, bprm->argv[i], len) != len) {
7993 return -1;
7994 }
7995 }
7996
7997 return 0;
7998 }
7999
8000 static int open_self_maps(CPUArchState *cpu_env, int fd)
8001 {
8002 CPUState *cpu = env_cpu(cpu_env);
8003 TaskState *ts = cpu->opaque;
8004 GSList *map_info = read_self_maps();
8005 GSList *s;
8006 int count;
8007
8008 for (s = map_info; s; s = g_slist_next(s)) {
8009 MapInfo *e = (MapInfo *) s->data;
8010
8011 if (h2g_valid(e->start)) {
8012 unsigned long min = e->start;
8013 unsigned long max = e->end;
8014 int flags = page_get_flags(h2g(min));
8015 const char *path;
8016
8017 max = h2g_valid(max - 1) ?
8018 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8019
8020 if (page_check_range(h2g(min), max - min, flags) == -1) {
8021 continue;
8022 }
8023
8024 #ifdef TARGET_HPPA
8025 if (h2g(max) == ts->info->stack_limit) {
8026 #else
8027 if (h2g(min) == ts->info->stack_limit) {
8028 #endif
8029 path = "[stack]";
8030 } else {
8031 path = e->path;
8032 }
8033
8034 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8035 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8036 h2g(min), h2g(max - 1) + 1,
8037 (flags & PAGE_READ) ? 'r' : '-',
8038 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8039 (flags & PAGE_EXEC) ? 'x' : '-',
8040 e->is_priv ? 'p' : 's',
8041 (uint64_t) e->offset, e->dev, e->inode);
8042 if (path) {
8043 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8044 } else {
8045 dprintf(fd, "\n");
8046 }
8047 }
8048 }
8049
8050 free_self_maps(map_info);
8051
8052 #ifdef TARGET_VSYSCALL_PAGE
8053 /*
8054 * We only support execution from the vsyscall page.
8055 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8056 */
8057 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8058 " --xp 00000000 00:00 0",
8059 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8060 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8061 #endif
8062
8063 return 0;
8064 }
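
/*
 * The synthesized output mimics the kernel's /proc/self/maps format,
 * e.g. (illustrative): "00400000-0040b000 r-xp 00000000 08:01 1234",
 * followed by the path padded out to column 73, but describing guest
 * addresses and guest page protections rather than the host's mappings.
 */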
8065
8066 static int open_self_stat(CPUArchState *cpu_env, int fd)
8067 {
8068 CPUState *cpu = env_cpu(cpu_env);
8069 TaskState *ts = cpu->opaque;
8070 g_autoptr(GString) buf = g_string_new(NULL);
8071 int i;
8072
8073 for (i = 0; i < 44; i++) {
8074 if (i == 0) {
8075 /* pid */
8076 g_string_printf(buf, FMT_pid " ", getpid());
8077 } else if (i == 1) {
8078 /* app name */
8079 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8080 bin = bin ? bin + 1 : ts->bprm->argv[0];
8081 g_string_printf(buf, "(%.15s) ", bin);
8082 } else if (i == 3) {
8083 /* ppid */
8084 g_string_printf(buf, FMT_pid " ", getppid());
8085 } else if (i == 21) {
8086 /* starttime */
8087 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8088 } else if (i == 27) {
8089 /* stack bottom */
8090 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8091 } else {
8092             /* every remaining field is simply reported as 0 */
8093 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8094 }
8095
8096 if (write(fd, buf->str, buf->len) != buf->len) {
8097 return -1;
8098 }
8099 }
8100
8101 return 0;
8102 }
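
/*
 * The loop index is the zero-based field number from proc(5):
 * i == 0 is pid (field 1), i == 1 is comm, i == 3 is ppid (field 4),
 * i == 21 is starttime (field 22) and i == 27 is startstack (field 28).
 */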
8103
8104 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8105 {
8106 CPUState *cpu = env_cpu(cpu_env);
8107 TaskState *ts = cpu->opaque;
8108 abi_ulong auxv = ts->info->saved_auxv;
8109 abi_ulong len = ts->info->auxv_len;
8110 char *ptr;
8111
8112     /*
8113      * The auxiliary vector is stored on the target process's stack.
8114      * Read the whole auxv vector and copy it to the file.
8115      */
8116 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8117 if (ptr != NULL) {
8118 while (len > 0) {
8119 ssize_t r;
8120 r = write(fd, ptr, len);
8121 if (r <= 0) {
8122 break;
8123 }
8124 len -= r;
8125 ptr += r;
8126 }
8127 lseek(fd, 0, SEEK_SET);
8128 unlock_user(ptr, auxv, len);
8129 }
8130
8131 return 0;
8132 }
8133
8134 static int is_proc_myself(const char *filename, const char *entry)
8135 {
8136 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8137 filename += strlen("/proc/");
8138 if (!strncmp(filename, "self/", strlen("self/"))) {
8139 filename += strlen("self/");
8140 } else if (*filename >= '1' && *filename <= '9') {
8141 char myself[80];
8142 snprintf(myself, sizeof(myself), "%d/", getpid());
8143 if (!strncmp(filename, myself, strlen(myself))) {
8144 filename += strlen(myself);
8145 } else {
8146 return 0;
8147 }
8148 } else {
8149 return 0;
8150 }
8151 if (!strcmp(filename, entry)) {
8152 return 1;
8153 }
8154 }
8155 return 0;
8156 }
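
/*
 * Examples (illustrative, assuming getpid() == 1234):
 *   is_proc_myself("/proc/self/maps", "maps") -> 1
 *   is_proc_myself("/proc/1234/maps", "maps") -> 1
 *   is_proc_myself("/proc/5678/maps", "maps") -> 0
 */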
8157
8158 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8159 const char *fmt, int code)
8160 {
8161 if (logfile) {
8162 CPUState *cs = env_cpu(env);
8163
8164 fprintf(logfile, fmt, code);
8165 fprintf(logfile, "Failing executable: %s\n", exec_path);
8166 cpu_dump_state(cs, logfile, 0);
8167 open_self_maps(env, fileno(logfile));
8168 }
8169 }
8170
8171 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8172 {
8173 /* dump to console */
8174 excp_dump_file(stderr, env, fmt, code);
8175
8176 /* dump to log file */
8177 if (qemu_log_separate()) {
8178 FILE *logfile = qemu_log_trylock();
8179
8180 excp_dump_file(logfile, env, fmt, code);
8181 qemu_log_unlock(logfile);
8182 }
8183 }
8184
8185 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8186 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8187 static int is_proc(const char *filename, const char *entry)
8188 {
8189 return strcmp(filename, entry) == 0;
8190 }
8191 #endif
8192
8193 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8194 static int open_net_route(CPUArchState *cpu_env, int fd)
8195 {
8196 FILE *fp;
8197 char *line = NULL;
8198 size_t len = 0;
8199 ssize_t read;
8200
8201 fp = fopen("/proc/net/route", "r");
8202 if (fp == NULL) {
8203 return -1;
8204 }
8205
8206 /* read header */
8207
8208     read = getline(&line, &len, fp);
         if (read == -1) {
             /* header missing or unreadable: bail out rather than print NULL */
             free(line);
             fclose(fp);
             return -1;
         }
8209     dprintf(fd, "%s", line);
8210
8211 /* read routes */
8212
8213 while ((read = getline(&line, &len, fp)) != -1) {
8214 char iface[16];
8215 uint32_t dest, gw, mask;
8216 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8217 int fields;
8218
8219 fields = sscanf(line,
8220 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8221 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8222 &mask, &mtu, &window, &irtt);
8223 if (fields != 11) {
8224 continue;
8225 }
8226 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8227 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8228 metric, tswap32(mask), mtu, window, irtt);
8229 }
8230
8231 free(line);
8232 fclose(fp);
8233
8234 return 0;
8235 }
8236 #endif
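
/*
 * Illustrative: /proc/net/route prints addresses as raw hex in host
 * byte order, so a destination of 0x0100A8C0 (192.168.0.1 as seen by a
 * little-endian host) is re-emitted as tswap32(0x0100A8C0) so that a
 * cross-endian guest parses the same address.
 */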
8237
8238 #if defined(TARGET_SPARC)
8239 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8240 {
8241 dprintf(fd, "type\t\t: sun4u\n");
8242 return 0;
8243 }
8244 #endif
8245
8246 #if defined(TARGET_HPPA)
8247 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8248 {
8249 int i, num_cpus;
8250
8251 num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8252 for (i = 0; i < num_cpus; i++) {
8253 dprintf(fd, "processor\t: %d\n", i);
8254 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8255 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8256 dprintf(fd, "capabilities\t: os32\n");
8257 dprintf(fd, "model\t\t: 9000/778/B160L - "
8258 "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8259 }
8260 return 0;
8261 }
8262 #endif
8263
8264 #if defined(TARGET_M68K)
8265 static int open_hardware(CPUArchState *cpu_env, int fd)
8266 {
8267 dprintf(fd, "Model:\t\tqemu-m68k\n");
8268 return 0;
8269 }
8270 #endif
8271
8272 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8273 {
8274 struct fake_open {
8275 const char *filename;
8276 int (*fill)(CPUArchState *cpu_env, int fd);
8277 int (*cmp)(const char *s1, const char *s2);
8278 };
8279 const struct fake_open *fake_open;
8280 static const struct fake_open fakes[] = {
8281 { "maps", open_self_maps, is_proc_myself },
8282 { "stat", open_self_stat, is_proc_myself },
8283 { "auxv", open_self_auxv, is_proc_myself },
8284 { "cmdline", open_self_cmdline, is_proc_myself },
8285 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8286 { "/proc/net/route", open_net_route, is_proc },
8287 #endif
8288 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8289 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8290 #endif
8291 #if defined(TARGET_M68K)
8292 { "/proc/hardware", open_hardware, is_proc },
8293 #endif
8294 { NULL, NULL, NULL }
8295 };
8296
8297 if (is_proc_myself(pathname, "exe")) {
8298 return safe_openat(dirfd, exec_path, flags, mode);
8299 }
8300
8301 for (fake_open = fakes; fake_open->filename; fake_open++) {
8302 if (fake_open->cmp(pathname, fake_open->filename)) {
8303 break;
8304 }
8305 }
8306
8307 if (fake_open->filename) {
8308 const char *tmpdir;
8309 char filename[PATH_MAX];
8310 int fd, r;
8311
8312 fd = memfd_create("qemu-open", 0);
8313 if (fd < 0) {
8314 if (errno != ENOSYS) {
8315 return fd;
8316 }
8317 /* create temporary file to map stat to */
8318 tmpdir = getenv("TMPDIR");
8319 if (!tmpdir)
8320 tmpdir = "/tmp";
8321 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8322 fd = mkstemp(filename);
8323 if (fd < 0) {
8324 return fd;
8325 }
8326 unlink(filename);
8327 }
8328
8329 if ((r = fake_open->fill(cpu_env, fd))) {
8330 int e = errno;
8331 close(fd);
8332 errno = e;
8333 return r;
8334 }
8335 lseek(fd, 0, SEEK_SET);
8336
8337 return fd;
8338 }
8339
8340 return safe_openat(dirfd, path(pathname), flags, mode);
8341 }
8342
8343 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8344 abi_long pathname, abi_long guest_argp,
8345 abi_long guest_envp, int flags)
8346 {
8347 int ret;
8348 char **argp, **envp;
8349 int argc, envc;
8350 abi_ulong gp;
8351 abi_ulong addr;
8352 char **q;
8353 void *p;
8354
8355 argc = 0;
8356
8357 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8358 if (get_user_ual(addr, gp)) {
8359 return -TARGET_EFAULT;
8360 }
8361 if (!addr) {
8362 break;
8363 }
8364 argc++;
8365 }
8366 envc = 0;
8367 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8368 if (get_user_ual(addr, gp)) {
8369 return -TARGET_EFAULT;
8370 }
8371 if (!addr) {
8372 break;
8373 }
8374 envc++;
8375 }
8376
8377 argp = g_new0(char *, argc + 1);
8378 envp = g_new0(char *, envc + 1);
8379
8380 for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8381 if (get_user_ual(addr, gp)) {
8382 goto execve_efault;
8383 }
8384 if (!addr) {
8385 break;
8386 }
8387 *q = lock_user_string(addr);
8388 if (!*q) {
8389 goto execve_efault;
8390 }
8391 }
8392 *q = NULL;
8393
8394 for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8395 if (get_user_ual(addr, gp)) {
8396 goto execve_efault;
8397 }
8398 if (!addr) {
8399 break;
8400 }
8401 *q = lock_user_string(addr);
8402 if (!*q) {
8403 goto execve_efault;
8404 }
8405 }
8406 *q = NULL;
8407
8408 /*
8409 * Although execve() is not an interruptible syscall it is
8410 * a special case where we must use the safe_syscall wrapper:
8411 * if we allow a signal to happen before we make the host
8412 * syscall then we will 'lose' it, because at the point of
8413 * execve the process leaves QEMU's control. So we use the
8414 * safe syscall wrapper to ensure that we either take the
8415 * signal as a guest signal, or else it does not happen
8416 * before the execve completes and makes it the other
8417 * program's problem.
8418 */
8419 p = lock_user_string(pathname);
8420 if (!p) {
8421 goto execve_efault;
8422 }
8423
8424 if (is_proc_myself(p, "exe")) {
8425 ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8426 } else {
8427 ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8428 }
8429
8430 unlock_user(p, pathname, 0);
8431
8432 goto execve_end;
8433
8434 execve_efault:
8435 ret = -TARGET_EFAULT;
8436
8437 execve_end:
8438 for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8439 if (get_user_ual(addr, gp) || !addr) {
8440 break;
8441 }
8442 unlock_user(*q, addr, 0);
8443 }
8444 for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8445 if (get_user_ual(addr, gp) || !addr) {
8446 break;
8447 }
8448 unlock_user(*q, addr, 0);
8449 }
8450
8451 g_free(argp);
8452 g_free(envp);
8453 return ret;
8454 }
8455
8456 #define TIMER_MAGIC 0x0caf0000
8457 #define TIMER_MAGIC_MASK 0xffff0000
8458
8459 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8460 static target_timer_t get_timer_id(abi_long arg)
8461 {
8462 target_timer_t timerid = arg;
8463
8464 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8465 return -TARGET_EINVAL;
8466 }
8467
8468 timerid &= 0xffff;
8469
8470 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8471 return -TARGET_EINVAL;
8472 }
8473
8474 return timerid;
8475 }
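
/*
 * Example (illustrative): an ID of (TIMER_MAGIC | 3) == 0x0caf0003
 * decodes to index 3; any value whose top 16 bits are not TIMER_MAGIC,
 * or whose low 16 bits index past g_posix_timers, yields -TARGET_EINVAL.
 */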
8476
8477 static int target_to_host_cpu_mask(unsigned long *host_mask,
8478 size_t host_size,
8479 abi_ulong target_addr,
8480 size_t target_size)
8481 {
8482 unsigned target_bits = sizeof(abi_ulong) * 8;
8483 unsigned host_bits = sizeof(*host_mask) * 8;
8484 abi_ulong *target_mask;
8485 unsigned i, j;
8486
8487 assert(host_size >= target_size);
8488
8489 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8490 if (!target_mask) {
8491 return -TARGET_EFAULT;
8492 }
8493 memset(host_mask, 0, host_size);
8494
8495 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8496 unsigned bit = i * target_bits;
8497 abi_ulong val;
8498
8499 __get_user(val, &target_mask[i]);
8500 for (j = 0; j < target_bits; j++, bit++) {
8501 if (val & (1UL << j)) {
8502 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8503 }
8504 }
8505 }
8506
8507 unlock_user(target_mask, target_addr, 0);
8508 return 0;
8509 }
8510
8511 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8512 size_t host_size,
8513 abi_ulong target_addr,
8514 size_t target_size)
8515 {
8516 unsigned target_bits = sizeof(abi_ulong) * 8;
8517 unsigned host_bits = sizeof(*host_mask) * 8;
8518 abi_ulong *target_mask;
8519 unsigned i, j;
8520
8521 assert(host_size >= target_size);
8522
8523 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8524 if (!target_mask) {
8525 return -TARGET_EFAULT;
8526 }
8527
8528 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8529 unsigned bit = i * target_bits;
8530 abi_ulong val = 0;
8531
8532 for (j = 0; j < target_bits; j++, bit++) {
8533 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8534 val |= 1UL << j;
8535 }
8536 }
8537 __put_user(val, &target_mask[i]);
8538 }
8539
8540 unlock_user(target_mask, target_addr, target_size);
8541 return 0;
8542 }
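
/*
 * Illustrative example: with 64-bit host longs and a 32-bit abi_ulong,
 * flat CPU bit 35 lives in host_mask[0] bit 35 but in target_mask[1]
 * bit 3; the loops above repack each bit of the flat set regardless of
 * how the two sides chunk their bitmaps, with guest byte order handled
 * by __get_user/__put_user.
 */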
8543
8544 #ifdef TARGET_NR_getdents
8545 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8546 {
8547 g_autofree void *hdirp = NULL;
8548 void *tdirp;
8549 int hlen, hoff, toff;
8550 int hreclen, treclen;
8551 off64_t prev_diroff = 0;
8552
8553 hdirp = g_try_malloc(count);
8554 if (!hdirp) {
8555 return -TARGET_ENOMEM;
8556 }
8557
8558 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8559 hlen = sys_getdents(dirfd, hdirp, count);
8560 #else
8561 hlen = sys_getdents64(dirfd, hdirp, count);
8562 #endif
8563
8564 hlen = get_errno(hlen);
8565 if (is_error(hlen)) {
8566 return hlen;
8567 }
8568
8569 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8570 if (!tdirp) {
8571 return -TARGET_EFAULT;
8572 }
8573
8574 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8575 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8576 struct linux_dirent *hde = hdirp + hoff;
8577 #else
8578 struct linux_dirent64 *hde = hdirp + hoff;
8579 #endif
8580 struct target_dirent *tde = tdirp + toff;
8581 int namelen;
8582 uint8_t type;
8583
8584 namelen = strlen(hde->d_name);
8585 hreclen = hde->d_reclen;
8586 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8587 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8588
8589 if (toff + treclen > count) {
8590 /*
8591 * If the host struct is smaller than the target struct, or
8592 * requires less alignment and thus packs into less space,
8593 * then the host can return more entries than we can pass
8594 * on to the guest.
8595 */
8596 if (toff == 0) {
8597 toff = -TARGET_EINVAL; /* result buffer is too small */
8598 break;
8599 }
8600 /*
8601 * Return what we have, resetting the file pointer to the
8602 * location of the first record not returned.
8603 */
8604 lseek64(dirfd, prev_diroff, SEEK_SET);
8605 break;
8606 }
8607
8608 prev_diroff = hde->d_off;
8609 tde->d_ino = tswapal(hde->d_ino);
8610 tde->d_off = tswapal(hde->d_off);
8611 tde->d_reclen = tswap16(treclen);
8612 memcpy(tde->d_name, hde->d_name, namelen + 1);
8613
8614 /*
8615 * The getdents type is in what was formerly a padding byte at the
8616 * end of the structure.
8617 */
8618 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8619 type = *((uint8_t *)hde + hreclen - 1);
8620 #else
8621 type = hde->d_type;
8622 #endif
8623 *((uint8_t *)tde + treclen - 1) = type;
8624 }
8625
8626 unlock_user(tdirp, arg2, toff);
8627 return toff;
8628 }
8629 #endif /* TARGET_NR_getdents */
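
/*
 * Illustrative: a host dirent for "abc" (namelen 3) produces a target
 * reclen of offsetof(struct target_dirent, d_name) + 3 + 2, rounded up
 * to the target alignment; the extra 2 bytes cover the NUL terminator
 * and the type byte stashed in the last byte of the record.
 */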
8630
8631 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8632 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8633 {
8634 g_autofree void *hdirp = NULL;
8635 void *tdirp;
8636 int hlen, hoff, toff;
8637 int hreclen, treclen;
8638 off64_t prev_diroff = 0;
8639
8640 hdirp = g_try_malloc(count);
8641 if (!hdirp) {
8642 return -TARGET_ENOMEM;
8643 }
8644
8645 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8646 if (is_error(hlen)) {
8647 return hlen;
8648 }
8649
8650 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8651 if (!tdirp) {
8652 return -TARGET_EFAULT;
8653 }
8654
8655 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8656 struct linux_dirent64 *hde = hdirp + hoff;
8657 struct target_dirent64 *tde = tdirp + toff;
8658 int namelen;
8659
8660 namelen = strlen(hde->d_name) + 1;
8661 hreclen = hde->d_reclen;
8662 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8663 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8664
8665 if (toff + treclen > count) {
8666 /*
8667 * If the host struct is smaller than the target struct, or
8668 * requires less alignment and thus packs into less space,
8669 * then the host can return more entries than we can pass
8670 * on to the guest.
8671 */
8672 if (toff == 0) {
8673 toff = -TARGET_EINVAL; /* result buffer is too small */
8674 break;
8675 }
8676 /*
8677 * Return what we have, resetting the file pointer to the
8678 * location of the first record not returned.
8679 */
8680 lseek64(dirfd, prev_diroff, SEEK_SET);
8681 break;
8682 }
8683
8684 prev_diroff = hde->d_off;
8685 tde->d_ino = tswap64(hde->d_ino);
8686 tde->d_off = tswap64(hde->d_off);
8687 tde->d_reclen = tswap16(treclen);
8688 tde->d_type = hde->d_type;
8689 memcpy(tde->d_name, hde->d_name, namelen);
8690 }
8691
8692 unlock_user(tdirp, arg2, toff);
8693 return toff;
8694 }
8695 #endif /* TARGET_NR_getdents64 */
8696
8697 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8698 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8699 #endif
8700
8701 /* This is an internal helper for do_syscall that provides a single
8702  * return point, so that actions such as logging of syscall results
8703  * can be performed in one place.
8704  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8705  */
8706 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8707 abi_long arg2, abi_long arg3, abi_long arg4,
8708 abi_long arg5, abi_long arg6, abi_long arg7,
8709 abi_long arg8)
8710 {
8711 CPUState *cpu = env_cpu(cpu_env);
8712 abi_long ret;
8713 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8714 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8715 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8716 || defined(TARGET_NR_statx)
8717 struct stat st;
8718 #endif
8719 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8720 || defined(TARGET_NR_fstatfs)
8721 struct statfs stfs;
8722 #endif
8723 void *p;
8724
8725 switch(num) {
8726 case TARGET_NR_exit:
8727         /* In old applications this may be used to implement _exit(2).
8728            However, in threaded applications it is used for thread termination,
8729            and _exit_group is used for application termination.
8730            Do thread termination if we have more than one thread. */
8731
8732 if (block_signals()) {
8733 return -QEMU_ERESTARTSYS;
8734 }
8735
8736 pthread_mutex_lock(&clone_lock);
8737
8738 if (CPU_NEXT(first_cpu)) {
8739 TaskState *ts = cpu->opaque;
8740
8741 if (ts->child_tidptr) {
8742 put_user_u32(0, ts->child_tidptr);
8743 do_sys_futex(g2h(cpu, ts->child_tidptr),
8744 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8745 }
8746
8747 object_unparent(OBJECT(cpu));
8748 object_unref(OBJECT(cpu));
8749 /*
8750 * At this point the CPU should be unrealized and removed
8751 * from cpu lists. We can clean-up the rest of the thread
8752 * data without the lock held.
8753 */
8754
8755 pthread_mutex_unlock(&clone_lock);
8756
8757 thread_cpu = NULL;
8758 g_free(ts);
8759 rcu_unregister_thread();
8760 pthread_exit(NULL);
8761 }
8762
8763 pthread_mutex_unlock(&clone_lock);
8764 preexit_cleanup(cpu_env, arg1);
8765 _exit(arg1);
8766 return 0; /* avoid warning */
8767 case TARGET_NR_read:
8768 if (arg2 == 0 && arg3 == 0) {
8769 return get_errno(safe_read(arg1, 0, 0));
8770 } else {
8771 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8772 return -TARGET_EFAULT;
8773 ret = get_errno(safe_read(arg1, p, arg3));
8774 if (ret >= 0 &&
8775 fd_trans_host_to_target_data(arg1)) {
8776 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8777 }
8778 unlock_user(p, arg2, ret);
8779 }
8780 return ret;
8781 case TARGET_NR_write:
8782 if (arg2 == 0 && arg3 == 0) {
8783 return get_errno(safe_write(arg1, 0, 0));
8784 }
8785 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8786 return -TARGET_EFAULT;
8787 if (fd_trans_target_to_host_data(arg1)) {
8788 void *copy = g_malloc(arg3);
8789 memcpy(copy, p, arg3);
8790 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8791 if (ret >= 0) {
8792 ret = get_errno(safe_write(arg1, copy, ret));
8793 }
8794 g_free(copy);
8795 } else {
8796 ret = get_errno(safe_write(arg1, p, arg3));
8797 }
8798 unlock_user(p, arg2, 0);
8799 return ret;
8800
8801 #ifdef TARGET_NR_open
8802 case TARGET_NR_open:
8803 if (!(p = lock_user_string(arg1)))
8804 return -TARGET_EFAULT;
8805 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8806 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8807 arg3));
8808 fd_trans_unregister(ret);
8809 unlock_user(p, arg1, 0);
8810 return ret;
8811 #endif
8812 case TARGET_NR_openat:
8813 if (!(p = lock_user_string(arg2)))
8814 return -TARGET_EFAULT;
8815 ret = get_errno(do_openat(cpu_env, arg1, p,
8816 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8817 arg4));
8818 fd_trans_unregister(ret);
8819 unlock_user(p, arg2, 0);
8820 return ret;
8821 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8822 case TARGET_NR_name_to_handle_at:
8823 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8824 return ret;
8825 #endif
8826 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8827 case TARGET_NR_open_by_handle_at:
8828 ret = do_open_by_handle_at(arg1, arg2, arg3);
8829 fd_trans_unregister(ret);
8830 return ret;
8831 #endif
8832 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8833 case TARGET_NR_pidfd_open:
8834 return get_errno(pidfd_open(arg1, arg2));
8835 #endif
8836 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8837 case TARGET_NR_pidfd_send_signal:
8838 {
8839 siginfo_t uinfo, *puinfo;
8840
8841 if (arg3) {
8842 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8843 if (!p) {
8844 return -TARGET_EFAULT;
8845 }
8846 target_to_host_siginfo(&uinfo, p);
8847 unlock_user(p, arg3, 0);
8848 puinfo = &uinfo;
8849 } else {
8850 puinfo = NULL;
8851 }
8852 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8853 puinfo, arg4));
8854 }
8855 return ret;
8856 #endif
8857 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8858 case TARGET_NR_pidfd_getfd:
8859 return get_errno(pidfd_getfd(arg1, arg2, arg3));
8860 #endif
8861 case TARGET_NR_close:
8862 fd_trans_unregister(arg1);
8863 return get_errno(close(arg1));
8864 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8865 case TARGET_NR_close_range:
8866 ret = get_errno(sys_close_range(arg1, arg2, arg3));
8867 if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8868 abi_long fd, maxfd;
8869 maxfd = MIN(arg2, target_fd_max);
8870 for (fd = arg1; fd < maxfd; fd++) {
8871 fd_trans_unregister(fd);
8872 }
8873 }
8874 return ret;
8875 #endif
8876
8877 case TARGET_NR_brk:
8878 return do_brk(arg1);
8879 #ifdef TARGET_NR_fork
8880 case TARGET_NR_fork:
8881 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8882 #endif
8883 #ifdef TARGET_NR_waitpid
8884 case TARGET_NR_waitpid:
8885 {
8886 int status;
8887 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8888 if (!is_error(ret) && arg2 && ret
8889 && put_user_s32(host_to_target_waitstatus(status), arg2))
8890 return -TARGET_EFAULT;
8891 }
8892 return ret;
8893 #endif
8894 #ifdef TARGET_NR_waitid
8895 case TARGET_NR_waitid:
8896 {
8897 siginfo_t info;
8898 info.si_pid = 0;
8899 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8900 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8901 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8902 return -TARGET_EFAULT;
8903 host_to_target_siginfo(p, &info);
8904 unlock_user(p, arg3, sizeof(target_siginfo_t));
8905 }
8906 }
8907 return ret;
8908 #endif
8909 #ifdef TARGET_NR_creat /* not on alpha */
8910 case TARGET_NR_creat:
8911 if (!(p = lock_user_string(arg1)))
8912 return -TARGET_EFAULT;
8913 ret = get_errno(creat(p, arg2));
8914 fd_trans_unregister(ret);
8915 unlock_user(p, arg1, 0);
8916 return ret;
8917 #endif
8918 #ifdef TARGET_NR_link
8919 case TARGET_NR_link:
8920 {
8921 void * p2;
8922 p = lock_user_string(arg1);
8923 p2 = lock_user_string(arg2);
8924 if (!p || !p2)
8925 ret = -TARGET_EFAULT;
8926 else
8927 ret = get_errno(link(p, p2));
8928 unlock_user(p2, arg2, 0);
8929 unlock_user(p, arg1, 0);
8930 }
8931 return ret;
8932 #endif
8933 #if defined(TARGET_NR_linkat)
8934 case TARGET_NR_linkat:
8935 {
8936 void * p2 = NULL;
8937 if (!arg2 || !arg4)
8938 return -TARGET_EFAULT;
8939 p = lock_user_string(arg2);
8940 p2 = lock_user_string(arg4);
8941 if (!p || !p2)
8942 ret = -TARGET_EFAULT;
8943 else
8944 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8945 unlock_user(p, arg2, 0);
8946 unlock_user(p2, arg4, 0);
8947 }
8948 return ret;
8949 #endif
8950 #ifdef TARGET_NR_unlink
8951 case TARGET_NR_unlink:
8952 if (!(p = lock_user_string(arg1)))
8953 return -TARGET_EFAULT;
8954 ret = get_errno(unlink(p));
8955 unlock_user(p, arg1, 0);
8956 return ret;
8957 #endif
8958 #if defined(TARGET_NR_unlinkat)
8959 case TARGET_NR_unlinkat:
8960 if (!(p = lock_user_string(arg2)))
8961 return -TARGET_EFAULT;
8962 ret = get_errno(unlinkat(arg1, p, arg3));
8963 unlock_user(p, arg2, 0);
8964 return ret;
8965 #endif
8966 case TARGET_NR_execveat:
8967 return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8968 case TARGET_NR_execve:
8969 return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8970 case TARGET_NR_chdir:
8971 if (!(p = lock_user_string(arg1)))
8972 return -TARGET_EFAULT;
8973 ret = get_errno(chdir(p));
8974 unlock_user(p, arg1, 0);
8975 return ret;
8976 #ifdef TARGET_NR_time
8977 case TARGET_NR_time:
8978 {
8979 time_t host_time;
8980 ret = get_errno(time(&host_time));
8981 if (!is_error(ret)
8982 && arg1
8983 && put_user_sal(host_time, arg1))
8984 return -TARGET_EFAULT;
8985 }
8986 return ret;
8987 #endif
8988 #ifdef TARGET_NR_mknod
8989 case TARGET_NR_mknod:
8990 if (!(p = lock_user_string(arg1)))
8991 return -TARGET_EFAULT;
8992 ret = get_errno(mknod(p, arg2, arg3));
8993 unlock_user(p, arg1, 0);
8994 return ret;
8995 #endif
8996 #if defined(TARGET_NR_mknodat)
8997 case TARGET_NR_mknodat:
8998 if (!(p = lock_user_string(arg2)))
8999 return -TARGET_EFAULT;
9000 ret = get_errno(mknodat(arg1, p, arg3, arg4));
9001 unlock_user(p, arg2, 0);
9002 return ret;
9003 #endif
9004 #ifdef TARGET_NR_chmod
9005 case TARGET_NR_chmod:
9006 if (!(p = lock_user_string(arg1)))
9007 return -TARGET_EFAULT;
9008 ret = get_errno(chmod(p, arg2));
9009 unlock_user(p, arg1, 0);
9010 return ret;
9011 #endif
9012 #ifdef TARGET_NR_lseek
9013 case TARGET_NR_lseek:
9014 return get_errno(lseek(arg1, arg2, arg3));
9015 #endif
9016 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9017 /* Alpha specific */
9018 case TARGET_NR_getxpid:
9019 cpu_env->ir[IR_A4] = getppid();
9020 return get_errno(getpid());
9021 #endif
9022 #ifdef TARGET_NR_getpid
9023 case TARGET_NR_getpid:
9024 return get_errno(getpid());
9025 #endif
9026 case TARGET_NR_mount:
9027 {
9028 /* need to look at the data field */
9029 void *p2, *p3;
9030
9031 if (arg1) {
9032 p = lock_user_string(arg1);
9033 if (!p) {
9034 return -TARGET_EFAULT;
9035 }
9036 } else {
9037 p = NULL;
9038 }
9039
9040 p2 = lock_user_string(arg2);
9041 if (!p2) {
9042 if (arg1) {
9043 unlock_user(p, arg1, 0);
9044 }
9045 return -TARGET_EFAULT;
9046 }
9047
9048 if (arg3) {
9049 p3 = lock_user_string(arg3);
9050 if (!p3) {
9051 if (arg1) {
9052 unlock_user(p, arg1, 0);
9053 }
9054 unlock_user(p2, arg2, 0);
9055 return -TARGET_EFAULT;
9056 }
9057 } else {
9058 p3 = NULL;
9059 }
9060
9061 /* FIXME - arg5 should be locked, but it isn't clear how to
9062 * do that since it's not guaranteed to be a NULL-terminated
9063 * string.
9064 */
9065 if (!arg5) {
9066 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9067 } else {
9068 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9069 }
9070 ret = get_errno(ret);
9071
9072 if (arg1) {
9073 unlock_user(p, arg1, 0);
9074 }
9075 unlock_user(p2, arg2, 0);
9076 if (arg3) {
9077 unlock_user(p3, arg3, 0);
9078 }
9079 }
9080 return ret;
9081 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9082 #if defined(TARGET_NR_umount)
9083 case TARGET_NR_umount:
9084 #endif
9085 #if defined(TARGET_NR_oldumount)
9086 case TARGET_NR_oldumount:
9087 #endif
9088 if (!(p = lock_user_string(arg1)))
9089 return -TARGET_EFAULT;
9090 ret = get_errno(umount(p));
9091 unlock_user(p, arg1, 0);
9092 return ret;
9093 #endif
9094 #ifdef TARGET_NR_stime /* not on alpha */
9095 case TARGET_NR_stime:
9096 {
9097 struct timespec ts;
9098 ts.tv_nsec = 0;
9099 if (get_user_sal(ts.tv_sec, arg1)) {
9100 return -TARGET_EFAULT;
9101 }
9102 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9103 }
9104 #endif
9105 #ifdef TARGET_NR_alarm /* not on alpha */
9106 case TARGET_NR_alarm:
9107 return alarm(arg1);
9108 #endif
9109 #ifdef TARGET_NR_pause /* not on alpha */
9110 case TARGET_NR_pause:
9111 if (!block_signals()) {
9112 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9113 }
9114 return -TARGET_EINTR;
9115 #endif
9116 #ifdef TARGET_NR_utime
9117 case TARGET_NR_utime:
9118 {
9119 struct utimbuf tbuf, *host_tbuf;
9120 struct target_utimbuf *target_tbuf;
9121 if (arg2) {
9122 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9123 return -TARGET_EFAULT;
9124 tbuf.actime = tswapal(target_tbuf->actime);
9125 tbuf.modtime = tswapal(target_tbuf->modtime);
9126 unlock_user_struct(target_tbuf, arg2, 0);
9127 host_tbuf = &tbuf;
9128 } else {
9129 host_tbuf = NULL;
9130 }
9131 if (!(p = lock_user_string(arg1)))
9132 return -TARGET_EFAULT;
9133 ret = get_errno(utime(p, host_tbuf));
9134 unlock_user(p, arg1, 0);
9135 }
9136 return ret;
9137 #endif
9138 #ifdef TARGET_NR_utimes
9139 case TARGET_NR_utimes:
9140 {
9141 struct timeval *tvp, tv[2];
9142 if (arg2) {
9143 if (copy_from_user_timeval(&tv[0], arg2)
9144 || copy_from_user_timeval(&tv[1],
9145 arg2 + sizeof(struct target_timeval)))
9146 return -TARGET_EFAULT;
9147 tvp = tv;
9148 } else {
9149 tvp = NULL;
9150 }
9151 if (!(p = lock_user_string(arg1)))
9152 return -TARGET_EFAULT;
9153 ret = get_errno(utimes(p, tvp));
9154 unlock_user(p, arg1, 0);
9155 }
9156 return ret;
9157 #endif
9158 #if defined(TARGET_NR_futimesat)
9159 case TARGET_NR_futimesat:
9160 {
9161 struct timeval *tvp, tv[2];
9162 if (arg3) {
9163 if (copy_from_user_timeval(&tv[0], arg3)
9164 || copy_from_user_timeval(&tv[1],
9165 arg3 + sizeof(struct target_timeval)))
9166 return -TARGET_EFAULT;
9167 tvp = tv;
9168 } else {
9169 tvp = NULL;
9170 }
9171 if (!(p = lock_user_string(arg2))) {
9172 return -TARGET_EFAULT;
9173 }
9174 ret = get_errno(futimesat(arg1, path(p), tvp));
9175 unlock_user(p, arg2, 0);
9176 }
9177 return ret;
9178 #endif
9179 #ifdef TARGET_NR_access
9180 case TARGET_NR_access:
9181 if (!(p = lock_user_string(arg1))) {
9182 return -TARGET_EFAULT;
9183 }
9184 ret = get_errno(access(path(p), arg2));
9185 unlock_user(p, arg1, 0);
9186 return ret;
9187 #endif
9188 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9189 case TARGET_NR_faccessat:
9190 if (!(p = lock_user_string(arg2))) {
9191 return -TARGET_EFAULT;
9192 }
9193 ret = get_errno(faccessat(arg1, p, arg3, 0));
9194 unlock_user(p, arg2, 0);
9195 return ret;
9196 #endif
9197 #if defined(TARGET_NR_faccessat2)
9198 case TARGET_NR_faccessat2:
9199 if (!(p = lock_user_string(arg2))) {
9200 return -TARGET_EFAULT;
9201 }
9202 ret = get_errno(faccessat(arg1, p, arg3, arg4));
9203 unlock_user(p, arg2, 0);
9204 return ret;
9205 #endif
9206 #ifdef TARGET_NR_nice /* not on alpha */
9207 case TARGET_NR_nice:
9208 return get_errno(nice(arg1));
9209 #endif
9210 case TARGET_NR_sync:
9211 sync();
9212 return 0;
9213 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9214 case TARGET_NR_syncfs:
9215 return get_errno(syncfs(arg1));
9216 #endif
9217 case TARGET_NR_kill:
9218 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9219 #ifdef TARGET_NR_rename
9220 case TARGET_NR_rename:
9221 {
9222 void *p2;
9223 p = lock_user_string(arg1);
9224 p2 = lock_user_string(arg2);
9225 if (!p || !p2)
9226 ret = -TARGET_EFAULT;
9227 else
9228 ret = get_errno(rename(p, p2));
9229 unlock_user(p2, arg2, 0);
9230 unlock_user(p, arg1, 0);
9231 }
9232 return ret;
9233 #endif
9234 #if defined(TARGET_NR_renameat)
9235 case TARGET_NR_renameat:
9236 {
9237 void *p2;
9238 p = lock_user_string(arg2);
9239 p2 = lock_user_string(arg4);
9240 if (!p || !p2)
9241 ret = -TARGET_EFAULT;
9242 else
9243 ret = get_errno(renameat(arg1, p, arg3, p2));
9244 unlock_user(p2, arg4, 0);
9245 unlock_user(p, arg2, 0);
9246 }
9247 return ret;
9248 #endif
9249 #if defined(TARGET_NR_renameat2)
9250 case TARGET_NR_renameat2:
9251 {
9252 void *p2;
9253 p = lock_user_string(arg2);
9254 p2 = lock_user_string(arg4);
9255 if (!p || !p2) {
9256 ret = -TARGET_EFAULT;
9257 } else {
9258 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9259 }
9260 unlock_user(p2, arg4, 0);
9261 unlock_user(p, arg2, 0);
9262 }
9263 return ret;
9264 #endif
9265 #ifdef TARGET_NR_mkdir
9266 case TARGET_NR_mkdir:
9267 if (!(p = lock_user_string(arg1)))
9268 return -TARGET_EFAULT;
9269 ret = get_errno(mkdir(p, arg2));
9270 unlock_user(p, arg1, 0);
9271 return ret;
9272 #endif
9273 #if defined(TARGET_NR_mkdirat)
9274 case TARGET_NR_mkdirat:
9275 if (!(p = lock_user_string(arg2)))
9276 return -TARGET_EFAULT;
9277 ret = get_errno(mkdirat(arg1, p, arg3));
9278 unlock_user(p, arg2, 0);
9279 return ret;
9280 #endif
9281 #ifdef TARGET_NR_rmdir
9282 case TARGET_NR_rmdir:
9283 if (!(p = lock_user_string(arg1)))
9284 return -TARGET_EFAULT;
9285 ret = get_errno(rmdir(p));
9286 unlock_user(p, arg1, 0);
9287 return ret;
9288 #endif
9289 case TARGET_NR_dup:
9290 ret = get_errno(dup(arg1));
9291 if (ret >= 0) {
9292 fd_trans_dup(arg1, ret);
9293 }
9294 return ret;
9295 #ifdef TARGET_NR_pipe
9296 case TARGET_NR_pipe:
9297 return do_pipe(cpu_env, arg1, 0, 0);
9298 #endif
9299 #ifdef TARGET_NR_pipe2
9300 case TARGET_NR_pipe2:
9301 return do_pipe(cpu_env, arg1,
9302 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9303 #endif
9304 case TARGET_NR_times:
9305 {
9306 struct target_tms *tmsp;
9307 struct tms tms;
9308 ret = get_errno(times(&tms));
9309 if (arg1) {
9310 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9311 if (!tmsp)
9312 return -TARGET_EFAULT;
9313 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9314 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9315 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9316 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9317 }
9318 if (!is_error(ret))
9319 ret = host_to_target_clock_t(ret);
9320 }
9321 return ret;
9322 case TARGET_NR_acct:
9323 if (arg1 == 0) {
9324 ret = get_errno(acct(NULL));
9325 } else {
9326 if (!(p = lock_user_string(arg1))) {
9327 return -TARGET_EFAULT;
9328 }
9329 ret = get_errno(acct(path(p)));
9330 unlock_user(p, arg1, 0);
9331 }
9332 return ret;
9333 #ifdef TARGET_NR_umount2
9334 case TARGET_NR_umount2:
9335 if (!(p = lock_user_string(arg1)))
9336 return -TARGET_EFAULT;
9337 ret = get_errno(umount2(p, arg2));
9338 unlock_user(p, arg1, 0);
9339 return ret;
9340 #endif
9341 case TARGET_NR_ioctl:
9342 return do_ioctl(arg1, arg2, arg3);
9343 #ifdef TARGET_NR_fcntl
9344 case TARGET_NR_fcntl:
9345 return do_fcntl(arg1, arg2, arg3);
9346 #endif
9347 case TARGET_NR_setpgid:
9348 return get_errno(setpgid(arg1, arg2));
9349 case TARGET_NR_umask:
9350 return get_errno(umask(arg1));
9351 case TARGET_NR_chroot:
9352 if (!(p = lock_user_string(arg1)))
9353 return -TARGET_EFAULT;
9354 ret = get_errno(chroot(p));
9355 unlock_user(p, arg1, 0);
9356 return ret;
9357 #ifdef TARGET_NR_dup2
9358 case TARGET_NR_dup2:
9359 ret = get_errno(dup2(arg1, arg2));
9360 if (ret >= 0) {
9361 fd_trans_dup(arg1, arg2);
9362 }
9363 return ret;
9364 #endif
9365 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9366 case TARGET_NR_dup3:
9367 {
9368 int host_flags;
9369
9370 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9371             return -TARGET_EINVAL;
9372 }
9373 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9374 ret = get_errno(dup3(arg1, arg2, host_flags));
9375 if (ret >= 0) {
9376 fd_trans_dup(arg1, arg2);
9377 }
9378 return ret;
9379 }
9380 #endif
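    /*
     * Only O_CLOEXEC is a valid dup3() flag, which is why the case above
     * rejects anything else before translating the bitmask. A minimal
     * guest-side sketch of the call being emulated (illustrative only):
     *
     *     #define _GNU_SOURCE
     *     #include <unistd.h>
     *     #include <fcntl.h>
     *
     *     int dup_cloexec(int oldfd, int newfd)
     *     {
     *         // Valid: duplicate with close-on-exec; any other flag
     *         // is rejected with EINVAL by the handler above.
     *         return dup3(oldfd, newfd, O_CLOEXEC);
     *     }
     */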
9381 #ifdef TARGET_NR_getppid /* not on alpha */
9382 case TARGET_NR_getppid:
9383 return get_errno(getppid());
9384 #endif
9385 #ifdef TARGET_NR_getpgrp
9386 case TARGET_NR_getpgrp:
9387 return get_errno(getpgrp());
9388 #endif
9389 case TARGET_NR_setsid:
9390 return get_errno(setsid());
9391 #ifdef TARGET_NR_sigaction
9392 case TARGET_NR_sigaction:
9393 {
9394 #if defined(TARGET_MIPS)
9395 struct target_sigaction act, oact, *pact, *old_act;
9396
9397 if (arg2) {
9398 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9399 return -TARGET_EFAULT;
9400 act._sa_handler = old_act->_sa_handler;
9401 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9402 act.sa_flags = old_act->sa_flags;
9403 unlock_user_struct(old_act, arg2, 0);
9404 pact = &act;
9405 } else {
9406 pact = NULL;
9407 }
9408
9409 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9410
9411 if (!is_error(ret) && arg3) {
9412 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9413 return -TARGET_EFAULT;
9414 old_act->_sa_handler = oact._sa_handler;
9415 old_act->sa_flags = oact.sa_flags;
9416 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9417 old_act->sa_mask.sig[1] = 0;
9418 old_act->sa_mask.sig[2] = 0;
9419 old_act->sa_mask.sig[3] = 0;
9420 unlock_user_struct(old_act, arg3, 1);
9421 }
9422 #else
9423 struct target_old_sigaction *old_act;
9424 struct target_sigaction act, oact, *pact;
9425 if (arg2) {
9426 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9427 return -TARGET_EFAULT;
9428 act._sa_handler = old_act->_sa_handler;
9429 target_siginitset(&act.sa_mask, old_act->sa_mask);
9430 act.sa_flags = old_act->sa_flags;
9431 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9432 act.sa_restorer = old_act->sa_restorer;
9433 #endif
9434 unlock_user_struct(old_act, arg2, 0);
9435 pact = &act;
9436 } else {
9437 pact = NULL;
9438 }
9439 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9440 if (!is_error(ret) && arg3) {
9441 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9442 return -TARGET_EFAULT;
9443 old_act->_sa_handler = oact._sa_handler;
9444 old_act->sa_mask = oact.sa_mask.sig[0];
9445 old_act->sa_flags = oact.sa_flags;
9446 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9447 old_act->sa_restorer = oact.sa_restorer;
9448 #endif
9449 unlock_user_struct(old_act, arg3, 1);
9450 }
9451 #endif
9452 }
9453 return ret;
9454 #endif
9455 case TARGET_NR_rt_sigaction:
9456 {
9457 /*
9458 * For Alpha and SPARC this is a 5 argument syscall, with
9459 * a 'restorer' parameter which must be copied into the
9460 * sa_restorer field of the sigaction struct.
9461 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9462 * and arg5 is the sigsetsize.
9463 */
9464 #if defined(TARGET_ALPHA)
9465 target_ulong sigsetsize = arg4;
9466 target_ulong restorer = arg5;
9467 #elif defined(TARGET_SPARC)
9468 target_ulong restorer = arg4;
9469 target_ulong sigsetsize = arg5;
9470 #else
9471 target_ulong sigsetsize = arg4;
9472 target_ulong restorer = 0;
9473 #endif
9474 struct target_sigaction *act = NULL;
9475 struct target_sigaction *oact = NULL;
9476
9477 if (sigsetsize != sizeof(target_sigset_t)) {
9478 return -TARGET_EINVAL;
9479 }
9480 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9481 return -TARGET_EFAULT;
9482 }
9483 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9484 ret = -TARGET_EFAULT;
9485 } else {
9486 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9487 if (oact) {
9488 unlock_user_struct(oact, arg3, 1);
9489 }
9490 }
9491 if (act) {
9492 unlock_user_struct(act, arg2, 0);
9493 }
9494 }
9495 return ret;
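    /*
     * A guest-side sketch of a call that lands in the rt_sigaction case
     * above; the sigsetsize check expects the value glibc normally
     * passes, i.e. the kernel's idea of sizeof(sigset_t) (illustrative
     * only):
     *
     *     #include <signal.h>
     *     #include <string.h>
     *
     *     static void on_term(int sig) { (void)sig; }
     *
     *     int install_handler(void)
     *     {
     *         struct sigaction sa;
     *         memset(&sa, 0, sizeof(sa));
     *         sa.sa_handler = on_term;
     *         sigemptyset(&sa.sa_mask);
     *         // glibc typically issues rt_sigaction(SIGTERM, &sa, NULL,
     *         // _NSIG / 8) under the hood.
     *         return sigaction(SIGTERM, &sa, NULL);
     *     }
     */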
9496 #ifdef TARGET_NR_sgetmask /* not on alpha */
9497 case TARGET_NR_sgetmask:
9498 {
9499 sigset_t cur_set;
9500 abi_ulong target_set;
9501 ret = do_sigprocmask(0, NULL, &cur_set);
9502 if (!ret) {
9503 host_to_target_old_sigset(&target_set, &cur_set);
9504 ret = target_set;
9505 }
9506 }
9507 return ret;
9508 #endif
9509 #ifdef TARGET_NR_ssetmask /* not on alpha */
9510 case TARGET_NR_ssetmask:
9511 {
9512 sigset_t set, oset;
9513 abi_ulong target_set = arg1;
9514 target_to_host_old_sigset(&set, &target_set);
9515 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9516 if (!ret) {
9517 host_to_target_old_sigset(&target_set, &oset);
9518 ret = target_set;
9519 }
9520 }
9521 return ret;
9522 #endif
9523 #ifdef TARGET_NR_sigprocmask
9524 case TARGET_NR_sigprocmask:
9525 {
9526 #if defined(TARGET_ALPHA)
9527 sigset_t set, oldset;
9528 abi_ulong mask;
9529 int how;
9530
9531 switch (arg1) {
9532 case TARGET_SIG_BLOCK:
9533 how = SIG_BLOCK;
9534 break;
9535 case TARGET_SIG_UNBLOCK:
9536 how = SIG_UNBLOCK;
9537 break;
9538 case TARGET_SIG_SETMASK:
9539 how = SIG_SETMASK;
9540 break;
9541 default:
9542 return -TARGET_EINVAL;
9543 }
9544 mask = arg2;
9545 target_to_host_old_sigset(&set, &mask);
9546
9547 ret = do_sigprocmask(how, &set, &oldset);
9548 if (!is_error(ret)) {
9549 host_to_target_old_sigset(&mask, &oldset);
9550 ret = mask;
9551 cpu_env->ir[IR_V0] = 0; /* force no error */
9552 }
9553 #else
9554 sigset_t set, oldset, *set_ptr;
9555 int how;
9556
9557 if (arg2) {
9558 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9559 if (!p) {
9560 return -TARGET_EFAULT;
9561 }
9562 target_to_host_old_sigset(&set, p);
9563 unlock_user(p, arg2, 0);
9564 set_ptr = &set;
9565 switch (arg1) {
9566 case TARGET_SIG_BLOCK:
9567 how = SIG_BLOCK;
9568 break;
9569 case TARGET_SIG_UNBLOCK:
9570 how = SIG_UNBLOCK;
9571 break;
9572 case TARGET_SIG_SETMASK:
9573 how = SIG_SETMASK;
9574 break;
9575 default:
9576 return -TARGET_EINVAL;
9577 }
9578 } else {
9579 how = 0;
9580 set_ptr = NULL;
9581 }
9582 ret = do_sigprocmask(how, set_ptr, &oldset);
9583 if (!is_error(ret) && arg3) {
9584 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9585 return -TARGET_EFAULT;
9586 host_to_target_old_sigset(p, &oldset);
9587 unlock_user(p, arg3, sizeof(target_sigset_t));
9588 }
9589 #endif
9590 }
9591 return ret;
9592 #endif
9593 case TARGET_NR_rt_sigprocmask:
9594 {
9595 int how = arg1;
9596 sigset_t set, oldset, *set_ptr;
9597
9598 if (arg4 != sizeof(target_sigset_t)) {
9599 return -TARGET_EINVAL;
9600 }
9601
9602 if (arg2) {
9603 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9604 if (!p) {
9605 return -TARGET_EFAULT;
9606 }
9607 target_to_host_sigset(&set, p);
9608 unlock_user(p, arg2, 0);
9609 set_ptr = &set;
9610 switch(how) {
9611 case TARGET_SIG_BLOCK:
9612 how = SIG_BLOCK;
9613 break;
9614 case TARGET_SIG_UNBLOCK:
9615 how = SIG_UNBLOCK;
9616 break;
9617 case TARGET_SIG_SETMASK:
9618 how = SIG_SETMASK;
9619 break;
9620 default:
9621 return -TARGET_EINVAL;
9622 }
9623 } else {
9624 how = 0;
9625 set_ptr = NULL;
9626 }
9627 ret = do_sigprocmask(how, set_ptr, &oldset);
9628 if (!is_error(ret) && arg3) {
9629 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9630 return -TARGET_EFAULT;
9631 host_to_target_sigset(p, &oldset);
9632 unlock_user(p, arg3, sizeof(target_sigset_t));
9633 }
9634 }
9635 return ret;
9636 #ifdef TARGET_NR_sigpending
9637 case TARGET_NR_sigpending:
9638 {
9639 sigset_t set;
9640 ret = get_errno(sigpending(&set));
9641 if (!is_error(ret)) {
9642 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9643 return -TARGET_EFAULT;
9644 host_to_target_old_sigset(p, &set);
9645 unlock_user(p, arg1, sizeof(target_sigset_t));
9646 }
9647 }
9648 return ret;
9649 #endif
9650 case TARGET_NR_rt_sigpending:
9651 {
9652 sigset_t set;
9653
9654         /* Yes, this check is >, not != as in most places. We follow the
9655          * kernel's logic: it implements NR_sigpending through the same
9656          * code path, and in that case the old_sigset_t is smaller in
9657          * size.
9658          */
9659 if (arg2 > sizeof(target_sigset_t)) {
9660 return -TARGET_EINVAL;
9661 }
9662
9663 ret = get_errno(sigpending(&set));
9664 if (!is_error(ret)) {
9665 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9666 return -TARGET_EFAULT;
9667 host_to_target_sigset(p, &set);
9668 unlock_user(p, arg1, sizeof(target_sigset_t));
9669 }
9670 }
9671 return ret;
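    /*
     * Worked example of the '>' check above, with illustrative sizes
     * (not taken from any particular target): if target_sigset_t is
     * 8 bytes and the legacy old_sigset_t is 4, then
     *
     *     rt_sigpending(&set, 8);    accepted (exact size)
     *     rt_sigpending(&set, 4);    accepted (smaller legacy size)
     *     rt_sigpending(&set, 16);   rejected with EINVAL
     *
     * matching the kernel, which funnels sigpending() through the same
     * code path.
     */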
9672 #ifdef TARGET_NR_sigsuspend
9673 case TARGET_NR_sigsuspend:
9674 {
9675 sigset_t *set;
9676
9677 #if defined(TARGET_ALPHA)
9678 TaskState *ts = cpu->opaque;
9679 /* target_to_host_old_sigset will bswap back */
9680 abi_ulong mask = tswapal(arg1);
9681 set = &ts->sigsuspend_mask;
9682 target_to_host_old_sigset(set, &mask);
9683 #else
9684 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9685 if (ret != 0) {
9686 return ret;
9687 }
9688 #endif
9689 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9690 finish_sigsuspend_mask(ret);
9691 }
9692 return ret;
9693 #endif
9694 case TARGET_NR_rt_sigsuspend:
9695 {
9696 sigset_t *set;
9697
9698 ret = process_sigsuspend_mask(&set, arg1, arg2);
9699 if (ret != 0) {
9700 return ret;
9701 }
9702 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9703 finish_sigsuspend_mask(ret);
9704 }
9705 return ret;
9706 #ifdef TARGET_NR_rt_sigtimedwait
9707 case TARGET_NR_rt_sigtimedwait:
9708 {
9709 sigset_t set;
9710 struct timespec uts, *puts;
9711 siginfo_t uinfo;
9712
9713 if (arg4 != sizeof(target_sigset_t)) {
9714 return -TARGET_EINVAL;
9715 }
9716
9717 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9718 return -TARGET_EFAULT;
9719 target_to_host_sigset(&set, p);
9720 unlock_user(p, arg1, 0);
9721 if (arg3) {
9722 puts = &uts;
9723 if (target_to_host_timespec(puts, arg3)) {
9724 return -TARGET_EFAULT;
9725 }
9726 } else {
9727 puts = NULL;
9728 }
9729 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9730 SIGSET_T_SIZE));
9731 if (!is_error(ret)) {
9732 if (arg2) {
9733 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9734 0);
9735 if (!p) {
9736 return -TARGET_EFAULT;
9737 }
9738 host_to_target_siginfo(p, &uinfo);
9739 unlock_user(p, arg2, sizeof(target_siginfo_t));
9740 }
9741 ret = host_to_target_signal(ret);
9742 }
9743 }
9744 return ret;
9745 #endif
9746 #ifdef TARGET_NR_rt_sigtimedwait_time64
9747 case TARGET_NR_rt_sigtimedwait_time64:
9748 {
9749 sigset_t set;
9750 struct timespec uts, *puts;
9751 siginfo_t uinfo;
9752
9753 if (arg4 != sizeof(target_sigset_t)) {
9754 return -TARGET_EINVAL;
9755 }
9756
9757 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9758 if (!p) {
9759 return -TARGET_EFAULT;
9760 }
9761 target_to_host_sigset(&set, p);
9762 unlock_user(p, arg1, 0);
9763 if (arg3) {
9764 puts = &uts;
9765 if (target_to_host_timespec64(puts, arg3)) {
9766 return -TARGET_EFAULT;
9767 }
9768 } else {
9769 puts = NULL;
9770 }
9771 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9772 SIGSET_T_SIZE));
9773 if (!is_error(ret)) {
9774 if (arg2) {
9775 p = lock_user(VERIFY_WRITE, arg2,
9776 sizeof(target_siginfo_t), 0);
9777 if (!p) {
9778 return -TARGET_EFAULT;
9779 }
9780 host_to_target_siginfo(p, &uinfo);
9781 unlock_user(p, arg2, sizeof(target_siginfo_t));
9782 }
9783 ret = host_to_target_signal(ret);
9784 }
9785 }
9786 return ret;
9787 #endif
9788 case TARGET_NR_rt_sigqueueinfo:
9789 {
9790 siginfo_t uinfo;
9791
9792 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9793 if (!p) {
9794 return -TARGET_EFAULT;
9795 }
9796 target_to_host_siginfo(&uinfo, p);
9797 unlock_user(p, arg3, 0);
9798 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9799 }
9800 return ret;
9801 case TARGET_NR_rt_tgsigqueueinfo:
9802 {
9803 siginfo_t uinfo;
9804
9805 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9806 if (!p) {
9807 return -TARGET_EFAULT;
9808 }
9809 target_to_host_siginfo(&uinfo, p);
9810 unlock_user(p, arg4, 0);
9811 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9812 }
9813 return ret;
9814 #ifdef TARGET_NR_sigreturn
9815 case TARGET_NR_sigreturn:
9816 if (block_signals()) {
9817 return -QEMU_ERESTARTSYS;
9818 }
9819 return do_sigreturn(cpu_env);
9820 #endif
9821 case TARGET_NR_rt_sigreturn:
9822 if (block_signals()) {
9823 return -QEMU_ERESTARTSYS;
9824 }
9825 return do_rt_sigreturn(cpu_env);
9826 case TARGET_NR_sethostname:
9827 if (!(p = lock_user_string(arg1)))
9828 return -TARGET_EFAULT;
9829 ret = get_errno(sethostname(p, arg2));
9830 unlock_user(p, arg1, 0);
9831 return ret;
9832 #ifdef TARGET_NR_setrlimit
9833 case TARGET_NR_setrlimit:
9834 {
9835 int resource = target_to_host_resource(arg1);
9836 struct target_rlimit *target_rlim;
9837 struct rlimit rlim;
9838 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9839 return -TARGET_EFAULT;
9840 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9841 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9842 unlock_user_struct(target_rlim, arg2, 0);
9843 /*
9844 * If we just passed through resource limit settings for memory then
9845 * they would also apply to QEMU's own allocations, and QEMU will
9846 * crash or hang or die if its allocations fail. Ideally we would
9847 * track the guest allocations in QEMU and apply the limits ourselves.
9848 * For now, just tell the guest the call succeeded but don't actually
9849 * limit anything.
9850 */
9851 if (resource != RLIMIT_AS &&
9852 resource != RLIMIT_DATA &&
9853 resource != RLIMIT_STACK) {
9854 return get_errno(setrlimit(resource, &rlim));
9855 } else {
9856 return 0;
9857 }
9858 }
9859 #endif
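    /*
     * A guest-visible consequence of the setrlimit special-casing above,
     * as a hedged sketch (illustrative only): lowering a memory limit
     * reports success but is never enforced.
     *
     *     #include <sys/resource.h>
     *
     *     void shrink_address_space(void)
     *     {
     *         struct rlimit rl = { 64 << 20, 64 << 20 };
     *         // Returns 0 under emulation, but allocations beyond
     *         // 64 MiB will still succeed; see the comment above.
     *         setrlimit(RLIMIT_AS, &rl);
     *     }
     */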
9860 #ifdef TARGET_NR_getrlimit
9861 case TARGET_NR_getrlimit:
9862 {
9863 int resource = target_to_host_resource(arg1);
9864 struct target_rlimit *target_rlim;
9865 struct rlimit rlim;
9866
9867 ret = get_errno(getrlimit(resource, &rlim));
9868 if (!is_error(ret)) {
9869 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9870 return -TARGET_EFAULT;
9871 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9872 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9873 unlock_user_struct(target_rlim, arg2, 1);
9874 }
9875 }
9876 return ret;
9877 #endif
9878 case TARGET_NR_getrusage:
9879 {
9880 struct rusage rusage;
9881 ret = get_errno(getrusage(arg1, &rusage));
9882 if (!is_error(ret)) {
9883 ret = host_to_target_rusage(arg2, &rusage);
9884 }
9885 }
9886 return ret;
9887 #if defined(TARGET_NR_gettimeofday)
9888 case TARGET_NR_gettimeofday:
9889 {
9890 struct timeval tv;
9891 struct timezone tz;
9892
9893 ret = get_errno(gettimeofday(&tv, &tz));
9894 if (!is_error(ret)) {
9895 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9896 return -TARGET_EFAULT;
9897 }
9898 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9899 return -TARGET_EFAULT;
9900 }
9901 }
9902 }
9903 return ret;
9904 #endif
9905 #if defined(TARGET_NR_settimeofday)
9906 case TARGET_NR_settimeofday:
9907 {
9908 struct timeval tv, *ptv = NULL;
9909 struct timezone tz, *ptz = NULL;
9910
9911 if (arg1) {
9912 if (copy_from_user_timeval(&tv, arg1)) {
9913 return -TARGET_EFAULT;
9914 }
9915 ptv = &tv;
9916 }
9917
9918 if (arg2) {
9919 if (copy_from_user_timezone(&tz, arg2)) {
9920 return -TARGET_EFAULT;
9921 }
9922 ptz = &tz;
9923 }
9924
9925 return get_errno(settimeofday(ptv, ptz));
9926 }
9927 #endif
9928 #if defined(TARGET_NR_select)
9929 case TARGET_NR_select:
9930 #if defined(TARGET_WANT_NI_OLD_SELECT)
9931         /* Some architectures used to implement old_select here,
9932          * but now return ENOSYS for it.
9933          */
9934 ret = -TARGET_ENOSYS;
9935 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9936 ret = do_old_select(arg1);
9937 #else
9938 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9939 #endif
9940 return ret;
9941 #endif
9942 #ifdef TARGET_NR_pselect6
9943 case TARGET_NR_pselect6:
9944 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9945 #endif
9946 #ifdef TARGET_NR_pselect6_time64
9947 case TARGET_NR_pselect6_time64:
9948 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9949 #endif
9950 #ifdef TARGET_NR_symlink
9951 case TARGET_NR_symlink:
9952 {
9953 void *p2;
9954 p = lock_user_string(arg1);
9955 p2 = lock_user_string(arg2);
9956 if (!p || !p2)
9957 ret = -TARGET_EFAULT;
9958 else
9959 ret = get_errno(symlink(p, p2));
9960 unlock_user(p2, arg2, 0);
9961 unlock_user(p, arg1, 0);
9962 }
9963 return ret;
9964 #endif
9965 #if defined(TARGET_NR_symlinkat)
9966 case TARGET_NR_symlinkat:
9967 {
9968 void *p2;
9969 p = lock_user_string(arg1);
9970 p2 = lock_user_string(arg3);
9971 if (!p || !p2)
9972 ret = -TARGET_EFAULT;
9973 else
9974 ret = get_errno(symlinkat(p, arg2, p2));
9975 unlock_user(p2, arg3, 0);
9976 unlock_user(p, arg1, 0);
9977 }
9978 return ret;
9979 #endif
9980 #ifdef TARGET_NR_readlink
9981 case TARGET_NR_readlink:
9982 {
9983 void *p2;
9984 p = lock_user_string(arg1);
9985 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9986 if (!p || !p2) {
9987 ret = -TARGET_EFAULT;
9988 } else if (!arg3) {
9989 /* Short circuit this for the magic exe check. */
9990 ret = -TARGET_EINVAL;
9991 } else if (is_proc_myself((const char *)p, "exe")) {
9992 /*
9993 * Don't worry about sign mismatch as earlier mapping
9994 * logic would have thrown a bad address error.
9995 */
9996 ret = MIN(strlen(exec_path), arg3);
9997 /* We cannot NUL terminate the string. */
9998 memcpy(p2, exec_path, ret);
9999 } else {
10000 ret = get_errno(readlink(path(p), p2, arg3));
10001 }
10002 unlock_user(p2, arg2, ret);
10003 unlock_user(p, arg1, 0);
10004 }
10005 return ret;
10006 #endif
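    /*
     * The is_proc_myself() branch above means a guest asking for its own
     * executable path sees the emulated binary rather than QEMU itself.
     * Guest-side sketch of a call taking that path (illustrative only):
     *
     *     #include <unistd.h>
     *
     *     ssize_t where_am_i(char *buf, size_t len)
     *     {
     *         // Returns exec_path truncated to len, with no trailing
     *         // NUL, exactly like the kernel's readlink().
     *         return readlink("/proc/self/exe", buf, len);
     *     }
     */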
10007 #if defined(TARGET_NR_readlinkat)
10008 case TARGET_NR_readlinkat:
10009 {
10010 void *p2;
10011 p = lock_user_string(arg2);
10012 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10013 if (!p || !p2) {
10014 ret = -TARGET_EFAULT;
10015 } else if (!arg4) {
10016 /* Short circuit this for the magic exe check. */
10017 ret = -TARGET_EINVAL;
10018 } else if (is_proc_myself((const char *)p, "exe")) {
10019 /*
10020 * Don't worry about sign mismatch as earlier mapping
10021 * logic would have thrown a bad address error.
10022 */
10023 ret = MIN(strlen(exec_path), arg4);
10024 /* We cannot NUL terminate the string. */
10025 memcpy(p2, exec_path, ret);
10026 } else {
10027 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10028 }
10029 unlock_user(p2, arg3, ret);
10030 unlock_user(p, arg2, 0);
10031 }
10032 return ret;
10033 #endif
10034 #ifdef TARGET_NR_swapon
10035 case TARGET_NR_swapon:
10036 if (!(p = lock_user_string(arg1)))
10037 return -TARGET_EFAULT;
10038 ret = get_errno(swapon(p, arg2));
10039 unlock_user(p, arg1, 0);
10040 return ret;
10041 #endif
10042 case TARGET_NR_reboot:
10043 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10044             /* arg4 is only valid for LINUX_REBOOT_CMD_RESTART2 */
10045 p = lock_user_string(arg4);
10046 if (!p) {
10047 return -TARGET_EFAULT;
10048 }
10049 ret = get_errno(reboot(arg1, arg2, arg3, p));
10050 unlock_user(p, arg4, 0);
10051 } else {
10052 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10053 }
10054 return ret;
10055 #ifdef TARGET_NR_mmap
10056 case TARGET_NR_mmap:
10057 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10058 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10059 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10060 || defined(TARGET_S390X)
10061 {
10062 abi_ulong *v;
10063 abi_ulong v1, v2, v3, v4, v5, v6;
10064 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10065 return -TARGET_EFAULT;
10066 v1 = tswapal(v[0]);
10067 v2 = tswapal(v[1]);
10068 v3 = tswapal(v[2]);
10069 v4 = tswapal(v[3]);
10070 v5 = tswapal(v[4]);
10071 v6 = tswapal(v[5]);
10072 unlock_user(v, arg1, 0);
10073 ret = get_errno(target_mmap(v1, v2, v3,
10074 target_to_host_bitmask(v4, mmap_flags_tbl),
10075 v5, v6));
10076 }
10077 #else
10078 /* mmap pointers are always untagged */
10079 ret = get_errno(target_mmap(arg1, arg2, arg3,
10080 target_to_host_bitmask(arg4, mmap_flags_tbl),
10081 arg5,
10082 arg6));
10083 #endif
10084 return ret;
10085 #endif
10086 #ifdef TARGET_NR_mmap2
10087 case TARGET_NR_mmap2:
10088 #ifndef MMAP_SHIFT
10089 #define MMAP_SHIFT 12
10090 #endif
10091 ret = target_mmap(arg1, arg2, arg3,
10092 target_to_host_bitmask(arg4, mmap_flags_tbl),
10093 arg5, arg6 << MMAP_SHIFT);
10094 return get_errno(ret);
10095 #endif
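    /*
     * mmap2() takes its file offset in units of 1 << MMAP_SHIFT (4096)
     * bytes rather than in bytes, so a 32-bit guest ABI can express
     * larger file offsets; the shift above converts the guest's
     * page-unit offset into the byte offset that target_mmap() expects:
     *
     *     byte_offset = arg6 << MMAP_SHIFT;
     */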
10096 case TARGET_NR_munmap:
10097 arg1 = cpu_untagged_addr(cpu, arg1);
10098 return get_errno(target_munmap(arg1, arg2));
10099 case TARGET_NR_mprotect:
10100 arg1 = cpu_untagged_addr(cpu, arg1);
10101 {
10102 TaskState *ts = cpu->opaque;
10103 /* Special hack to detect libc making the stack executable. */
10104 if ((arg3 & PROT_GROWSDOWN)
10105 && arg1 >= ts->info->stack_limit
10106 && arg1 <= ts->info->start_stack) {
10107 arg3 &= ~PROT_GROWSDOWN;
10108 arg2 = arg2 + arg1 - ts->info->stack_limit;
10109 arg1 = ts->info->stack_limit;
10110 }
10111 }
10112 return get_errno(target_mprotect(arg1, arg2, arg3));
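    /*
     * Effect of the PROT_GROWSDOWN fixup above, conceptually: a libc
     * request such as
     *
     *     mprotect(stack_page, pagesize,
     *              PROT_READ | PROT_WRITE | PROT_EXEC | PROT_GROWSDOWN);
     *
     * is widened to cover the whole emulated stack,
     *
     *     mprotect(stack_limit, stack_page + pagesize - stack_limit,
     *              PROT_READ | PROT_WRITE | PROT_EXEC);
     *
     * because the guest stack cannot rely on the host kernel's
     * grows-down VMA handling (a simplified reading of the code above).
     */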
10113 #ifdef TARGET_NR_mremap
10114 case TARGET_NR_mremap:
10115 arg1 = cpu_untagged_addr(cpu, arg1);
10116 /* mremap new_addr (arg5) is always untagged */
10117 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10118 #endif
10119 /* ??? msync/mlock/munlock are broken for softmmu. */
10120 #ifdef TARGET_NR_msync
10121 case TARGET_NR_msync:
10122 return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10123 #endif
10124 #ifdef TARGET_NR_mlock
10125 case TARGET_NR_mlock:
10126 return get_errno(mlock(g2h(cpu, arg1), arg2));
10127 #endif
10128 #ifdef TARGET_NR_munlock
10129 case TARGET_NR_munlock:
10130 return get_errno(munlock(g2h(cpu, arg1), arg2));
10131 #endif
10132 #ifdef TARGET_NR_mlockall
10133 case TARGET_NR_mlockall:
10134 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10135 #endif
10136 #ifdef TARGET_NR_munlockall
10137 case TARGET_NR_munlockall:
10138 return get_errno(munlockall());
10139 #endif
10140 #ifdef TARGET_NR_truncate
10141 case TARGET_NR_truncate:
10142 if (!(p = lock_user_string(arg1)))
10143 return -TARGET_EFAULT;
10144 ret = get_errno(truncate(p, arg2));
10145 unlock_user(p, arg1, 0);
10146 return ret;
10147 #endif
10148 #ifdef TARGET_NR_ftruncate
10149 case TARGET_NR_ftruncate:
10150 return get_errno(ftruncate(arg1, arg2));
10151 #endif
10152 case TARGET_NR_fchmod:
10153 return get_errno(fchmod(arg1, arg2));
10154 #if defined(TARGET_NR_fchmodat)
10155 case TARGET_NR_fchmodat:
10156 if (!(p = lock_user_string(arg2)))
10157 return -TARGET_EFAULT;
10158 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10159 unlock_user(p, arg2, 0);
10160 return ret;
10161 #endif
10162 case TARGET_NR_getpriority:
10163 /* Note that negative values are valid for getpriority, so we must
10164 differentiate based on errno settings. */
10165 errno = 0;
10166 ret = getpriority(arg1, arg2);
10167 if (ret == -1 && errno != 0) {
10168 return -host_to_target_errno(errno);
10169 }
10170 #ifdef TARGET_ALPHA
10171 /* Return value is the unbiased priority. Signal no error. */
10172 cpu_env->ir[IR_V0] = 0;
10173 #else
10174 /* Return value is a biased priority to avoid negative numbers. */
10175 ret = 20 - ret;
10176 #endif
10177 return ret;
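    /*
     * Worked example of the bias in the non-Alpha path above: the raw
     * syscall returns 20 - nice, so the result is always positive.
     *
     *     nice -20  ->  20 - (-20) = 40
     *     nice   0  ->  20 -    0  = 20
     *     nice  19  ->  20 -   19  =  1
     *
     * glibc's getpriority() wrapper undoes the bias before returning.
     */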
10178 case TARGET_NR_setpriority:
10179 return get_errno(setpriority(arg1, arg2, arg3));
10180 #ifdef TARGET_NR_statfs
10181 case TARGET_NR_statfs:
10182 if (!(p = lock_user_string(arg1))) {
10183 return -TARGET_EFAULT;
10184 }
10185 ret = get_errno(statfs(path(p), &stfs));
10186 unlock_user(p, arg1, 0);
10187 convert_statfs:
10188 if (!is_error(ret)) {
10189 struct target_statfs *target_stfs;
10190
10191 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10192 return -TARGET_EFAULT;
10193 __put_user(stfs.f_type, &target_stfs->f_type);
10194 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10195 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10196 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10197 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10198 __put_user(stfs.f_files, &target_stfs->f_files);
10199 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10200 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10201 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10202 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10203 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10204 #ifdef _STATFS_F_FLAGS
10205 __put_user(stfs.f_flags, &target_stfs->f_flags);
10206 #else
10207 __put_user(0, &target_stfs->f_flags);
10208 #endif
10209 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10210 unlock_user_struct(target_stfs, arg2, 1);
10211 }
10212 return ret;
10213 #endif
10214 #ifdef TARGET_NR_fstatfs
10215 case TARGET_NR_fstatfs:
10216 ret = get_errno(fstatfs(arg1, &stfs));
10217 goto convert_statfs;
10218 #endif
10219 #ifdef TARGET_NR_statfs64
10220 case TARGET_NR_statfs64:
10221 if (!(p = lock_user_string(arg1))) {
10222 return -TARGET_EFAULT;
10223 }
10224 ret = get_errno(statfs(path(p), &stfs));
10225 unlock_user(p, arg1, 0);
10226 convert_statfs64:
10227 if (!is_error(ret)) {
10228 struct target_statfs64 *target_stfs;
10229
10230 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10231 return -TARGET_EFAULT;
10232 __put_user(stfs.f_type, &target_stfs->f_type);
10233 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10234 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10235 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10236 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10237 __put_user(stfs.f_files, &target_stfs->f_files);
10238 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10239 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10240 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10241 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10242 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10243 #ifdef _STATFS_F_FLAGS
10244 __put_user(stfs.f_flags, &target_stfs->f_flags);
10245 #else
10246 __put_user(0, &target_stfs->f_flags);
10247 #endif
10248 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10249 unlock_user_struct(target_stfs, arg3, 1);
10250 }
10251 return ret;
10252 case TARGET_NR_fstatfs64:
10253 ret = get_errno(fstatfs(arg1, &stfs));
10254 goto convert_statfs64;
10255 #endif
10256 #ifdef TARGET_NR_socketcall
10257 case TARGET_NR_socketcall:
10258 return do_socketcall(arg1, arg2);
10259 #endif
10260 #ifdef TARGET_NR_accept
10261 case TARGET_NR_accept:
10262 return do_accept4(arg1, arg2, arg3, 0);
10263 #endif
10264 #ifdef TARGET_NR_accept4
10265 case TARGET_NR_accept4:
10266 return do_accept4(arg1, arg2, arg3, arg4);
10267 #endif
10268 #ifdef TARGET_NR_bind
10269 case TARGET_NR_bind:
10270 return do_bind(arg1, arg2, arg3);
10271 #endif
10272 #ifdef TARGET_NR_connect
10273 case TARGET_NR_connect:
10274 return do_connect(arg1, arg2, arg3);
10275 #endif
10276 #ifdef TARGET_NR_getpeername
10277 case TARGET_NR_getpeername:
10278 return do_getpeername(arg1, arg2, arg3);
10279 #endif
10280 #ifdef TARGET_NR_getsockname
10281 case TARGET_NR_getsockname:
10282 return do_getsockname(arg1, arg2, arg3);
10283 #endif
10284 #ifdef TARGET_NR_getsockopt
10285 case TARGET_NR_getsockopt:
10286 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10287 #endif
10288 #ifdef TARGET_NR_listen
10289 case TARGET_NR_listen:
10290 return get_errno(listen(arg1, arg2));
10291 #endif
10292 #ifdef TARGET_NR_recv
10293 case TARGET_NR_recv:
10294 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10295 #endif
10296 #ifdef TARGET_NR_recvfrom
10297 case TARGET_NR_recvfrom:
10298 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10299 #endif
10300 #ifdef TARGET_NR_recvmsg
10301 case TARGET_NR_recvmsg:
10302 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10303 #endif
10304 #ifdef TARGET_NR_send
10305 case TARGET_NR_send:
10306 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10307 #endif
10308 #ifdef TARGET_NR_sendmsg
10309 case TARGET_NR_sendmsg:
10310 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10311 #endif
10312 #ifdef TARGET_NR_sendmmsg
10313 case TARGET_NR_sendmmsg:
10314 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10315 #endif
10316 #ifdef TARGET_NR_recvmmsg
10317 case TARGET_NR_recvmmsg:
10318 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10319 #endif
10320 #ifdef TARGET_NR_sendto
10321 case TARGET_NR_sendto:
10322 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10323 #endif
10324 #ifdef TARGET_NR_shutdown
10325 case TARGET_NR_shutdown:
10326 return get_errno(shutdown(arg1, arg2));
10327 #endif
10328 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10329 case TARGET_NR_getrandom:
10330 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10331 if (!p) {
10332 return -TARGET_EFAULT;
10333 }
10334 ret = get_errno(getrandom(p, arg2, arg3));
10335 unlock_user(p, arg1, ret);
10336 return ret;
10337 #endif
10338 #ifdef TARGET_NR_socket
10339 case TARGET_NR_socket:
10340 return do_socket(arg1, arg2, arg3);
10341 #endif
10342 #ifdef TARGET_NR_socketpair
10343 case TARGET_NR_socketpair:
10344 return do_socketpair(arg1, arg2, arg3, arg4);
10345 #endif
10346 #ifdef TARGET_NR_setsockopt
10347 case TARGET_NR_setsockopt:
10348 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10349 #endif
10350 #if defined(TARGET_NR_syslog)
10351 case TARGET_NR_syslog:
10352 {
10353             int len = arg3;
10354
10355 switch (arg1) {
10356 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10357 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10358 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10359 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10360 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10361 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10362 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10363 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10364 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10365 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10366 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10367 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10368 {
10369 if (len < 0) {
10370 return -TARGET_EINVAL;
10371 }
10372 if (len == 0) {
10373 return 0;
10374 }
10375 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10376 if (!p) {
10377 return -TARGET_EFAULT;
10378 }
10379 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10380 unlock_user(p, arg2, arg3);
10381 }
10382 return ret;
10383 default:
10384 return -TARGET_EINVAL;
10385 }
10386 }
10387 break;
10388 #endif
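    /*
     * Guest-side sketch of the syslog read path above; glibc exposes the
     * syscall as klogctl() (illustrative only):
     *
     *     #include <sys/klog.h>
     *
     *     int dump_kernel_log(char *buf, int len)
     *     {
     *         // SYSLOG_ACTION_READ_ALL (3): copy the most recent
     *         // messages from the ring buffer without consuming them.
     *         return klogctl(3, buf, len);
     *     }
     */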
10389 case TARGET_NR_setitimer:
10390 {
10391 struct itimerval value, ovalue, *pvalue;
10392
10393 if (arg2) {
10394 pvalue = &value;
10395 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10396 || copy_from_user_timeval(&pvalue->it_value,
10397 arg2 + sizeof(struct target_timeval)))
10398 return -TARGET_EFAULT;
10399 } else {
10400 pvalue = NULL;
10401 }
10402 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10403 if (!is_error(ret) && arg3) {
10404 if (copy_to_user_timeval(arg3,
10405 &ovalue.it_interval)
10406 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10407 &ovalue.it_value))
10408 return -TARGET_EFAULT;
10409 }
10410 }
10411 return ret;
10412 case TARGET_NR_getitimer:
10413 {
10414 struct itimerval value;
10415
10416 ret = get_errno(getitimer(arg1, &value));
10417 if (!is_error(ret) && arg2) {
10418 if (copy_to_user_timeval(arg2,
10419 &value.it_interval)
10420 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10421 &value.it_value))
10422 return -TARGET_EFAULT;
10423 }
10424 }
10425 return ret;
10426 #ifdef TARGET_NR_stat
10427 case TARGET_NR_stat:
10428 if (!(p = lock_user_string(arg1))) {
10429 return -TARGET_EFAULT;
10430 }
10431 ret = get_errno(stat(path(p), &st));
10432 unlock_user(p, arg1, 0);
10433 goto do_stat;
10434 #endif
10435 #ifdef TARGET_NR_lstat
10436 case TARGET_NR_lstat:
10437 if (!(p = lock_user_string(arg1))) {
10438 return -TARGET_EFAULT;
10439 }
10440 ret = get_errno(lstat(path(p), &st));
10441 unlock_user(p, arg1, 0);
10442 goto do_stat;
10443 #endif
10444 #ifdef TARGET_NR_fstat
10445 case TARGET_NR_fstat:
10446 {
10447 ret = get_errno(fstat(arg1, &st));
10448 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10449 do_stat:
10450 #endif
10451 if (!is_error(ret)) {
10452 struct target_stat *target_st;
10453
10454 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10455 return -TARGET_EFAULT;
10456 memset(target_st, 0, sizeof(*target_st));
10457 __put_user(st.st_dev, &target_st->st_dev);
10458 __put_user(st.st_ino, &target_st->st_ino);
10459 __put_user(st.st_mode, &target_st->st_mode);
10460 __put_user(st.st_uid, &target_st->st_uid);
10461 __put_user(st.st_gid, &target_st->st_gid);
10462 __put_user(st.st_nlink, &target_st->st_nlink);
10463 __put_user(st.st_rdev, &target_st->st_rdev);
10464 __put_user(st.st_size, &target_st->st_size);
10465 __put_user(st.st_blksize, &target_st->st_blksize);
10466 __put_user(st.st_blocks, &target_st->st_blocks);
10467 __put_user(st.st_atime, &target_st->target_st_atime);
10468 __put_user(st.st_mtime, &target_st->target_st_mtime);
10469 __put_user(st.st_ctime, &target_st->target_st_ctime);
10470 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10471 __put_user(st.st_atim.tv_nsec,
10472 &target_st->target_st_atime_nsec);
10473 __put_user(st.st_mtim.tv_nsec,
10474 &target_st->target_st_mtime_nsec);
10475 __put_user(st.st_ctim.tv_nsec,
10476 &target_st->target_st_ctime_nsec);
10477 #endif
10478 unlock_user_struct(target_st, arg2, 1);
10479 }
10480 }
10481 return ret;
10482 #endif
10483 case TARGET_NR_vhangup:
10484 return get_errno(vhangup());
10485 #ifdef TARGET_NR_syscall
10486 case TARGET_NR_syscall:
10487 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10488 arg6, arg7, arg8, 0);
10489 #endif
10490 #if defined(TARGET_NR_wait4)
10491 case TARGET_NR_wait4:
10492 {
10493 int status;
10494 abi_long status_ptr = arg2;
10495 struct rusage rusage, *rusage_ptr;
10496 abi_ulong target_rusage = arg4;
10497 abi_long rusage_err;
10498 if (target_rusage)
10499 rusage_ptr = &rusage;
10500 else
10501 rusage_ptr = NULL;
10502 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10503 if (!is_error(ret)) {
10504 if (status_ptr && ret) {
10505 status = host_to_target_waitstatus(status);
10506 if (put_user_s32(status, status_ptr))
10507 return -TARGET_EFAULT;
10508 }
10509 if (target_rusage) {
10510 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10511 if (rusage_err) {
10512 ret = rusage_err;
10513 }
10514 }
10515 }
10516 }
10517 return ret;
10518 #endif
10519 #ifdef TARGET_NR_swapoff
10520 case TARGET_NR_swapoff:
10521 if (!(p = lock_user_string(arg1)))
10522 return -TARGET_EFAULT;
10523 ret = get_errno(swapoff(p));
10524 unlock_user(p, arg1, 0);
10525 return ret;
10526 #endif
10527 case TARGET_NR_sysinfo:
10528 {
10529 struct target_sysinfo *target_value;
10530 struct sysinfo value;
10531 ret = get_errno(sysinfo(&value));
10532 if (!is_error(ret) && arg1)
10533 {
10534 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10535 return -TARGET_EFAULT;
10536 __put_user(value.uptime, &target_value->uptime);
10537 __put_user(value.loads[0], &target_value->loads[0]);
10538 __put_user(value.loads[1], &target_value->loads[1]);
10539 __put_user(value.loads[2], &target_value->loads[2]);
10540 __put_user(value.totalram, &target_value->totalram);
10541 __put_user(value.freeram, &target_value->freeram);
10542 __put_user(value.sharedram, &target_value->sharedram);
10543 __put_user(value.bufferram, &target_value->bufferram);
10544 __put_user(value.totalswap, &target_value->totalswap);
10545 __put_user(value.freeswap, &target_value->freeswap);
10546 __put_user(value.procs, &target_value->procs);
10547 __put_user(value.totalhigh, &target_value->totalhigh);
10548 __put_user(value.freehigh, &target_value->freehigh);
10549 __put_user(value.mem_unit, &target_value->mem_unit);
10550 unlock_user_struct(target_value, arg1, 1);
10551 }
10552 }
10553 return ret;
10554 #ifdef TARGET_NR_ipc
10555 case TARGET_NR_ipc:
10556 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10557 #endif
10558 #ifdef TARGET_NR_semget
10559 case TARGET_NR_semget:
10560 return get_errno(semget(arg1, arg2, arg3));
10561 #endif
10562 #ifdef TARGET_NR_semop
10563 case TARGET_NR_semop:
10564 return do_semtimedop(arg1, arg2, arg3, 0, false);
10565 #endif
10566 #ifdef TARGET_NR_semtimedop
10567 case TARGET_NR_semtimedop:
10568 return do_semtimedop(arg1, arg2, arg3, arg4, false);
10569 #endif
10570 #ifdef TARGET_NR_semtimedop_time64
10571 case TARGET_NR_semtimedop_time64:
10572 return do_semtimedop(arg1, arg2, arg3, arg4, true);
10573 #endif
10574 #ifdef TARGET_NR_semctl
10575 case TARGET_NR_semctl:
10576 return do_semctl(arg1, arg2, arg3, arg4);
10577 #endif
10578 #ifdef TARGET_NR_msgctl
10579 case TARGET_NR_msgctl:
10580 return do_msgctl(arg1, arg2, arg3);
10581 #endif
10582 #ifdef TARGET_NR_msgget
10583 case TARGET_NR_msgget:
10584 return get_errno(msgget(arg1, arg2));
10585 #endif
10586 #ifdef TARGET_NR_msgrcv
10587 case TARGET_NR_msgrcv:
10588 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10589 #endif
10590 #ifdef TARGET_NR_msgsnd
10591 case TARGET_NR_msgsnd:
10592 return do_msgsnd(arg1, arg2, arg3, arg4);
10593 #endif
10594 #ifdef TARGET_NR_shmget
10595 case TARGET_NR_shmget:
10596 return get_errno(shmget(arg1, arg2, arg3));
10597 #endif
10598 #ifdef TARGET_NR_shmctl
10599 case TARGET_NR_shmctl:
10600 return do_shmctl(arg1, arg2, arg3);
10601 #endif
10602 #ifdef TARGET_NR_shmat
10603 case TARGET_NR_shmat:
10604 return do_shmat(cpu_env, arg1, arg2, arg3);
10605 #endif
10606 #ifdef TARGET_NR_shmdt
10607 case TARGET_NR_shmdt:
10608 return do_shmdt(arg1);
10609 #endif
10610 case TARGET_NR_fsync:
10611 return get_errno(fsync(arg1));
10612 case TARGET_NR_clone:
10613 /* Linux manages to have three different orderings for its
10614 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10615 * match the kernel's CONFIG_CLONE_* settings.
10616 * Microblaze is further special in that it uses a sixth
10617 * implicit argument to clone for the TLS pointer.
10618 */
10619 #if defined(TARGET_MICROBLAZE)
10620 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10621 #elif defined(TARGET_CLONE_BACKWARDS)
10622 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10623 #elif defined(TARGET_CLONE_BACKWARDS2)
10624 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10625 #else
10626 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10627 #endif
10628 return ret;
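    /*
     * The guest-register orderings selected above, spelled out:
     *
     *     default:     clone(flags, newsp, parent_tidptr, child_tidptr, tls)
     *     BACKWARDS:   clone(flags, newsp, parent_tidptr, tls, child_tidptr)
     *     BACKWARDS2:  clone(newsp, flags, parent_tidptr, child_tidptr, tls)
     *     Microblaze:  clone(flags, newsp, stack_size, parent_tidptr,
     *                        child_tidptr, tls)
     *
     * do_fork() always takes (flags, newsp, parent_tidptr, tls,
     * child_tidptr), hence the per-target argument shuffling.
     */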
10629 #ifdef __NR_exit_group
10630 /* new thread calls */
10631 case TARGET_NR_exit_group:
10632 preexit_cleanup(cpu_env, arg1);
10633 return get_errno(exit_group(arg1));
10634 #endif
10635 case TARGET_NR_setdomainname:
10636 if (!(p = lock_user_string(arg1)))
10637 return -TARGET_EFAULT;
10638 ret = get_errno(setdomainname(p, arg2));
10639 unlock_user(p, arg1, 0);
10640 return ret;
10641 case TARGET_NR_uname:
10642 /* no need to transcode because we use the linux syscall */
10643 {
10644 struct new_utsname * buf;
10645
10646 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10647 return -TARGET_EFAULT;
10648 ret = get_errno(sys_uname(buf));
10649 if (!is_error(ret)) {
10650 /* Overwrite the native machine name with whatever is being
10651 emulated. */
10652 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10653 sizeof(buf->machine));
10654 /* Allow the user to override the reported release. */
10655 if (qemu_uname_release && *qemu_uname_release) {
10656 g_strlcpy(buf->release, qemu_uname_release,
10657 sizeof(buf->release));
10658 }
10659 }
10660 unlock_user_struct(buf, arg1, 1);
10661 }
10662 return ret;
10663 #ifdef TARGET_I386
10664 case TARGET_NR_modify_ldt:
10665 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10666 #if !defined(TARGET_X86_64)
10667 case TARGET_NR_vm86:
10668 return do_vm86(cpu_env, arg1, arg2);
10669 #endif
10670 #endif
10671 #if defined(TARGET_NR_adjtimex)
10672 case TARGET_NR_adjtimex:
10673 {
10674 struct timex host_buf;
10675
10676 if (target_to_host_timex(&host_buf, arg1) != 0) {
10677 return -TARGET_EFAULT;
10678 }
10679 ret = get_errno(adjtimex(&host_buf));
10680 if (!is_error(ret)) {
10681 if (host_to_target_timex(arg1, &host_buf) != 0) {
10682 return -TARGET_EFAULT;
10683 }
10684 }
10685 }
10686 return ret;
10687 #endif
10688 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10689 case TARGET_NR_clock_adjtime:
10690 {
10691 struct timex htx, *phtx = &htx;
10692
10693 if (target_to_host_timex(phtx, arg2) != 0) {
10694 return -TARGET_EFAULT;
10695 }
10696 ret = get_errno(clock_adjtime(arg1, phtx));
10697 if (!is_error(ret) && phtx) {
10698 if (host_to_target_timex(arg2, phtx) != 0) {
10699 return -TARGET_EFAULT;
10700 }
10701 }
10702 }
10703 return ret;
10704 #endif
10705 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10706 case TARGET_NR_clock_adjtime64:
10707 {
10708 struct timex htx;
10709
10710 if (target_to_host_timex64(&htx, arg2) != 0) {
10711 return -TARGET_EFAULT;
10712 }
10713 ret = get_errno(clock_adjtime(arg1, &htx));
10714 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10715 return -TARGET_EFAULT;
10716 }
10717 }
10718 return ret;
10719 #endif
10720 case TARGET_NR_getpgid:
10721 return get_errno(getpgid(arg1));
10722 case TARGET_NR_fchdir:
10723 return get_errno(fchdir(arg1));
10724 case TARGET_NR_personality:
10725 return get_errno(personality(arg1));
10726 #ifdef TARGET_NR__llseek /* Not on alpha */
10727 case TARGET_NR__llseek:
10728 {
10729 int64_t res;
10730 #if !defined(__NR_llseek)
10731 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10732 if (res == -1) {
10733 ret = get_errno(res);
10734 } else {
10735 ret = 0;
10736 }
10737 #else
10738 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10739 #endif
10740 if ((ret == 0) && put_user_s64(res, arg4)) {
10741 return -TARGET_EFAULT;
10742 }
10743 }
10744 return ret;
10745 #endif
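    /*
     * _llseek passes the 64-bit offset as two 32-bit halves; the
     * fallback above reassembles them as
     *
     *     offset = ((uint64_t)arg2 << 32) | (abi_ulong)arg3;
     *
     * with arg2 the high word and arg3 the low word, and writes the
     * resulting 64-bit position back through the pointer in arg4.
     */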
10746 #ifdef TARGET_NR_getdents
10747 case TARGET_NR_getdents:
10748 return do_getdents(arg1, arg2, arg3);
10749 #endif /* TARGET_NR_getdents */
10750 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10751 case TARGET_NR_getdents64:
10752 return do_getdents64(arg1, arg2, arg3);
10753 #endif /* TARGET_NR_getdents64 */
10754 #if defined(TARGET_NR__newselect)
10755 case TARGET_NR__newselect:
10756 return do_select(arg1, arg2, arg3, arg4, arg5);
10757 #endif
10758 #ifdef TARGET_NR_poll
10759 case TARGET_NR_poll:
10760 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10761 #endif
10762 #ifdef TARGET_NR_ppoll
10763 case TARGET_NR_ppoll:
10764 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10765 #endif
10766 #ifdef TARGET_NR_ppoll_time64
10767 case TARGET_NR_ppoll_time64:
10768 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10769 #endif
10770 case TARGET_NR_flock:
10771         /* NOTE: the flock constants seem to be the same for every
10772            Linux platform */
10773 return get_errno(safe_flock(arg1, arg2));
10774 case TARGET_NR_readv:
10775 {
10776 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10777 if (vec != NULL) {
10778 ret = get_errno(safe_readv(arg1, vec, arg3));
10779 unlock_iovec(vec, arg2, arg3, 1);
10780 } else {
10781 ret = -host_to_target_errno(errno);
10782 }
10783 }
10784 return ret;
10785 case TARGET_NR_writev:
10786 {
10787 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10788 if (vec != NULL) {
10789 ret = get_errno(safe_writev(arg1, vec, arg3));
10790 unlock_iovec(vec, arg2, arg3, 0);
10791 } else {
10792 ret = -host_to_target_errno(errno);
10793 }
10794 }
10795 return ret;
10796 #if defined(TARGET_NR_preadv)
10797 case TARGET_NR_preadv:
10798 {
10799 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10800 if (vec != NULL) {
10801 unsigned long low, high;
10802
10803 target_to_host_low_high(arg4, arg5, &low, &high);
10804 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10805 unlock_iovec(vec, arg2, arg3, 1);
10806 } else {
10807 ret = -host_to_target_errno(errno);
10808 }
10809 }
10810 return ret;
10811 #endif
10812 #if defined(TARGET_NR_pwritev)
10813 case TARGET_NR_pwritev:
10814 {
10815 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10816 if (vec != NULL) {
10817 unsigned long low, high;
10818
10819 target_to_host_low_high(arg4, arg5, &low, &high);
10820 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10821 unlock_iovec(vec, arg2, arg3, 0);
10822 } else {
10823 ret = -host_to_target_errno(errno);
10824 }
10825 }
10826 return ret;
10827 #endif
10828 case TARGET_NR_getsid:
10829 return get_errno(getsid(arg1));
10830 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10831 case TARGET_NR_fdatasync:
10832 return get_errno(fdatasync(arg1));
10833 #endif
10834 case TARGET_NR_sched_getaffinity:
10835 {
10836 unsigned int mask_size;
10837 unsigned long *mask;
10838
10839 /*
10840              * sched_getaffinity needs multiples of ulong, so we need to take
10841              * care of mismatches between target ulong and host ulong sizes.
10842 */
10843 if (arg2 & (sizeof(abi_ulong) - 1)) {
10844 return -TARGET_EINVAL;
10845 }
10846 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10847
10848 mask = alloca(mask_size);
10849 memset(mask, 0, mask_size);
10850 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10851
10852 if (!is_error(ret)) {
10853 if (ret > arg2) {
10854 /* More data returned than the caller's buffer will fit.
10855 * This only happens if sizeof(abi_long) < sizeof(long)
10856 * and the caller passed us a buffer holding an odd number
10857 * of abi_longs. If the host kernel is actually using the
10858 * extra 4 bytes then fail EINVAL; otherwise we can just
10859 * ignore them and only copy the interesting part.
10860 */
10861 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10862 if (numcpus > arg2 * 8) {
10863 return -TARGET_EINVAL;
10864 }
10865 ret = arg2;
10866 }
10867
10868 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10869 return -TARGET_EFAULT;
10870 }
10871 }
10872 }
10873 return ret;
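    /*
     * Worked example of the mask_size rounding above for a 32-bit guest
     * (abi_ulong == 4) on a 64-bit host (sizeof(*mask) == 8):
     *
     *     arg2 == 4   ->  mask_size == 8
     *     arg2 == 12  ->  mask_size == 16
     *
     * The kernel may then report more bytes than the guest asked for,
     * which is what the 'ret > arg2' trimming above deals with.
     */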
10874 case TARGET_NR_sched_setaffinity:
10875 {
10876 unsigned int mask_size;
10877 unsigned long *mask;
10878
10879 /*
10880              * sched_setaffinity needs multiples of ulong, so we need to take
10881              * care of mismatches between target ulong and host ulong sizes.
10882 */
10883 if (arg2 & (sizeof(abi_ulong) - 1)) {
10884 return -TARGET_EINVAL;
10885 }
10886 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10887 mask = alloca(mask_size);
10888
10889 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10890 if (ret) {
10891 return ret;
10892 }
10893
10894 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10895 }
10896 case TARGET_NR_getcpu:
10897 {
10898 unsigned cpu, node;
10899 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10900 arg2 ? &node : NULL,
10901 NULL));
10902 if (is_error(ret)) {
10903 return ret;
10904 }
10905 if (arg1 && put_user_u32(cpu, arg1)) {
10906 return -TARGET_EFAULT;
10907 }
10908 if (arg2 && put_user_u32(node, arg2)) {
10909 return -TARGET_EFAULT;
10910 }
10911 }
10912 return ret;
10913 case TARGET_NR_sched_setparam:
10914 {
10915 struct target_sched_param *target_schp;
10916 struct sched_param schp;
10917
10918 if (arg2 == 0) {
10919 return -TARGET_EINVAL;
10920 }
10921 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10922 return -TARGET_EFAULT;
10923 }
10924 schp.sched_priority = tswap32(target_schp->sched_priority);
10925 unlock_user_struct(target_schp, arg2, 0);
10926 return get_errno(sys_sched_setparam(arg1, &schp));
10927 }
10928 case TARGET_NR_sched_getparam:
10929 {
10930 struct target_sched_param *target_schp;
10931 struct sched_param schp;
10932
10933 if (arg2 == 0) {
10934 return -TARGET_EINVAL;
10935 }
10936 ret = get_errno(sys_sched_getparam(arg1, &schp));
10937 if (!is_error(ret)) {
10938 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10939 return -TARGET_EFAULT;
10940 }
10941 target_schp->sched_priority = tswap32(schp.sched_priority);
10942 unlock_user_struct(target_schp, arg2, 1);
10943 }
10944 }
10945 return ret;
10946 case TARGET_NR_sched_setscheduler:
10947 {
10948 struct target_sched_param *target_schp;
10949 struct sched_param schp;
10950 if (arg3 == 0) {
10951 return -TARGET_EINVAL;
10952 }
10953 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10954 return -TARGET_EFAULT;
10955 }
10956 schp.sched_priority = tswap32(target_schp->sched_priority);
10957 unlock_user_struct(target_schp, arg3, 0);
10958 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10959 }
10960 case TARGET_NR_sched_getscheduler:
10961 return get_errno(sys_sched_getscheduler(arg1));
10962 case TARGET_NR_sched_getattr:
10963 {
10964 struct target_sched_attr *target_scha;
10965 struct sched_attr scha;
10966 if (arg2 == 0) {
10967 return -TARGET_EINVAL;
10968 }
10969 if (arg3 > sizeof(scha)) {
10970 arg3 = sizeof(scha);
10971 }
10972 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10973 if (!is_error(ret)) {
10974 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10975 if (!target_scha) {
10976 return -TARGET_EFAULT;
10977 }
10978 target_scha->size = tswap32(scha.size);
10979 target_scha->sched_policy = tswap32(scha.sched_policy);
10980 target_scha->sched_flags = tswap64(scha.sched_flags);
10981 target_scha->sched_nice = tswap32(scha.sched_nice);
10982 target_scha->sched_priority = tswap32(scha.sched_priority);
10983 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10984 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10985 target_scha->sched_period = tswap64(scha.sched_period);
10986 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10987 target_scha->sched_util_min = tswap32(scha.sched_util_min);
10988 target_scha->sched_util_max = tswap32(scha.sched_util_max);
10989 }
10990 unlock_user(target_scha, arg2, arg3);
10991 }
10992 return ret;
10993 }
10994 case TARGET_NR_sched_setattr:
10995 {
10996 struct target_sched_attr *target_scha;
10997 struct sched_attr scha;
10998 uint32_t size;
10999 int zeroed;
11000 if (arg2 == 0) {
11001 return -TARGET_EINVAL;
11002 }
11003 if (get_user_u32(size, arg2)) {
11004 return -TARGET_EFAULT;
11005 }
11006 if (!size) {
11007 size = offsetof(struct target_sched_attr, sched_util_min);
11008 }
11009 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11010 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11011 return -TARGET_EFAULT;
11012 }
11013 return -TARGET_E2BIG;
11014 }
11015
11016 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11017 if (zeroed < 0) {
11018 return zeroed;
11019 } else if (zeroed == 0) {
11020 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11021 return -TARGET_EFAULT;
11022 }
11023 return -TARGET_E2BIG;
11024 }
11025 if (size > sizeof(struct target_sched_attr)) {
11026 size = sizeof(struct target_sched_attr);
11027 }
11028
11029 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11030 if (!target_scha) {
11031 return -TARGET_EFAULT;
11032 }
11033 scha.size = size;
11034 scha.sched_policy = tswap32(target_scha->sched_policy);
11035 scha.sched_flags = tswap64(target_scha->sched_flags);
11036 scha.sched_nice = tswap32(target_scha->sched_nice);
11037 scha.sched_priority = tswap32(target_scha->sched_priority);
11038 scha.sched_runtime = tswap64(target_scha->sched_runtime);
11039 scha.sched_deadline = tswap64(target_scha->sched_deadline);
11040 scha.sched_period = tswap64(target_scha->sched_period);
11041 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11042 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11043 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11044 }
11045 unlock_user(target_scha, arg2, 0);
11046 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11047 }
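    /*
     * sched_setattr() uses a sized, forward-compatible struct: the
     * caller stores its idea of the struct size in the first field, and
     * unknown trailing bytes must be zero or the call fails with E2BIG
     * and the supported size written back, as implemented above.
     * Guest-side sketch using the raw syscall, since glibc historically
     * provides no wrapper (illustrative only):
     *
     *     struct sched_attr attr = {
     *         .size = sizeof(attr),          // negotiated as above
     *         .sched_policy = SCHED_FIFO,
     *         .sched_priority = 10,
     *     };
     *     syscall(__NR_sched_setattr, 0, &attr, 0);   // 0 = this thread
     */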
11048 case TARGET_NR_sched_yield:
11049 return get_errno(sched_yield());
11050 case TARGET_NR_sched_get_priority_max:
11051 return get_errno(sched_get_priority_max(arg1));
11052 case TARGET_NR_sched_get_priority_min:
11053 return get_errno(sched_get_priority_min(arg1));
11054 #ifdef TARGET_NR_sched_rr_get_interval
11055 case TARGET_NR_sched_rr_get_interval:
11056 {
11057 struct timespec ts;
11058 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11059 if (!is_error(ret)) {
11060 ret = host_to_target_timespec(arg2, &ts);
11061 }
11062 }
11063 return ret;
11064 #endif
11065 #ifdef TARGET_NR_sched_rr_get_interval_time64
11066 case TARGET_NR_sched_rr_get_interval_time64:
11067 {
11068 struct timespec ts;
11069 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11070 if (!is_error(ret)) {
11071 ret = host_to_target_timespec64(arg2, &ts);
11072 }
11073 }
11074 return ret;
11075 #endif
11076 #if defined(TARGET_NR_nanosleep)
11077 case TARGET_NR_nanosleep:
11078 {
11079 struct timespec req, rem;
11080             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
11081 ret = get_errno(safe_nanosleep(&req, &rem));
11082 if (is_error(ret) && arg2) {
11083 host_to_target_timespec(arg2, &rem);
11084 }
11085 }
11086 return ret;
11087 #endif
11088 case TARGET_NR_prctl:
11089         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11091 #ifdef TARGET_NR_arch_prctl
11092 case TARGET_NR_arch_prctl:
11093 return do_arch_prctl(cpu_env, arg1, arg2);
11094 #endif
11095 #ifdef TARGET_NR_pread64
11096 case TARGET_NR_pread64:
11097 if (regpairs_aligned(cpu_env, num)) {
11098 arg4 = arg5;
11099 arg5 = arg6;
11100 }
11101 if (arg2 == 0 && arg3 == 0) {
11102 /* Special-case NULL buffer and zero length, which should succeed */
11103 p = 0;
11104 } else {
11105 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11106 if (!p) {
11107 return -TARGET_EFAULT;
11108 }
11109 }
11110 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11111 unlock_user(p, arg2, ret);
11112 return ret;
11113 case TARGET_NR_pwrite64:
11114 if (regpairs_aligned(cpu_env, num)) {
11115 arg4 = arg5;
11116 arg5 = arg6;
11117 }
11118 if (arg2 == 0 && arg3 == 0) {
11119 /* Special-case NULL buffer and zero length, which should succeed */
11120 p = 0;
11121 } else {
11122 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11123 if (!p) {
11124 return -TARGET_EFAULT;
11125 }
11126 }
11127 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11128 unlock_user(p, arg2, 0);
11129 return ret;
11130 #endif
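    /*
     * On 32-bit targets the 64-bit offset of pread64/pwrite64 travels in
     * a register pair, and some ABIs require that pair to start on an
     * even-numbered register, inserting one padding argument. That is
     * what regpairs_aligned() detects and why arg4/arg5 slide over from
     * arg5/arg6 above before target_offset64() glues the halves back
     * together.
     */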
11131 case TARGET_NR_getcwd:
11132 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11133 return -TARGET_EFAULT;
11134 ret = get_errno(sys_getcwd1(p, arg2));
11135 unlock_user(p, arg1, ret);
11136 return ret;
11137 case TARGET_NR_capget:
11138 case TARGET_NR_capset:
11139 {
11140 struct target_user_cap_header *target_header;
11141 struct target_user_cap_data *target_data = NULL;
11142 struct __user_cap_header_struct header;
11143 struct __user_cap_data_struct data[2];
11144 struct __user_cap_data_struct *dataptr = NULL;
11145 int i, target_datalen;
11146 int data_items = 1;
11147
11148 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11149 return -TARGET_EFAULT;
11150 }
11151 header.version = tswap32(target_header->version);
11152 header.pid = tswap32(target_header->pid);
11153
11154 if (header.version != _LINUX_CAPABILITY_VERSION) {
11155             /* Versions 2 and up take a pointer to two user_data structs */
11156 data_items = 2;
11157 }
11158
11159 target_datalen = sizeof(*target_data) * data_items;
11160
11161 if (arg2) {
11162 if (num == TARGET_NR_capget) {
11163 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11164 } else {
11165 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11166 }
11167 if (!target_data) {
11168 unlock_user_struct(target_header, arg1, 0);
11169 return -TARGET_EFAULT;
11170 }
11171
11172 if (num == TARGET_NR_capset) {
11173 for (i = 0; i < data_items; i++) {
11174 data[i].effective = tswap32(target_data[i].effective);
11175 data[i].permitted = tswap32(target_data[i].permitted);
11176 data[i].inheritable = tswap32(target_data[i].inheritable);
11177 }
11178 }
11179
11180 dataptr = data;
11181 }
11182
11183 if (num == TARGET_NR_capget) {
11184 ret = get_errno(capget(&header, dataptr));
11185 } else {
11186 ret = get_errno(capset(&header, dataptr));
11187 }
11188
11189 /* The kernel always updates version for both capget and capset */
11190 target_header->version = tswap32(header.version);
11191 unlock_user_struct(target_header, arg1, 1);
11192
11193 if (arg2) {
11194 if (num == TARGET_NR_capget) {
11195 for (i = 0; i < data_items; i++) {
11196 target_data[i].effective = tswap32(data[i].effective);
11197 target_data[i].permitted = tswap32(data[i].permitted);
11198 target_data[i].inheritable = tswap32(data[i].inheritable);
11199 }
11200 unlock_user(target_data, arg2, target_datalen);
11201 } else {
11202 unlock_user(target_data, arg2, 0);
11203 }
11204 }
11205 return ret;
11206 }
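    /*
     * Guest-side sketch of a capget() that takes the two-item path
     * above: any header version other than _LINUX_CAPABILITY_VERSION
     * (v1) makes the code expect an array of two data structs, the
     * second carrying capability bits 32..63 (illustrative only):
     *
     *     #include <linux/capability.h>
     *
     *     struct __user_cap_header_struct hdr = {
     *         .version = _LINUX_CAPABILITY_VERSION_3,
     *         .pid = 0,                      // 0 = calling thread
     *     };
     *     struct __user_cap_data_struct data[2];
     *     syscall(__NR_capget, &hdr, data);
     */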
11207 case TARGET_NR_sigaltstack:
11208 return do_sigaltstack(arg1, arg2, cpu_env);
11209
11210 #ifdef CONFIG_SENDFILE
11211 #ifdef TARGET_NR_sendfile
11212 case TARGET_NR_sendfile:
11213 {
11214 off_t *offp = NULL;
11215 off_t off;
11216 if (arg3) {
11217 ret = get_user_sal(off, arg3);
11218 if (is_error(ret)) {
11219 return ret;
11220 }
11221 offp = &off;
11222 }
11223 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11224 if (!is_error(ret) && arg3) {
11225 abi_long ret2 = put_user_sal(off, arg3);
11226 if (is_error(ret2)) {
11227 ret = ret2;
11228 }
11229 }
11230 return ret;
11231 }
11232 #endif
11233 #ifdef TARGET_NR_sendfile64
11234 case TARGET_NR_sendfile64:
11235 {
11236 off_t *offp = NULL;
11237 off_t off;
11238 if (arg3) {
11239 ret = get_user_s64(off, arg3);
11240 if (is_error(ret)) {
11241 return ret;
11242 }
11243 offp = &off;
11244 }
11245 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11246 if (!is_error(ret) && arg3) {
11247 abi_long ret2 = put_user_s64(off, arg3);
11248 if (is_error(ret2)) {
11249 ret = ret2;
11250 }
11251 }
11252 return ret;
11253 }
11254 #endif
11255 #endif
11256 #ifdef TARGET_NR_vfork
11257 case TARGET_NR_vfork:
11258 return get_errno(do_fork(cpu_env,
11259 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11260 0, 0, 0, 0));
11261 #endif
11262 #ifdef TARGET_NR_ugetrlimit
11263 case TARGET_NR_ugetrlimit:
11264 {
11265 struct rlimit rlim;
11266 int resource = target_to_host_resource(arg1);
11267 ret = get_errno(getrlimit(resource, &rlim));
11268 if (!is_error(ret)) {
11269 struct target_rlimit *target_rlim;
11270 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11271 return -TARGET_EFAULT;
11272 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11273 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11274 unlock_user_struct(target_rlim, arg2, 1);
11275 }
11276 return ret;
11277 }
11278 #endif
11279 #ifdef TARGET_NR_truncate64
11280 case TARGET_NR_truncate64:
11281 if (!(p = lock_user_string(arg1)))
11282 return -TARGET_EFAULT;
11283 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11284 unlock_user(p, arg1, 0);
11285 return ret;
11286 #endif
11287 #ifdef TARGET_NR_ftruncate64
11288 case TARGET_NR_ftruncate64:
11289 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11290 #endif
11291 #ifdef TARGET_NR_stat64
11292 case TARGET_NR_stat64:
11293 if (!(p = lock_user_string(arg1))) {
11294 return -TARGET_EFAULT;
11295 }
11296 ret = get_errno(stat(path(p), &st));
11297 unlock_user(p, arg1, 0);
11298 if (!is_error(ret))
11299 ret = host_to_target_stat64(cpu_env, arg2, &st);
11300 return ret;
11301 #endif
11302 #ifdef TARGET_NR_lstat64
11303 case TARGET_NR_lstat64:
11304 if (!(p = lock_user_string(arg1))) {
11305 return -TARGET_EFAULT;
11306 }
11307 ret = get_errno(lstat(path(p), &st));
11308 unlock_user(p, arg1, 0);
11309 if (!is_error(ret))
11310 ret = host_to_target_stat64(cpu_env, arg2, &st);
11311 return ret;
11312 #endif
11313 #ifdef TARGET_NR_fstat64
11314 case TARGET_NR_fstat64:
11315 ret = get_errno(fstat(arg1, &st));
11316 if (!is_error(ret))
11317 ret = host_to_target_stat64(cpu_env, arg2, &st);
11318 return ret;
11319 #endif
11320 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11321 #ifdef TARGET_NR_fstatat64
11322 case TARGET_NR_fstatat64:
11323 #endif
11324 #ifdef TARGET_NR_newfstatat
11325 case TARGET_NR_newfstatat:
11326 #endif
11327 if (!(p = lock_user_string(arg2))) {
11328 return -TARGET_EFAULT;
11329 }
11330 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11331 unlock_user(p, arg2, 0);
11332 if (!is_error(ret))
11333 ret = host_to_target_stat64(cpu_env, arg3, &st);
11334 return ret;
11335 #endif
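    /*
     * statx: use the host statx syscall when the build environment
     * provides one; if the host kernel reports ENOSYS, fall back to
     * fstatat() and synthesize a target_statx from the fields that a
     * plain struct stat can supply.
     */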
11336 #if defined(TARGET_NR_statx)
11337 case TARGET_NR_statx:
11338 {
11339 struct target_statx *target_stx;
11340 int dirfd = arg1;
11341 int flags = arg3;
11342
11343 p = lock_user_string(arg2);
11344 if (p == NULL) {
11345 return -TARGET_EFAULT;
11346 }
11347 #if defined(__NR_statx)
11348 {
11349 /*
11350 * It is assumed that struct statx is architecture independent.
11351 */
11352 struct target_statx host_stx;
11353 int mask = arg4;
11354
11355 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11356 if (!is_error(ret)) {
11357 if (host_to_target_statx(&host_stx, arg5) != 0) {
11358 unlock_user(p, arg2, 0);
11359 return -TARGET_EFAULT;
11360 }
11361 }
11362
11363 if (ret != -TARGET_ENOSYS) {
11364 unlock_user(p, arg2, 0);
11365 return ret;
11366 }
11367 }
11368 #endif
11369 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11370 unlock_user(p, arg2, 0);
11371
11372 if (!is_error(ret)) {
11373 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11374 return -TARGET_EFAULT;
11375 }
11376 memset(target_stx, 0, sizeof(*target_stx));
11377 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11378 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11379 __put_user(st.st_ino, &target_stx->stx_ino);
11380 __put_user(st.st_mode, &target_stx->stx_mode);
11381 __put_user(st.st_uid, &target_stx->stx_uid);
11382 __put_user(st.st_gid, &target_stx->stx_gid);
11383 __put_user(st.st_nlink, &target_stx->stx_nlink);
11384 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11385 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11386 __put_user(st.st_size, &target_stx->stx_size);
11387 __put_user(st.st_blksize, &target_stx->stx_blksize);
11388 __put_user(st.st_blocks, &target_stx->stx_blocks);
11389 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11390 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11391 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11392 unlock_user_struct(target_stx, arg5, 1);
11393 }
11394 }
11395 return ret;
11396 #endif
11397 #ifdef TARGET_NR_lchown
11398 case TARGET_NR_lchown:
11399 if (!(p = lock_user_string(arg1)))
11400 return -TARGET_EFAULT;
11401 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11402 unlock_user(p, arg1, 0);
11403 return ret;
11404 #endif
11405 #ifdef TARGET_NR_getuid
11406 case TARGET_NR_getuid:
11407 return get_errno(high2lowuid(getuid()));
11408 #endif
11409 #ifdef TARGET_NR_getgid
11410 case TARGET_NR_getgid:
11411 return get_errno(high2lowgid(getgid()));
11412 #endif
11413 #ifdef TARGET_NR_geteuid
11414 case TARGET_NR_geteuid:
11415 return get_errno(high2lowuid(geteuid()));
11416 #endif
11417 #ifdef TARGET_NR_getegid
11418 case TARGET_NR_getegid:
11419 return get_errno(high2lowgid(getegid()));
11420 #endif
11421 case TARGET_NR_setreuid:
11422 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11423 case TARGET_NR_setregid:
11424 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
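    /*
     * The legacy group-list syscalls traffic in the target's narrow
     * (possibly 16-bit) IDs: target_id/tswapid handle width and byte
     * order, and high2lowgid clamps host gids that do not fit.  The *32
     * variants further down pass 32-bit IDs through unchanged.
     */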
11425 case TARGET_NR_getgroups:
11426 {
11427 int gidsetsize = arg1;
11428 target_id *target_grouplist;
11429 gid_t *grouplist;
11430 int i;
11431
11432 grouplist = alloca(gidsetsize * sizeof(gid_t));
11433 ret = get_errno(getgroups(gidsetsize, grouplist));
11434 if (gidsetsize == 0)
11435 return ret;
11436 if (!is_error(ret)) {
11437 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11438 if (!target_grouplist)
11439 return -TARGET_EFAULT;
11440 for (i = 0; i < ret; i++)
11441 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11442 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11443 }
11444 }
11445 return ret;
11446 case TARGET_NR_setgroups:
11447 {
11448 int gidsetsize = arg1;
11449 target_id *target_grouplist;
11450 gid_t *grouplist = NULL;
11451 int i;
11452 if (gidsetsize) {
11453 grouplist = alloca(gidsetsize * sizeof(gid_t));
11454 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11455 if (!target_grouplist) {
11456 return -TARGET_EFAULT;
11457 }
11458 for (i = 0; i < gidsetsize; i++) {
11459 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11460 }
11461 unlock_user(target_grouplist, arg2, 0);
11462 }
11463 return get_errno(setgroups(gidsetsize, grouplist));
11464 }
11465 case TARGET_NR_fchown:
11466 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11467 #if defined(TARGET_NR_fchownat)
11468 case TARGET_NR_fchownat:
11469 if (!(p = lock_user_string(arg2)))
11470 return -TARGET_EFAULT;
11471 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11472 low2highgid(arg4), arg5));
11473 unlock_user(p, arg2, 0);
11474 return ret;
11475 #endif
11476 #ifdef TARGET_NR_setresuid
11477 case TARGET_NR_setresuid:
11478 return get_errno(sys_setresuid(low2highuid(arg1),
11479 low2highuid(arg2),
11480 low2highuid(arg3)));
11481 #endif
11482 #ifdef TARGET_NR_getresuid
11483 case TARGET_NR_getresuid:
11484 {
11485 uid_t ruid, euid, suid;
11486 ret = get_errno(getresuid(&ruid, &euid, &suid));
11487 if (!is_error(ret)) {
11488 if (put_user_id(high2lowuid(ruid), arg1)
11489 || put_user_id(high2lowuid(euid), arg2)
11490 || put_user_id(high2lowuid(suid), arg3))
11491 return -TARGET_EFAULT;
11492 }
11493 }
11494 return ret;
11495 #endif
11496 #ifdef TARGET_NR_setresgid
11497 case TARGET_NR_setresgid:
11498 return get_errno(sys_setresgid(low2highgid(arg1),
11499 low2highgid(arg2),
11500 low2highgid(arg3)));
11501 #endif
11502 #ifdef TARGET_NR_getresgid
11503 case TARGET_NR_getresgid:
11504 {
11505 gid_t rgid, egid, sgid;
11506 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11507 if (!is_error(ret)) {
11508 if (put_user_id(high2lowgid(rgid), arg1)
11509 || put_user_id(high2lowgid(egid), arg2)
11510 || put_user_id(high2lowgid(sgid), arg3))
11511 return -TARGET_EFAULT;
11512 }
11513 }
11514 return ret;
11515 #endif
11516 #ifdef TARGET_NR_chown
11517 case TARGET_NR_chown:
11518 if (!(p = lock_user_string(arg1)))
11519 return -TARGET_EFAULT;
11520 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11521 unlock_user(p, arg1, 0);
11522 return ret;
11523 #endif
11524 case TARGET_NR_setuid:
11525 return get_errno(sys_setuid(low2highuid(arg1)));
11526 case TARGET_NR_setgid:
11527 return get_errno(sys_setgid(low2highgid(arg1)));
11528 case TARGET_NR_setfsuid:
11529 return get_errno(setfsuid(arg1));
11530 case TARGET_NR_setfsgid:
11531 return get_errno(setfsgid(arg1));
11532
11533 #ifdef TARGET_NR_lchown32
11534 case TARGET_NR_lchown32:
11535 if (!(p = lock_user_string(arg1)))
11536 return -TARGET_EFAULT;
11537 ret = get_errno(lchown(p, arg2, arg3));
11538 unlock_user(p, arg1, 0);
11539 return ret;
11540 #endif
11541 #ifdef TARGET_NR_getuid32
11542 case TARGET_NR_getuid32:
11543 return get_errno(getuid());
11544 #endif
11545
11546 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11547 /* Alpha specific */
11548 case TARGET_NR_getxuid:
11549 {
11550 uid_t euid;
11551 euid = geteuid();
11552 cpu_env->ir[IR_A4] = euid;
11553 }
11554 return get_errno(getuid());
11555 #endif
11556 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11557 /* Alpha specific */
11558 case TARGET_NR_getxgid:
11559 {
11560 gid_t egid;
11561 egid = getegid();
11562 cpu_env->ir[IR_A4] = egid;
11563 }
11564 return get_errno(getgid());
11565 #endif
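    /*
     * Alpha reports IEEE FP state through a software completion register
     * (SWCR) whose status bits shadow the hardware FPCR, so
     * GSI_IEEE_FP_CONTROL rebuilds the guest-visible value by merging the
     * saved trap-enable bits with status extracted from the current FPCR,
     * mirroring the kernel's swcr_update_status().
     */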
11566 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11567 /* Alpha specific */
11568 case TARGET_NR_osf_getsysinfo:
11569 ret = -TARGET_EOPNOTSUPP;
11570 switch (arg1) {
11571 case TARGET_GSI_IEEE_FP_CONTROL:
11572 {
11573 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11574 uint64_t swcr = cpu_env->swcr;
11575
11576 swcr &= ~SWCR_STATUS_MASK;
11577 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11578
11579 if (put_user_u64(swcr, arg2))
11580 return -TARGET_EFAULT;
11581 ret = 0;
11582 }
11583 break;
11584
11585 /* case GSI_IEEE_STATE_AT_SIGNAL:
11586 -- Not implemented in linux kernel.
11587 case GSI_UACPROC:
11588 -- Retrieves current unaligned access state; not much used.
11589 case GSI_PROC_TYPE:
11590 -- Retrieves implver information; surely not used.
11591 case GSI_GET_HWRPB:
11592 -- Grabs a copy of the HWRPB; surely not used.
11593 */
11594 }
11595 return ret;
11596 #endif
11597 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11598 /* Alpha specific */
11599 case TARGET_NR_osf_setsysinfo:
11600 ret = -TARGET_EOPNOTSUPP;
11601 switch (arg1) {
11602 case TARGET_SSI_IEEE_FP_CONTROL:
11603 {
11604 uint64_t swcr, fpcr;
11605
11606 if (get_user_u64(swcr, arg2)) {
11607 return -TARGET_EFAULT;
11608 }
11609
11610 /*
11611 * The kernel calls swcr_update_status to update the
11612 * status bits from the fpcr at every point that it
11613 * could be queried. Therefore, we store the status
11614 * bits only in FPCR.
11615 */
11616 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11617
11618 fpcr = cpu_alpha_load_fpcr(cpu_env);
11619 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11620 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11621 cpu_alpha_store_fpcr(cpu_env, fpcr);
11622 ret = 0;
11623 }
11624 break;
11625
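        /*
         * SSI_IEEE_RAISE_EXCEPTION folds the requested exceptions into
         * the FPCR and delivers SIGFPE only for exceptions that are both
         * newly raised and trap-enabled in the SWCR.  The si_code tests
         * below run in increasing priority, so a later match (e.g.
         * invalid operation) overrides an earlier one.
         */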
11626 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11627 {
11628 uint64_t exc, fpcr, fex;
11629
11630 if (get_user_u64(exc, arg2)) {
11631 return -TARGET_EFAULT;
11632 }
11633 exc &= SWCR_STATUS_MASK;
11634 fpcr = cpu_alpha_load_fpcr(cpu_env);
11635
11636 /* Old exceptions are not signaled. */
11637 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11638 fex = exc & ~fex;
11639 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11640 fex &= (cpu_env)->swcr;
11641
11642 /* Update the hardware fpcr. */
11643 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11644 cpu_alpha_store_fpcr(cpu_env, fpcr);
11645
11646 if (fex) {
11647 int si_code = TARGET_FPE_FLTUNK;
11648 target_siginfo_t info;
11649
11650 if (fex & SWCR_TRAP_ENABLE_DNO) {
11651 si_code = TARGET_FPE_FLTUND;
11652 }
11653 if (fex & SWCR_TRAP_ENABLE_INE) {
11654 si_code = TARGET_FPE_FLTRES;
11655 }
11656 if (fex & SWCR_TRAP_ENABLE_UNF) {
11657 si_code = TARGET_FPE_FLTUND;
11658 }
11659 if (fex & SWCR_TRAP_ENABLE_OVF) {
11660 si_code = TARGET_FPE_FLTOVF;
11661 }
11662 if (fex & SWCR_TRAP_ENABLE_DZE) {
11663 si_code = TARGET_FPE_FLTDIV;
11664 }
11665 if (fex & SWCR_TRAP_ENABLE_INV) {
11666 si_code = TARGET_FPE_FLTINV;
11667 }
11668
11669 info.si_signo = SIGFPE;
11670 info.si_errno = 0;
11671 info.si_code = si_code;
11672 info._sifields._sigfault._addr = (cpu_env)->pc;
11673 queue_signal(cpu_env, info.si_signo,
11674 QEMU_SI_FAULT, &info);
11675 }
11676 ret = 0;
11677 }
11678 break;
11679
11680 /* case SSI_NVPAIRS:
11681 -- Used with SSIN_UACPROC to enable unaligned accesses.
11682 case SSI_IEEE_STATE_AT_SIGNAL:
11683 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11684 -- Not implemented in linux kernel
11685 */
11686 }
11687 return ret;
11688 #endif
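    /*
     * The OSF/1 flavour of sigprocmask passes the new mask by value and
     * returns the old mask as the syscall result rather than writing it
     * through a pointer.
     */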
11689 #ifdef TARGET_NR_osf_sigprocmask
11690 /* Alpha specific. */
11691 case TARGET_NR_osf_sigprocmask:
11692 {
11693 abi_ulong mask;
11694 int how;
11695 sigset_t set, oldset;
11696
11697 switch (arg1) {
11698 case TARGET_SIG_BLOCK:
11699 how = SIG_BLOCK;
11700 break;
11701 case TARGET_SIG_UNBLOCK:
11702 how = SIG_UNBLOCK;
11703 break;
11704 case TARGET_SIG_SETMASK:
11705 how = SIG_SETMASK;
11706 break;
11707 default:
11708 return -TARGET_EINVAL;
11709 }
11710 mask = arg2;
11711 target_to_host_old_sigset(&set, &mask);
11712 ret = do_sigprocmask(how, &set, &oldset);
11713 if (!ret) {
11714 host_to_target_old_sigset(&mask, &oldset);
11715 ret = mask;
11716 }
11717 }
11718 return ret;
11719 #endif
11720
11721 #ifdef TARGET_NR_getgid32
11722 case TARGET_NR_getgid32:
11723 return get_errno(getgid());
11724 #endif
11725 #ifdef TARGET_NR_geteuid32
11726 case TARGET_NR_geteuid32:
11727 return get_errno(geteuid());
11728 #endif
11729 #ifdef TARGET_NR_getegid32
11730 case TARGET_NR_getegid32:
11731 return get_errno(getegid());
11732 #endif
11733 #ifdef TARGET_NR_setreuid32
11734 case TARGET_NR_setreuid32:
11735 return get_errno(setreuid(arg1, arg2));
11736 #endif
11737 #ifdef TARGET_NR_setregid32
11738 case TARGET_NR_setregid32:
11739 return get_errno(setregid(arg1, arg2));
11740 #endif
11741 #ifdef TARGET_NR_getgroups32
11742 case TARGET_NR_getgroups32:
11743 {
11744 int gidsetsize = arg1;
11745 uint32_t *target_grouplist;
11746 gid_t *grouplist;
11747 int i;
11748
11749 grouplist = alloca(gidsetsize * sizeof(gid_t));
11750 ret = get_errno(getgroups(gidsetsize, grouplist));
11751 if (gidsetsize == 0)
11752 return ret;
11753 if (!is_error(ret)) {
11754 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11755 if (!target_grouplist) {
11756 return -TARGET_EFAULT;
11757 }
11758 for (i = 0; i < ret; i++)
11759 target_grouplist[i] = tswap32(grouplist[i]);
11760 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11761 }
11762 }
11763 return ret;
11764 #endif
11765 #ifdef TARGET_NR_setgroups32
11766 case TARGET_NR_setgroups32:
11767 {
11768 int gidsetsize = arg1;
11769 uint32_t *target_grouplist;
11770 gid_t *grouplist;
11771 int i;
11772
11773 grouplist = alloca(gidsetsize * sizeof(gid_t));
11774 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11775 if (!target_grouplist) {
11776 return -TARGET_EFAULT;
11777 }
11778 for (i = 0; i < gidsetsize; i++)
11779 grouplist[i] = tswap32(target_grouplist[i]);
11780 unlock_user(target_grouplist, arg2, 0);
11781 return get_errno(setgroups(gidsetsize, grouplist));
11782 }
11783 #endif
11784 #ifdef TARGET_NR_fchown32
11785 case TARGET_NR_fchown32:
11786 return get_errno(fchown(arg1, arg2, arg3));
11787 #endif
11788 #ifdef TARGET_NR_setresuid32
11789 case TARGET_NR_setresuid32:
11790 return get_errno(sys_setresuid(arg1, arg2, arg3));
11791 #endif
11792 #ifdef TARGET_NR_getresuid32
11793 case TARGET_NR_getresuid32:
11794 {
11795 uid_t ruid, euid, suid;
11796 ret = get_errno(getresuid(&ruid, &euid, &suid));
11797 if (!is_error(ret)) {
11798 if (put_user_u32(ruid, arg1)
11799 || put_user_u32(euid, arg2)
11800 || put_user_u32(suid, arg3))
11801 return -TARGET_EFAULT;
11802 }
11803 }
11804 return ret;
11805 #endif
11806 #ifdef TARGET_NR_setresgid32
11807 case TARGET_NR_setresgid32:
11808 return get_errno(sys_setresgid(arg1, arg2, arg3));
11809 #endif
11810 #ifdef TARGET_NR_getresgid32
11811 case TARGET_NR_getresgid32:
11812 {
11813 gid_t rgid, egid, sgid;
11814 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11815 if (!is_error(ret)) {
11816 if (put_user_u32(rgid, arg1)
11817 || put_user_u32(egid, arg2)
11818 || put_user_u32(sgid, arg3))
11819 return -TARGET_EFAULT;
11820 }
11821 }
11822 return ret;
11823 #endif
11824 #ifdef TARGET_NR_chown32
11825 case TARGET_NR_chown32:
11826 if (!(p = lock_user_string(arg1)))
11827 return -TARGET_EFAULT;
11828 ret = get_errno(chown(p, arg2, arg3));
11829 unlock_user(p, arg1, 0);
11830 return ret;
11831 #endif
11832 #ifdef TARGET_NR_setuid32
11833 case TARGET_NR_setuid32:
11834 return get_errno(sys_setuid(arg1));
11835 #endif
11836 #ifdef TARGET_NR_setgid32
11837 case TARGET_NR_setgid32:
11838 return get_errno(sys_setgid(arg1));
11839 #endif
11840 #ifdef TARGET_NR_setfsuid32
11841 case TARGET_NR_setfsuid32:
11842 return get_errno(setfsuid(arg1));
11843 #endif
11844 #ifdef TARGET_NR_setfsgid32
11845 case TARGET_NR_setfsgid32:
11846 return get_errno(setfsgid(arg1));
11847 #endif
11848 #ifdef TARGET_NR_mincore
11849 case TARGET_NR_mincore:
11850 {
11851 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11852 if (!a) {
11853 return -TARGET_ENOMEM;
11854 }
11855 p = lock_user_string(arg3);
11856 if (!p) {
11857 ret = -TARGET_EFAULT;
11858 } else {
11859 ret = get_errno(mincore(a, arg2, p));
11860 unlock_user(p, arg3, ret);
11861 }
11862 unlock_user(a, arg1, 0);
11863 }
11864 return ret;
11865 #endif
11866 #ifdef TARGET_NR_arm_fadvise64_64
11867 case TARGET_NR_arm_fadvise64_64:
11868 /* arm_fadvise64_64 looks like fadvise64_64 but
11869 * with different argument order: fd, advice, offset, len
11870 * rather than the usual fd, offset, len, advice.
11871 * Note that offset and len are both 64-bit so appear as
11872 * pairs of 32-bit registers.
11873 */
11874 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11875 target_offset64(arg5, arg6), arg2);
11876 return -host_to_target_errno(ret);
11877 #endif
11878
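    /*
     * The 32-bit fadvise variants below juggle three argument layouts:
     * PPC and Xtensa pass the advice as the second argument, other
     * 32-bit ABIs may insert a pad register before each 64-bit pair
     * (regpairs_aligned), and the 64-bit ABIs further down take offset
     * and length directly.
     */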
11879 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11880
11881 #ifdef TARGET_NR_fadvise64_64
11882 case TARGET_NR_fadvise64_64:
11883 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11884 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11885 ret = arg2;
11886 arg2 = arg3;
11887 arg3 = arg4;
11888 arg4 = arg5;
11889 arg5 = arg6;
11890 arg6 = ret;
11891 #else
11892 /* 6 args: fd, offset (high, low), len (high, low), advice */
11893 if (regpairs_aligned(cpu_env, num)) {
11894 /* offset is in (3,4), len in (5,6) and advice in 7 */
11895 arg2 = arg3;
11896 arg3 = arg4;
11897 arg4 = arg5;
11898 arg5 = arg6;
11899 arg6 = arg7;
11900 }
11901 #endif
11902 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11903 target_offset64(arg4, arg5), arg6);
11904 return -host_to_target_errno(ret);
11905 #endif
11906
11907 #ifdef TARGET_NR_fadvise64
11908 case TARGET_NR_fadvise64:
11909 /* 5 args: fd, offset (high, low), len, advice */
11910 if (regpairs_aligned(cpu_env, num)) {
11911 /* offset is in (3,4), len in 5 and advice in 6 */
11912 arg2 = arg3;
11913 arg3 = arg4;
11914 arg4 = arg5;
11915 arg5 = arg6;
11916 }
11917 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11918 return -host_to_target_errno(ret);
11919 #endif
11920
11921 #else /* not a 32-bit ABI */
11922 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11923 #ifdef TARGET_NR_fadvise64_64
11924 case TARGET_NR_fadvise64_64:
11925 #endif
11926 #ifdef TARGET_NR_fadvise64
11927 case TARGET_NR_fadvise64:
11928 #endif
11929 #ifdef TARGET_S390X
11930 switch (arg4) {
11931 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11932 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11933 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11934 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11935 default: break;
11936 }
11937 #endif
11938 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11939 #endif
11940 #endif /* end of 64-bit ABI fadvise handling */
11941
11942 #ifdef TARGET_NR_madvise
11943 case TARGET_NR_madvise:
11944 return target_madvise(arg1, arg2, arg3);
11945 #endif
11946 #ifdef TARGET_NR_fcntl64
11947 case TARGET_NR_fcntl64:
11948 {
11949 int cmd;
11950 struct flock64 fl;
11951 from_flock64_fn *copyfrom = copy_from_user_flock64;
11952 to_flock64_fn *copyto = copy_to_user_flock64;
11953
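        /*
         * ARM OABI binaries lay out struct flock64 without the padding
         * that EABI inserts before its 64-bit members, so old-ABI guests
         * need their own copy-in/copy-out helpers.
         */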
11954 #ifdef TARGET_ARM
11955 if (!cpu_env->eabi) {
11956 copyfrom = copy_from_user_oabi_flock64;
11957 copyto = copy_to_user_oabi_flock64;
11958 }
11959 #endif
11960
11961 cmd = target_to_host_fcntl_cmd(arg2);
11962 if (cmd == -TARGET_EINVAL) {
11963 return cmd;
11964 }
11965
11966 switch (arg2) {
11967 case TARGET_F_GETLK64:
11968 ret = copyfrom(&fl, arg3);
11969 if (ret) {
11970 break;
11971 }
11972 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11973 if (ret == 0) {
11974 ret = copyto(arg3, &fl);
11975 }
11976 break;
11977
11978 case TARGET_F_SETLK64:
11979 case TARGET_F_SETLKW64:
11980 ret = copyfrom(&fl, arg3);
11981 if (ret) {
11982 break;
11983 }
11984 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11985 break;
11986 default:
11987 ret = do_fcntl(arg1, arg2, arg3);
11988 break;
11989 }
11990 return ret;
11991 }
11992 #endif
11993 #ifdef TARGET_NR_cacheflush
11994 case TARGET_NR_cacheflush:
11995 /* self-modifying code is handled automatically, so nothing needed */
11996 return 0;
11997 #endif
11998 #ifdef TARGET_NR_getpagesize
11999 case TARGET_NR_getpagesize:
12000 return TARGET_PAGE_SIZE;
12001 #endif
12002 case TARGET_NR_gettid:
12003 return get_errno(sys_gettid());
12004 #ifdef TARGET_NR_readahead
12005 case TARGET_NR_readahead:
12006 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12007 if (regpairs_aligned(cpu_env, num)) {
12008 arg2 = arg3;
12009 arg3 = arg4;
12010 arg4 = arg5;
12011 }
12012 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12013 #else
12014 ret = get_errno(readahead(arg1, arg2, arg3));
12015 #endif
12016 return ret;
12017 #endif
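    /*
     * The xattr family below follows one pattern: lock the name string
     * (and the optional value buffer) in guest memory, fail with EFAULT
     * if any region is unmapped, call the host syscall, then unlock,
     * marking the whole value buffer dirty on the get/list paths so the
     * host's output is copied back to the guest.
     */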
12018 #ifdef CONFIG_ATTR
12019 #ifdef TARGET_NR_setxattr
12020 case TARGET_NR_listxattr:
12021 case TARGET_NR_llistxattr:
12022 {
12023 void *p, *b = 0;
12024 if (arg2) {
12025 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12026 if (!b) {
12027 return -TARGET_EFAULT;
12028 }
12029 }
12030 p = lock_user_string(arg1);
12031 if (p) {
12032 if (num == TARGET_NR_listxattr) {
12033 ret = get_errno(listxattr(p, b, arg3));
12034 } else {
12035 ret = get_errno(llistxattr(p, b, arg3));
12036 }
12037 } else {
12038 ret = -TARGET_EFAULT;
12039 }
12040 unlock_user(p, arg1, 0);
12041 unlock_user(b, arg2, arg3);
12042 return ret;
12043 }
12044 case TARGET_NR_flistxattr:
12045 {
12046 void *b = 0;
12047 if (arg2) {
12048 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12049 if (!b) {
12050 return -TARGET_EFAULT;
12051 }
12052 }
12053 ret = get_errno(flistxattr(arg1, b, arg3));
12054 unlock_user(b, arg2, arg3);
12055 return ret;
12056 }
12057 case TARGET_NR_setxattr:
12058 case TARGET_NR_lsetxattr:
12059 {
12060 void *p, *n, *v = 0;
12061 if (arg3) {
12062 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12063 if (!v) {
12064 return -TARGET_EFAULT;
12065 }
12066 }
12067 p = lock_user_string(arg1);
12068 n = lock_user_string(arg2);
12069 if (p && n) {
12070 if (num == TARGET_NR_setxattr) {
12071 ret = get_errno(setxattr(p, n, v, arg4, arg5));
12072 } else {
12073 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12074 }
12075 } else {
12076 ret = -TARGET_EFAULT;
12077 }
12078 unlock_user(p, arg1, 0);
12079 unlock_user(n, arg2, 0);
12080 unlock_user(v, arg3, 0);
12081 }
12082 return ret;
12083 case TARGET_NR_fsetxattr:
12084 {
12085 void *n, *v = 0;
12086 if (arg3) {
12087 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12088 if (!v) {
12089 return -TARGET_EFAULT;
12090 }
12091 }
12092 n = lock_user_string(arg2);
12093 if (n) {
12094 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12095 } else {
12096 ret = -TARGET_EFAULT;
12097 }
12098 unlock_user(n, arg2, 0);
12099 unlock_user(v, arg3, 0);
12100 }
12101 return ret;
12102 case TARGET_NR_getxattr:
12103 case TARGET_NR_lgetxattr:
12104 {
12105 void *p, *n, *v = 0;
12106 if (arg3) {
12107 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12108 if (!v) {
12109 return -TARGET_EFAULT;
12110 }
12111 }
12112 p = lock_user_string(arg1);
12113 n = lock_user_string(arg2);
12114 if (p && n) {
12115 if (num == TARGET_NR_getxattr) {
12116 ret = get_errno(getxattr(p, n, v, arg4));
12117 } else {
12118 ret = get_errno(lgetxattr(p, n, v, arg4));
12119 }
12120 } else {
12121 ret = -TARGET_EFAULT;
12122 }
12123 unlock_user(p, arg1, 0);
12124 unlock_user(n, arg2, 0);
12125 unlock_user(v, arg3, arg4);
12126 }
12127 return ret;
12128 case TARGET_NR_fgetxattr:
12129 {
12130 void *n, *v = 0;
12131 if (arg3) {
12132 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12133 if (!v) {
12134 return -TARGET_EFAULT;
12135 }
12136 }
12137 n = lock_user_string(arg2);
12138 if (n) {
12139 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12140 } else {
12141 ret = -TARGET_EFAULT;
12142 }
12143 unlock_user(n, arg2, 0);
12144 unlock_user(v, arg3, arg4);
12145 }
12146 return ret;
12147 case TARGET_NR_removexattr:
12148 case TARGET_NR_lremovexattr:
12149 {
12150 void *p, *n;
12151 p = lock_user_string(arg1);
12152 n = lock_user_string(arg2);
12153 if (p && n) {
12154 if (num == TARGET_NR_removexattr) {
12155 ret = get_errno(removexattr(p, n));
12156 } else {
12157 ret = get_errno(lremovexattr(p, n));
12158 }
12159 } else {
12160 ret = -TARGET_EFAULT;
12161 }
12162 unlock_user(p, arg1, 0);
12163 unlock_user(n, arg2, 0);
12164 }
12165 return ret;
12166 case TARGET_NR_fremovexattr:
12167 {
12168 void *n;
12169 n = lock_user_string(arg2);
12170 if (n) {
12171 ret = get_errno(fremovexattr(arg1, n));
12172 } else {
12173 ret = -TARGET_EFAULT;
12174 }
12175 unlock_user(n, arg2, 0);
12176 }
12177 return ret;
12178 #endif
12179 #endif /* CONFIG_ATTR */
12180 #ifdef TARGET_NR_set_thread_area
12181 case TARGET_NR_set_thread_area:
12182 #if defined(TARGET_MIPS)
12183 cpu_env->active_tc.CP0_UserLocal = arg1;
12184 return 0;
12185 #elif defined(TARGET_CRIS)
12186 if (arg1 & 0xff) {
12187 ret = -TARGET_EINVAL;
12188 } else {
12189 cpu_env->pregs[PR_PID] = arg1;
12190 ret = 0;
12191 }
12192 return ret;
12193 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12194 return do_set_thread_area(cpu_env, arg1);
12195 #elif defined(TARGET_M68K)
12196 {
12197 TaskState *ts = cpu->opaque;
12198 ts->tp_value = arg1;
12199 return 0;
12200 }
12201 #else
12202 return -TARGET_ENOSYS;
12203 #endif
12204 #endif
12205 #ifdef TARGET_NR_get_thread_area
12206 case TARGET_NR_get_thread_area:
12207 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12208 return do_get_thread_area(cpu_env, arg1);
12209 #elif defined(TARGET_M68K)
12210 {
12211 TaskState *ts = cpu->opaque;
12212 return ts->tp_value;
12213 }
12214 #else
12215 return -TARGET_ENOSYS;
12216 #endif
12217 #endif
12218 #ifdef TARGET_NR_getdomainname
12219 case TARGET_NR_getdomainname:
12220 return -TARGET_ENOSYS;
12221 #endif
12222
12223 #ifdef TARGET_NR_clock_settime
12224 case TARGET_NR_clock_settime:
12225 {
12226 struct timespec ts;
12227
12228 ret = target_to_host_timespec(&ts, arg2);
12229 if (!is_error(ret)) {
12230 ret = get_errno(clock_settime(arg1, &ts));
12231 }
12232 return ret;
12233 }
12234 #endif
12235 #ifdef TARGET_NR_clock_settime64
12236 case TARGET_NR_clock_settime64:
12237 {
12238 struct timespec ts;
12239
12240 ret = target_to_host_timespec64(&ts, arg2);
12241 if (!is_error(ret)) {
12242 ret = get_errno(clock_settime(arg1, &ts));
12243 }
12244 return ret;
12245 }
12246 #endif
12247 #ifdef TARGET_NR_clock_gettime
12248 case TARGET_NR_clock_gettime:
12249 {
12250 struct timespec ts;
12251 ret = get_errno(clock_gettime(arg1, &ts));
12252 if (!is_error(ret)) {
12253 ret = host_to_target_timespec(arg2, &ts);
12254 }
12255 return ret;
12256 }
12257 #endif
12258 #ifdef TARGET_NR_clock_gettime64
12259 case TARGET_NR_clock_gettime64:
12260 {
12261 struct timespec ts;
12262 ret = get_errno(clock_gettime(arg1, &ts));
12263 if (!is_error(ret)) {
12264 ret = host_to_target_timespec64(arg2, &ts);
12265 }
12266 return ret;
12267 }
12268 #endif
12269 #ifdef TARGET_NR_clock_getres
12270 case TARGET_NR_clock_getres:
12271 {
12272 struct timespec ts;
12273 ret = get_errno(clock_getres(arg1, &ts));
12274 if (!is_error(ret)) {
12275 host_to_target_timespec(arg2, &ts);
12276 }
12277 return ret;
12278 }
12279 #endif
12280 #ifdef TARGET_NR_clock_getres_time64
12281 case TARGET_NR_clock_getres_time64:
12282 {
12283 struct timespec ts;
12284 ret = get_errno(clock_getres(arg1, &ts));
12285 if (!is_error(ret)) {
12286 host_to_target_timespec64(arg2, &ts);
12287 }
12288 return ret;
12289 }
12290 #endif
12291 #ifdef TARGET_NR_clock_nanosleep
12292 case TARGET_NR_clock_nanosleep:
12293 {
12294 struct timespec ts;
12295 if (target_to_host_timespec(&ts, arg3)) {
12296 return -TARGET_EFAULT;
12297 }
12298 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12299 &ts, arg4 ? &ts : NULL));
12300 /*
12301 * If the call is interrupted by a signal handler it fails with
12302 * -TARGET_EINTR; if arg4 is non-NULL and arg2 is not TIMER_ABSTIME,
12303 * the remaining unslept time is written back to arg4.
12304 */
12305 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12306 host_to_target_timespec(arg4, &ts)) {
12307 return -TARGET_EFAULT;
12308 }
12309
12310 return ret;
12311 }
12312 #endif
12313 #ifdef TARGET_NR_clock_nanosleep_time64
12314 case TARGET_NR_clock_nanosleep_time64:
12315 {
12316 struct timespec ts;
12317
12318 if (target_to_host_timespec64(&ts, arg3)) {
12319 return -TARGET_EFAULT;
12320 }
12321
12322 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12323 &ts, arg4 ? &ts : NULL));
12324
12325 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12326 host_to_target_timespec64(arg4, &ts)) {
12327 return -TARGET_EFAULT;
12328 }
12329 return ret;
12330 }
12331 #endif
12332
12333 #if defined(TARGET_NR_set_tid_address)
12334 case TARGET_NR_set_tid_address:
12335 {
12336 TaskState *ts = cpu->opaque;
12337 ts->child_tidptr = arg1;
12338 /* do not call the host set_tid_address() syscall; just return the current tid */
12339 return get_errno(sys_gettid());
12340 }
12341 #endif
12342
12343 case TARGET_NR_tkill:
12344 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12345
12346 case TARGET_NR_tgkill:
12347 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12348 target_to_host_signal(arg3)));
12349
12350 #ifdef TARGET_NR_set_robust_list
12351 case TARGET_NR_set_robust_list:
12352 case TARGET_NR_get_robust_list:
12353 /* The ABI for supporting robust futexes has userspace pass
12354 * the kernel a pointer to a linked list which is updated by
12355 * userspace after the syscall; the list is walked by the kernel
12356 * when the thread exits. Since the linked list in QEMU guest
12357 * memory isn't a valid linked list for the host and we have
12358 * no way to reliably intercept the thread-death event, we can't
12359 * support these. Silently return ENOSYS so that guest userspace
12360 * falls back to a non-robust futex implementation (which should
12361 * be OK except in the corner case of the guest crashing while
12362 * holding a mutex that is shared with another process via
12363 * shared memory).
12364 */
12365 return -TARGET_ENOSYS;
12366 #endif
12367
12368 #if defined(TARGET_NR_utimensat)
12369 case TARGET_NR_utimensat:
12370 {
12371 struct timespec *tsp, ts[2];
12372 if (!arg3) {
12373 tsp = NULL;
12374 } else {
12375 if (target_to_host_timespec(ts, arg3)) {
12376 return -TARGET_EFAULT;
12377 }
12378 if (target_to_host_timespec(ts + 1, arg3 +
12379 sizeof(struct target_timespec))) {
12380 return -TARGET_EFAULT;
12381 }
12382 tsp = ts;
12383 }
12384 if (!arg2)
12385 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12386 else {
12387 if (!(p = lock_user_string(arg2))) {
12388 return -TARGET_EFAULT;
12389 }
12390 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12391 unlock_user(p, arg2, 0);
12392 }
12393 }
12394 return ret;
12395 #endif
12396 #ifdef TARGET_NR_utimensat_time64
12397 case TARGET_NR_utimensat_time64:
12398 {
12399 struct timespec *tsp, ts[2];
12400 if (!arg3) {
12401 tsp = NULL;
12402 } else {
12403 if (target_to_host_timespec64(ts, arg3)) {
12404 return -TARGET_EFAULT;
12405 }
12406 if (target_to_host_timespec64(ts + 1, arg3 +
12407 sizeof(struct target__kernel_timespec))) {
12408 return -TARGET_EFAULT;
12409 }
12410 tsp = ts;
12411 }
12412 if (!arg2)
12413 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12414 else {
12415 p = lock_user_string(arg2);
12416 if (!p) {
12417 return -TARGET_EFAULT;
12418 }
12419 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12420 unlock_user(p, arg2, 0);
12421 }
12422 }
12423 return ret;
12424 #endif
12425 #ifdef TARGET_NR_futex
12426 case TARGET_NR_futex:
12427 return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12428 #endif
12429 #ifdef TARGET_NR_futex_time64
12430 case TARGET_NR_futex_time64:
12431 return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12432 #endif
12433 #ifdef CONFIG_INOTIFY
12434 #if defined(TARGET_NR_inotify_init)
12435 case TARGET_NR_inotify_init:
12436 ret = get_errno(inotify_init());
12437 if (ret >= 0) {
12438 fd_trans_register(ret, &target_inotify_trans);
12439 }
12440 return ret;
12441 #endif
12442 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12443 case TARGET_NR_inotify_init1:
12444 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12445 fcntl_flags_tbl)));
12446 if (ret >= 0) {
12447 fd_trans_register(ret, &target_inotify_trans);
12448 }
12449 return ret;
12450 #endif
12451 #if defined(TARGET_NR_inotify_add_watch)
12452 case TARGET_NR_inotify_add_watch:
12453 p = lock_user_string(arg2);
12454 ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12455 unlock_user(p, arg2, 0);
12456 return ret;
12457 #endif
12458 #if defined(TARGET_NR_inotify_rm_watch)
12459 case TARGET_NR_inotify_rm_watch:
12460 return get_errno(inotify_rm_watch(arg1, arg2));
12461 #endif
12462 #endif
12463
12464 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12465 case TARGET_NR_mq_open:
12466 {
12467 struct mq_attr posix_mq_attr;
12468 struct mq_attr *pposix_mq_attr;
12469 int host_flags;
12470
12471 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12472 pposix_mq_attr = NULL;
12473 if (arg4) {
12474 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12475 return -TARGET_EFAULT;
12476 }
12477 pposix_mq_attr = &posix_mq_attr;
12478 }
12479 p = lock_user_string(arg1 - 1);
12480 if (!p) {
12481 return -TARGET_EFAULT;
12482 }
12483 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12484 unlock_user(p, arg1, 0);
12485 }
12486 return ret;
12487
12488 case TARGET_NR_mq_unlink:
12489 p = lock_user_string(arg1 - 1);
12490 if (!p) {
12491 return -TARGET_EFAULT;
12492 }
12493 ret = get_errno(mq_unlink(p));
12494 unlock_user(p, arg1, 0);
12495 return ret;
12496
12497 #ifdef TARGET_NR_mq_timedsend
12498 case TARGET_NR_mq_timedsend:
12499 {
12500 struct timespec ts;
12501
12502 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12503 if (arg5 != 0) {
12504 if (target_to_host_timespec(&ts, arg5)) {
12505 return -TARGET_EFAULT;
12506 }
12507 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12508 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12509 return -TARGET_EFAULT;
12510 }
12511 } else {
12512 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12513 }
12514 unlock_user(p, arg2, arg3);
12515 }
12516 return ret;
12517 #endif
12518 #ifdef TARGET_NR_mq_timedsend_time64
12519 case TARGET_NR_mq_timedsend_time64:
12520 {
12521 struct timespec ts;
12522
12523 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12524 if (arg5 != 0) {
12525 if (target_to_host_timespec64(&ts, arg5)) {
12526 return -TARGET_EFAULT;
12527 }
12528 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12529 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12530 return -TARGET_EFAULT;
12531 }
12532 } else {
12533 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12534 }
12535 unlock_user(p, arg2, arg3);
12536 }
12537 return ret;
12538 #endif
12539
12540 #ifdef TARGET_NR_mq_timedreceive
12541 case TARGET_NR_mq_timedreceive:
12542 {
12543 struct timespec ts;
12544 unsigned int prio;
12545
12546 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12547 if (arg5 != 0) {
12548 if (target_to_host_timespec(&ts, arg5)) {
12549 return -TARGET_EFAULT;
12550 }
12551 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12552 &prio, &ts));
12553 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12554 return -TARGET_EFAULT;
12555 }
12556 } else {
12557 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12558 &prio, NULL));
12559 }
12560 unlock_user(p, arg2, arg3);
12561 if (arg4 != 0)
12562 put_user_u32(prio, arg4);
12563 }
12564 return ret;
12565 #endif
12566 #ifdef TARGET_NR_mq_timedreceive_time64
12567 case TARGET_NR_mq_timedreceive_time64:
12568 {
12569 struct timespec ts;
12570 unsigned int prio;
12571
12572 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12573 if (arg5 != 0) {
12574 if (target_to_host_timespec64(&ts, arg5)) {
12575 return -TARGET_EFAULT;
12576 }
12577 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12578 &prio, &ts));
12579 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12580 return -TARGET_EFAULT;
12581 }
12582 } else {
12583 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12584 &prio, NULL));
12585 }
12586 unlock_user(p, arg2, arg3);
12587 if (arg4 != 0) {
12588 put_user_u32(prio, arg4);
12589 }
12590 }
12591 return ret;
12592 #endif
12593
12594 /* Not implemented for now... */
12595 /* case TARGET_NR_mq_notify: */
12596 /* break; */
12597
12598 case TARGET_NR_mq_getsetattr:
12599 {
12600 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12601 ret = 0;
12602 if (arg2 != 0) {
12603 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12604 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12605 &posix_mq_attr_out));
12606 } else if (arg3 != 0) {
12607 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12608 }
12609 if (ret == 0 && arg3 != 0) {
12610 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12611 }
12612 }
12613 return ret;
12614 #endif
12615
12616 #ifdef CONFIG_SPLICE
12617 #ifdef TARGET_NR_tee
12618 case TARGET_NR_tee:
12619 {
12620 ret = get_errno(tee(arg1, arg2, arg3, arg4));
12621 }
12622 return ret;
12623 #endif
12624 #ifdef TARGET_NR_splice
12625 case TARGET_NR_splice:
12626 {
12627 loff_t loff_in, loff_out;
12628 loff_t *ploff_in = NULL, *ploff_out = NULL;
12629 if (arg2) {
12630 if (get_user_u64(loff_in, arg2)) {
12631 return -TARGET_EFAULT;
12632 }
12633 ploff_in = &loff_in;
12634 }
12635 if (arg4) {
12636 if (get_user_u64(loff_out, arg4)) {
12637 return -TARGET_EFAULT;
12638 }
12639 ploff_out = &loff_out;
12640 }
12641 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12642 if (arg2) {
12643 if (put_user_u64(loff_in, arg2)) {
12644 return -TARGET_EFAULT;
12645 }
12646 }
12647 if (arg4) {
12648 if (put_user_u64(loff_out, arg4)) {
12649 return -TARGET_EFAULT;
12650 }
12651 }
12652 }
12653 return ret;
12654 #endif
12655 #ifdef TARGET_NR_vmsplice
12656 case TARGET_NR_vmsplice:
12657 {
12658 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12659 if (vec != NULL) {
12660 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12661 unlock_iovec(vec, arg2, arg3, 0);
12662 } else {
12663 ret = -host_to_target_errno(errno);
12664 }
12665 }
12666 return ret;
12667 #endif
12668 #endif /* CONFIG_SPLICE */
12669 #ifdef CONFIG_EVENTFD
12670 #if defined(TARGET_NR_eventfd)
12671 case TARGET_NR_eventfd:
12672 ret = get_errno(eventfd(arg1, 0));
12673 if (ret >= 0) {
12674 fd_trans_register(ret, &target_eventfd_trans);
12675 }
12676 return ret;
12677 #endif
12678 #if defined(TARGET_NR_eventfd2)
12679 case TARGET_NR_eventfd2:
12680 {
12681 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12682 if (arg2 & TARGET_O_NONBLOCK) {
12683 host_flags |= O_NONBLOCK;
12684 }
12685 if (arg2 & TARGET_O_CLOEXEC) {
12686 host_flags |= O_CLOEXEC;
12687 }
12688 ret = get_errno(eventfd(arg1, host_flags));
12689 if (ret >= 0) {
12690 fd_trans_register(ret, &target_eventfd_trans);
12691 }
12692 return ret;
12693 }
12694 #endif
12695 #endif /* CONFIG_EVENTFD */
12696 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12697 case TARGET_NR_fallocate:
12698 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12699 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12700 target_offset64(arg5, arg6)));
12701 #else
12702 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12703 #endif
12704 return ret;
12705 #endif
12706 #if defined(CONFIG_SYNC_FILE_RANGE)
12707 #if defined(TARGET_NR_sync_file_range)
12708 case TARGET_NR_sync_file_range:
12709 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12710 #if defined(TARGET_MIPS)
12711 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12712 target_offset64(arg5, arg6), arg7));
12713 #else
12714 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12715 target_offset64(arg4, arg5), arg6));
12716 #endif /* !TARGET_MIPS */
12717 #else
12718 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12719 #endif
12720 return ret;
12721 #endif
12722 #if defined(TARGET_NR_sync_file_range2) || \
12723 defined(TARGET_NR_arm_sync_file_range)
12724 #if defined(TARGET_NR_sync_file_range2)
12725 case TARGET_NR_sync_file_range2:
12726 #endif
12727 #if defined(TARGET_NR_arm_sync_file_range)
12728 case TARGET_NR_arm_sync_file_range:
12729 #endif
12730 /* This is like sync_file_range but the arguments are reordered */
12731 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12732 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12733 target_offset64(arg5, arg6), arg2));
12734 #else
12735 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12736 #endif
12737 return ret;
12738 #endif
12739 #endif
12740 #if defined(TARGET_NR_signalfd4)
12741 case TARGET_NR_signalfd4:
12742 return do_signalfd4(arg1, arg2, arg4);
12743 #endif
12744 #if defined(TARGET_NR_signalfd)
12745 case TARGET_NR_signalfd:
12746 return do_signalfd4(arg1, arg2, 0);
12747 #endif
12748 #if defined(CONFIG_EPOLL)
12749 #if defined(TARGET_NR_epoll_create)
12750 case TARGET_NR_epoll_create:
12751 return get_errno(epoll_create(arg1));
12752 #endif
12753 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12754 case TARGET_NR_epoll_create1:
12755 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12756 #endif
12757 #if defined(TARGET_NR_epoll_ctl)
12758 case TARGET_NR_epoll_ctl:
12759 {
12760 struct epoll_event ep;
12761 struct epoll_event *epp = 0;
12762 if (arg4) {
12763 if (arg2 != EPOLL_CTL_DEL) {
12764 struct target_epoll_event *target_ep;
12765 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12766 return -TARGET_EFAULT;
12767 }
12768 ep.events = tswap32(target_ep->events);
12769 /*
12770 * The epoll_data_t union is just opaque data to the kernel,
12771 * so we transfer all 64 bits across and need not worry what
12772 * actual data type it is.
12773 */
12774 ep.data.u64 = tswap64(target_ep->data.u64);
12775 unlock_user_struct(target_ep, arg4, 0);
12776 }
12777 /*
12778 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12779 * non-null pointer, even though this argument is ignored.
12780 *
12781 */
12782 epp = &ep;
12783 }
12784 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12785 }
12786 #endif
12787
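    /*
     * epoll_pwait swaps in the guest-supplied signal mask for the
     * duration of the wait: process_sigsuspend_mask() validates and
     * installs it, and finish_sigsuspend_mask() arranges for the
     * original mask to be restored once the result is known, mimicking
     * the kernel's atomic sigmask handling.
     */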
12788 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12789 #if defined(TARGET_NR_epoll_wait)
12790 case TARGET_NR_epoll_wait:
12791 #endif
12792 #if defined(TARGET_NR_epoll_pwait)
12793 case TARGET_NR_epoll_pwait:
12794 #endif
12795 {
12796 struct target_epoll_event *target_ep;
12797 struct epoll_event *ep;
12798 int epfd = arg1;
12799 int maxevents = arg3;
12800 int timeout = arg4;
12801
12802 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12803 return -TARGET_EINVAL;
12804 }
12805
12806 target_ep = lock_user(VERIFY_WRITE, arg2,
12807 maxevents * sizeof(struct target_epoll_event), 1);
12808 if (!target_ep) {
12809 return -TARGET_EFAULT;
12810 }
12811
12812 ep = g_try_new(struct epoll_event, maxevents);
12813 if (!ep) {
12814 unlock_user(target_ep, arg2, 0);
12815 return -TARGET_ENOMEM;
12816 }
12817
12818 switch (num) {
12819 #if defined(TARGET_NR_epoll_pwait)
12820 case TARGET_NR_epoll_pwait:
12821 {
12822 sigset_t *set = NULL;
12823
12824 if (arg5) {
12825 ret = process_sigsuspend_mask(&set, arg5, arg6);
12826 if (ret != 0) {
12827 break;
12828 }
12829 }
12830
12831 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12832 set, SIGSET_T_SIZE));
12833
12834 if (set) {
12835 finish_sigsuspend_mask(ret);
12836 }
12837 break;
12838 }
12839 #endif
12840 #if defined(TARGET_NR_epoll_wait)
12841 case TARGET_NR_epoll_wait:
12842 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12843 NULL, 0));
12844 break;
12845 #endif
12846 default:
12847 ret = -TARGET_ENOSYS;
12848 }
12849 if (!is_error(ret)) {
12850 int i;
12851 for (i = 0; i < ret; i++) {
12852 target_ep[i].events = tswap32(ep[i].events);
12853 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12854 }
12855 unlock_user(target_ep, arg2,
12856 ret * sizeof(struct target_epoll_event));
12857 } else {
12858 unlock_user(target_ep, arg2, 0);
12859 }
12860 g_free(ep);
12861 return ret;
12862 }
12863 #endif
12864 #endif
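    /*
     * prlimit64: new limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK
     * are deliberately not forwarded to the host (rnewp stays NULL),
     * since shrinking those would constrain the QEMU process itself
     * rather than just the guest; reading the old limits still works.
     */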
12865 #ifdef TARGET_NR_prlimit64
12866 case TARGET_NR_prlimit64:
12867 {
12868 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12869 struct target_rlimit64 *target_rnew, *target_rold;
12870 struct host_rlimit64 rnew, rold, *rnewp = 0;
12871 int resource = target_to_host_resource(arg2);
12872
12873 if (arg3 && (resource != RLIMIT_AS &&
12874 resource != RLIMIT_DATA &&
12875 resource != RLIMIT_STACK)) {
12876 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12877 return -TARGET_EFAULT;
12878 }
12879 __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
12880 __get_user(rnew.rlim_max, &target_rnew->rlim_max);
12881 unlock_user_struct(target_rnew, arg3, 0);
12882 rnewp = &rnew;
12883 }
12884
12885 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12886 if (!is_error(ret) && arg4) {
12887 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12888 return -TARGET_EFAULT;
12889 }
12890 __put_user(rold.rlim_cur, &target_rold->rlim_cur);
12891 __put_user(rold.rlim_max, &target_rold->rlim_max);
12892 unlock_user_struct(target_rold, arg4, 1);
12893 }
12894 return ret;
12895 }
12896 #endif
12897 #ifdef TARGET_NR_gethostname
12898 case TARGET_NR_gethostname:
12899 {
12900 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12901 if (name) {
12902 ret = get_errno(gethostname(name, arg2));
12903 unlock_user(name, arg1, arg2);
12904 } else {
12905 ret = -TARGET_EFAULT;
12906 }
12907 return ret;
12908 }
12909 #endif
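    /*
     * m68k-specific compare-and-exchange; as the comment below notes, it
     * is not actually atomic against other guest threads.  On a faulting
     * address a SIGSEGV is queued and delivered when the syscall path
     * returns to the CPU loop.
     */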
12910 #ifdef TARGET_NR_atomic_cmpxchg_32
12911 case TARGET_NR_atomic_cmpxchg_32:
12912 {
12913 /* should use start_exclusive from main.c */
12914 abi_ulong mem_value;
12915 if (get_user_u32(mem_value, arg6)) {
12916 target_siginfo_t info;
12917 info.si_signo = SIGSEGV;
12918 info.si_errno = 0;
12919 info.si_code = TARGET_SEGV_MAPERR;
12920 info._sifields._sigfault._addr = arg6;
12921 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12922 ret = 0xdeadbeef;
12923
12924 }
12925 if (mem_value == arg2)
12926 put_user_u32(arg1, arg6);
12927 return mem_value;
12928 }
12929 #endif
12930 #ifdef TARGET_NR_atomic_barrier
12931 case TARGET_NR_atomic_barrier:
12932 /* Like the kernel implementation and the
12933 qemu arm barrier, no-op this? */
12934 return 0;
12935 #endif
12936
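    /*
     * Guest timer ids are an index into the fixed g_posix_timers[]
     * table, tagged with TIMER_MAGIC in the upper bits so that
     * get_timer_id() can reject values the guest invented; the host
     * timer_t itself never leaves QEMU.
     */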
12937 #ifdef TARGET_NR_timer_create
12938 case TARGET_NR_timer_create:
12939 {
12940 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12941
12942 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12943
12944 int clkid = arg1;
12945 int timer_index = next_free_host_timer();
12946
12947 if (timer_index < 0) {
12948 ret = -TARGET_EAGAIN;
12949 } else {
12950 timer_t *phtimer = g_posix_timers + timer_index;
12951
12952 if (arg2) {
12953 phost_sevp = &host_sevp;
12954 ret = target_to_host_sigevent(phost_sevp, arg2);
12955 if (ret != 0) {
12956 free_host_timer_slot(timer_index);
12957 return ret;
12958 }
12959 }
12960
12961 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12962 if (ret) {
12963 free_host_timer_slot(timer_index);
12964 } else {
12965 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12966 timer_delete(*phtimer);
12967 free_host_timer_slot(timer_index);
12968 return -TARGET_EFAULT;
12969 }
12970 }
12971 }
12972 return ret;
12973 }
12974 #endif
12975
12976 #ifdef TARGET_NR_timer_settime
12977 case TARGET_NR_timer_settime:
12978 {
12979 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12980 * struct itimerspec * old_value */
12981 target_timer_t timerid = get_timer_id(arg1);
12982
12983 if (timerid < 0) {
12984 ret = timerid;
12985 } else if (arg3 == 0) {
12986 ret = -TARGET_EINVAL;
12987 } else {
12988 timer_t htimer = g_posix_timers[timerid];
12989 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12990
12991 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12992 return -TARGET_EFAULT;
12993 }
12994 ret = get_errno(
12995 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12996 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12997 return -TARGET_EFAULT;
12998 }
12999 }
13000 return ret;
13001 }
13002 #endif
13003
13004 #ifdef TARGET_NR_timer_settime64
13005 case TARGET_NR_timer_settime64:
13006 {
13007 target_timer_t timerid = get_timer_id(arg1);
13008
13009 if (timerid < 0) {
13010 ret = timerid;
13011 } else if (arg3 == 0) {
13012 ret = -TARGET_EINVAL;
13013 } else {
13014 timer_t htimer = g_posix_timers[timerid];
13015 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13016
13017 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13018 return -TARGET_EFAULT;
13019 }
13020 ret = get_errno(
13021 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13022 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13023 return -TARGET_EFAULT;
13024 }
13025 }
13026 return ret;
13027 }
13028 #endif
13029
13030 #ifdef TARGET_NR_timer_gettime
13031 case TARGET_NR_timer_gettime:
13032 {
13033 /* args: timer_t timerid, struct itimerspec *curr_value */
13034 target_timer_t timerid = get_timer_id(arg1);
13035
13036 if (timerid < 0) {
13037 ret = timerid;
13038 } else if (!arg2) {
13039 ret = -TARGET_EFAULT;
13040 } else {
13041 timer_t htimer = g_posix_timers[timerid];
13042 struct itimerspec hspec;
13043 ret = get_errno(timer_gettime(htimer, &hspec));
13044
13045 if (host_to_target_itimerspec(arg2, &hspec)) {
13046 ret = -TARGET_EFAULT;
13047 }
13048 }
13049 return ret;
13050 }
13051 #endif
13052
13053 #ifdef TARGET_NR_timer_gettime64
13054 case TARGET_NR_timer_gettime64:
13055 {
13056 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13057 target_timer_t timerid = get_timer_id(arg1);
13058
13059 if (timerid < 0) {
13060 ret = timerid;
13061 } else if (!arg2) {
13062 ret = -TARGET_EFAULT;
13063 } else {
13064 timer_t htimer = g_posix_timers[timerid];
13065 struct itimerspec hspec;
13066 ret = get_errno(timer_gettime(htimer, &hspec));
13067
13068 if (host_to_target_itimerspec64(arg2, &hspec)) {
13069 ret = -TARGET_EFAULT;
13070 }
13071 }
13072 return ret;
13073 }
13074 #endif
13075
13076 #ifdef TARGET_NR_timer_getoverrun
13077 case TARGET_NR_timer_getoverrun:
13078 {
13079 /* args: timer_t timerid */
13080 target_timer_t timerid = get_timer_id(arg1);
13081
13082 if (timerid < 0) {
13083 ret = timerid;
13084 } else {
13085 timer_t htimer = g_posix_timers[timerid];
13086 ret = get_errno(timer_getoverrun(htimer));
13087 }
13088 return ret;
13089 }
13090 #endif
13091
13092 #ifdef TARGET_NR_timer_delete
13093 case TARGET_NR_timer_delete:
13094 {
13095 /* args: timer_t timerid */
13096 target_timer_t timerid = get_timer_id(arg1);
13097
13098 if (timerid < 0) {
13099 ret = timerid;
13100 } else {
13101 timer_t htimer = g_posix_timers[timerid];
13102 ret = get_errno(timer_delete(htimer));
13103 free_host_timer_slot(timerid);
13104 }
13105 return ret;
13106 }
13107 #endif
13108
13109 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13110 case TARGET_NR_timerfd_create:
13111 ret = get_errno(timerfd_create(arg1,
13112 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13113 if (ret >= 0) {
13114 fd_trans_register(ret, &target_timerfd_trans);
13115 }
13116 return ret;
13117 #endif
13118
13119 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13120 case TARGET_NR_timerfd_gettime:
13121 {
13122 struct itimerspec its_curr;
13123
13124 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13125
13126 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13127 return -TARGET_EFAULT;
13128 }
13129 }
13130 return ret;
13131 #endif
13132
13133 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13134 case TARGET_NR_timerfd_gettime64:
13135 {
13136 struct itimerspec its_curr;
13137
13138 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13139
13140 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13141 return -TARGET_EFAULT;
13142 }
13143 }
13144 return ret;
13145 #endif
13146
13147 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13148 case TARGET_NR_timerfd_settime:
13149 {
13150 struct itimerspec its_new, its_old, *p_new;
13151
13152 if (arg3) {
13153 if (target_to_host_itimerspec(&its_new, arg3)) {
13154 return -TARGET_EFAULT;
13155 }
13156 p_new = &its_new;
13157 } else {
13158 p_new = NULL;
13159 }
13160
13161 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13162
13163 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13164 return -TARGET_EFAULT;
13165 }
13166 }
13167 return ret;
13168 #endif
13169
13170 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13171 case TARGET_NR_timerfd_settime64:
13172 {
13173 struct itimerspec its_new, its_old, *p_new;
13174
13175 if (arg3) {
13176 if (target_to_host_itimerspec64(&its_new, arg3)) {
13177 return -TARGET_EFAULT;
13178 }
13179 p_new = &its_new;
13180 } else {
13181 p_new = NULL;
13182 }
13183
13184 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13185
13186 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13187 return -TARGET_EFAULT;
13188 }
13189 }
13190 return ret;
13191 #endif
13192
13193 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13194 case TARGET_NR_ioprio_get:
13195 return get_errno(ioprio_get(arg1, arg2));
13196 #endif
13197
13198 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13199 case TARGET_NR_ioprio_set:
13200 return get_errno(ioprio_set(arg1, arg2, arg3));
13201 #endif
13202
13203 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13204 case TARGET_NR_setns:
13205 return get_errno(setns(arg1, arg2));
13206 #endif
13207 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13208 case TARGET_NR_unshare:
13209 return get_errno(unshare(arg1));
13210 #endif
13211 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13212 case TARGET_NR_kcmp:
13213 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13214 #endif
13215 #ifdef TARGET_NR_swapcontext
13216 case TARGET_NR_swapcontext:
13217 /* PowerPC specific. */
13218 return do_swapcontext(cpu_env, arg1, arg2, arg3);
13219 #endif
13220 #ifdef TARGET_NR_memfd_create
13221 case TARGET_NR_memfd_create:
13222 p = lock_user_string(arg1);
13223 if (!p) {
13224 return -TARGET_EFAULT;
13225 }
13226 ret = get_errno(memfd_create(p, arg2));
13227 fd_trans_unregister(ret);
13228 unlock_user(p, arg1, 0);
13229 return ret;
13230 #endif
13231 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13232 case TARGET_NR_membarrier:
13233 return get_errno(membarrier(arg1, arg2));
13234 #endif
13235
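    /*
     * copy_file_range: a NULL offset pointer means "use and update the
     * fd's own file offset", so the guest pointers are dereferenced only
     * when present, and the advanced offsets are written back only after
     * a copy that actually moved data.
     */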
13236 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13237 case TARGET_NR_copy_file_range:
13238 {
13239 loff_t inoff, outoff;
13240 loff_t *pinoff = NULL, *poutoff = NULL;
13241
13242 if (arg2) {
13243 if (get_user_u64(inoff, arg2)) {
13244 return -TARGET_EFAULT;
13245 }
13246 pinoff = &inoff;
13247 }
13248 if (arg4) {
13249 if (get_user_u64(outoff, arg4)) {
13250 return -TARGET_EFAULT;
13251 }
13252 poutoff = &outoff;
13253 }
13254 /* Do not sign-extend the count parameter. */
13255 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13256 (abi_ulong)arg5, arg6));
13257 if (!is_error(ret) && ret > 0) {
13258 if (arg2) {
13259 if (put_user_u64(inoff, arg2)) {
13260 return -TARGET_EFAULT;
13261 }
13262 }
13263 if (arg4) {
13264 if (put_user_u64(outoff, arg4)) {
13265 return -TARGET_EFAULT;
13266 }
13267 }
13268 }
13269 }
13270 return ret;
13271 #endif
13272
13273 #if defined(TARGET_NR_pivot_root)
13274 case TARGET_NR_pivot_root:
13275 {
13276 void *p2;
13277 p = lock_user_string(arg1); /* new_root */
13278 p2 = lock_user_string(arg2); /* put_old */
13279 if (!p || !p2) {
13280 ret = -TARGET_EFAULT;
13281 } else {
13282 ret = get_errno(pivot_root(p, p2));
13283 }
13284 unlock_user(p2, arg2, 0);
13285 unlock_user(p, arg1, 0);
13286 }
13287 return ret;
13288 #endif
13289
13290 default:
13291 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13292 return -TARGET_ENOSYS;
13293 }
13294 return ret;
13295 }
13296
13297 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13298 abi_long arg2, abi_long arg3, abi_long arg4,
13299 abi_long arg5, abi_long arg6, abi_long arg7,
13300 abi_long arg8)
13301 {
13302 CPUState *cpu = env_cpu(cpu_env);
13303 abi_long ret;
13304
13305 #ifdef DEBUG_ERESTARTSYS
13306 /* Debug-only code for exercising the syscall-restart code paths
13307 * in the per-architecture cpu main loops: restart every syscall
13308 * the guest makes once before letting it through.
13309 */
13310 {
13311 static bool flag;
13312 flag = !flag;
13313 if (flag) {
13314 return -QEMU_ERESTARTSYS;
13315 }
13316 }
13317 #endif
13318
13319 record_syscall_start(cpu, num, arg1,
13320 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13321
13322 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13323 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13324 }
13325
13326 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13327 arg5, arg6, arg7, arg8);
13328
13329 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13330 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13331 arg3, arg4, arg5, arg6);
13332 }
13333
13334 record_syscall_return(cpu, num, ret);
13335 return ret;
13336 }