1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/mman.h>
36 #include <sys/swap.h>
37 #include <linux/capability.h>
38 #include <sched.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <sys/poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
78
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
85
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include "linux_loop.h"
105 #include "uname.h"
106
107 #include "qemu.h"
108
109 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
110 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
111
112 //#define DEBUG
113
114 //#include <linux/msdos_fs.h>
115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
117
118
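/*
 * Re-declare the _syscallN() helpers as thin wrappers around the host
 * syscall() function.  They are used below for host syscalls that either
 * lack a libc wrapper or whose libc wrapper does not expose the raw kernel
 * interface that the emulation needs.
 */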
119 #undef _syscall0
120 #undef _syscall1
121 #undef _syscall2
122 #undef _syscall3
123 #undef _syscall4
124 #undef _syscall5
125 #undef _syscall6
126
127 #define _syscall0(type,name) \
128 static type name (void) \
129 { \
130 return syscall(__NR_##name); \
131 }
132
133 #define _syscall1(type,name,type1,arg1) \
134 static type name (type1 arg1) \
135 { \
136 return syscall(__NR_##name, arg1); \
137 }
138
139 #define _syscall2(type,name,type1,arg1,type2,arg2) \
140 static type name (type1 arg1,type2 arg2) \
141 { \
142 return syscall(__NR_##name, arg1, arg2); \
143 }
144
145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
146 static type name (type1 arg1,type2 arg2,type3 arg3) \
147 { \
148 return syscall(__NR_##name, arg1, arg2, arg3); \
149 }
150
151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
153 { \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
155 }
156
157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
158 type5,arg5) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
160 { \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 }
163
164
165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 type5,arg5,type6,arg6) \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
168 type6 arg6) \
169 { \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 }
172
173
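/*
 * Aliases so that the wrappers generated below can be named sys_xxx()
 * without clashing with libc declarations of the corresponding syscalls.
 */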
174 #define __NR_sys_uname __NR_uname
175 #define __NR_sys_getcwd1 __NR_getcwd
176 #define __NR_sys_getdents __NR_getdents
177 #define __NR_sys_getdents64 __NR_getdents64
178 #define __NR_sys_getpriority __NR_getpriority
179 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
180 #define __NR_sys_syslog __NR_syslog
181 #define __NR_sys_tgkill __NR_tgkill
182 #define __NR_sys_tkill __NR_tkill
183 #define __NR_sys_futex __NR_futex
184 #define __NR_sys_inotify_init __NR_inotify_init
185 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
186 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
187
188 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
189 defined(__s390x__)
190 #define __NR__llseek __NR_lseek
191 #endif
192
193 /* Newer kernel ports have llseek() instead of _llseek() */
194 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
195 #define TARGET_NR__llseek TARGET_NR_llseek
196 #endif
197
198 #ifdef __NR_gettid
199 _syscall0(int, gettid)
200 #else
201 /* This is a replacement for the host gettid() and must return a host
202 errno. */
203 static int gettid(void) {
204 return -ENOSYS;
205 }
206 #endif
207 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
208 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
209 #endif
210 #if !defined(__NR_getdents) || \
211 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
212 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
213 #endif
214 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
215 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
216 loff_t *, res, uint, wh);
217 #endif
218 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
219 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
220 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
221 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
222 #endif
223 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
224 _syscall2(int,sys_tkill,int,tid,int,sig)
225 #endif
226 #ifdef __NR_exit_group
227 _syscall1(int,exit_group,int,error_code)
228 #endif
229 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
230 _syscall1(int,set_tid_address,int *,tidptr)
231 #endif
232 #if defined(TARGET_NR_futex) && defined(__NR_futex)
233 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
234 const struct timespec *,timeout,int *,uaddr2,int,val3)
235 #endif
236 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
237 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
238 unsigned long *, user_mask_ptr);
239 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
240 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
241 unsigned long *, user_mask_ptr);
242 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
243 void *, arg);
244 _syscall2(int, capget, struct __user_cap_header_struct *, header,
245 struct __user_cap_data_struct *, data);
246 _syscall2(int, capset, struct __user_cap_header_struct *, header,
247 struct __user_cap_data_struct *, data);
248 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
249 _syscall2(int, ioprio_get, int, which, int, who)
250 #endif
251 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
252 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
253 #endif
254 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
255 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
256 #endif
257
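/*
 * Mapping between target and host open(2)/fcntl(2) status flags.  Each
 * entry holds a (mask, value) pair for both sides, so a guest flag word
 * can be translated field by field into host flags and back again.
 */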
258 static bitmask_transtbl fcntl_flags_tbl[] = {
259 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
260 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
261 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
262 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
263 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
264 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
265 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
266 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
267 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
268 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
269 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
270 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
271 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
272 #if defined(O_DIRECT)
273 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
274 #endif
275 #if defined(O_NOATIME)
276 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
277 #endif
278 #if defined(O_CLOEXEC)
279 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
280 #endif
281 #if defined(O_PATH)
282 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
283 #endif
284 /* Don't terminate the list prematurely on 64-bit host+guest. */
285 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
286 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
287 #endif
288 { 0, 0, 0, 0 }
289 };
290
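/*
 * Optional per-fd conversion hooks.  A file descriptor whose payload or
 * peer addresses need translating between target and host layout gets an
 * entry in target_fd_trans[]; the lookup helpers below return the
 * registered callback, or NULL when the fd needs no special handling.
 */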
291 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
292 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
293 typedef struct TargetFdTrans {
294 TargetFdDataFunc host_to_target_data;
295 TargetFdDataFunc target_to_host_data;
296 TargetFdAddrFunc target_to_host_addr;
297 } TargetFdTrans;
298
299 static TargetFdTrans **target_fd_trans;
300
301 static unsigned int target_fd_max;
302
303 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
304 {
305 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
306 return target_fd_trans[fd]->host_to_target_data;
307 }
308 return NULL;
309 }
310
311 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
312 {
313 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
314 return target_fd_trans[fd]->target_to_host_addr;
315 }
316 return NULL;
317 }
318
319 static void fd_trans_register(int fd, TargetFdTrans *trans)
320 {
321 unsigned int oldmax;
322
323 if (fd >= target_fd_max) {
324 oldmax = target_fd_max;
325 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
326 target_fd_trans = g_renew(TargetFdTrans *,
327 target_fd_trans, target_fd_max);
328 memset((void *)(target_fd_trans + oldmax), 0,
329 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
330 }
331 target_fd_trans[fd] = trans;
332 }
333
334 static void fd_trans_unregister(int fd)
335 {
336 if (fd >= 0 && fd < target_fd_max) {
337 target_fd_trans[fd] = NULL;
338 }
339 }
340
341 static void fd_trans_dup(int oldfd, int newfd)
342 {
343 fd_trans_unregister(newfd);
344 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
345 fd_trans_register(newfd, target_fd_trans[oldfd]);
346 }
347 }
348
349 static int sys_getcwd1(char *buf, size_t size)
350 {
351 if (getcwd(buf, size) == NULL) {
352 /* getcwd() sets errno */
353 return (-1);
354 }
355 return strlen(buf)+1;
356 }
357
358 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
359 {
360 /*
361 * open(2) takes an extra 'mode' parameter when called with
362 * the O_CREAT flag.
363 */
364 if ((flags & O_CREAT) != 0) {
365 return (openat(dirfd, pathname, flags, mode));
366 }
367 return (openat(dirfd, pathname, flags));
368 }
369
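/*
 * utimensat(): use the libc wrapper when configure detected it, otherwise
 * issue the raw syscall if the host kernel defines it, and as a last
 * resort fail with ENOSYS.
 */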
370 #ifdef TARGET_NR_utimensat
371 #ifdef CONFIG_UTIMENSAT
372 static int sys_utimensat(int dirfd, const char *pathname,
373 const struct timespec times[2], int flags)
374 {
375 if (pathname == NULL)
376 return futimens(dirfd, times);
377 else
378 return utimensat(dirfd, pathname, times, flags);
379 }
380 #elif defined(__NR_utimensat)
381 #define __NR_sys_utimensat __NR_utimensat
382 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
383 const struct timespec *,tsp,int,flags)
384 #else
385 static int sys_utimensat(int dirfd, const char *pathname,
386 const struct timespec times[2], int flags)
387 {
388 errno = ENOSYS;
389 return -1;
390 }
391 #endif
392 #endif /* TARGET_NR_utimensat */
393
394 #ifdef CONFIG_INOTIFY
395 #include <sys/inotify.h>
396
397 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
398 static int sys_inotify_init(void)
399 {
400 return (inotify_init());
401 }
402 #endif
403 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
404 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
405 {
406 return (inotify_add_watch(fd, pathname, mask));
407 }
408 #endif
409 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
410 static int sys_inotify_rm_watch(int fd, int32_t wd)
411 {
412 return (inotify_rm_watch(fd, wd));
413 }
414 #endif
415 #ifdef CONFIG_INOTIFY1
416 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
417 static int sys_inotify_init1(int flags)
418 {
419 return (inotify_init1(flags));
420 }
421 #endif
422 #endif
423 #else
424 /* Userspace can usually survive at runtime without inotify */
425 #undef TARGET_NR_inotify_init
426 #undef TARGET_NR_inotify_init1
427 #undef TARGET_NR_inotify_add_watch
428 #undef TARGET_NR_inotify_rm_watch
429 #endif /* CONFIG_INOTIFY */
430
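/*
 * ppoll, pselect6 and prlimit64 may be absent from older host kernels.
 * When the __NR_xxx constant is missing it is defined as -1 so the
 * wrapper still compiles; calling syscall(-1) then simply fails with
 * ENOSYS at run time.
 */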
431 #if defined(TARGET_NR_ppoll)
432 #ifndef __NR_ppoll
433 # define __NR_ppoll -1
434 #endif
435 #define __NR_sys_ppoll __NR_ppoll
436 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
437 struct timespec *, timeout, const sigset_t *, sigmask,
438 size_t, sigsetsize)
439 #endif
440
441 #if defined(TARGET_NR_pselect6)
442 #ifndef __NR_pselect6
443 # define __NR_pselect6 -1
444 #endif
445 #define __NR_sys_pselect6 __NR_pselect6
446 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
447 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
448 #endif
449
450 #if defined(TARGET_NR_prlimit64)
451 #ifndef __NR_prlimit64
452 # define __NR_prlimit64 -1
453 #endif
454 #define __NR_sys_prlimit64 __NR_prlimit64
455 /* The glibc rlimit structure may not be the one used by the underlying syscall */
456 struct host_rlimit64 {
457 uint64_t rlim_cur;
458 uint64_t rlim_max;
459 };
460 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
461 const struct host_rlimit64 *, new_limit,
462 struct host_rlimit64 *, old_limit)
463 #endif
464
465
466 #if defined(TARGET_NR_timer_create)
467 /* Maximum of 32 active POSIX timers allowed at any one time. */
468 static timer_t g_posix_timers[32] = { 0, } ;
469
470 static inline int next_free_host_timer(void)
471 {
472 int k ;
473 /* FIXME: Does finding the next free slot require a lock? */
474 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
475 if (g_posix_timers[k] == 0) {
476 g_posix_timers[k] = (timer_t) 1;
477 return k;
478 }
479 }
480 return -1;
481 }
482 #endif
483
484 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
485 #ifdef TARGET_ARM
486 static inline int regpairs_aligned(void *cpu_env) {
487 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
488 }
489 #elif defined(TARGET_MIPS)
490 static inline int regpairs_aligned(void *cpu_env) { return 1; }
491 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
492 /* SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
493 * of registers, which translates to the same rule as ARM/MIPS because we start
494 * with r3 as arg1 */
495 static inline int regpairs_aligned(void *cpu_env) { return 1; }
496 #else
497 static inline int regpairs_aligned(void *cpu_env) { return 0; }
498 #endif
499
500 #define ERRNO_TABLE_SIZE 1200
501
502 /* target_to_host_errno_table[] is initialized from
503 * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
505 };
506
507 /*
508 * This list is the union of errno values overridden in asm-<arch>/errno.h
509 * minus the errnos that are not actually generic to all archs.
510 */
511 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
512 [EAGAIN] = TARGET_EAGAIN,
513 [EIDRM] = TARGET_EIDRM,
514 [ECHRNG] = TARGET_ECHRNG,
515 [EL2NSYNC] = TARGET_EL2NSYNC,
516 [EL3HLT] = TARGET_EL3HLT,
517 [EL3RST] = TARGET_EL3RST,
518 [ELNRNG] = TARGET_ELNRNG,
519 [EUNATCH] = TARGET_EUNATCH,
520 [ENOCSI] = TARGET_ENOCSI,
521 [EL2HLT] = TARGET_EL2HLT,
522 [EDEADLK] = TARGET_EDEADLK,
523 [ENOLCK] = TARGET_ENOLCK,
524 [EBADE] = TARGET_EBADE,
525 [EBADR] = TARGET_EBADR,
526 [EXFULL] = TARGET_EXFULL,
527 [ENOANO] = TARGET_ENOANO,
528 [EBADRQC] = TARGET_EBADRQC,
529 [EBADSLT] = TARGET_EBADSLT,
530 [EBFONT] = TARGET_EBFONT,
531 [ENOSTR] = TARGET_ENOSTR,
532 [ENODATA] = TARGET_ENODATA,
533 [ETIME] = TARGET_ETIME,
534 [ENOSR] = TARGET_ENOSR,
535 [ENONET] = TARGET_ENONET,
536 [ENOPKG] = TARGET_ENOPKG,
537 [EREMOTE] = TARGET_EREMOTE,
538 [ENOLINK] = TARGET_ENOLINK,
539 [EADV] = TARGET_EADV,
540 [ESRMNT] = TARGET_ESRMNT,
541 [ECOMM] = TARGET_ECOMM,
542 [EPROTO] = TARGET_EPROTO,
543 [EDOTDOT] = TARGET_EDOTDOT,
544 [EMULTIHOP] = TARGET_EMULTIHOP,
545 [EBADMSG] = TARGET_EBADMSG,
546 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
547 [EOVERFLOW] = TARGET_EOVERFLOW,
548 [ENOTUNIQ] = TARGET_ENOTUNIQ,
549 [EBADFD] = TARGET_EBADFD,
550 [EREMCHG] = TARGET_EREMCHG,
551 [ELIBACC] = TARGET_ELIBACC,
552 [ELIBBAD] = TARGET_ELIBBAD,
553 [ELIBSCN] = TARGET_ELIBSCN,
554 [ELIBMAX] = TARGET_ELIBMAX,
555 [ELIBEXEC] = TARGET_ELIBEXEC,
556 [EILSEQ] = TARGET_EILSEQ,
557 [ENOSYS] = TARGET_ENOSYS,
558 [ELOOP] = TARGET_ELOOP,
559 [ERESTART] = TARGET_ERESTART,
560 [ESTRPIPE] = TARGET_ESTRPIPE,
561 [ENOTEMPTY] = TARGET_ENOTEMPTY,
562 [EUSERS] = TARGET_EUSERS,
563 [ENOTSOCK] = TARGET_ENOTSOCK,
564 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
565 [EMSGSIZE] = TARGET_EMSGSIZE,
566 [EPROTOTYPE] = TARGET_EPROTOTYPE,
567 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
568 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
569 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
570 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
571 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
572 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
573 [EADDRINUSE] = TARGET_EADDRINUSE,
574 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
575 [ENETDOWN] = TARGET_ENETDOWN,
576 [ENETUNREACH] = TARGET_ENETUNREACH,
577 [ENETRESET] = TARGET_ENETRESET,
578 [ECONNABORTED] = TARGET_ECONNABORTED,
579 [ECONNRESET] = TARGET_ECONNRESET,
580 [ENOBUFS] = TARGET_ENOBUFS,
581 [EISCONN] = TARGET_EISCONN,
582 [ENOTCONN] = TARGET_ENOTCONN,
583 [EUCLEAN] = TARGET_EUCLEAN,
584 [ENOTNAM] = TARGET_ENOTNAM,
585 [ENAVAIL] = TARGET_ENAVAIL,
586 [EISNAM] = TARGET_EISNAM,
587 [EREMOTEIO] = TARGET_EREMOTEIO,
588 [ESHUTDOWN] = TARGET_ESHUTDOWN,
589 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
590 [ETIMEDOUT] = TARGET_ETIMEDOUT,
591 [ECONNREFUSED] = TARGET_ECONNREFUSED,
592 [EHOSTDOWN] = TARGET_EHOSTDOWN,
593 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
594 [EALREADY] = TARGET_EALREADY,
595 [EINPROGRESS] = TARGET_EINPROGRESS,
596 [ESTALE] = TARGET_ESTALE,
597 [ECANCELED] = TARGET_ECANCELED,
598 [ENOMEDIUM] = TARGET_ENOMEDIUM,
599 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
600 #ifdef ENOKEY
601 [ENOKEY] = TARGET_ENOKEY,
602 #endif
603 #ifdef EKEYEXPIRED
604 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
605 #endif
606 #ifdef EKEYREVOKED
607 [EKEYREVOKED] = TARGET_EKEYREVOKED,
608 #endif
609 #ifdef EKEYREJECTED
610 [EKEYREJECTED] = TARGET_EKEYREJECTED,
611 #endif
612 #ifdef EOWNERDEAD
613 [EOWNERDEAD] = TARGET_EOWNERDEAD,
614 #endif
615 #ifdef ENOTRECOVERABLE
616 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
617 #endif
618 };
619
620 static inline int host_to_target_errno(int err)
621 {
622 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
623 host_to_target_errno_table[err]) {
624 return host_to_target_errno_table[err];
625 }
626 return err;
627 }
628
629 static inline int target_to_host_errno(int err)
630 {
631 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
632 target_to_host_errno_table[err]) {
633 return target_to_host_errno_table[err];
634 }
635 return err;
636 }
637
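/* Convert a raw host syscall result to the convention used throughout
 * this file: -1 becomes the negated, target-translated errno, anything
 * else is passed through unchanged. */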
638 static inline abi_long get_errno(abi_long ret)
639 {
640 if (ret == -1)
641 return -host_to_target_errno(errno);
642 else
643 return ret;
644 }
645
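/* Like the kernel, treat return values in the top 4096 of the address
 * space as encoded (negative) errno values. */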
646 static inline int is_error(abi_long ret)
647 {
648 return (abi_ulong)ret >= (abi_ulong)(-4096);
649 }
650
651 char *target_strerror(int err)
652 {
653 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
654 return NULL;
655 }
656 return strerror(target_to_host_errno(err));
657 }
658
659 static inline int host_to_target_sock_type(int host_type)
660 {
661 int target_type;
662
663 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
664 case SOCK_DGRAM:
665 target_type = TARGET_SOCK_DGRAM;
666 break;
667 case SOCK_STREAM:
668 target_type = TARGET_SOCK_STREAM;
669 break;
670 default:
671 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
672 break;
673 }
674
675 #if defined(SOCK_CLOEXEC)
676 if (host_type & SOCK_CLOEXEC) {
677 target_type |= TARGET_SOCK_CLOEXEC;
678 }
679 #endif
680
681 #if defined(SOCK_NONBLOCK)
682 if (host_type & SOCK_NONBLOCK) {
683 target_type |= TARGET_SOCK_NONBLOCK;
684 }
685 #endif
686
687 return target_type;
688 }
689
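/*
 * Emulated program break state: target_original_brk is the break at load
 * time, target_brk is the current break requested by the guest, and
 * brk_page is the host-page-aligned top of the memory actually mapped for
 * the heap.
 */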
690 static abi_ulong target_brk;
691 static abi_ulong target_original_brk;
692 static abi_ulong brk_page;
693
694 void target_set_brk(abi_ulong new_brk)
695 {
696 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
697 brk_page = HOST_PAGE_ALIGN(target_brk);
698 }
699
700 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
701 #define DEBUGF_BRK(message, args...)
702
703 /* do_brk() must return target values and target errnos. */
704 abi_long do_brk(abi_ulong new_brk)
705 {
706 abi_long mapped_addr;
707 int new_alloc_size;
708
709 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
710
711 if (!new_brk) {
712 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
713 return target_brk;
714 }
715 if (new_brk < target_original_brk) {
716 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
717 target_brk);
718 return target_brk;
719 }
720
721 /* If the new brk is less than the highest page reserved to the
722 * target heap allocation, set it and we're almost done... */
723 if (new_brk <= brk_page) {
724 /* Heap contents are initialized to zero, as for anonymous
725 * mapped pages. */
726 if (new_brk > target_brk) {
727 memset(g2h(target_brk), 0, new_brk - target_brk);
728 }
729 target_brk = new_brk;
730 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
731 return target_brk;
732 }
733
734 /* We need to allocate more memory after the brk... Note that
735 * we don't use MAP_FIXED because that will map over the top of
736 * any existing mapping (like the one with the host libc or qemu
737 * itself); instead we treat "mapped but at wrong address" as
738 * a failure and unmap again.
739 */
740 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
741 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
742 PROT_READ|PROT_WRITE,
743 MAP_ANON|MAP_PRIVATE, 0, 0));
744
745 if (mapped_addr == brk_page) {
746 /* Heap contents are initialized to zero, as for anonymous
747 * mapped pages. Technically the new pages are already
748 * initialized to zero since they *are* anonymous mapped
749 * pages, however we have to take care with the contents that
750 * come from the remaining part of the previous page: it may
751 * contain garbage data from earlier heap usage (the heap grew
752 * and then shrank). */
753 memset(g2h(target_brk), 0, brk_page - target_brk);
754
755 target_brk = new_brk;
756 brk_page = HOST_PAGE_ALIGN(target_brk);
757 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
758 target_brk);
759 return target_brk;
760 } else if (mapped_addr != -1) {
761 /* Mapped but at wrong address, meaning there wasn't actually
762 * enough space for this brk.
763 */
764 target_munmap(mapped_addr, new_alloc_size);
765 mapped_addr = -1;
766 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
767 }
768 else {
769 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
770 }
771
772 #if defined(TARGET_ALPHA)
773 /* We (partially) emulate OSF/1 on Alpha, which requires we
774 return a proper errno, not an unchanged brk value. */
775 return -TARGET_ENOMEM;
776 #endif
777 /* For everything else, return the previous break. */
778 return target_brk;
779 }
780
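/*
 * Convert fd_set bitmaps between guest and host.  The guest representation
 * is an array of abi_ulong words in target byte order, so the sets are
 * rebuilt bit by bit to cope with differing word sizes and endianness.
 */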
781 static inline abi_long copy_from_user_fdset(fd_set *fds,
782 abi_ulong target_fds_addr,
783 int n)
784 {
785 int i, nw, j, k;
786 abi_ulong b, *target_fds;
787
788 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
789 if (!(target_fds = lock_user(VERIFY_READ,
790 target_fds_addr,
791 sizeof(abi_ulong) * nw,
792 1)))
793 return -TARGET_EFAULT;
794
795 FD_ZERO(fds);
796 k = 0;
797 for (i = 0; i < nw; i++) {
798 /* grab the abi_ulong */
799 __get_user(b, &target_fds[i]);
800 for (j = 0; j < TARGET_ABI_BITS; j++) {
801 /* check the bit inside the abi_ulong */
802 if ((b >> j) & 1)
803 FD_SET(k, fds);
804 k++;
805 }
806 }
807
808 unlock_user(target_fds, target_fds_addr, 0);
809
810 return 0;
811 }
812
813 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
814 abi_ulong target_fds_addr,
815 int n)
816 {
817 if (target_fds_addr) {
818 if (copy_from_user_fdset(fds, target_fds_addr, n))
819 return -TARGET_EFAULT;
820 *fds_ptr = fds;
821 } else {
822 *fds_ptr = NULL;
823 }
824 return 0;
825 }
826
827 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
828 const fd_set *fds,
829 int n)
830 {
831 int i, nw, j, k;
832 abi_long v;
833 abi_ulong *target_fds;
834
835 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
836 if (!(target_fds = lock_user(VERIFY_WRITE,
837 target_fds_addr,
838 sizeof(abi_ulong) * nw,
839 0)))
840 return -TARGET_EFAULT;
841
842 k = 0;
843 for (i = 0; i < nw; i++) {
844 v = 0;
845 for (j = 0; j < TARGET_ABI_BITS; j++) {
846 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
847 k++;
848 }
849 __put_user(v, &target_fds[i]);
850 }
851
852 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
853
854 return 0;
855 }
856
857 #if defined(__alpha__)
858 #define HOST_HZ 1024
859 #else
860 #define HOST_HZ 100
861 #endif
862
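/* Rescale a clock_t tick count from the host's HZ to the target's. */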
863 static inline abi_long host_to_target_clock_t(long ticks)
864 {
865 #if HOST_HZ == TARGET_HZ
866 return ticks;
867 #else
868 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
869 #endif
870 }
871
872 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
873 const struct rusage *rusage)
874 {
875 struct target_rusage *target_rusage;
876
877 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
878 return -TARGET_EFAULT;
879 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
880 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
881 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
882 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
883 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
884 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
885 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
886 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
887 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
888 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
889 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
890 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
891 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
892 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
893 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
894 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
895 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
896 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
897 unlock_user_struct(target_rusage, target_addr, 1);
898
899 return 0;
900 }
901
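/*
 * rlim_t conversion: RLIM_INFINITY differs between ABIs, and a 32-bit
 * guest cannot represent every 64-bit host limit, so any value that does
 * not fit is clamped to the appropriate infinity constant.
 */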
902 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
903 {
904 abi_ulong target_rlim_swap;
905 rlim_t result;
906
907 target_rlim_swap = tswapal(target_rlim);
908 if (target_rlim_swap == TARGET_RLIM_INFINITY)
909 return RLIM_INFINITY;
910
911 result = target_rlim_swap;
912 if (target_rlim_swap != (rlim_t)result)
913 return RLIM_INFINITY;
914
915 return result;
916 }
917
918 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
919 {
920 abi_ulong target_rlim_swap;
921 abi_ulong result;
922
923 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
924 target_rlim_swap = TARGET_RLIM_INFINITY;
925 else
926 target_rlim_swap = rlim;
927 result = tswapal(target_rlim_swap);
928
929 return result;
930 }
931
932 static inline int target_to_host_resource(int code)
933 {
934 switch (code) {
935 case TARGET_RLIMIT_AS:
936 return RLIMIT_AS;
937 case TARGET_RLIMIT_CORE:
938 return RLIMIT_CORE;
939 case TARGET_RLIMIT_CPU:
940 return RLIMIT_CPU;
941 case TARGET_RLIMIT_DATA:
942 return RLIMIT_DATA;
943 case TARGET_RLIMIT_FSIZE:
944 return RLIMIT_FSIZE;
945 case TARGET_RLIMIT_LOCKS:
946 return RLIMIT_LOCKS;
947 case TARGET_RLIMIT_MEMLOCK:
948 return RLIMIT_MEMLOCK;
949 case TARGET_RLIMIT_MSGQUEUE:
950 return RLIMIT_MSGQUEUE;
951 case TARGET_RLIMIT_NICE:
952 return RLIMIT_NICE;
953 case TARGET_RLIMIT_NOFILE:
954 return RLIMIT_NOFILE;
955 case TARGET_RLIMIT_NPROC:
956 return RLIMIT_NPROC;
957 case TARGET_RLIMIT_RSS:
958 return RLIMIT_RSS;
959 case TARGET_RLIMIT_RTPRIO:
960 return RLIMIT_RTPRIO;
961 case TARGET_RLIMIT_SIGPENDING:
962 return RLIMIT_SIGPENDING;
963 case TARGET_RLIMIT_STACK:
964 return RLIMIT_STACK;
965 default:
966 return code;
967 }
968 }
969
970 static inline abi_long copy_from_user_timeval(struct timeval *tv,
971 abi_ulong target_tv_addr)
972 {
973 struct target_timeval *target_tv;
974
975 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
976 return -TARGET_EFAULT;
977
978 __get_user(tv->tv_sec, &target_tv->tv_sec);
979 __get_user(tv->tv_usec, &target_tv->tv_usec);
980
981 unlock_user_struct(target_tv, target_tv_addr, 0);
982
983 return 0;
984 }
985
986 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
987 const struct timeval *tv)
988 {
989 struct target_timeval *target_tv;
990
991 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
992 return -TARGET_EFAULT;
993
994 __put_user(tv->tv_sec, &target_tv->tv_sec);
995 __put_user(tv->tv_usec, &target_tv->tv_usec);
996
997 unlock_user_struct(target_tv, target_tv_addr, 1);
998
999 return 0;
1000 }
1001
1002 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1003 abi_ulong target_tz_addr)
1004 {
1005 struct target_timezone *target_tz;
1006
1007 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1008 return -TARGET_EFAULT;
1009 }
1010
1011 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1012 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1013
1014 unlock_user_struct(target_tz, target_tz_addr, 0);
1015
1016 return 0;
1017 }
1018
1019 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1020 #include <mqueue.h>
1021
1022 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1023 abi_ulong target_mq_attr_addr)
1024 {
1025 struct target_mq_attr *target_mq_attr;
1026
1027 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1028 target_mq_attr_addr, 1))
1029 return -TARGET_EFAULT;
1030
1031 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1032 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1033 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1034 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1035
1036 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1037
1038 return 0;
1039 }
1040
1041 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1042 const struct mq_attr *attr)
1043 {
1044 struct target_mq_attr *target_mq_attr;
1045
1046 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1047 target_mq_attr_addr, 0))
1048 return -TARGET_EFAULT;
1049
1050 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1051 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1052 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1053 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1054
1055 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1056
1057 return 0;
1058 }
1059 #endif
1060
1061 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1062 /* do_select() must return target values and target errnos. */
1063 static abi_long do_select(int n,
1064 abi_ulong rfd_addr, abi_ulong wfd_addr,
1065 abi_ulong efd_addr, abi_ulong target_tv_addr)
1066 {
1067 fd_set rfds, wfds, efds;
1068 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1069 struct timeval tv, *tv_ptr;
1070 abi_long ret;
1071
1072 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1073 if (ret) {
1074 return ret;
1075 }
1076 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1077 if (ret) {
1078 return ret;
1079 }
1080 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1081 if (ret) {
1082 return ret;
1083 }
1084
1085 if (target_tv_addr) {
1086 if (copy_from_user_timeval(&tv, target_tv_addr))
1087 return -TARGET_EFAULT;
1088 tv_ptr = &tv;
1089 } else {
1090 tv_ptr = NULL;
1091 }
1092
1093 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1094
1095 if (!is_error(ret)) {
1096 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1097 return -TARGET_EFAULT;
1098 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1099 return -TARGET_EFAULT;
1100 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1101 return -TARGET_EFAULT;
1102
1103 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1104 return -TARGET_EFAULT;
1105 }
1106
1107 return ret;
1108 }
1109 #endif
1110
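/* pipe2() is only available when configure found it on the host;
 * otherwise report ENOSYS to the caller. */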
1111 static abi_long do_pipe2(int host_pipe[], int flags)
1112 {
1113 #ifdef CONFIG_PIPE2
1114 return pipe2(host_pipe, flags);
1115 #else
1116 return -ENOSYS;
1117 #endif
1118 }
1119
1120 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1121 int flags, int is_pipe2)
1122 {
1123 int host_pipe[2];
1124 abi_long ret;
1125 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1126
1127 if (is_error(ret))
1128 return get_errno(ret);
1129
1130 /* Several targets have special calling conventions for the original
1131 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1132 if (!is_pipe2) {
1133 #if defined(TARGET_ALPHA)
1134 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1135 return host_pipe[0];
1136 #elif defined(TARGET_MIPS)
1137 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1138 return host_pipe[0];
1139 #elif defined(TARGET_SH4)
1140 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1141 return host_pipe[0];
1142 #elif defined(TARGET_SPARC)
1143 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1144 return host_pipe[0];
1145 #endif
1146 }
1147
1148 if (put_user_s32(host_pipe[0], pipedes)
1149 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1150 return -TARGET_EFAULT;
1151 return get_errno(ret);
1152 }
1153
1154 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1155 abi_ulong target_addr,
1156 socklen_t len)
1157 {
1158 struct target_ip_mreqn *target_smreqn;
1159
1160 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1161 if (!target_smreqn)
1162 return -TARGET_EFAULT;
1163 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1164 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1165 if (len == sizeof(struct target_ip_mreqn))
1166 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1167 unlock_user(target_smreqn, target_addr, 0);
1168
1169 return 0;
1170 }
1171
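/*
 * Copy a guest sockaddr into host form: byte-swap the family field, trim
 * or fix up AF_UNIX sun_path lengths, and swap the AF_PACKET interface
 * index and hardware type.
 */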
1172 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1173 abi_ulong target_addr,
1174 socklen_t len)
1175 {
1176 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1177 sa_family_t sa_family;
1178 struct target_sockaddr *target_saddr;
1179
1180 if (fd_trans_target_to_host_addr(fd)) {
1181 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1182 }
1183
1184 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1185 if (!target_saddr)
1186 return -TARGET_EFAULT;
1187
1188 sa_family = tswap16(target_saddr->sa_family);
1189
1190 /* Oops. The caller might send an incomplete sun_path; sun_path
1191 * must be terminated by \0 (see the manual page), but
1192 * unfortunately it is quite common to specify sockaddr_un
1193 * length as "strlen(x->sun_path)" while it should be
1194 * "strlen(...) + 1". We'll fix that here if needed.
1195 * Linux kernel has a similar feature.
1196 */
1197
1198 if (sa_family == AF_UNIX) {
1199 if (len < unix_maxlen && len > 0) {
1200 char *cp = (char*)target_saddr;
1201
1202 if ( cp[len-1] && !cp[len] )
1203 len++;
1204 }
1205 if (len > unix_maxlen)
1206 len = unix_maxlen;
1207 }
1208
1209 memcpy(addr, target_saddr, len);
1210 addr->sa_family = sa_family;
1211 if (sa_family == AF_PACKET) {
1212 struct target_sockaddr_ll *lladdr;
1213
1214 lladdr = (struct target_sockaddr_ll *)addr;
1215 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1216 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1217 }
1218 unlock_user(target_saddr, target_addr, 0);
1219
1220 return 0;
1221 }
1222
1223 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1224 struct sockaddr *addr,
1225 socklen_t len)
1226 {
1227 struct target_sockaddr *target_saddr;
1228
1229 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1230 if (!target_saddr)
1231 return -TARGET_EFAULT;
1232 memcpy(target_saddr, addr, len);
1233 target_saddr->sa_family = tswap16(addr->sa_family);
1234 unlock_user(target_saddr, target_addr, len);
1235
1236 return 0;
1237 }
1238
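/*
 * Convert guest control messages (sendmsg ancillary data) into the host
 * cmsghdr layout.  SCM_RIGHTS fd arrays and SCM_CREDENTIALS are converted
 * field by field; unknown types are copied through verbatim with a log
 * message.
 */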
1239 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1240 struct target_msghdr *target_msgh)
1241 {
1242 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1243 abi_long msg_controllen;
1244 abi_ulong target_cmsg_addr;
1245 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1246 socklen_t space = 0;
1247
1248 msg_controllen = tswapal(target_msgh->msg_controllen);
1249 if (msg_controllen < sizeof (struct target_cmsghdr))
1250 goto the_end;
1251 target_cmsg_addr = tswapal(target_msgh->msg_control);
1252 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1253 target_cmsg_start = target_cmsg;
1254 if (!target_cmsg)
1255 return -TARGET_EFAULT;
1256
1257 while (cmsg && target_cmsg) {
1258 void *data = CMSG_DATA(cmsg);
1259 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1260
1261 int len = tswapal(target_cmsg->cmsg_len)
1262 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1263
1264 space += CMSG_SPACE(len);
1265 if (space > msgh->msg_controllen) {
1266 space -= CMSG_SPACE(len);
1267 /* This is a QEMU bug, since we allocated the payload
1268 * area ourselves (unlike overflow in host-to-target
1269 * conversion, which is just the guest giving us a buffer
1270 * that's too small). It can't happen for the payload types
1271 * we currently support; if it becomes an issue in future
1272 * we would need to improve our allocation strategy to
1273 * something more intelligent than "twice the size of the
1274 * target buffer we're reading from".
1275 */
1276 gemu_log("Host cmsg overflow\n");
1277 break;
1278 }
1279
1280 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1281 cmsg->cmsg_level = SOL_SOCKET;
1282 } else {
1283 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1284 }
1285 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1286 cmsg->cmsg_len = CMSG_LEN(len);
1287
1288 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1289 int *fd = (int *)data;
1290 int *target_fd = (int *)target_data;
1291 int i, numfds = len / sizeof(int);
1292
1293 for (i = 0; i < numfds; i++) {
1294 __get_user(fd[i], target_fd + i);
1295 }
1296 } else if (cmsg->cmsg_level == SOL_SOCKET
1297 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1298 struct ucred *cred = (struct ucred *)data;
1299 struct target_ucred *target_cred =
1300 (struct target_ucred *)target_data;
1301
1302 __get_user(cred->pid, &target_cred->pid);
1303 __get_user(cred->uid, &target_cred->uid);
1304 __get_user(cred->gid, &target_cred->gid);
1305 } else {
1306 gemu_log("Unsupported ancillary data: %d/%d\n",
1307 cmsg->cmsg_level, cmsg->cmsg_type);
1308 memcpy(data, target_data, len);
1309 }
1310
1311 cmsg = CMSG_NXTHDR(msgh, cmsg);
1312 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1313 target_cmsg_start);
1314 }
1315 unlock_user(target_cmsg, target_cmsg_addr, 0);
1316 the_end:
1317 msgh->msg_controllen = space;
1318 return 0;
1319 }
1320
1321 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1322 struct msghdr *msgh)
1323 {
1324 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1325 abi_long msg_controllen;
1326 abi_ulong target_cmsg_addr;
1327 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1328 socklen_t space = 0;
1329
1330 msg_controllen = tswapal(target_msgh->msg_controllen);
1331 if (msg_controllen < sizeof (struct target_cmsghdr))
1332 goto the_end;
1333 target_cmsg_addr = tswapal(target_msgh->msg_control);
1334 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1335 target_cmsg_start = target_cmsg;
1336 if (!target_cmsg)
1337 return -TARGET_EFAULT;
1338
1339 while (cmsg && target_cmsg) {
1340 void *data = CMSG_DATA(cmsg);
1341 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1342
1343 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1344 int tgt_len, tgt_space;
1345
1346 /* We never copy a half-header but may copy half-data;
1347 * this is Linux's behaviour in put_cmsg(). Note that
1348 * truncation here is a guest problem (which we report
1349 * to the guest via the CTRUNC bit), unlike truncation
1350 * in target_to_host_cmsg, which is a QEMU bug.
1351 */
1352 if (msg_controllen < sizeof(struct cmsghdr)) {
1353 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1354 break;
1355 }
1356
1357 if (cmsg->cmsg_level == SOL_SOCKET) {
1358 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1359 } else {
1360 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1361 }
1362 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1363
1364 tgt_len = TARGET_CMSG_LEN(len);
1365
1366 /* Payload types which need a different size of payload on
1367 * the target must adjust tgt_len here.
1368 */
1369 switch (cmsg->cmsg_level) {
1370 case SOL_SOCKET:
1371 switch (cmsg->cmsg_type) {
1372 case SO_TIMESTAMP:
1373 tgt_len = sizeof(struct target_timeval);
1374 break;
1375 default:
1376 break;
1377 }
1378 default:
1379 break;
1380 }
1381
1382 if (msg_controllen < tgt_len) {
1383 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1384 tgt_len = msg_controllen;
1385 }
1386
1387 /* We must now copy-and-convert len bytes of payload
1388 * into tgt_len bytes of destination space. Bear in mind
1389 * that in both source and destination we may be dealing
1390 * with a truncated value!
1391 */
1392 switch (cmsg->cmsg_level) {
1393 case SOL_SOCKET:
1394 switch (cmsg->cmsg_type) {
1395 case SCM_RIGHTS:
1396 {
1397 int *fd = (int *)data;
1398 int *target_fd = (int *)target_data;
1399 int i, numfds = tgt_len / sizeof(int);
1400
1401 for (i = 0; i < numfds; i++) {
1402 __put_user(fd[i], target_fd + i);
1403 }
1404 break;
1405 }
1406 case SO_TIMESTAMP:
1407 {
1408 struct timeval *tv = (struct timeval *)data;
1409 struct target_timeval *target_tv =
1410 (struct target_timeval *)target_data;
1411
1412 if (len != sizeof(struct timeval) ||
1413 tgt_len != sizeof(struct target_timeval)) {
1414 goto unimplemented;
1415 }
1416
1417 /* copy struct timeval to target */
1418 __put_user(tv->tv_sec, &target_tv->tv_sec);
1419 __put_user(tv->tv_usec, &target_tv->tv_usec);
1420 break;
1421 }
1422 case SCM_CREDENTIALS:
1423 {
1424 struct ucred *cred = (struct ucred *)data;
1425 struct target_ucred *target_cred =
1426 (struct target_ucred *)target_data;
1427
1428 __put_user(cred->pid, &target_cred->pid);
1429 __put_user(cred->uid, &target_cred->uid);
1430 __put_user(cred->gid, &target_cred->gid);
1431 break;
1432 }
1433 default:
1434 goto unimplemented;
1435 }
1436 break;
1437
1438 default:
1439 unimplemented:
1440 gemu_log("Unsupported ancillary data: %d/%d\n",
1441 cmsg->cmsg_level, cmsg->cmsg_type);
1442 memcpy(target_data, data, MIN(len, tgt_len));
1443 if (tgt_len > len) {
1444 memset(target_data + len, 0, tgt_len - len);
1445 }
1446 }
1447
1448 target_cmsg->cmsg_len = tswapal(tgt_len);
1449 tgt_space = TARGET_CMSG_SPACE(len);
1450 if (msg_controllen < tgt_space) {
1451 tgt_space = msg_controllen;
1452 }
1453 msg_controllen -= tgt_space;
1454 space += tgt_space;
1455 cmsg = CMSG_NXTHDR(msgh, cmsg);
1456 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1457 target_cmsg_start);
1458 }
1459 unlock_user(target_cmsg, target_cmsg_addr, space);
1460 the_end:
1461 target_msgh->msg_controllen = tswapal(space);
1462 return 0;
1463 }
1464
1465 /* do_setsockopt() Must return target values and target errnos. */
1466 static abi_long do_setsockopt(int sockfd, int level, int optname,
1467 abi_ulong optval_addr, socklen_t optlen)
1468 {
1469 abi_long ret;
1470 int val;
1471 struct ip_mreqn *ip_mreq;
1472 struct ip_mreq_source *ip_mreq_source;
1473
1474 switch(level) {
1475 case SOL_TCP:
1476 /* TCP options all take an 'int' value. */
1477 if (optlen < sizeof(uint32_t))
1478 return -TARGET_EINVAL;
1479
1480 if (get_user_u32(val, optval_addr))
1481 return -TARGET_EFAULT;
1482 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1483 break;
1484 case SOL_IP:
1485 switch(optname) {
1486 case IP_TOS:
1487 case IP_TTL:
1488 case IP_HDRINCL:
1489 case IP_ROUTER_ALERT:
1490 case IP_RECVOPTS:
1491 case IP_RETOPTS:
1492 case IP_PKTINFO:
1493 case IP_MTU_DISCOVER:
1494 case IP_RECVERR:
1495 case IP_RECVTOS:
1496 #ifdef IP_FREEBIND
1497 case IP_FREEBIND:
1498 #endif
1499 case IP_MULTICAST_TTL:
1500 case IP_MULTICAST_LOOP:
1501 val = 0;
1502 if (optlen >= sizeof(uint32_t)) {
1503 if (get_user_u32(val, optval_addr))
1504 return -TARGET_EFAULT;
1505 } else if (optlen >= 1) {
1506 if (get_user_u8(val, optval_addr))
1507 return -TARGET_EFAULT;
1508 }
1509 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1510 break;
1511 case IP_ADD_MEMBERSHIP:
1512 case IP_DROP_MEMBERSHIP:
1513 if (optlen < sizeof (struct target_ip_mreq) ||
1514 optlen > sizeof (struct target_ip_mreqn))
1515 return -TARGET_EINVAL;
1516
1517 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1518 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1519 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1520 break;
1521
1522 case IP_BLOCK_SOURCE:
1523 case IP_UNBLOCK_SOURCE:
1524 case IP_ADD_SOURCE_MEMBERSHIP:
1525 case IP_DROP_SOURCE_MEMBERSHIP:
1526 if (optlen != sizeof (struct target_ip_mreq_source))
1527 return -TARGET_EINVAL;
1528
1529 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1530 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1531 unlock_user (ip_mreq_source, optval_addr, 0);
1532 break;
1533
1534 default:
1535 goto unimplemented;
1536 }
1537 break;
1538 case SOL_IPV6:
1539 switch (optname) {
1540 case IPV6_MTU_DISCOVER:
1541 case IPV6_MTU:
1542 case IPV6_V6ONLY:
1543 case IPV6_RECVPKTINFO:
1544 val = 0;
1545 if (optlen < sizeof(uint32_t)) {
1546 return -TARGET_EINVAL;
1547 }
1548 if (get_user_u32(val, optval_addr)) {
1549 return -TARGET_EFAULT;
1550 }
1551 ret = get_errno(setsockopt(sockfd, level, optname,
1552 &val, sizeof(val)));
1553 break;
1554 default:
1555 goto unimplemented;
1556 }
1557 break;
1558 case SOL_RAW:
1559 switch (optname) {
1560 case ICMP_FILTER:
1561 /* struct icmp_filter takes a u32 value */
1562 if (optlen < sizeof(uint32_t)) {
1563 return -TARGET_EINVAL;
1564 }
1565
1566 if (get_user_u32(val, optval_addr)) {
1567 return -TARGET_EFAULT;
1568 }
1569 ret = get_errno(setsockopt(sockfd, level, optname,
1570 &val, sizeof(val)));
1571 break;
1572
1573 default:
1574 goto unimplemented;
1575 }
1576 break;
1577 case TARGET_SOL_SOCKET:
1578 switch (optname) {
1579 case TARGET_SO_RCVTIMEO:
1580 {
1581 struct timeval tv;
1582
1583 optname = SO_RCVTIMEO;
1584
1585 set_timeout:
1586 if (optlen != sizeof(struct target_timeval)) {
1587 return -TARGET_EINVAL;
1588 }
1589
1590 if (copy_from_user_timeval(&tv, optval_addr)) {
1591 return -TARGET_EFAULT;
1592 }
1593
1594 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1595 &tv, sizeof(tv)));
1596 return ret;
1597 }
1598 case TARGET_SO_SNDTIMEO:
1599 optname = SO_SNDTIMEO;
1600 goto set_timeout;
1601 case TARGET_SO_ATTACH_FILTER:
1602 {
1603 struct target_sock_fprog *tfprog;
1604 struct target_sock_filter *tfilter;
1605 struct sock_fprog fprog;
1606 struct sock_filter *filter;
1607 int i;
1608
1609 if (optlen != sizeof(*tfprog)) {
1610 return -TARGET_EINVAL;
1611 }
1612 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1613 return -TARGET_EFAULT;
1614 }
1615 if (!lock_user_struct(VERIFY_READ, tfilter,
1616 tswapal(tfprog->filter), 0)) {
1617 unlock_user_struct(tfprog, optval_addr, 1);
1618 return -TARGET_EFAULT;
1619 }
1620
1621 fprog.len = tswap16(tfprog->len);
1622 filter = g_try_new(struct sock_filter, fprog.len);
1623 if (filter == NULL) {
1624 unlock_user_struct(tfilter, tfprog->filter, 1);
1625 unlock_user_struct(tfprog, optval_addr, 1);
1626 return -TARGET_ENOMEM;
1627 }
1628 for (i = 0; i < fprog.len; i++) {
1629 filter[i].code = tswap16(tfilter[i].code);
1630 filter[i].jt = tfilter[i].jt;
1631 filter[i].jf = tfilter[i].jf;
1632 filter[i].k = tswap32(tfilter[i].k);
1633 }
1634 fprog.filter = filter;
1635
1636 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1637 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1638 g_free(filter);
1639
1640 unlock_user_struct(tfilter, tfprog->filter, 1);
1641 unlock_user_struct(tfprog, optval_addr, 1);
1642 return ret;
1643 }
1644 case TARGET_SO_BINDTODEVICE:
1645 {
1646 char *dev_ifname, *addr_ifname;
1647
1648 if (optlen > IFNAMSIZ - 1) {
1649 optlen = IFNAMSIZ - 1;
1650 }
1651 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1652 if (!dev_ifname) {
1653 return -TARGET_EFAULT;
1654 }
1655 optname = SO_BINDTODEVICE;
1656 addr_ifname = alloca(IFNAMSIZ);
1657 memcpy(addr_ifname, dev_ifname, optlen);
1658 addr_ifname[optlen] = 0;
1659 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1660 addr_ifname, optlen));
1661 unlock_user (dev_ifname, optval_addr, 0);
1662 return ret;
1663 }
1664 /* Options with 'int' argument. */
1665 case TARGET_SO_DEBUG:
1666 optname = SO_DEBUG;
1667 break;
1668 case TARGET_SO_REUSEADDR:
1669 optname = SO_REUSEADDR;
1670 break;
1671 case TARGET_SO_TYPE:
1672 optname = SO_TYPE;
1673 break;
1674 case TARGET_SO_ERROR:
1675 optname = SO_ERROR;
1676 break;
1677 case TARGET_SO_DONTROUTE:
1678 optname = SO_DONTROUTE;
1679 break;
1680 case TARGET_SO_BROADCAST:
1681 optname = SO_BROADCAST;
1682 break;
1683 case TARGET_SO_SNDBUF:
1684 optname = SO_SNDBUF;
1685 break;
1686 case TARGET_SO_SNDBUFFORCE:
1687 optname = SO_SNDBUFFORCE;
1688 break;
1689 case TARGET_SO_RCVBUF:
1690 optname = SO_RCVBUF;
1691 break;
1692 case TARGET_SO_RCVBUFFORCE:
1693 optname = SO_RCVBUFFORCE;
1694 break;
1695 case TARGET_SO_KEEPALIVE:
1696 optname = SO_KEEPALIVE;
1697 break;
1698 case TARGET_SO_OOBINLINE:
1699 optname = SO_OOBINLINE;
1700 break;
1701 case TARGET_SO_NO_CHECK:
1702 optname = SO_NO_CHECK;
1703 break;
1704 case TARGET_SO_PRIORITY:
1705 optname = SO_PRIORITY;
1706 break;
1707 #ifdef SO_BSDCOMPAT
1708 case TARGET_SO_BSDCOMPAT:
1709 optname = SO_BSDCOMPAT;
1710 break;
1711 #endif
1712 case TARGET_SO_PASSCRED:
1713 optname = SO_PASSCRED;
1714 break;
1715 case TARGET_SO_PASSSEC:
1716 optname = SO_PASSSEC;
1717 break;
1718 case TARGET_SO_TIMESTAMP:
1719 optname = SO_TIMESTAMP;
1720 break;
1721 case TARGET_SO_RCVLOWAT:
1722 optname = SO_RCVLOWAT;
1723 break;
1725 default:
1726 goto unimplemented;
1727 }
1728 if (optlen < sizeof(uint32_t))
1729 return -TARGET_EINVAL;
1730
1731 if (get_user_u32(val, optval_addr))
1732 return -TARGET_EFAULT;
1733 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1734 break;
1735 default:
1736 unimplemented:
1737 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1738 ret = -TARGET_ENOPROTOOPT;
1739 }
1740 return ret;
1741 }
1742
1743 /* do_getsockopt() Must return target values and target errnos. */
1744 static abi_long do_getsockopt(int sockfd, int level, int optname,
1745 abi_ulong optval_addr, abi_ulong optlen)
1746 {
1747 abi_long ret;
1748 int len, val;
1749 socklen_t lv;
1750
1751 switch(level) {
1752 case TARGET_SOL_SOCKET:
1753 level = SOL_SOCKET;
1754 switch (optname) {
1755 /* These don't just return a single integer */
1756 case TARGET_SO_LINGER:
1757 case TARGET_SO_RCVTIMEO:
1758 case TARGET_SO_SNDTIMEO:
1759 case TARGET_SO_PEERNAME:
1760 goto unimplemented;
1761 case TARGET_SO_PEERCRED: {
1762 struct ucred cr;
1763 socklen_t crlen;
1764 struct target_ucred *tcr;
1765
1766 if (get_user_u32(len, optlen)) {
1767 return -TARGET_EFAULT;
1768 }
1769 if (len < 0) {
1770 return -TARGET_EINVAL;
1771 }
1772
1773 crlen = sizeof(cr);
1774 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1775 &cr, &crlen));
1776 if (ret < 0) {
1777 return ret;
1778 }
1779 if (len > crlen) {
1780 len = crlen;
1781 }
1782 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1783 return -TARGET_EFAULT;
1784 }
1785 __put_user(cr.pid, &tcr->pid);
1786 __put_user(cr.uid, &tcr->uid);
1787 __put_user(cr.gid, &tcr->gid);
1788 unlock_user_struct(tcr, optval_addr, 1);
1789 if (put_user_u32(len, optlen)) {
1790 return -TARGET_EFAULT;
1791 }
1792 break;
1793 }
1794 /* Options with 'int' argument. */
1795 case TARGET_SO_DEBUG:
1796 optname = SO_DEBUG;
1797 goto int_case;
1798 case TARGET_SO_REUSEADDR:
1799 optname = SO_REUSEADDR;
1800 goto int_case;
1801 case TARGET_SO_TYPE:
1802 optname = SO_TYPE;
1803 goto int_case;
1804 case TARGET_SO_ERROR:
1805 optname = SO_ERROR;
1806 goto int_case;
1807 case TARGET_SO_DONTROUTE:
1808 optname = SO_DONTROUTE;
1809 goto int_case;
1810 case TARGET_SO_BROADCAST:
1811 optname = SO_BROADCAST;
1812 goto int_case;
1813 case TARGET_SO_SNDBUF:
1814 optname = SO_SNDBUF;
1815 goto int_case;
1816 case TARGET_SO_RCVBUF:
1817 optname = SO_RCVBUF;
1818 goto int_case;
1819 case TARGET_SO_KEEPALIVE:
1820 optname = SO_KEEPALIVE;
1821 goto int_case;
1822 case TARGET_SO_OOBINLINE:
1823 optname = SO_OOBINLINE;
1824 goto int_case;
1825 case TARGET_SO_NO_CHECK:
1826 optname = SO_NO_CHECK;
1827 goto int_case;
1828 case TARGET_SO_PRIORITY:
1829 optname = SO_PRIORITY;
1830 goto int_case;
1831 #ifdef SO_BSDCOMPAT
1832 case TARGET_SO_BSDCOMPAT:
1833 optname = SO_BSDCOMPAT;
1834 goto int_case;
1835 #endif
1836 case TARGET_SO_PASSCRED:
1837 optname = SO_PASSCRED;
1838 goto int_case;
1839 case TARGET_SO_TIMESTAMP:
1840 optname = SO_TIMESTAMP;
1841 goto int_case;
1842 case TARGET_SO_RCVLOWAT:
1843 optname = SO_RCVLOWAT;
1844 goto int_case;
1845 case TARGET_SO_ACCEPTCONN:
1846 optname = SO_ACCEPTCONN;
1847 goto int_case;
1848 default:
1849 goto int_case;
1850 }
1851 break;
1852 case SOL_TCP:
1853 /* TCP options all take an 'int' value. */
1854 int_case:
1855 if (get_user_u32(len, optlen))
1856 return -TARGET_EFAULT;
1857 if (len < 0)
1858 return -TARGET_EINVAL;
1859 lv = sizeof(lv);
1860 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1861 if (ret < 0)
1862 return ret;
1863 if (optname == SO_TYPE) {
1864 val = host_to_target_sock_type(val);
1865 }
1866 if (len > lv)
1867 len = lv;
1868 if (len == 4) {
1869 if (put_user_u32(val, optval_addr))
1870 return -TARGET_EFAULT;
1871 } else {
1872 if (put_user_u8(val, optval_addr))
1873 return -TARGET_EFAULT;
1874 }
1875 if (put_user_u32(len, optlen))
1876 return -TARGET_EFAULT;
1877 break;
1878 case SOL_IP:
1879 switch(optname) {
1880 case IP_TOS:
1881 case IP_TTL:
1882 case IP_HDRINCL:
1883 case IP_ROUTER_ALERT:
1884 case IP_RECVOPTS:
1885 case IP_RETOPTS:
1886 case IP_PKTINFO:
1887 case IP_MTU_DISCOVER:
1888 case IP_RECVERR:
1889 case IP_RECVTOS:
1890 #ifdef IP_FREEBIND
1891 case IP_FREEBIND:
1892 #endif
1893 case IP_MULTICAST_TTL:
1894 case IP_MULTICAST_LOOP:
1895 if (get_user_u32(len, optlen))
1896 return -TARGET_EFAULT;
1897 if (len < 0)
1898 return -TARGET_EINVAL;
1899 lv = sizeof(lv);
1900 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1901 if (ret < 0)
1902 return ret;
1903 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1904 len = 1;
1905 if (put_user_u32(len, optlen)
1906 || put_user_u8(val, optval_addr))
1907 return -TARGET_EFAULT;
1908 } else {
1909 if (len > sizeof(int))
1910 len = sizeof(int);
1911 if (put_user_u32(len, optlen)
1912 || put_user_u32(val, optval_addr))
1913 return -TARGET_EFAULT;
1914 }
1915 break;
1916 default:
1917 ret = -TARGET_ENOPROTOOPT;
1918 break;
1919 }
1920 break;
1921 default:
1922 unimplemented:
1923 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1924 level, optname);
1925 ret = -TARGET_EOPNOTSUPP;
1926 break;
1927 }
1928 return ret;
1929 }
1930
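/* Note (summary of the helper below, as read from the code): lock_iovec()
 * validates the guest-supplied count, allocates a host iovec array, locks
 * each guest buffer into host memory, and clamps the combined length to
 * max_len; on failure it unlocks whatever was already locked and reports
 * the error via errno so callers can convert it to a target errno.
 */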
1931 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1932 int count, int copy)
1933 {
1934 struct target_iovec *target_vec;
1935 struct iovec *vec;
1936 abi_ulong total_len, max_len;
1937 int i;
1938 int err = 0;
1939 bool bad_address = false;
1940
1941 if (count == 0) {
1942 errno = 0;
1943 return NULL;
1944 }
1945 if (count < 0 || count > IOV_MAX) {
1946 errno = EINVAL;
1947 return NULL;
1948 }
1949
1950 vec = g_try_new0(struct iovec, count);
1951 if (vec == NULL) {
1952 errno = ENOMEM;
1953 return NULL;
1954 }
1955
1956 target_vec = lock_user(VERIFY_READ, target_addr,
1957 count * sizeof(struct target_iovec), 1);
1958 if (target_vec == NULL) {
1959 err = EFAULT;
1960 goto fail2;
1961 }
1962
1963 /* ??? If host page size > target page size, this will result in a
1964 value larger than what we can actually support. */
1965 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1966 total_len = 0;
1967
1968 for (i = 0; i < count; i++) {
1969 abi_ulong base = tswapal(target_vec[i].iov_base);
1970 abi_long len = tswapal(target_vec[i].iov_len);
1971
1972 if (len < 0) {
1973 err = EINVAL;
1974 goto fail;
1975 } else if (len == 0) {
1976 /* Zero length pointer is ignored. */
1977 vec[i].iov_base = 0;
1978 } else {
1979 vec[i].iov_base = lock_user(type, base, len, copy);
1980 /* If the first buffer pointer is bad, this is a fault. But
1981 * subsequent bad buffers will result in a partial write; this
1982 * is realized by filling the vector with null pointers and
1983 * zero lengths. */
1984 if (!vec[i].iov_base) {
1985 if (i == 0) {
1986 err = EFAULT;
1987 goto fail;
1988 } else {
1989 bad_address = true;
1990 }
1991 }
1992 if (bad_address) {
1993 len = 0;
1994 }
1995 if (len > max_len - total_len) {
1996 len = max_len - total_len;
1997 }
1998 }
1999 vec[i].iov_len = len;
2000 total_len += len;
2001 }
2002
2003 unlock_user(target_vec, target_addr, 0);
2004 return vec;
2005
2006 fail:
2007 while (--i >= 0) {
2008 if (tswapal(target_vec[i].iov_len) > 0) {
2009 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2010 }
2011 }
2012 unlock_user(target_vec, target_addr, 0);
2013 fail2:
2014 g_free(vec);
2015 errno = err;
2016 return NULL;
2017 }
2018
2019 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2020 int count, int copy)
2021 {
2022 struct target_iovec *target_vec;
2023 int i;
2024
2025 target_vec = lock_user(VERIFY_READ, target_addr,
2026 count * sizeof(struct target_iovec), 1);
2027 if (target_vec) {
2028 for (i = 0; i < count; i++) {
2029 abi_ulong base = tswapal(target_vec[i].iov_base);
2030 abi_long len = tswapal(target_vec[i].iov_len);
2031 if (len < 0) {
2032 break;
2033 }
2034 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2035 }
2036 unlock_user(target_vec, target_addr, 0);
2037 }
2038
2039 g_free(vec);
2040 }
2041
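/* Note: target_to_host_sock_type() below splits the target 'type' argument
 * into the base socket type and the SOCK_CLOEXEC/SOCK_NONBLOCK flag bits
 * and rewrites it in place for the host, e.g. a target value of
 * (TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC) becomes
 * (SOCK_STREAM | SOCK_CLOEXEC) when the host defines SOCK_CLOEXEC.
 * Flags the host cannot express are rejected with -TARGET_EINVAL, except
 * that a missing SOCK_NONBLOCK can still be emulated later via fcntl()
 * in sock_flags_fixup().
 */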
2042 static inline int target_to_host_sock_type(int *type)
2043 {
2044 int host_type = 0;
2045 int target_type = *type;
2046
2047 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2048 case TARGET_SOCK_DGRAM:
2049 host_type = SOCK_DGRAM;
2050 break;
2051 case TARGET_SOCK_STREAM:
2052 host_type = SOCK_STREAM;
2053 break;
2054 default:
2055 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2056 break;
2057 }
2058 if (target_type & TARGET_SOCK_CLOEXEC) {
2059 #if defined(SOCK_CLOEXEC)
2060 host_type |= SOCK_CLOEXEC;
2061 #else
2062 return -TARGET_EINVAL;
2063 #endif
2064 }
2065 if (target_type & TARGET_SOCK_NONBLOCK) {
2066 #if defined(SOCK_NONBLOCK)
2067 host_type |= SOCK_NONBLOCK;
2068 #elif !defined(O_NONBLOCK)
2069 return -TARGET_EINVAL;
2070 #endif
2071 }
2072 *type = host_type;
2073 return 0;
2074 }
2075
2076 /* Try to emulate socket type flags after socket creation. */
2077 static int sock_flags_fixup(int fd, int target_type)
2078 {
2079 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2080 if (target_type & TARGET_SOCK_NONBLOCK) {
2081 int flags = fcntl(fd, F_GETFL);
2082 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2083 close(fd);
2084 return -TARGET_EINVAL;
2085 }
2086 }
2087 #endif
2088 return fd;
2089 }
2090
2091 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2092 abi_ulong target_addr,
2093 socklen_t len)
2094 {
2095 struct sockaddr *addr = host_addr;
2096 struct target_sockaddr *target_saddr;
2097
2098 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2099 if (!target_saddr) {
2100 return -TARGET_EFAULT;
2101 }
2102
2103 memcpy(addr, target_saddr, len);
2104 addr->sa_family = tswap16(target_saddr->sa_family);
2105 /* spkt_protocol is big-endian */
2106
2107 unlock_user(target_saddr, target_addr, 0);
2108 return 0;
2109 }
2110
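/* Note: registering this TargetFdTrans for a SOCK_PACKET fd (see
 * do_socket() below) routes later sockaddr conversion for that fd through
 * packet_target_to_host_sockaddr(), which copies the address verbatim and
 * only byte-swaps sa_family; the rest of the obsolete SOCK_PACKET address
 * (device name, and spkt_protocol, which stays big-endian) needs no
 * conversion.
 */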
2111 static TargetFdTrans target_packet_trans = {
2112 .target_to_host_addr = packet_target_to_host_sockaddr,
2113 };
2114
2115 /* do_socket() Must return target values and target errnos. */
2116 static abi_long do_socket(int domain, int type, int protocol)
2117 {
2118 int target_type = type;
2119 int ret;
2120
2121 ret = target_to_host_sock_type(&type);
2122 if (ret) {
2123 return ret;
2124 }
2125
2126 if (domain == PF_NETLINK)
2127 return -TARGET_EAFNOSUPPORT;
2128
2129 if (domain == AF_PACKET ||
2130 (domain == AF_INET && type == SOCK_PACKET)) {
2131 protocol = tswap16(protocol);
2132 }
2133
2134 ret = get_errno(socket(domain, type, protocol));
2135 if (ret >= 0) {
2136 ret = sock_flags_fixup(ret, target_type);
2137 if (type == SOCK_PACKET) {
2138 /* Handle an obsolete case: SOCK_PACKET sockets are
2139 * bound by device name.
2140 */
2141 fd_trans_register(ret, &target_packet_trans);
2142 }
2143 }
2144 return ret;
2145 }
2146
2147 /* do_bind() Must return target values and target errnos. */
2148 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2149 socklen_t addrlen)
2150 {
2151 void *addr;
2152 abi_long ret;
2153
2154 if ((int)addrlen < 0) {
2155 return -TARGET_EINVAL;
2156 }
2157
2158 addr = alloca(addrlen+1);
2159
2160 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2161 if (ret)
2162 return ret;
2163
2164 return get_errno(bind(sockfd, addr, addrlen));
2165 }
2166
2167 /* do_connect() Must return target values and target errnos. */
2168 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2169 socklen_t addrlen)
2170 {
2171 void *addr;
2172 abi_long ret;
2173
2174 if ((int)addrlen < 0) {
2175 return -TARGET_EINVAL;
2176 }
2177
2178 addr = alloca(addrlen+1);
2179
2180 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2181 if (ret)
2182 return ret;
2183
2184 return get_errno(connect(sockfd, addr, addrlen));
2185 }
2186
2187 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2188 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2189 int flags, int send)
2190 {
2191 abi_long ret, len;
2192 struct msghdr msg;
2193 int count;
2194 struct iovec *vec;
2195 abi_ulong target_vec;
2196
2197 if (msgp->msg_name) {
2198 msg.msg_namelen = tswap32(msgp->msg_namelen);
2199 msg.msg_name = alloca(msg.msg_namelen+1);
2200 ret = target_to_host_sockaddr(fd, msg.msg_name,
2201 tswapal(msgp->msg_name),
2202 msg.msg_namelen);
2203 if (ret) {
2204 goto out2;
2205 }
2206 } else {
2207 msg.msg_name = NULL;
2208 msg.msg_namelen = 0;
2209 }
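/* The host control buffer is sized at twice the target's msg_controllen;
 * presumably this leaves headroom for control messages whose host
 * representation is larger than the target one after cmsg conversion.
 */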
2210 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2211 msg.msg_control = alloca(msg.msg_controllen);
2212 msg.msg_flags = tswap32(msgp->msg_flags);
2213
2214 count = tswapal(msgp->msg_iovlen);
2215 target_vec = tswapal(msgp->msg_iov);
2216 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2217 target_vec, count, send);
2218 if (vec == NULL) {
2219 ret = -host_to_target_errno(errno);
2220 goto out2;
2221 }
2222 msg.msg_iovlen = count;
2223 msg.msg_iov = vec;
2224
2225 if (send) {
2226 ret = target_to_host_cmsg(&msg, msgp);
2227 if (ret == 0)
2228 ret = get_errno(sendmsg(fd, &msg, flags));
2229 } else {
2230 ret = get_errno(recvmsg(fd, &msg, flags));
2231 if (!is_error(ret)) {
2232 len = ret;
2233 ret = host_to_target_cmsg(msgp, &msg);
2234 if (!is_error(ret)) {
2235 msgp->msg_namelen = tswap32(msg.msg_namelen);
2236 if (msg.msg_name != NULL) {
2237 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2238 msg.msg_name, msg.msg_namelen);
2239 if (ret) {
2240 goto out;
2241 }
2242 }
2243
2244 ret = len;
2245 }
2246 }
2247 }
2248
2249 out:
2250 unlock_iovec(vec, target_vec, count, !send);
2251 out2:
2252 return ret;
2253 }
2254
2255 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2256 int flags, int send)
2257 {
2258 abi_long ret;
2259 struct target_msghdr *msgp;
2260
2261 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2262 msgp,
2263 target_msg,
2264 send ? 1 : 0)) {
2265 return -TARGET_EFAULT;
2266 }
2267 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2268 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2269 return ret;
2270 }
2271
2272 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2273 * so it might not have this *mmsg-specific flag either.
2274 */
2275 #ifndef MSG_WAITFORONE
2276 #define MSG_WAITFORONE 0x10000
2277 #endif
2278
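/* Note: sendmmsg()/recvmmsg() are emulated below by looping over the
 * guest's mmsghdr array and issuing one sendmsg()/recvmsg() per entry via
 * do_sendrecvmsg_locked(); matching kernel semantics, the call reports the
 * number of messages processed if any succeeded and only returns an error
 * when the very first message fails.
 */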
2279 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2280 unsigned int vlen, unsigned int flags,
2281 int send)
2282 {
2283 struct target_mmsghdr *mmsgp;
2284 abi_long ret = 0;
2285 int i;
2286
2287 if (vlen > UIO_MAXIOV) {
2288 vlen = UIO_MAXIOV;
2289 }
2290
2291 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2292 if (!mmsgp) {
2293 return -TARGET_EFAULT;
2294 }
2295
2296 for (i = 0; i < vlen; i++) {
2297 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2298 if (is_error(ret)) {
2299 break;
2300 }
2301 mmsgp[i].msg_len = tswap32(ret);
2302 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2303 if (flags & MSG_WAITFORONE) {
2304 flags |= MSG_DONTWAIT;
2305 }
2306 }
2307
2308 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2309
2310 /* Return number of datagrams sent if we sent any at all;
2311 * otherwise return the error.
2312 */
2313 if (i) {
2314 return i;
2315 }
2316 return ret;
2317 }
2318
2319 /* If we don't have a system accept4() then just call accept.
2320 * The call sites of do_accept4() ensure that they don't
2321 * pass a non-zero flags argument in this configuration.
2322 */
2323 #ifndef CONFIG_ACCEPT4
2324 static inline int accept4(int sockfd, struct sockaddr *addr,
2325 socklen_t *addrlen, int flags)
2326 {
2327 assert(flags == 0);
2328 return accept(sockfd, addr, addrlen);
2329 }
2330 #endif
2331
2332 /* do_accept4() Must return target values and target errnos. */
2333 static abi_long do_accept4(int fd, abi_ulong target_addr,
2334 abi_ulong target_addrlen_addr, int flags)
2335 {
2336 socklen_t addrlen;
2337 void *addr;
2338 abi_long ret;
2339 int host_flags;
2340
2341 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2342
2343 if (target_addr == 0) {
2344 return get_errno(accept4(fd, NULL, NULL, host_flags));
2345 }
2346
2347 /* linux returns EINVAL if addrlen pointer is invalid */
2348 if (get_user_u32(addrlen, target_addrlen_addr))
2349 return -TARGET_EINVAL;
2350
2351 if ((int)addrlen < 0) {
2352 return -TARGET_EINVAL;
2353 }
2354
2355 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2356 return -TARGET_EINVAL;
2357
2358 addr = alloca(addrlen);
2359
2360 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2361 if (!is_error(ret)) {
2362 host_to_target_sockaddr(target_addr, addr, addrlen);
2363 if (put_user_u32(addrlen, target_addrlen_addr))
2364 ret = -TARGET_EFAULT;
2365 }
2366 return ret;
2367 }
2368
2369 /* do_getpeername() Must return target values and target errnos. */
2370 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2371 abi_ulong target_addrlen_addr)
2372 {
2373 socklen_t addrlen;
2374 void *addr;
2375 abi_long ret;
2376
2377 if (get_user_u32(addrlen, target_addrlen_addr))
2378 return -TARGET_EFAULT;
2379
2380 if ((int)addrlen < 0) {
2381 return -TARGET_EINVAL;
2382 }
2383
2384 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2385 return -TARGET_EFAULT;
2386
2387 addr = alloca(addrlen);
2388
2389 ret = get_errno(getpeername(fd, addr, &addrlen));
2390 if (!is_error(ret)) {
2391 host_to_target_sockaddr(target_addr, addr, addrlen);
2392 if (put_user_u32(addrlen, target_addrlen_addr))
2393 ret = -TARGET_EFAULT;
2394 }
2395 return ret;
2396 }
2397
2398 /* do_getsockname() Must return target values and target errnos. */
2399 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2400 abi_ulong target_addrlen_addr)
2401 {
2402 socklen_t addrlen;
2403 void *addr;
2404 abi_long ret;
2405
2406 if (get_user_u32(addrlen, target_addrlen_addr))
2407 return -TARGET_EFAULT;
2408
2409 if ((int)addrlen < 0) {
2410 return -TARGET_EINVAL;
2411 }
2412
2413 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2414 return -TARGET_EFAULT;
2415
2416 addr = alloca(addrlen);
2417
2418 ret = get_errno(getsockname(fd, addr, &addrlen));
2419 if (!is_error(ret)) {
2420 host_to_target_sockaddr(target_addr, addr, addrlen);
2421 if (put_user_u32(addrlen, target_addrlen_addr))
2422 ret = -TARGET_EFAULT;
2423 }
2424 return ret;
2425 }
2426
2427 /* do_socketpair() Must return target values and target errnos. */
2428 static abi_long do_socketpair(int domain, int type, int protocol,
2429 abi_ulong target_tab_addr)
2430 {
2431 int tab[2];
2432 abi_long ret;
2433
2434 ret = target_to_host_sock_type(&type);
2435 if (ret) { return ret; }
2436 ret = get_errno(socketpair(domain, type, protocol, tab));
2437 if (!is_error(ret)) {
2438 if (put_user_s32(tab[0], target_tab_addr)
2439 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2440 ret = -TARGET_EFAULT;
2441 }
2442 return ret;
2443 }
2444
2445 /* do_sendto() Must return target values and target errnos. */
2446 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2447 abi_ulong target_addr, socklen_t addrlen)
2448 {
2449 void *addr;
2450 void *host_msg;
2451 abi_long ret;
2452
2453 if ((int)addrlen < 0) {
2454 return -TARGET_EINVAL;
2455 }
2456
2457 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2458 if (!host_msg)
2459 return -TARGET_EFAULT;
2460 if (target_addr) {
2461 addr = alloca(addrlen+1);
2462 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2463 if (ret) {
2464 unlock_user(host_msg, msg, 0);
2465 return ret;
2466 }
2467 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2468 } else {
2469 ret = get_errno(send(fd, host_msg, len, flags));
2470 }
2471 unlock_user(host_msg, msg, 0);
2472 return ret;
2473 }
2474
2475 /* do_recvfrom() Must return target values and target errnos. */
2476 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2477 abi_ulong target_addr,
2478 abi_ulong target_addrlen)
2479 {
2480 socklen_t addrlen;
2481 void *addr;
2482 void *host_msg;
2483 abi_long ret;
2484
2485 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2486 if (!host_msg)
2487 return -TARGET_EFAULT;
2488 if (target_addr) {
2489 if (get_user_u32(addrlen, target_addrlen)) {
2490 ret = -TARGET_EFAULT;
2491 goto fail;
2492 }
2493 if ((int)addrlen < 0) {
2494 ret = -TARGET_EINVAL;
2495 goto fail;
2496 }
2497 addr = alloca(addrlen);
2498 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2499 } else {
2500 addr = NULL; /* To keep compiler quiet. */
2501 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2502 }
2503 if (!is_error(ret)) {
2504 if (target_addr) {
2505 host_to_target_sockaddr(target_addr, addr, addrlen);
2506 if (put_user_u32(addrlen, target_addrlen)) {
2507 ret = -TARGET_EFAULT;
2508 goto fail;
2509 }
2510 }
2511 unlock_user(host_msg, msg, len);
2512 } else {
2513 fail:
2514 unlock_user(host_msg, msg, 0);
2515 }
2516 return ret;
2517 }
2518
2519 #ifdef TARGET_NR_socketcall
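/* Note: on targets that multiplex all socket operations through the single
 * socketcall syscall, 'num' selects the operation and 'vptr' points to a
 * block of abi_long arguments in guest memory; ac[] below gives the number
 * of arguments to fetch for each operation before dispatching to the
 * corresponding do_* helper.
 */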
2520 /* do_socketcall() Must return target values and target errnos. */
2521 static abi_long do_socketcall(int num, abi_ulong vptr)
2522 {
2523 static const unsigned ac[] = { /* number of arguments per call */
2524 [SOCKOP_socket] = 3, /* domain, type, protocol */
2525 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2526 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2527 [SOCKOP_listen] = 2, /* sockfd, backlog */
2528 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2529 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2530 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2531 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2532 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2533 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2534 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2535 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2536 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2537 [SOCKOP_shutdown] = 2, /* sockfd, how */
2538 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2539 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2540 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2541 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2542 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2543 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2544 };
2545 abi_long a[6]; /* max 6 args */
2546
2547 /* first, collect the arguments in a[] according to ac[] */
2548 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2549 unsigned i;
2550 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2551 for (i = 0; i < ac[num]; ++i) {
2552 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2553 return -TARGET_EFAULT;
2554 }
2555 }
2556 }
2557
2558 /* now when we have the args, actually handle the call */
2559 switch (num) {
2560 case SOCKOP_socket: /* domain, type, protocol */
2561 return do_socket(a[0], a[1], a[2]);
2562 case SOCKOP_bind: /* sockfd, addr, addrlen */
2563 return do_bind(a[0], a[1], a[2]);
2564 case SOCKOP_connect: /* sockfd, addr, addrlen */
2565 return do_connect(a[0], a[1], a[2]);
2566 case SOCKOP_listen: /* sockfd, backlog */
2567 return get_errno(listen(a[0], a[1]));
2568 case SOCKOP_accept: /* sockfd, addr, addrlen */
2569 return do_accept4(a[0], a[1], a[2], 0);
2570 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2571 return do_accept4(a[0], a[1], a[2], a[3]);
2572 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2573 return do_getsockname(a[0], a[1], a[2]);
2574 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2575 return do_getpeername(a[0], a[1], a[2]);
2576 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2577 return do_socketpair(a[0], a[1], a[2], a[3]);
2578 case SOCKOP_send: /* sockfd, msg, len, flags */
2579 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2580 case SOCKOP_recv: /* sockfd, msg, len, flags */
2581 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2582 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2583 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2584 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2585 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2586 case SOCKOP_shutdown: /* sockfd, how */
2587 return get_errno(shutdown(a[0], a[1]));
2588 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2589 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2590 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2591 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2592 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
2593 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
2594 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
2595 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
2596 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2597 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2598 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2599 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2600 default:
2601 gemu_log("Unsupported socketcall: %d\n", num);
2602 return -TARGET_ENOSYS;
2603 }
2604 }
2605 #endif
2606
2607 #define N_SHM_REGIONS 32
2608
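/* Note: shm_regions[] records the guest address and size of every attached
 * SysV shared memory segment (up to N_SHM_REGIONS of them) so that
 * do_shmdt() can recover the segment's size when clearing the page flags
 * on detach.
 */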
2609 static struct shm_region {
2610 abi_ulong start;
2611 abi_ulong size;
2612 bool in_use;
2613 } shm_regions[N_SHM_REGIONS];
2614
2615 struct target_semid_ds
2616 {
2617 struct target_ipc_perm sem_perm;
2618 abi_ulong sem_otime;
2619 #if !defined(TARGET_PPC64)
2620 abi_ulong __unused1;
2621 #endif
2622 abi_ulong sem_ctime;
2623 #if !defined(TARGET_PPC64)
2624 abi_ulong __unused2;
2625 #endif
2626 abi_ulong sem_nsems;
2627 abi_ulong __unused3;
2628 abi_ulong __unused4;
2629 };
2630
2631 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2632 abi_ulong target_addr)
2633 {
2634 struct target_ipc_perm *target_ip;
2635 struct target_semid_ds *target_sd;
2636
2637 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2638 return -TARGET_EFAULT;
2639 target_ip = &(target_sd->sem_perm);
2640 host_ip->__key = tswap32(target_ip->__key);
2641 host_ip->uid = tswap32(target_ip->uid);
2642 host_ip->gid = tswap32(target_ip->gid);
2643 host_ip->cuid = tswap32(target_ip->cuid);
2644 host_ip->cgid = tswap32(target_ip->cgid);
2645 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2646 host_ip->mode = tswap32(target_ip->mode);
2647 #else
2648 host_ip->mode = tswap16(target_ip->mode);
2649 #endif
2650 #if defined(TARGET_PPC)
2651 host_ip->__seq = tswap32(target_ip->__seq);
2652 #else
2653 host_ip->__seq = tswap16(target_ip->__seq);
2654 #endif
2655 unlock_user_struct(target_sd, target_addr, 0);
2656 return 0;
2657 }
2658
2659 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2660 struct ipc_perm *host_ip)
2661 {
2662 struct target_ipc_perm *target_ip;
2663 struct target_semid_ds *target_sd;
2664
2665 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2666 return -TARGET_EFAULT;
2667 target_ip = &(target_sd->sem_perm);
2668 target_ip->__key = tswap32(host_ip->__key);
2669 target_ip->uid = tswap32(host_ip->uid);
2670 target_ip->gid = tswap32(host_ip->gid);
2671 target_ip->cuid = tswap32(host_ip->cuid);
2672 target_ip->cgid = tswap32(host_ip->cgid);
2673 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2674 target_ip->mode = tswap32(host_ip->mode);
2675 #else
2676 target_ip->mode = tswap16(host_ip->mode);
2677 #endif
2678 #if defined(TARGET_PPC)
2679 target_ip->__seq = tswap32(host_ip->__seq);
2680 #else
2681 target_ip->__seq = tswap16(host_ip->__seq);
2682 #endif
2683 unlock_user_struct(target_sd, target_addr, 1);
2684 return 0;
2685 }
2686
2687 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2688 abi_ulong target_addr)
2689 {
2690 struct target_semid_ds *target_sd;
2691
2692 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2693 return -TARGET_EFAULT;
2694 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2695 return -TARGET_EFAULT;
2696 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2697 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2698 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2699 unlock_user_struct(target_sd, target_addr, 0);
2700 return 0;
2701 }
2702
2703 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2704 struct semid_ds *host_sd)
2705 {
2706 struct target_semid_ds *target_sd;
2707
2708 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2709 return -TARGET_EFAULT;
2710 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2711 return -TARGET_EFAULT;
2712 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2713 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2714 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2715 unlock_user_struct(target_sd, target_addr, 1);
2716 return 0;
2717 }
2718
2719 struct target_seminfo {
2720 int semmap;
2721 int semmni;
2722 int semmns;
2723 int semmnu;
2724 int semmsl;
2725 int semopm;
2726 int semume;
2727 int semusz;
2728 int semvmx;
2729 int semaem;
2730 };
2731
2732 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2733 struct seminfo *host_seminfo)
2734 {
2735 struct target_seminfo *target_seminfo;
2736 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2737 return -TARGET_EFAULT;
2738 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2739 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2740 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2741 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2742 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2743 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2744 __put_user(host_seminfo->semume, &target_seminfo->semume);
2745 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2746 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2747 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2748 unlock_user_struct(target_seminfo, target_addr, 1);
2749 return 0;
2750 }
2751
2752 union semun {
2753 int val;
2754 struct semid_ds *buf;
2755 unsigned short *array;
2756 struct seminfo *__buf;
2757 };
2758
2759 union target_semun {
2760 int val;
2761 abi_ulong buf;
2762 abi_ulong array;
2763 abi_ulong __buf;
2764 };
2765
2766 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2767 abi_ulong target_addr)
2768 {
2769 int nsems;
2770 unsigned short *array;
2771 union semun semun;
2772 struct semid_ds semid_ds;
2773 int i, ret;
2774
2775 semun.buf = &semid_ds;
2776
2777 ret = semctl(semid, 0, IPC_STAT, semun);
2778 if (ret == -1)
2779 return get_errno(ret);
2780
2781 nsems = semid_ds.sem_nsems;
2782
2783 *host_array = g_try_new(unsigned short, nsems);
2784 if (!*host_array) {
2785 return -TARGET_ENOMEM;
2786 }
2787 array = lock_user(VERIFY_READ, target_addr,
2788 nsems*sizeof(unsigned short), 1);
2789 if (!array) {
2790 g_free(*host_array);
2791 return -TARGET_EFAULT;
2792 }
2793
2794 for(i=0; i<nsems; i++) {
2795 __get_user((*host_array)[i], &array[i]);
2796 }
2797 unlock_user(array, target_addr, 0);
2798
2799 return 0;
2800 }
2801
2802 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2803 unsigned short **host_array)
2804 {
2805 int nsems;
2806 unsigned short *array;
2807 union semun semun;
2808 struct semid_ds semid_ds;
2809 int i, ret;
2810
2811 semun.buf = &semid_ds;
2812
2813 ret = semctl(semid, 0, IPC_STAT, semun);
2814 if (ret == -1)
2815 return get_errno(ret);
2816
2817 nsems = semid_ds.sem_nsems;
2818
2819 array = lock_user(VERIFY_WRITE, target_addr,
2820 nsems*sizeof(unsigned short), 0);
2821 if (!array)
2822 return -TARGET_EFAULT;
2823
2824 for(i=0; i<nsems; i++) {
2825 __put_user((*host_array)[i], &array[i]);
2826 }
2827 g_free(*host_array);
2828 unlock_user(array, target_addr, 1);
2829
2830 return 0;
2831 }
2832
2833 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2834 abi_ulong target_arg)
2835 {
2836 union target_semun target_su = { .buf = target_arg };
2837 union semun arg;
2838 struct semid_ds dsarg;
2839 unsigned short *array = NULL;
2840 struct seminfo seminfo;
2841 abi_long ret = -TARGET_EINVAL;
2842 abi_long err;
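/* Strip the high command bits so the plain command value is passed to the
 * host semctl(); the assumption here is that the target encodes the
 * kernel's IPC_64 flag (0x100), which the 0xff mask discards.
 */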
2843 cmd &= 0xff;
2844
2845 switch( cmd ) {
2846 case GETVAL:
2847 case SETVAL:
2848 /* In 64 bit cross-endian situations, we will erroneously pick up
2849 * the wrong half of the union for the "val" element. To rectify
2850 * this, the entire 8-byte structure is byteswapped, followed by
2851 * a swap of the 4 byte val field. In other cases, the data is
2852 * already in proper host byte order. */
2853 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2854 target_su.buf = tswapal(target_su.buf);
2855 arg.val = tswap32(target_su.val);
2856 } else {
2857 arg.val = target_su.val;
2858 }
2859 ret = get_errno(semctl(semid, semnum, cmd, arg));
2860 break;
2861 case GETALL:
2862 case SETALL:
2863 err = target_to_host_semarray(semid, &array, target_su.array);
2864 if (err)
2865 return err;
2866 arg.array = array;
2867 ret = get_errno(semctl(semid, semnum, cmd, arg));
2868 err = host_to_target_semarray(semid, target_su.array, &array);
2869 if (err)
2870 return err;
2871 break;
2872 case IPC_STAT:
2873 case IPC_SET:
2874 case SEM_STAT:
2875 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2876 if (err)
2877 return err;
2878 arg.buf = &dsarg;
2879 ret = get_errno(semctl(semid, semnum, cmd, arg));
2880 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2881 if (err)
2882 return err;
2883 break;
2884 case IPC_INFO:
2885 case SEM_INFO:
2886 arg.__buf = &seminfo;
2887 ret = get_errno(semctl(semid, semnum, cmd, arg));
2888 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2889 if (err)
2890 return err;
2891 break;
2892 case IPC_RMID:
2893 case GETPID:
2894 case GETNCNT:
2895 case GETZCNT:
2896 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2897 break;
2898 }
2899
2900 return ret;
2901 }
2902
2903 struct target_sembuf {
2904 unsigned short sem_num;
2905 short sem_op;
2906 short sem_flg;
2907 };
2908
2909 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2910 abi_ulong target_addr,
2911 unsigned nsops)
2912 {
2913 struct target_sembuf *target_sembuf;
2914 int i;
2915
2916 target_sembuf = lock_user(VERIFY_READ, target_addr,
2917 nsops*sizeof(struct target_sembuf), 1);
2918 if (!target_sembuf)
2919 return -TARGET_EFAULT;
2920
2921 for(i=0; i<nsops; i++) {
2922 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2923 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2924 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2925 }
2926
2927 unlock_user(target_sembuf, target_addr, 0);
2928
2929 return 0;
2930 }
2931
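/* Note: do_semop() below converts the guest sembuf array with
 * target_to_host_sembuf() and then calls the host semop() directly; the
 * sops[] variable-length array is sized by the guest-supplied nsops.
 */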
2932 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2933 {
2934 struct sembuf sops[nsops];
2935
2936 if (target_to_host_sembuf(sops, ptr, nsops))
2937 return -TARGET_EFAULT;
2938
2939 return get_errno(semop(semid, sops, nsops));
2940 }
2941
2942 struct target_msqid_ds
2943 {
2944 struct target_ipc_perm msg_perm;
2945 abi_ulong msg_stime;
2946 #if TARGET_ABI_BITS == 32
2947 abi_ulong __unused1;
2948 #endif
2949 abi_ulong msg_rtime;
2950 #if TARGET_ABI_BITS == 32
2951 abi_ulong __unused2;
2952 #endif
2953 abi_ulong msg_ctime;
2954 #if TARGET_ABI_BITS == 32
2955 abi_ulong __unused3;
2956 #endif
2957 abi_ulong __msg_cbytes;
2958 abi_ulong msg_qnum;
2959 abi_ulong msg_qbytes;
2960 abi_ulong msg_lspid;
2961 abi_ulong msg_lrpid;
2962 abi_ulong __unused4;
2963 abi_ulong __unused5;
2964 };
2965
2966 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2967 abi_ulong target_addr)
2968 {
2969 struct target_msqid_ds *target_md;
2970
2971 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2972 return -TARGET_EFAULT;
2973 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2974 return -TARGET_EFAULT;
2975 host_md->msg_stime = tswapal(target_md->msg_stime);
2976 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2977 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2978 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2979 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2980 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2981 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2982 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2983 unlock_user_struct(target_md, target_addr, 0);
2984 return 0;
2985 }
2986
2987 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2988 struct msqid_ds *host_md)
2989 {
2990 struct target_msqid_ds *target_md;
2991
2992 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2993 return -TARGET_EFAULT;
2994 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2995 return -TARGET_EFAULT;
2996 target_md->msg_stime = tswapal(host_md->msg_stime);
2997 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2998 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2999 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3000 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3001 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3002 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3003 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3004 unlock_user_struct(target_md, target_addr, 1);
3005 return 0;
3006 }
3007
3008 struct target_msginfo {
3009 int msgpool;
3010 int msgmap;
3011 int msgmax;
3012 int msgmnb;
3013 int msgmni;
3014 int msgssz;
3015 int msgtql;
3016 unsigned short int msgseg;
3017 };
3018
3019 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3020 struct msginfo *host_msginfo)
3021 {
3022 struct target_msginfo *target_msginfo;
3023 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3024 return -TARGET_EFAULT;
3025 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3026 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3027 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3028 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3029 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3030 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3031 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3032 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3033 unlock_user_struct(target_msginfo, target_addr, 1);
3034 return 0;
3035 }
3036
3037 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3038 {
3039 struct msqid_ds dsarg;
3040 struct msginfo msginfo;
3041 abi_long ret = -TARGET_EINVAL;
3042
3043 cmd &= 0xff;
3044
3045 switch (cmd) {
3046 case IPC_STAT:
3047 case IPC_SET:
3048 case MSG_STAT:
3049 if (target_to_host_msqid_ds(&dsarg,ptr))
3050 return -TARGET_EFAULT;
3051 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3052 if (host_to_target_msqid_ds(ptr,&dsarg))
3053 return -TARGET_EFAULT;
3054 break;
3055 case IPC_RMID:
3056 ret = get_errno(msgctl(msgid, cmd, NULL));
3057 break;
3058 case IPC_INFO:
3059 case MSG_INFO:
3060 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3061 if (host_to_target_msginfo(ptr, &msginfo))
3062 return -TARGET_EFAULT;
3063 break;
3064 }
3065
3066 return ret;
3067 }
3068
3069 struct target_msgbuf {
3070 abi_long mtype;
3071 char mtext[1];
3072 };
3073
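/* Note: the host struct msgbuf starts with a 'long' mtype while the target
 * layout uses abi_long, so do_msgsnd()/do_msgrcv() below allocate
 * msgsz + sizeof(long) bytes for the host message and convert mtype
 * separately from the mtext payload.
 */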
3074 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3075 ssize_t msgsz, int msgflg)
3076 {
3077 struct target_msgbuf *target_mb;
3078 struct msgbuf *host_mb;
3079 abi_long ret = 0;
3080
3081 if (msgsz < 0) {
3082 return -TARGET_EINVAL;
3083 }
3084
3085 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3086 return -TARGET_EFAULT;
3087 host_mb = g_try_malloc(msgsz + sizeof(long));
3088 if (!host_mb) {
3089 unlock_user_struct(target_mb, msgp, 0);
3090 return -TARGET_ENOMEM;
3091 }
3092 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3093 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3094 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
3095 g_free(host_mb);
3096 unlock_user_struct(target_mb, msgp, 0);
3097
3098 return ret;
3099 }
3100
3101 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3102 unsigned int msgsz, abi_long msgtyp,
3103 int msgflg)
3104 {
3105 struct target_msgbuf *target_mb;
3106 char *target_mtext;
3107 struct msgbuf *host_mb;
3108 abi_long ret = 0;
3109
3110 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3111 return -TARGET_EFAULT;
3112
3113 host_mb = g_malloc(msgsz+sizeof(long));
3114 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3115
3116 if (ret > 0) {
3117 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3118 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3119 if (!target_mtext) {
3120 ret = -TARGET_EFAULT;
3121 goto end;
3122 }
3123 memcpy(target_mb->mtext, host_mb->mtext, ret);
3124 unlock_user(target_mtext, target_mtext_addr, ret);
3125 }
3126
3127 target_mb->mtype = tswapal(host_mb->mtype);
3128
3129 end:
3130 if (target_mb)
3131 unlock_user_struct(target_mb, msgp, 1);
3132 g_free(host_mb);
3133 return ret;
3134 }
3135
3136 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3137 abi_ulong target_addr)
3138 {
3139 struct target_shmid_ds *target_sd;
3140
3141 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3142 return -TARGET_EFAULT;
3143 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3144 return -TARGET_EFAULT;
3145 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3146 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3147 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3148 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3149 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3150 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3151 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3152 unlock_user_struct(target_sd, target_addr, 0);
3153 return 0;
3154 }
3155
3156 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3157 struct shmid_ds *host_sd)
3158 {
3159 struct target_shmid_ds *target_sd;
3160
3161 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3162 return -TARGET_EFAULT;
3163 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3164 return -TARGET_EFAULT;
3165 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3166 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3167 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3168 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3169 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3170 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3171 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3172 unlock_user_struct(target_sd, target_addr, 1);
3173 return 0;
3174 }
3175
3176 struct target_shminfo {
3177 abi_ulong shmmax;
3178 abi_ulong shmmin;
3179 abi_ulong shmmni;
3180 abi_ulong shmseg;
3181 abi_ulong shmall;
3182 };
3183
3184 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3185 struct shminfo *host_shminfo)
3186 {
3187 struct target_shminfo *target_shminfo;
3188 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3189 return -TARGET_EFAULT;
3190 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3191 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3192 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3193 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3194 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3195 unlock_user_struct(target_shminfo, target_addr, 1);
3196 return 0;
3197 }
3198
3199 struct target_shm_info {
3200 int used_ids;
3201 abi_ulong shm_tot;
3202 abi_ulong shm_rss;
3203 abi_ulong shm_swp;
3204 abi_ulong swap_attempts;
3205 abi_ulong swap_successes;
3206 };
3207
3208 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3209 struct shm_info *host_shm_info)
3210 {
3211 struct target_shm_info *target_shm_info;
3212 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3213 return -TARGET_EFAULT;
3214 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3215 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3216 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3217 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3218 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3219 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3220 unlock_user_struct(target_shm_info, target_addr, 1);
3221 return 0;
3222 }
3223
3224 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3225 {
3226 struct shmid_ds dsarg;
3227 struct shminfo shminfo;
3228 struct shm_info shm_info;
3229 abi_long ret = -TARGET_EINVAL;
3230
3231 cmd &= 0xff;
3232
3233 switch(cmd) {
3234 case IPC_STAT:
3235 case IPC_SET:
3236 case SHM_STAT:
3237 if (target_to_host_shmid_ds(&dsarg, buf))
3238 return -TARGET_EFAULT;
3239 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3240 if (host_to_target_shmid_ds(buf, &dsarg))
3241 return -TARGET_EFAULT;
3242 break;
3243 case IPC_INFO:
3244 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3245 if (host_to_target_shminfo(buf, &shminfo))
3246 return -TARGET_EFAULT;
3247 break;
3248 case SHM_INFO:
3249 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3250 if (host_to_target_shm_info(buf, &shm_info))
3251 return -TARGET_EFAULT;
3252 break;
3253 case IPC_RMID:
3254 case SHM_LOCK:
3255 case SHM_UNLOCK:
3256 ret = get_errno(shmctl(shmid, cmd, NULL));
3257 break;
3258 }
3259
3260 return ret;
3261 }
3262
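/* Note: do_shmat() below first queries the segment size with
 * shmctl(IPC_STAT), then either attaches at the guest-requested address or
 * asks mmap_find_vma() for a free region of guest address space, marks the
 * attached pages readable (and writable unless SHM_RDONLY), and records
 * the mapping in shm_regions[] so that do_shmdt() can later clear the page
 * flags again.
 */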
3263 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3264 {
3265 abi_long raddr;
3266 void *host_raddr;
3267 struct shmid_ds shm_info;
3268 int i,ret;
3269
3270 /* find out the length of the shared memory segment */
3271 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3272 if (is_error(ret)) {
3273 /* can't get length, bail out */
3274 return ret;
3275 }
3276
3277 mmap_lock();
3278
3279 if (shmaddr)
3280 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3281 else {
3282 abi_ulong mmap_start;
3283
3284 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3285
3286 if (mmap_start == -1) {
3287 errno = ENOMEM;
3288 host_raddr = (void *)-1;
3289 } else
3290 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3291 }
3292
3293 if (host_raddr == (void *)-1) {
3294 mmap_unlock();
3295 return get_errno((long)host_raddr);
3296 }
3297 raddr=h2g((unsigned long)host_raddr);
3298
3299 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3300 PAGE_VALID | PAGE_READ |
3301 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3302
3303 for (i = 0; i < N_SHM_REGIONS; i++) {
3304 if (!shm_regions[i].in_use) {
3305 shm_regions[i].in_use = true;
3306 shm_regions[i].start = raddr;
3307 shm_regions[i].size = shm_info.shm_segsz;
3308 break;
3309 }
3310 }
3311
3312 mmap_unlock();
3313 return raddr;
3314
3315 }
3316
3317 static inline abi_long do_shmdt(abi_ulong shmaddr)
3318 {
3319 int i;
3320
3321 for (i = 0; i < N_SHM_REGIONS; ++i) {
3322 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3323 shm_regions[i].in_use = false;
3324 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3325 break;
3326 }
3327 }
3328
3329 return get_errno(shmdt(g2h(shmaddr)));
3330 }
3331
3332 #ifdef TARGET_NR_ipc
3333 /* ??? This only works with linear mappings. */
3334 /* do_ipc() must return target values and target errnos. */
3335 static abi_long do_ipc(unsigned int call, abi_long first,
3336 abi_long second, abi_long third,
3337 abi_long ptr, abi_long fifth)
3338 {
3339 int version;
3340 abi_long ret = 0;
3341
3342 version = call >> 16;
3343 call &= 0xffff;
3344
3345 switch (call) {
3346 case IPCOP_semop:
3347 ret = do_semop(first, ptr, second);
3348 break;
3349
3350 case IPCOP_semget:
3351 ret = get_errno(semget(first, second, third));
3352 break;
3353
3354 case IPCOP_semctl: {
3355 /* The semun argument to semctl is passed by value, so dereference the
3356 * ptr argument. */
3357 abi_ulong atptr;
3358 get_user_ual(atptr, ptr);
3359 ret = do_semctl(first, second, third, atptr);
3360 break;
3361 }
3362
3363 case IPCOP_msgget:
3364 ret = get_errno(msgget(first, second));
3365 break;
3366
3367 case IPCOP_msgsnd:
3368 ret = do_msgsnd(first, ptr, second, third);
3369 break;
3370
3371 case IPCOP_msgctl:
3372 ret = do_msgctl(first, second, ptr);
3373 break;
3374
3375 case IPCOP_msgrcv:
3376 switch (version) {
3377 case 0:
3378 {
3379 struct target_ipc_kludge {
3380 abi_long msgp;
3381 abi_long msgtyp;
3382 } *tmp;
3383
3384 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3385 ret = -TARGET_EFAULT;
3386 break;
3387 }
3388
3389 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3390
3391 unlock_user_struct(tmp, ptr, 0);
3392 break;
3393 }
3394 default:
3395 ret = do_msgrcv(first, ptr, second, fifth, third);
3396 }
3397 break;
3398
3399 case IPCOP_shmat:
3400 switch (version) {
3401 default:
3402 {
3403 abi_ulong raddr;
3404 raddr = do_shmat(first, ptr, second);
3405 if (is_error(raddr))
3406 return get_errno(raddr);
3407 if (put_user_ual(raddr, third))
3408 return -TARGET_EFAULT;
3409 break;
3410 }
3411 case 1:
3412 ret = -TARGET_EINVAL;
3413 break;
3414 }
3415 break;
3416 case IPCOP_shmdt:
3417 ret = do_shmdt(ptr);
3418 break;
3419
3420 case IPCOP_shmget:
3421 /* IPC_* flag values are the same on all linux platforms */
3422 ret = get_errno(shmget(first, second, third));
3423 break;
3424
3425 /* IPC_* and SHM_* command values are the same on all linux platforms */
3426 case IPCOP_shmctl:
3427 ret = do_shmctl(first, second, ptr);
3428 break;
3429 default:
3430 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3431 ret = -TARGET_ENOSYS;
3432 break;
3433 }
3434 return ret;
3435 }
3436 #endif
3437
3438 /* kernel structure types definitions */
3439
3440 #define STRUCT(name, ...) STRUCT_ ## name,
3441 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3442 enum {
3443 #include "syscall_types.h"
3444 STRUCT_MAX
3445 };
3446 #undef STRUCT
3447 #undef STRUCT_SPECIAL
3448
3449 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3450 #define STRUCT_SPECIAL(name)
3451 #include "syscall_types.h"
3452 #undef STRUCT
3453 #undef STRUCT_SPECIAL
3454
3455 typedef struct IOCTLEntry IOCTLEntry;
3456
3457 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3458 int fd, int cmd, abi_long arg);
3459
3460 struct IOCTLEntry {
3461 int target_cmd;
3462 unsigned int host_cmd;
3463 const char *name;
3464 int access;
3465 do_ioctl_fn *do_ioctl;
3466 const argtype arg_type[5];
3467 };
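/* Note: each IOCTLEntry maps a target ioctl request number to the host
 * one; arg_type[] describes the argument layout for generic thunk
 * conversion, and an entry may instead supply a do_ioctl callback (such as
 * do_ioctl_fs_ioc_fiemap() or do_ioctl_dm() below) for requests whose
 * argument marshalling the generic description cannot express.
 */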
3468
3469 #define IOC_R 0x0001
3470 #define IOC_W 0x0002
3471 #define IOC_RW (IOC_R | IOC_W)
3472
3473 #define MAX_STRUCT_SIZE 4096
3474
3475 #ifdef CONFIG_FIEMAP
3476 /* So fiemap access checks don't overflow on 32 bit systems.
3477 * This is very slightly smaller than the limit imposed by
3478 * the underlying kernel.
3479 */
3480 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3481 / sizeof(struct fiemap_extent))
3482
3483 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3484 int fd, int cmd, abi_long arg)
3485 {
3486 /* The parameter for this ioctl is a struct fiemap followed
3487 * by an array of struct fiemap_extent whose size is set
3488 * in fiemap->fm_extent_count. The array is filled in by the
3489 * ioctl.
3490 */
3491 int target_size_in, target_size_out;
3492 struct fiemap *fm;
3493 const argtype *arg_type = ie->arg_type;
3494 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3495 void *argptr, *p;
3496 abi_long ret;
3497 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3498 uint32_t outbufsz;
3499 int free_fm = 0;
3500
3501 assert(arg_type[0] == TYPE_PTR);
3502 assert(ie->access == IOC_RW);
3503 arg_type++;
3504 target_size_in = thunk_type_size(arg_type, 0);
3505 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3506 if (!argptr) {
3507 return -TARGET_EFAULT;
3508 }
3509 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3510 unlock_user(argptr, arg, 0);
3511 fm = (struct fiemap *)buf_temp;
3512 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3513 return -TARGET_EINVAL;
3514 }
3515
3516 outbufsz = sizeof (*fm) +
3517 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3518
3519 if (outbufsz > MAX_STRUCT_SIZE) {
3520 /* We can't fit all the extents into the fixed size buffer.
3521 * Allocate one that is large enough and use it instead.
3522 */
3523 fm = g_try_malloc(outbufsz);
3524 if (!fm) {
3525 return -TARGET_ENOMEM;
3526 }
3527 memcpy(fm, buf_temp, sizeof(struct fiemap));
3528 free_fm = 1;
3529 }
3530 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3531 if (!is_error(ret)) {
3532 target_size_out = target_size_in;
3533 /* An extent_count of 0 means we were only counting the extents
3534 * so there are no structs to copy
3535 */
3536 if (fm->fm_extent_count != 0) {
3537 target_size_out += fm->fm_mapped_extents * extent_size;
3538 }
3539 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3540 if (!argptr) {
3541 ret = -TARGET_EFAULT;
3542 } else {
3543 /* Convert the struct fiemap */
3544 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3545 if (fm->fm_extent_count != 0) {
3546 p = argptr + target_size_in;
3547 /* ...and then all the struct fiemap_extents */
3548 for (i = 0; i < fm->fm_mapped_extents; i++) {
3549 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3550 THUNK_TARGET);
3551 p += extent_size;
3552 }
3553 }
3554 unlock_user(argptr, arg, target_size_out);
3555 }
3556 }
3557 if (free_fm) {
3558 g_free(fm);
3559 }
3560 return ret;
3561 }
3562 #endif
3563
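/* Note: the struct ifconf handler below interprets the guest's ifc_len in
 * units of the target-sized ifreq, prepares a host buffer large enough for
 * the same number of host-sized ifreq entries, performs the ioctl, and then
 * thunk-converts each returned entry back into the guest's ifc_buf with
 * ifc_len rewritten in target units.
 */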
3564 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3565 int fd, int cmd, abi_long arg)
3566 {
3567 const argtype *arg_type = ie->arg_type;
3568 int target_size;
3569 void *argptr;
3570 int ret;
3571 struct ifconf *host_ifconf;
3572 uint32_t outbufsz;
3573 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3574 int target_ifreq_size;
3575 int nb_ifreq;
3576 int free_buf = 0;
3577 int i;
3578 int target_ifc_len;
3579 abi_long target_ifc_buf;
3580 int host_ifc_len;
3581 char *host_ifc_buf;
3582
3583 assert(arg_type[0] == TYPE_PTR);
3584 assert(ie->access == IOC_RW);
3585
3586 arg_type++;
3587 target_size = thunk_type_size(arg_type, 0);
3588
3589 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3590 if (!argptr)
3591 return -TARGET_EFAULT;
3592 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3593 unlock_user(argptr, arg, 0);
3594
3595 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3596 target_ifc_len = host_ifconf->ifc_len;
3597 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3598
3599 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3600 nb_ifreq = target_ifc_len / target_ifreq_size;
3601 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3602
3603 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3604 if (outbufsz > MAX_STRUCT_SIZE) {
3605 /* We can't fit all the ifreq entries into the fixed size buffer.
3606 * Allocate one that is large enough and use it instead.
3607 */
3608 host_ifconf = malloc(outbufsz);
3609 if (!host_ifconf) {
3610 return -TARGET_ENOMEM;
3611 }
3612 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3613 free_buf = 1;
3614 }
3615 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3616
3617 host_ifconf->ifc_len = host_ifc_len;
3618 host_ifconf->ifc_buf = host_ifc_buf;
3619
3620 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3621 if (!is_error(ret)) {
3622 /* convert host ifc_len to target ifc_len */
3623
3624 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3625 target_ifc_len = nb_ifreq * target_ifreq_size;
3626 host_ifconf->ifc_len = target_ifc_len;
3627
3628 /* restore target ifc_buf */
3629
3630 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3631
3632 /* copy struct ifconf to target user */
3633
3634 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3635 if (!argptr)
3636 return -TARGET_EFAULT;
3637 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3638 unlock_user(argptr, arg, target_size);
3639
3640 /* copy ifreq[] to target user */
3641
3642 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3643 for (i = 0; i < nb_ifreq ; i++) {
3644 thunk_convert(argptr + i * target_ifreq_size,
3645 host_ifc_buf + i * sizeof(struct ifreq),
3646 ifreq_arg_type, THUNK_TARGET);
3647 }
3648 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3649 }
3650
3651 if (free_buf) {
3652 free(host_ifconf);
3653 }
3654
3655 return ret;
3656 }
3657
3658 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3659 int cmd, abi_long arg)
3660 {
3661 void *argptr;
3662 struct dm_ioctl *host_dm;
3663 abi_long guest_data;
3664 uint32_t guest_data_size;
3665 int target_size;
3666 const argtype *arg_type = ie->arg_type;
3667 abi_long ret;
3668 void *big_buf = NULL;
3669 char *host_data;
3670
3671 arg_type++;
3672 target_size = thunk_type_size(arg_type, 0);
3673 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3674 if (!argptr) {
3675 ret = -TARGET_EFAULT;
3676 goto out;
3677 }
3678 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3679 unlock_user(argptr, arg, 0);
3680
3681 /* buf_temp is too small, so fetch things into a bigger buffer */
3682 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3683 memcpy(big_buf, buf_temp, target_size);
3684 buf_temp = big_buf;
3685 host_dm = big_buf;
3686
3687 guest_data = arg + host_dm->data_start;
3688 if ((guest_data - arg) < 0) {
3689 ret = -EINVAL;
3690 goto out;
3691 }
3692 guest_data_size = host_dm->data_size - host_dm->data_start;
3693 host_data = (char*)host_dm + host_dm->data_start;
3694
3695 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3696 switch (ie->host_cmd) {
3697 case DM_REMOVE_ALL:
3698 case DM_LIST_DEVICES:
3699 case DM_DEV_CREATE:
3700 case DM_DEV_REMOVE:
3701 case DM_DEV_SUSPEND:
3702 case DM_DEV_STATUS:
3703 case DM_DEV_WAIT:
3704 case DM_TABLE_STATUS:
3705 case DM_TABLE_CLEAR:
3706 case DM_TABLE_DEPS:
3707 case DM_LIST_VERSIONS:
3708 /* no input data */
3709 break;
3710 case DM_DEV_RENAME:
3711 case DM_DEV_SET_GEOMETRY:
3712 /* data contains only strings */
3713 memcpy(host_data, argptr, guest_data_size);
3714 break;
3715 case DM_TARGET_MSG:
3716 memcpy(host_data, argptr, guest_data_size);
3717 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3718 break;
3719 case DM_TABLE_LOAD:
3720 {
3721 void *gspec = argptr;
3722 void *cur_data = host_data;
3723 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3724 int spec_size = thunk_type_size(arg_type, 0);
3725 int i;
3726
3727 for (i = 0; i < host_dm->target_count; i++) {
3728 struct dm_target_spec *spec = cur_data;
3729 uint32_t next;
3730 int slen;
3731
3732 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3733 slen = strlen((char*)gspec + spec_size) + 1;
3734 next = spec->next;
3735 spec->next = sizeof(*spec) + slen;
3736 strcpy((char*)&spec[1], gspec + spec_size);
3737 gspec += next;
3738 cur_data += spec->next;
3739 }
3740 break;
3741 }
3742 default:
3743 ret = -TARGET_EINVAL;
3744 unlock_user(argptr, guest_data, 0);
3745 goto out;
3746 }
3747 unlock_user(argptr, guest_data, 0);
3748
3749 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3750 if (!is_error(ret)) {
3751 guest_data = arg + host_dm->data_start;
3752 guest_data_size = host_dm->data_size - host_dm->data_start;
3753 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3754 switch (ie->host_cmd) {
3755 case DM_REMOVE_ALL:
3756 case DM_DEV_CREATE:
3757 case DM_DEV_REMOVE:
3758 case DM_DEV_RENAME:
3759 case DM_DEV_SUSPEND:
3760 case DM_DEV_STATUS:
3761 case DM_TABLE_LOAD:
3762 case DM_TABLE_CLEAR:
3763 case DM_TARGET_MSG:
3764 case DM_DEV_SET_GEOMETRY:
3765 /* no return data */
3766 break;
3767 case DM_LIST_DEVICES:
3768 {
3769 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3770 uint32_t remaining_data = guest_data_size;
3771 void *cur_data = argptr;
3772 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3773 int nl_size = 12; /* can't use thunk_size due to alignment */
3774
3775 while (1) {
3776 uint32_t next = nl->next;
3777 if (next) {
3778 nl->next = nl_size + (strlen(nl->name) + 1);
3779 }
3780 if (remaining_data < nl->next) {
3781 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3782 break;
3783 }
3784 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3785 strcpy(cur_data + nl_size, nl->name);
3786 cur_data += nl->next;
3787 remaining_data -= nl->next;
3788 if (!next) {
3789 break;
3790 }
3791 nl = (void*)nl + next;
3792 }
3793 break;
3794 }
3795 case DM_DEV_WAIT:
3796 case DM_TABLE_STATUS:
3797 {
3798 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3799 void *cur_data = argptr;
3800 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3801 int spec_size = thunk_type_size(arg_type, 0);
3802 int i;
3803
3804 for (i = 0; i < host_dm->target_count; i++) {
3805 uint32_t next = spec->next;
3806 int slen = strlen((char*)&spec[1]) + 1;
3807 spec->next = (cur_data - argptr) + spec_size + slen;
3808 if (guest_data_size < spec->next) {
3809 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3810 break;
3811 }
3812 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3813 strcpy(cur_data + spec_size, (char*)&spec[1]);
3814 cur_data = argptr + spec->next;
3815 spec = (void*)host_dm + host_dm->data_start + next;
3816 }
3817 break;
3818 }
3819 case DM_TABLE_DEPS:
3820 {
3821 void *hdata = (void*)host_dm + host_dm->data_start;
3822 int count = *(uint32_t*)hdata;
3823 uint64_t *hdev = hdata + 8;
3824 uint64_t *gdev = argptr + 8;
3825 int i;
3826
3827 *(uint32_t*)argptr = tswap32(count);
3828 for (i = 0; i < count; i++) {
3829 *gdev = tswap64(*hdev);
3830 gdev++;
3831 hdev++;
3832 }
3833 break;
3834 }
3835 case DM_LIST_VERSIONS:
3836 {
3837 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3838 uint32_t remaining_data = guest_data_size;
3839 void *cur_data = argptr;
3840 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3841 int vers_size = thunk_type_size(arg_type, 0);
3842
3843 while (1) {
3844 uint32_t next = vers->next;
3845 if (next) {
3846 vers->next = vers_size + (strlen(vers->name) + 1);
3847 }
3848 if (remaining_data < vers->next) {
3849 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3850 break;
3851 }
3852 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3853 strcpy(cur_data + vers_size, vers->name);
3854 cur_data += vers->next;
3855 remaining_data -= vers->next;
3856 if (!next) {
3857 break;
3858 }
3859 vers = (void*)vers + next;
3860 }
3861 break;
3862 }
3863 default:
3864 unlock_user(argptr, guest_data, 0);
3865 ret = -TARGET_EINVAL;
3866 goto out;
3867 }
3868 unlock_user(argptr, guest_data, guest_data_size);
3869
3870 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3871 if (!argptr) {
3872 ret = -TARGET_EFAULT;
3873 goto out;
3874 }
3875 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3876 unlock_user(argptr, arg, target_size);
3877 }
3878 out:
3879 g_free(big_buf);
3880 return ret;
3881 }
3882
3883 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3884 int cmd, abi_long arg)
3885 {
3886 void *argptr;
3887 int target_size;
3888 const argtype *arg_type = ie->arg_type;
3889 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3890 abi_long ret;
3891
3892 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3893 struct blkpg_partition host_part;
3894
3895 /* Read and convert blkpg */
3896 arg_type++;
3897 target_size = thunk_type_size(arg_type, 0);
3898 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3899 if (!argptr) {
3900 ret = -TARGET_EFAULT;
3901 goto out;
3902 }
3903 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3904 unlock_user(argptr, arg, 0);
3905
3906 switch (host_blkpg->op) {
3907 case BLKPG_ADD_PARTITION:
3908 case BLKPG_DEL_PARTITION:
3909 /* payload is struct blkpg_partition */
3910 break;
3911 default:
3912 /* Unknown opcode */
3913 ret = -TARGET_EINVAL;
3914 goto out;
3915 }
3916
3917 /* Read and convert blkpg->data */
3918 arg = (abi_long)(uintptr_t)host_blkpg->data;
3919 target_size = thunk_type_size(part_arg_type, 0);
3920 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3921 if (!argptr) {
3922 ret = -TARGET_EFAULT;
3923 goto out;
3924 }
3925 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3926 unlock_user(argptr, arg, 0);
3927
3928 /* Swizzle the data pointer to our local copy and call! */
3929 host_blkpg->data = &host_part;
3930 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3931
3932 out:
3933 return ret;
3934 }
3935
3936 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3937 int fd, int cmd, abi_long arg)
3938 {
3939 const argtype *arg_type = ie->arg_type;
3940 const StructEntry *se;
3941 const argtype *field_types;
3942 const int *dst_offsets, *src_offsets;
3943 int target_size;
3944 void *argptr;
3945 abi_ulong *target_rt_dev_ptr;
3946 unsigned long *host_rt_dev_ptr;
3947 abi_long ret;
3948 int i;
3949
3950 assert(ie->access == IOC_W);
3951 assert(*arg_type == TYPE_PTR);
3952 arg_type++;
3953 assert(*arg_type == TYPE_STRUCT);
3954 target_size = thunk_type_size(arg_type, 0);
3955 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3956 if (!argptr) {
3957 return -TARGET_EFAULT;
3958 }
3959 arg_type++;
3960 assert(*arg_type == (int)STRUCT_rtentry);
3961 se = struct_entries + *arg_type++;
3962 assert(se->convert[0] == NULL);
3963 /* convert the struct here so that we can catch the rt_dev string */
3964 field_types = se->field_types;
3965 dst_offsets = se->field_offsets[THUNK_HOST];
3966 src_offsets = se->field_offsets[THUNK_TARGET];
3967 for (i = 0; i < se->nb_fields; i++) {
3968 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3969 assert(*field_types == TYPE_PTRVOID);
3970 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3971 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3972 if (*target_rt_dev_ptr != 0) {
3973 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3974 tswapal(*target_rt_dev_ptr));
3975 if (!*host_rt_dev_ptr) {
3976 unlock_user(argptr, arg, 0);
3977 return -TARGET_EFAULT;
3978 }
3979 } else {
3980 *host_rt_dev_ptr = 0;
3981 }
3982 field_types++;
3983 continue;
3984 }
3985 field_types = thunk_convert(buf_temp + dst_offsets[i],
3986 argptr + src_offsets[i],
3987 field_types, THUNK_HOST);
3988 }
3989 unlock_user(argptr, arg, 0);
3990
3991 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3992 if (*host_rt_dev_ptr != 0) {
3993 unlock_user((void *)*host_rt_dev_ptr,
3994 *target_rt_dev_ptr, 0);
3995 }
3996 return ret;
3997 }
3998
3999 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4000 int fd, int cmd, abi_long arg)
4001 {
4002 int sig = target_to_host_signal(arg);
4003 return get_errno(ioctl(fd, ie->host_cmd, sig));
4004 }
4005
4006 static IOCTLEntry ioctl_entries[] = {
4007 #define IOCTL(cmd, access, ...) \
4008 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4009 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4010 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4011 #include "ioctls.h"
4012 { 0, 0, },
4013 };
4014
4015 /* ??? Implement proper locking for ioctls. */
4016 /* do_ioctl() must return target values and target errnos. */
4017 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4018 {
4019 const IOCTLEntry *ie;
4020 const argtype *arg_type;
4021 abi_long ret;
4022 uint8_t buf_temp[MAX_STRUCT_SIZE];
4023 int target_size;
4024 void *argptr;
4025
4026 ie = ioctl_entries;
4027 for(;;) {
4028 if (ie->target_cmd == 0) {
4029 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4030 return -TARGET_ENOSYS;
4031 }
4032 if (ie->target_cmd == cmd)
4033 break;
4034 ie++;
4035 }
4036 arg_type = ie->arg_type;
4037 #if defined(DEBUG)
4038 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
4039 #endif
4040 if (ie->do_ioctl) {
4041 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4042 }
4043
4044 switch(arg_type[0]) {
4045 case TYPE_NULL:
4046 /* no argument */
4047 ret = get_errno(ioctl(fd, ie->host_cmd));
4048 break;
4049 case TYPE_PTRVOID:
4050 case TYPE_INT:
4051 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
4052 break;
4053 case TYPE_PTR:
4054 arg_type++;
4055 target_size = thunk_type_size(arg_type, 0);
4056 switch(ie->access) {
4057 case IOC_R:
4058 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4059 if (!is_error(ret)) {
4060 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4061 if (!argptr)
4062 return -TARGET_EFAULT;
4063 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4064 unlock_user(argptr, arg, target_size);
4065 }
4066 break;
4067 case IOC_W:
4068 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4069 if (!argptr)
4070 return -TARGET_EFAULT;
4071 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4072 unlock_user(argptr, arg, 0);
4073 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4074 break;
4075 default:
4076 case IOC_RW:
4077 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4078 if (!argptr)
4079 return -TARGET_EFAULT;
4080 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4081 unlock_user(argptr, arg, 0);
4082 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4083 if (!is_error(ret)) {
4084 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4085 if (!argptr)
4086 return -TARGET_EFAULT;
4087 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4088 unlock_user(argptr, arg, target_size);
4089 }
4090 break;
4091 }
4092 break;
4093 default:
4094 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4095 (long)cmd, arg_type[0]);
4096 ret = -TARGET_ENOSYS;
4097 break;
4098 }
4099 return ret;
4100 }
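
/*
 * Worked example (a sketch; the exact entry is assumed to live in ioctls.h):
 * an entry such as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * expands through the IOCTL() macro above to
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * so a guest TIOCGWINSZ takes the TYPE_PTR/IOC_R branch of do_ioctl(): the
 * host ioctl is issued into buf_temp first, and only on success is the
 * result thunk_convert()ed back into guest memory, so a failing ioctl never
 * scribbles on the guest's buffer.
 */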
4101
4102 static const bitmask_transtbl iflag_tbl[] = {
4103 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4104 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4105 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4106 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4107 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4108 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4109 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4110 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4111 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4112 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4113 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4114 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4115 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4116 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4117 { 0, 0, 0, 0 }
4118 };
4119
4120 static const bitmask_transtbl oflag_tbl[] = {
4121 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4122 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4123 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4124 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4125 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4126 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4127 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4128 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4129 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4130 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4131 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4132 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4133 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4134 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4135 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4136 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4137 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4138 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4139 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4140 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4141 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4142 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4143 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4144 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4145 { 0, 0, 0, 0 }
4146 };
4147
4148 static const bitmask_transtbl cflag_tbl[] = {
4149 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4150 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4151 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4152 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4153 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4154 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4155 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4156 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4157 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4158 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4159 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4160 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4161 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4162 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4163 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4164 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4165 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4166 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4167 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4168 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4169 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4170 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4171 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4172 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4173 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4174 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4175 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4176 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4177 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4178 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4179 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4180 { 0, 0, 0, 0 }
4181 };
4182
4183 static const bitmask_transtbl lflag_tbl[] = {
4184 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4185 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4186 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4187 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4188 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4189 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4190 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4191 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4192 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4193 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4194 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4195 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4196 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4197 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4198 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4199 { 0, 0, 0, 0 }
4200 };
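
/*
 * How these tables are read (a sketch, assuming the column order
 * { target_mask, target_bits, host_mask, host_bits } consumed by
 * target_to_host_bitmask()/host_to_target_bitmask()): for each entry,
 * if (value & target_mask) == target_bits then host_bits is OR'ed into the
 * result.  Single-bit flags map straight through, while multi-valued fields
 * need one entry per value; e.g. a guest c_oflag of
 * (TARGET_OPOST | TARGET_CR2) matches the OPOST entry and
 * { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, giving (OPOST | CR2) on the
 * host even when the numeric encodings differ between the two ABIs.
 */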
4201
4202 static void target_to_host_termios (void *dst, const void *src)
4203 {
4204 struct host_termios *host = dst;
4205 const struct target_termios *target = src;
4206
4207 host->c_iflag =
4208 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4209 host->c_oflag =
4210 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4211 host->c_cflag =
4212 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4213 host->c_lflag =
4214 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4215 host->c_line = target->c_line;
4216
4217 memset(host->c_cc, 0, sizeof(host->c_cc));
4218 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4219 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4220 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4221 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4222 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4223 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4224 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4225 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4226 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4227 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4228 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4229 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4230 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4231 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4232 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4233 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4234 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4235 }
4236
4237 static void host_to_target_termios (void *dst, const void *src)
4238 {
4239 struct target_termios *target = dst;
4240 const struct host_termios *host = src;
4241
4242 target->c_iflag =
4243 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4244 target->c_oflag =
4245 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4246 target->c_cflag =
4247 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4248 target->c_lflag =
4249 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4250 target->c_line = host->c_line;
4251
4252 memset(target->c_cc, 0, sizeof(target->c_cc));
4253 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4254 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4255 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4256 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4257 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4258 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4259 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4260 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4261 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4262 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4263 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4264 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4265 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4266 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4267 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4268 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4269 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
4270 }
4271
4272 static const StructEntry struct_termios_def = {
4273 .convert = { host_to_target_termios, target_to_host_termios },
4274 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4275 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
4276 };
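
/*
 * Note (assuming STRUCT_SPECIAL(termios) in syscall_types.h registers this
 * entry via thunk_register_struct_direct(), cf. syscall_init() below):
 * because .convert is non-NULL, thunk_convert() hands any argument described
 * as MK_PTR(MK_STRUCT(STRUCT_termios)) -- the TCGETS/TCSETS family -- to the
 * two converters above instead of doing generic field-by-field copying,
 * which is what lets the flag-bit and c_cc index remapping happen in one
 * place.
 */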
4277
4278 static bitmask_transtbl mmap_flags_tbl[] = {
4279 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4280 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4281 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4282 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4283 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4284 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4285 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4286 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4287 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4288 MAP_NORESERVE },
4289 { 0, 0, 0, 0 }
4290 };
4291
4292 #if defined(TARGET_I386)
4293
4294 /* NOTE: there is really one LDT for all the threads */
4295 static uint8_t *ldt_table;
4296
4297 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4298 {
4299 int size;
4300 void *p;
4301
4302 if (!ldt_table)
4303 return 0;
4304 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4305 if (size > bytecount)
4306 size = bytecount;
4307 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4308 if (!p)
4309 return -TARGET_EFAULT;
4310 /* ??? Should this be byteswapped? */
4311 memcpy(p, ldt_table, size);
4312 unlock_user(p, ptr, size);
4313 return size;
4314 }
4315
4316 /* XXX: add locking support */
4317 static abi_long write_ldt(CPUX86State *env,
4318 abi_ulong ptr, unsigned long bytecount, int oldmode)
4319 {
4320 struct target_modify_ldt_ldt_s ldt_info;
4321 struct target_modify_ldt_ldt_s *target_ldt_info;
4322 int seg_32bit, contents, read_exec_only, limit_in_pages;
4323 int seg_not_present, useable, lm;
4324 uint32_t *lp, entry_1, entry_2;
4325
4326 if (bytecount != sizeof(ldt_info))
4327 return -TARGET_EINVAL;
4328 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4329 return -TARGET_EFAULT;
4330 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4331 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4332 ldt_info.limit = tswap32(target_ldt_info->limit);
4333 ldt_info.flags = tswap32(target_ldt_info->flags);
4334 unlock_user_struct(target_ldt_info, ptr, 0);
4335
4336 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4337 return -TARGET_EINVAL;
4338 seg_32bit = ldt_info.flags & 1;
4339 contents = (ldt_info.flags >> 1) & 3;
4340 read_exec_only = (ldt_info.flags >> 3) & 1;
4341 limit_in_pages = (ldt_info.flags >> 4) & 1;
4342 seg_not_present = (ldt_info.flags >> 5) & 1;
4343 useable = (ldt_info.flags >> 6) & 1;
4344 #ifdef TARGET_ABI32
4345 lm = 0;
4346 #else
4347 lm = (ldt_info.flags >> 7) & 1;
4348 #endif
4349 if (contents == 3) {
4350 if (oldmode)
4351 return -TARGET_EINVAL;
4352 if (seg_not_present == 0)
4353 return -TARGET_EINVAL;
4354 }
4355 /* allocate the LDT */
4356 if (!ldt_table) {
4357 env->ldt.base = target_mmap(0,
4358 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4359 PROT_READ|PROT_WRITE,
4360 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4361 if (env->ldt.base == -1)
4362 return -TARGET_ENOMEM;
4363 memset(g2h(env->ldt.base), 0,
4364 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4365 env->ldt.limit = 0xffff;
4366 ldt_table = g2h(env->ldt.base);
4367 }
4368
4369 /* NOTE: same code as Linux kernel */
4370 /* Allow LDTs to be cleared by the user. */
4371 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4372 if (oldmode ||
4373 (contents == 0 &&
4374 read_exec_only == 1 &&
4375 seg_32bit == 0 &&
4376 limit_in_pages == 0 &&
4377 seg_not_present == 1 &&
4378 useable == 0 )) {
4379 entry_1 = 0;
4380 entry_2 = 0;
4381 goto install;
4382 }
4383 }
4384
4385 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4386 (ldt_info.limit & 0x0ffff);
4387 entry_2 = (ldt_info.base_addr & 0xff000000) |
4388 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4389 (ldt_info.limit & 0xf0000) |
4390 ((read_exec_only ^ 1) << 9) |
4391 (contents << 10) |
4392 ((seg_not_present ^ 1) << 15) |
4393 (seg_32bit << 22) |
4394 (limit_in_pages << 23) |
4395 (lm << 21) |
4396 0x7000;
4397 if (!oldmode)
4398 entry_2 |= (useable << 20);
4399
4400 /* Install the new entry ... */
4401 install:
4402 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4403 lp[0] = tswap32(entry_1);
4404 lp[1] = tswap32(entry_2);
4405 return 0;
4406 }
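
/*
 * Layout sketch for the entry_1/entry_2 packing above (the standard ia32
 * segment descriptor format, stated here as background rather than derived
 * from this file): entry_1 carries base[15:0] in its top half and
 * limit[15:0] in its bottom half; entry_2 carries base[31:24], the flag
 * bits, limit[19:16] and base[23:16], with the constant 0x7000 setting the
 * S bit and DPL=3 (user).  Worked example: base_addr = 0x12345678,
 * limit = 0xabcde, seg_32bit = 1, limit_in_pages = 1, useable = 1 and
 * everything else 0 packs to
 *     entry_1 = 0x5678bcde
 *     entry_2 = 0x12daf234
 * where read_exec_only == 0 supplies type bit 9 ("writable") and
 * seg_not_present == 0 supplies the present bit 15.
 */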
4407
4408 /* specific and weird i386 syscalls */
4409 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4410 unsigned long bytecount)
4411 {
4412 abi_long ret;
4413
4414 switch (func) {
4415 case 0:
4416 ret = read_ldt(ptr, bytecount);
4417 break;
4418 case 1:
4419 ret = write_ldt(env, ptr, bytecount, 1);
4420 break;
4421 case 0x11:
4422 ret = write_ldt(env, ptr, bytecount, 0);
4423 break;
4424 default:
4425 ret = -TARGET_ENOSYS;
4426 break;
4427 }
4428 return ret;
4429 }
4430
4431 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4432 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4433 {
4434 uint64_t *gdt_table = g2h(env->gdt.base);
4435 struct target_modify_ldt_ldt_s ldt_info;
4436 struct target_modify_ldt_ldt_s *target_ldt_info;
4437 int seg_32bit, contents, read_exec_only, limit_in_pages;
4438 int seg_not_present, useable, lm;
4439 uint32_t *lp, entry_1, entry_2;
4440 int i;
4441
4442 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4443 if (!target_ldt_info)
4444 return -TARGET_EFAULT;
4445 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4446 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4447 ldt_info.limit = tswap32(target_ldt_info->limit);
4448 ldt_info.flags = tswap32(target_ldt_info->flags);
4449 if (ldt_info.entry_number == -1) {
4450 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4451 if (gdt_table[i] == 0) {
4452 ldt_info.entry_number = i;
4453 target_ldt_info->entry_number = tswap32(i);
4454 break;
4455 }
4456 }
4457 }
4458 unlock_user_struct(target_ldt_info, ptr, 1);
4459
4460 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4461 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4462 return -TARGET_EINVAL;
4463 seg_32bit = ldt_info.flags & 1;
4464 contents = (ldt_info.flags >> 1) & 3;
4465 read_exec_only = (ldt_info.flags >> 3) & 1;
4466 limit_in_pages = (ldt_info.flags >> 4) & 1;
4467 seg_not_present = (ldt_info.flags >> 5) & 1;
4468 useable = (ldt_info.flags >> 6) & 1;
4469 #ifdef TARGET_ABI32
4470 lm = 0;
4471 #else
4472 lm = (ldt_info.flags >> 7) & 1;
4473 #endif
4474
4475 if (contents == 3) {
4476 if (seg_not_present == 0)
4477 return -TARGET_EINVAL;
4478 }
4479
4480 /* NOTE: same code as Linux kernel */
4481 /* Allow LDTs to be cleared by the user. */
4482 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4483 if ((contents == 0 &&
4484 read_exec_only == 1 &&
4485 seg_32bit == 0 &&
4486 limit_in_pages == 0 &&
4487 seg_not_present == 1 &&
4488 useable == 0 )) {
4489 entry_1 = 0;
4490 entry_2 = 0;
4491 goto install;
4492 }
4493 }
4494
4495 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4496 (ldt_info.limit & 0x0ffff);
4497 entry_2 = (ldt_info.base_addr & 0xff000000) |
4498 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4499 (ldt_info.limit & 0xf0000) |
4500 ((read_exec_only ^ 1) << 9) |
4501 (contents << 10) |
4502 ((seg_not_present ^ 1) << 15) |
4503 (seg_32bit << 22) |
4504 (limit_in_pages << 23) |
4505 (useable << 20) |
4506 (lm << 21) |
4507 0x7000;
4508
4509 /* Install the new entry ... */
4510 install:
4511 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4512 lp[0] = tswap32(entry_1);
4513 lp[1] = tswap32(entry_2);
4514 return 0;
4515 }
4516
4517 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4518 {
4519 struct target_modify_ldt_ldt_s *target_ldt_info;
4520 uint64_t *gdt_table = g2h(env->gdt.base);
4521 uint32_t base_addr, limit, flags;
4522 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4523 int seg_not_present, useable, lm;
4524 uint32_t *lp, entry_1, entry_2;
4525
4526 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4527 if (!target_ldt_info)
4528 return -TARGET_EFAULT;
4529 idx = tswap32(target_ldt_info->entry_number);
4530 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4531 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4532 unlock_user_struct(target_ldt_info, ptr, 1);
4533 return -TARGET_EINVAL;
4534 }
4535 lp = (uint32_t *)(gdt_table + idx);
4536 entry_1 = tswap32(lp[0]);
4537 entry_2 = tswap32(lp[1]);
4538
4539 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4540 contents = (entry_2 >> 10) & 3;
4541 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4542 seg_32bit = (entry_2 >> 22) & 1;
4543 limit_in_pages = (entry_2 >> 23) & 1;
4544 useable = (entry_2 >> 20) & 1;
4545 #ifdef TARGET_ABI32
4546 lm = 0;
4547 #else
4548 lm = (entry_2 >> 21) & 1;
4549 #endif
4550 flags = (seg_32bit << 0) | (contents << 1) |
4551 (read_exec_only << 3) | (limit_in_pages << 4) |
4552 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4553 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4554 base_addr = (entry_1 >> 16) |
4555 (entry_2 & 0xff000000) |
4556 ((entry_2 & 0xff) << 16);
4557 target_ldt_info->base_addr = tswapal(base_addr);
4558 target_ldt_info->limit = tswap32(limit);
4559 target_ldt_info->flags = tswap32(flags);
4560 unlock_user_struct(target_ldt_info, ptr, 1);
4561 return 0;
4562 }
4563 #endif /* TARGET_I386 && TARGET_ABI32 */
4564
4565 #ifndef TARGET_ABI32
4566 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4567 {
4568 abi_long ret = 0;
4569 abi_ulong val;
4570 int idx;
4571
4572 switch(code) {
4573 case TARGET_ARCH_SET_GS:
4574 case TARGET_ARCH_SET_FS:
4575 if (code == TARGET_ARCH_SET_GS)
4576 idx = R_GS;
4577 else
4578 idx = R_FS;
4579 cpu_x86_load_seg(env, idx, 0);
4580 env->segs[idx].base = addr;
4581 break;
4582 case TARGET_ARCH_GET_GS:
4583 case TARGET_ARCH_GET_FS:
4584 if (code == TARGET_ARCH_GET_GS)
4585 idx = R_GS;
4586 else
4587 idx = R_FS;
4588 val = env->segs[idx].base;
4589 if (put_user(val, addr, abi_ulong))
4590 ret = -TARGET_EFAULT;
4591 break;
4592 default:
4593 ret = -TARGET_EINVAL;
4594 break;
4595 }
4596 return ret;
4597 }
4598 #endif
4599
4600 #endif /* defined(TARGET_I386) */
4601
4602 #define NEW_STACK_SIZE 0x40000
4603
4604
4605 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4606 typedef struct {
4607 CPUArchState *env;
4608 pthread_mutex_t mutex;
4609 pthread_cond_t cond;
4610 pthread_t thread;
4611 uint32_t tid;
4612 abi_ulong child_tidptr;
4613 abi_ulong parent_tidptr;
4614 sigset_t sigmask;
4615 } new_thread_info;
4616
4617 static void *clone_func(void *arg)
4618 {
4619 new_thread_info *info = arg;
4620 CPUArchState *env;
4621 CPUState *cpu;
4622 TaskState *ts;
4623
4624 rcu_register_thread();
4625 env = info->env;
4626 cpu = ENV_GET_CPU(env);
4627 thread_cpu = cpu;
4628 ts = (TaskState *)cpu->opaque;
4629 info->tid = gettid();
4630 cpu->host_tid = info->tid;
4631 task_settid(ts);
4632 if (info->child_tidptr)
4633 put_user_u32(info->tid, info->child_tidptr);
4634 if (info->parent_tidptr)
4635 put_user_u32(info->tid, info->parent_tidptr);
4636 /* Enable signals. */
4637 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4638 /* Signal to the parent that we're ready. */
4639 pthread_mutex_lock(&info->mutex);
4640 pthread_cond_broadcast(&info->cond);
4641 pthread_mutex_unlock(&info->mutex);
4642 /* Wait until the parent has finished initializing the TLS state. */
4643 pthread_mutex_lock(&clone_lock);
4644 pthread_mutex_unlock(&clone_lock);
4645 cpu_loop(env);
4646 /* never exits */
4647 return NULL;
4648 }
4649
4650 /* do_fork() must return host values and target errnos (unlike most
4651 do_*() functions). */
4652 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4653 abi_ulong parent_tidptr, target_ulong newtls,
4654 abi_ulong child_tidptr)
4655 {
4656 CPUState *cpu = ENV_GET_CPU(env);
4657 int ret;
4658 TaskState *ts;
4659 CPUState *new_cpu;
4660 CPUArchState *new_env;
4661 unsigned int nptl_flags;
4662 sigset_t sigmask;
4663
4664 /* Emulate vfork() with fork() */
4665 if (flags & CLONE_VFORK)
4666 flags &= ~(CLONE_VFORK | CLONE_VM);
4667
4668 if (flags & CLONE_VM) {
4669 TaskState *parent_ts = (TaskState *)cpu->opaque;
4670 new_thread_info info;
4671 pthread_attr_t attr;
4672
4673 ts = g_new0(TaskState, 1);
4674 init_task_state(ts);
4675 /* we create a new CPU instance. */
4676 new_env = cpu_copy(env);
4677 /* Init regs that differ from the parent. */
4678 cpu_clone_regs(new_env, newsp);
4679 new_cpu = ENV_GET_CPU(new_env);
4680 new_cpu->opaque = ts;
4681 ts->bprm = parent_ts->bprm;
4682 ts->info = parent_ts->info;
4683 nptl_flags = flags;
4684 flags &= ~CLONE_NPTL_FLAGS2;
4685
4686 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4687 ts->child_tidptr = child_tidptr;
4688 }
4689
4690 if (nptl_flags & CLONE_SETTLS)
4691 cpu_set_tls (new_env, newtls);
4692
4693 /* Grab a mutex so that thread setup appears atomic. */
4694 pthread_mutex_lock(&clone_lock);
4695
4696 memset(&info, 0, sizeof(info));
4697 pthread_mutex_init(&info.mutex, NULL);
4698 pthread_mutex_lock(&info.mutex);
4699 pthread_cond_init(&info.cond, NULL);
4700 info.env = new_env;
4701 if (nptl_flags & CLONE_CHILD_SETTID)
4702 info.child_tidptr = child_tidptr;
4703 if (nptl_flags & CLONE_PARENT_SETTID)
4704 info.parent_tidptr = parent_tidptr;
4705
4706 ret = pthread_attr_init(&attr);
4707 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4708 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4709 /* It is not safe to deliver signals until the child has finished
4710 initializing, so temporarily block all signals. */
4711 sigfillset(&sigmask);
4712 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4713
4714 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4715 /* TODO: Free new CPU state if thread creation failed. */
4716
4717 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4718 pthread_attr_destroy(&attr);
4719 if (ret == 0) {
4720 /* Wait for the child to initialize. */
4721 pthread_cond_wait(&info.cond, &info.mutex);
4722 ret = info.tid;
4723 if (flags & CLONE_PARENT_SETTID)
4724 put_user_u32(ret, parent_tidptr);
4725 } else {
4726 ret = -1;
4727 }
4728 pthread_mutex_unlock(&info.mutex);
4729 pthread_cond_destroy(&info.cond);
4730 pthread_mutex_destroy(&info.mutex);
4731 pthread_mutex_unlock(&clone_lock);
4732 } else {
4733 /* if CLONE_VM is not set, we consider it a fork */
4734 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
4735 return -TARGET_EINVAL;
4736 }
4737 fork_start();
4738 ret = fork();
4739 if (ret == 0) {
4740 /* Child Process. */
4741 rcu_after_fork();
4742 cpu_clone_regs(env, newsp);
4743 fork_end(1);
4744 /* There is a race condition here. The parent process could
4745 theoretically read the TID in the child process before the child
4746 tid is set. This would require using either ptrace
4747 (not implemented) or having *_tidptr point at a shared memory
4748 mapping. We can't repeat the spinlock hack used above because
4749 the child process gets its own copy of the lock. */
4750 if (flags & CLONE_CHILD_SETTID)
4751 put_user_u32(gettid(), child_tidptr);
4752 if (flags & CLONE_PARENT_SETTID)
4753 put_user_u32(gettid(), parent_tidptr);
4754 ts = (TaskState *)cpu->opaque;
4755 if (flags & CLONE_SETTLS)
4756 cpu_set_tls (env, newtls);
4757 if (flags & CLONE_CHILD_CLEARTID)
4758 ts->child_tidptr = child_tidptr;
4759 } else {
4760 fork_end(0);
4761 }
4762 }
4763 return ret;
4764 }
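
/*
 * Synchronisation sketch for the CLONE_VM path above (describing the code
 * as written): the parent takes clone_lock and info.mutex before
 * pthread_create(); the child fills in info.tid and broadcasts info.cond;
 * the parent wakes from pthread_cond_wait() with the tid it will return;
 * and only once the parent drops clone_lock does clone_func() get past its
 * lock/unlock pair and enter cpu_loop().  That ordering is what makes the
 * tid/TLS setup appear atomic to the new thread.
 */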
4765
4766 /* warning: does not handle Linux-specific flags... */
4767 static int target_to_host_fcntl_cmd(int cmd)
4768 {
4769 switch(cmd) {
4770 case TARGET_F_DUPFD:
4771 case TARGET_F_GETFD:
4772 case TARGET_F_SETFD:
4773 case TARGET_F_GETFL:
4774 case TARGET_F_SETFL:
4775 return cmd;
4776 case TARGET_F_GETLK:
4777 return F_GETLK;
4778 case TARGET_F_SETLK:
4779 return F_SETLK;
4780 case TARGET_F_SETLKW:
4781 return F_SETLKW;
4782 case TARGET_F_GETOWN:
4783 return F_GETOWN;
4784 case TARGET_F_SETOWN:
4785 return F_SETOWN;
4786 case TARGET_F_GETSIG:
4787 return F_GETSIG;
4788 case TARGET_F_SETSIG:
4789 return F_SETSIG;
4790 #if TARGET_ABI_BITS == 32
4791 case TARGET_F_GETLK64:
4792 return F_GETLK64;
4793 case TARGET_F_SETLK64:
4794 return F_SETLK64;
4795 case TARGET_F_SETLKW64:
4796 return F_SETLKW64;
4797 #endif
4798 case TARGET_F_SETLEASE:
4799 return F_SETLEASE;
4800 case TARGET_F_GETLEASE:
4801 return F_GETLEASE;
4802 #ifdef F_DUPFD_CLOEXEC
4803 case TARGET_F_DUPFD_CLOEXEC:
4804 return F_DUPFD_CLOEXEC;
4805 #endif
4806 case TARGET_F_NOTIFY:
4807 return F_NOTIFY;
4808 #ifdef F_GETOWN_EX
4809 case TARGET_F_GETOWN_EX:
4810 return F_GETOWN_EX;
4811 #endif
4812 #ifdef F_SETOWN_EX
4813 case TARGET_F_SETOWN_EX:
4814 return F_SETOWN_EX;
4815 #endif
4816 default:
4817 return -TARGET_EINVAL;
4818 }
4819 return -TARGET_EINVAL;
4820 }
4821
4822 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4823 static const bitmask_transtbl flock_tbl[] = {
4824 TRANSTBL_CONVERT(F_RDLCK),
4825 TRANSTBL_CONVERT(F_WRLCK),
4826 TRANSTBL_CONVERT(F_UNLCK),
4827 TRANSTBL_CONVERT(F_EXLCK),
4828 TRANSTBL_CONVERT(F_SHLCK),
4829 { 0, 0, 0, 0 }
4830 };
4831
4832 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4833 {
4834 struct flock fl;
4835 struct target_flock *target_fl;
4836 struct flock64 fl64;
4837 struct target_flock64 *target_fl64;
4838 #ifdef F_GETOWN_EX
4839 struct f_owner_ex fox;
4840 struct target_f_owner_ex *target_fox;
4841 #endif
4842 abi_long ret;
4843 int host_cmd = target_to_host_fcntl_cmd(cmd);
4844
4845 if (host_cmd == -TARGET_EINVAL)
4846 return host_cmd;
4847
4848 switch(cmd) {
4849 case TARGET_F_GETLK:
4850 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4851 return -TARGET_EFAULT;
4852 fl.l_type =
4853 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4854 fl.l_whence = tswap16(target_fl->l_whence);
4855 fl.l_start = tswapal(target_fl->l_start);
4856 fl.l_len = tswapal(target_fl->l_len);
4857 fl.l_pid = tswap32(target_fl->l_pid);
4858 unlock_user_struct(target_fl, arg, 0);
4859 ret = get_errno(fcntl(fd, host_cmd, &fl));
4860 if (ret == 0) {
4861 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4862 return -TARGET_EFAULT;
4863 target_fl->l_type =
4864 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4865 target_fl->l_whence = tswap16(fl.l_whence);
4866 target_fl->l_start = tswapal(fl.l_start);
4867 target_fl->l_len = tswapal(fl.l_len);
4868 target_fl->l_pid = tswap32(fl.l_pid);
4869 unlock_user_struct(target_fl, arg, 1);
4870 }
4871 break;
4872
4873 case TARGET_F_SETLK:
4874 case TARGET_F_SETLKW:
4875 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4876 return -TARGET_EFAULT;
4877 fl.l_type =
4878 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4879 fl.l_whence = tswap16(target_fl->l_whence);
4880 fl.l_start = tswapal(target_fl->l_start);
4881 fl.l_len = tswapal(target_fl->l_len);
4882 fl.l_pid = tswap32(target_fl->l_pid);
4883 unlock_user_struct(target_fl, arg, 0);
4884 ret = get_errno(fcntl(fd, host_cmd, &fl));
4885 break;
4886
4887 case TARGET_F_GETLK64:
4888 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4889 return -TARGET_EFAULT;
4890 fl64.l_type =
4891 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4892 fl64.l_whence = tswap16(target_fl64->l_whence);
4893 fl64.l_start = tswap64(target_fl64->l_start);
4894 fl64.l_len = tswap64(target_fl64->l_len);
4895 fl64.l_pid = tswap32(target_fl64->l_pid);
4896 unlock_user_struct(target_fl64, arg, 0);
4897 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4898 if (ret == 0) {
4899 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4900 return -TARGET_EFAULT;
4901 target_fl64->l_type =
4902 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4903 target_fl64->l_whence = tswap16(fl64.l_whence);
4904 target_fl64->l_start = tswap64(fl64.l_start);
4905 target_fl64->l_len = tswap64(fl64.l_len);
4906 target_fl64->l_pid = tswap32(fl64.l_pid);
4907 unlock_user_struct(target_fl64, arg, 1);
4908 }
4909 break;
4910 case TARGET_F_SETLK64:
4911 case TARGET_F_SETLKW64:
4912 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4913 return -TARGET_EFAULT;
4914 fl64.l_type =
4915 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4916 fl64.l_whence = tswap16(target_fl64->l_whence);
4917 fl64.l_start = tswap64(target_fl64->l_start);
4918 fl64.l_len = tswap64(target_fl64->l_len);
4919 fl64.l_pid = tswap32(target_fl64->l_pid);
4920 unlock_user_struct(target_fl64, arg, 0);
4921 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4922 break;
4923
4924 case TARGET_F_GETFL:
4925 ret = get_errno(fcntl(fd, host_cmd, arg));
4926 if (ret >= 0) {
4927 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4928 }
4929 break;
4930
4931 case TARGET_F_SETFL:
4932 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4933 break;
4934
4935 #ifdef F_GETOWN_EX
4936 case TARGET_F_GETOWN_EX:
4937 ret = get_errno(fcntl(fd, host_cmd, &fox));
4938 if (ret >= 0) {
4939 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4940 return -TARGET_EFAULT;
4941 target_fox->type = tswap32(fox.type);
4942 target_fox->pid = tswap32(fox.pid);
4943 unlock_user_struct(target_fox, arg, 1);
4944 }
4945 break;
4946 #endif
4947
4948 #ifdef F_SETOWN_EX
4949 case TARGET_F_SETOWN_EX:
4950 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4951 return -TARGET_EFAULT;
4952 fox.type = tswap32(target_fox->type);
4953 fox.pid = tswap32(target_fox->pid);
4954 unlock_user_struct(target_fox, arg, 0);
4955 ret = get_errno(fcntl(fd, host_cmd, &fox));
4956 break;
4957 #endif
4958
4959 case TARGET_F_SETOWN:
4960 case TARGET_F_GETOWN:
4961 case TARGET_F_SETSIG:
4962 case TARGET_F_GETSIG:
4963 case TARGET_F_SETLEASE:
4964 case TARGET_F_GETLEASE:
4965 ret = get_errno(fcntl(fd, host_cmd, arg));
4966 break;
4967
4968 default:
4969 ret = get_errno(fcntl(fd, cmd, arg));
4970 break;
4971 }
4972 return ret;
4973 }
4974
4975 #ifdef USE_UID16
4976
4977 static inline int high2lowuid(int uid)
4978 {
4979 if (uid > 65535)
4980 return 65534;
4981 else
4982 return uid;
4983 }
4984
4985 static inline int high2lowgid(int gid)
4986 {
4987 if (gid > 65535)
4988 return 65534;
4989 else
4990 return gid;
4991 }
4992
4993 static inline int low2highuid(int uid)
4994 {
4995 if ((int16_t)uid == -1)
4996 return -1;
4997 else
4998 return uid;
4999 }
5000
5001 static inline int low2highgid(int gid)
5002 {
5003 if ((int16_t)gid == -1)
5004 return -1;
5005 else
5006 return gid;
5007 }
5008 static inline int tswapid(int id)
5009 {
5010 return tswap16(id);
5011 }
5012
5013 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5014
5015 #else /* !USE_UID16 */
5016 static inline int high2lowuid(int uid)
5017 {
5018 return uid;
5019 }
5020 static inline int high2lowgid(int gid)
5021 {
5022 return gid;
5023 }
5024 static inline int low2highuid(int uid)
5025 {
5026 return uid;
5027 }
5028 static inline int low2highgid(int gid)
5029 {
5030 return gid;
5031 }
5032 static inline int tswapid(int id)
5033 {
5034 return tswap32(id);
5035 }
5036
5037 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5038
5039 #endif /* USE_UID16 */
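
/*
 * Worked examples for the 16-bit uid ABI helpers above: a host uid of
 * 100000 cannot be represented in the legacy 16-bit calls, so high2lowuid()
 * reports the overflow value 65534 instead; conversely a guest passing
 * (uint16_t)-1 (65535) as the "leave unchanged" sentinel to a 16-bit
 * chown-style call is widened back to -1 by low2highuid(), so the host
 * syscall sees the same sentinel.  With USE_UID16 the ids also travel as
 * 16-bit quantities, hence tswapid()/put_user_id() using the u16 variants.
 */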
5040
5041 void syscall_init(void)
5042 {
5043 IOCTLEntry *ie;
5044 const argtype *arg_type;
5045 int size;
5046 int i;
5047
5048 thunk_init(STRUCT_MAX);
5049
5050 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5051 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5052 #include "syscall_types.h"
5053 #undef STRUCT
5054 #undef STRUCT_SPECIAL
5055
5056 /* Build target_to_host_errno_table[] from
5057 * host_to_target_errno_table[]. */
5058 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5059 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5060 }
5061
5062 /* We patch the ioctl size if necessary. We rely on the fact that
5063 no ioctl has all bits set to '1' in the size field. */
5064 ie = ioctl_entries;
5065 while (ie->target_cmd != 0) {
5066 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5067 TARGET_IOC_SIZEMASK) {
5068 arg_type = ie->arg_type;
5069 if (arg_type[0] != TYPE_PTR) {
5070 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5071 ie->target_cmd);
5072 exit(1);
5073 }
5074 arg_type++;
5075 size = thunk_type_size(arg_type, 0);
5076 ie->target_cmd = (ie->target_cmd &
5077 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5078 (size << TARGET_IOC_SIZESHIFT);
5079 }
5080
5081 /* automatic consistency check if same arch */
5082 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5083 (defined(__x86_64__) && defined(TARGET_X86_64))
5084 if (unlikely(ie->target_cmd != ie->host_cmd)) {
5085 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5086 ie->name, ie->target_cmd, ie->host_cmd);
5087 }
5088 #endif
5089 ie++;
5090 }
5091 }
5092
5093 #if TARGET_ABI_BITS == 32
5094 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
5095 {
5096 #ifdef TARGET_WORDS_BIGENDIAN
5097 return ((uint64_t)word0 << 32) | word1;
5098 #else
5099 return ((uint64_t)word1 << 32) | word0;
5100 #endif
5101 }
5102 #else /* TARGET_ABI_BITS == 32 */
5103 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
5104 {
5105 return word0;
5106 }
5107 #endif /* TARGET_ABI_BITS != 32 */
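
/*
 * Example for the 32-bit ABI case above: a truncate64-style call passes a
 * 64-bit offset as two abi_long halves.  For an offset of
 * 0x0000000100000200 a little-endian 32-bit target supplies
 * word0 = 0x00000200 (low half) and word1 = 0x00000001 (high half), and
 * target_offset64() reassembles ((uint64_t)word1 << 32) | word0; on a
 * big-endian target the halves arrive in the opposite order, which is why
 * the two cases are distinguished with TARGET_WORDS_BIGENDIAN.
 */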
5108
5109 #ifdef TARGET_NR_truncate64
5110 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5111 abi_long arg2,
5112 abi_long arg3,
5113 abi_long arg4)
5114 {
5115 if (regpairs_aligned(cpu_env)) {
5116 arg2 = arg3;
5117 arg3 = arg4;
5118 }
5119 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5120 }
5121 #endif
5122
5123 #ifdef TARGET_NR_ftruncate64
5124 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5125 abi_long arg2,
5126 abi_long arg3,
5127 abi_long arg4)
5128 {
5129 if (regpairs_aligned(cpu_env)) {
5130 arg2 = arg3;
5131 arg3 = arg4;
5132 }
5133 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
5134 }
5135 #endif
5136
5137 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5138 abi_ulong target_addr)
5139 {
5140 struct target_timespec *target_ts;
5141
5142 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5143 return -TARGET_EFAULT;
5144 host_ts->tv_sec = tswapal(target_ts->tv_sec);
5145 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
5146 unlock_user_struct(target_ts, target_addr, 0);
5147 return 0;
5148 }
5149
5150 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5151 struct timespec *host_ts)
5152 {
5153 struct target_timespec *target_ts;
5154
5155 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5156 return -TARGET_EFAULT;
5157 target_ts->tv_sec = tswapal(host_ts->tv_sec);
5158 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
5159 unlock_user_struct(target_ts, target_addr, 1);
5160 return 0;
5161 }
5162
5163 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5164 abi_ulong target_addr)
5165 {
5166 struct target_itimerspec *target_itspec;
5167
5168 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5169 return -TARGET_EFAULT;
5170 }
5171
5172 host_itspec->it_interval.tv_sec =
5173 tswapal(target_itspec->it_interval.tv_sec);
5174 host_itspec->it_interval.tv_nsec =
5175 tswapal(target_itspec->it_interval.tv_nsec);
5176 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5177 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5178
5179 unlock_user_struct(target_itspec, target_addr, 1);
5180 return 0;
5181 }
5182
5183 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5184 struct itimerspec *host_its)
5185 {
5186 struct target_itimerspec *target_itspec;
5187
5188 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5189 return -TARGET_EFAULT;
5190 }
5191
5192 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5193 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5194
5195 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5196 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5197
5198 unlock_user_struct(target_itspec, target_addr, 0);
5199 return 0;
5200 }
5201
5202 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5203 abi_ulong target_addr)
5204 {
5205 struct target_sigevent *target_sevp;
5206
5207 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5208 return -TARGET_EFAULT;
5209 }
5210
5211 /* This union is awkward on 64 bit systems because it has a 32 bit
5212 * integer and a pointer in it; we follow the conversion approach
5213 * used for handling sigval types in signal.c so the guest should get
5214 * the correct value back even if we did a 64 bit byteswap and it's
5215 * using the 32 bit integer.
5216 */
5217 host_sevp->sigev_value.sival_ptr =
5218 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5219 host_sevp->sigev_signo =
5220 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5221 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5222 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5223
5224 unlock_user_struct(target_sevp, target_addr, 1);
5225 return 0;
5226 }
5227
5228 #if defined(TARGET_NR_mlockall)
5229 static inline int target_to_host_mlockall_arg(int arg)
5230 {
5231 int result = 0;
5232
5233 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5234 result |= MCL_CURRENT;
5235 }
5236 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5237 result |= MCL_FUTURE;
5238 }
5239 return result;
5240 }
5241 #endif
5242
5243 static inline abi_long host_to_target_stat64(void *cpu_env,
5244 abi_ulong target_addr,
5245 struct stat *host_st)
5246 {
5247 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5248 if (((CPUARMState *)cpu_env)->eabi) {
5249 struct target_eabi_stat64 *target_st;
5250
5251 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5252 return -TARGET_EFAULT;
5253 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5254 __put_user(host_st->st_dev, &target_st->st_dev);
5255 __put_user(host_st->st_ino, &target_st->st_ino);
5256 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5257 __put_user(host_st->st_ino, &target_st->__st_ino);
5258 #endif
5259 __put_user(host_st->st_mode, &target_st->st_mode);
5260 __put_user(host_st->st_nlink, &target_st->st_nlink);
5261 __put_user(host_st->st_uid, &target_st->st_uid);
5262 __put_user(host_st->st_gid, &target_st->st_gid);
5263 __put_user(host_st->st_rdev, &target_st->st_rdev);
5264 __put_user(host_st->st_size, &target_st->st_size);
5265 __put_user(host_st->st_blksize, &target_st->st_blksize);
5266 __put_user(host_st->st_blocks, &target_st->st_blocks);
5267 __put_user(host_st->st_atime, &target_st->target_st_atime);
5268 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5269 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5270 unlock_user_struct(target_st, target_addr, 1);
5271 } else
5272 #endif
5273 {
5274 #if defined(TARGET_HAS_STRUCT_STAT64)
5275 struct target_stat64 *target_st;
5276 #else
5277 struct target_stat *target_st;
5278 #endif
5279
5280 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5281 return -TARGET_EFAULT;
5282 memset(target_st, 0, sizeof(*target_st));
5283 __put_user(host_st->st_dev, &target_st->st_dev);
5284 __put_user(host_st->st_ino, &target_st->st_ino);
5285 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5286 __put_user(host_st->st_ino, &target_st->__st_ino);
5287 #endif
5288 __put_user(host_st->st_mode, &target_st->st_mode);
5289 __put_user(host_st->st_nlink, &target_st->st_nlink);
5290 __put_user(host_st->st_uid, &target_st->st_uid);
5291 __put_user(host_st->st_gid, &target_st->st_gid);
5292 __put_user(host_st->st_rdev, &target_st->st_rdev);
5293 /* XXX: better use of kernel struct */
5294 __put_user(host_st->st_size, &target_st->st_size);
5295 __put_user(host_st->st_blksize, &target_st->st_blksize);
5296 __put_user(host_st->st_blocks, &target_st->st_blocks);
5297 __put_user(host_st->st_atime, &target_st->target_st_atime);
5298 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5299 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5300 unlock_user_struct(target_st, target_addr, 1);
5301 }
5302
5303 return 0;
5304 }
5305
5306 /* ??? Using host futex calls even when target atomic operations
5307 are not really atomic probably breaks things. However, implementing
5308 futexes locally would make futexes shared between multiple processes
5309 tricky, and they would probably be useless anyway because guest atomic
5310 operations won't work either. */
5311 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5312 target_ulong uaddr2, int val3)
5313 {
5314 struct timespec ts, *pts;
5315 int base_op;
5316
5317 /* ??? We assume FUTEX_* constants are the same on both host
5318 and target. */
5319 #ifdef FUTEX_CMD_MASK
5320 base_op = op & FUTEX_CMD_MASK;
5321 #else
5322 base_op = op;
5323 #endif
5324 switch (base_op) {
5325 case FUTEX_WAIT:
5326 case FUTEX_WAIT_BITSET:
5327 if (timeout) {
5328 pts = &ts;
5329 target_to_host_timespec(pts, timeout);
5330 } else {
5331 pts = NULL;
5332 }
5333 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5334 pts, NULL, val3));
5335 case FUTEX_WAKE:
5336 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5337 case FUTEX_FD:
5338 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5339 case FUTEX_REQUEUE:
5340 case FUTEX_CMP_REQUEUE:
5341 case FUTEX_WAKE_OP:
5342 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5343 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5344 But the prototype takes a `struct timespec *'; insert casts
5345 to satisfy the compiler. We do not need to tswap TIMEOUT
5346 since it's not compared to guest memory. */
5347 pts = (struct timespec *)(uintptr_t) timeout;
5348 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5349 g2h(uaddr2),
5350 (base_op == FUTEX_CMP_REQUEUE
5351 ? tswap32(val3)
5352 : val3)));
5353 default:
5354 return -TARGET_ENOSYS;
5355 }
5356 }
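
/*
 * Note on the FUTEX_WAIT path above: uaddr is a guest virtual address and
 * is translated with g2h() before the host futex call, and val goes through
 * tswap32() because the kernel compares it against the 32-bit word as it
 * sits in (guest-byte-order) memory; the timeout, by contrast, is converted
 * with target_to_host_timespec() since the kernel really does read it as a
 * struct timespec.  This simply restates the code above and, as that code's
 * comment says, assumes matching FUTEX_* constants on host and target.
 */
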
5357 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5358 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
5359 abi_long handle, abi_long mount_id,
5360 abi_long flags)
5361 {
5362 struct file_handle *target_fh;
5363 struct file_handle *fh;
5364 int mid = 0;
5365 abi_long ret;
5366 char *name;
5367 unsigned int size, total_size;
5368
5369 if (get_user_s32(size, handle)) {
5370 return -TARGET_EFAULT;
5371 }
5372
5373 name = lock_user_string(pathname);
5374 if (!name) {
5375 return -TARGET_EFAULT;
5376 }
5377
5378 total_size = sizeof(struct file_handle) + size;
5379 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
5380 if (!target_fh) {
5381 unlock_user(name, pathname, 0);
5382 return -TARGET_EFAULT;
5383 }
5384
5385 fh = g_malloc0(total_size);
5386 fh->handle_bytes = size;
5387
5388 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
5389 unlock_user(name, pathname, 0);
5390
5391 /* man name_to_handle_at(2):
5392 * Other than the use of the handle_bytes field, the caller should treat
5393 * the file_handle structure as an opaque data type
5394 */
5395
5396 memcpy(target_fh, fh, total_size);
5397 target_fh->handle_bytes = tswap32(fh->handle_bytes);
5398 target_fh->handle_type = tswap32(fh->handle_type);
5399 g_free(fh);
5400 unlock_user(target_fh, handle, total_size);
5401
5402 if (put_user_s32(mid, mount_id)) {
5403 return -TARGET_EFAULT;
5404 }
5405
5406 return ret;
5407
5408 }
5409 #endif
5410
5411 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5412 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
5413 abi_long flags)
5414 {
5415 struct file_handle *target_fh;
5416 struct file_handle *fh;
5417 unsigned int size, total_size;
5418 abi_long ret;
5419
5420 if (get_user_s32(size, handle)) {
5421 return -TARGET_EFAULT;
5422 }
5423
5424 total_size = sizeof(struct file_handle) + size;
5425 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
5426 if (!target_fh) {
5427 return -TARGET_EFAULT;
5428 }
5429
5430 fh = g_memdup(target_fh, total_size);
5431 fh->handle_bytes = size;
5432 fh->handle_type = tswap32(target_fh->handle_type);
5433
5434 ret = get_errno(open_by_handle_at(mount_fd, fh,
5435 target_to_host_bitmask(flags, fcntl_flags_tbl)));
5436
5437 g_free(fh);
5438
5439 unlock_user(target_fh, handle, total_size);
5440
5441 return ret;
5442 }
5443 #endif
5444
5445 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5446
5447 /* signalfd siginfo conversion */
5448
5449 static void
5450 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
5451 const struct signalfd_siginfo *info)
5452 {
5453 int sig = host_to_target_signal(info->ssi_signo);
5454
5455 /* linux/signalfd.h defines an ssi_addr_lsb field that is not declared
5456 * in sys/signalfd.h but is used by some kernels
5457 */
5458
5459 #ifdef BUS_MCEERR_AO
5460 if (tinfo->ssi_signo == SIGBUS &&
5461 (tinfo->ssi_code == BUS_MCEERR_AR ||
5462 tinfo->ssi_code == BUS_MCEERR_AO)) {
5463 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
5464 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
5465 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
5466 }
5467 #endif
5468
5469 tinfo->ssi_signo = tswap32(sig);
5470 tinfo->ssi_errno = tswap32(info->ssi_errno);
5471 tinfo->ssi_code = tswap32(info->ssi_code);
5472 tinfo->ssi_pid = tswap32(info->ssi_pid);
5473 tinfo->ssi_uid = tswap32(info->ssi_uid);
5474 tinfo->ssi_fd = tswap32(info->ssi_fd);
5475 tinfo->ssi_tid = tswap32(info->ssi_tid);
5476 tinfo->ssi_band = tswap32(info->ssi_band);
5477 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
5478 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
5479 tinfo->ssi_status = tswap32(info->ssi_status);
5480 tinfo->ssi_int = tswap32(info->ssi_int);
5481 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
5482 tinfo->ssi_utime = tswap64(info->ssi_utime);
5483 tinfo->ssi_stime = tswap64(info->ssi_stime);
5484 tinfo->ssi_addr = tswap64(info->ssi_addr);
5485 }
5486
5487 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
5488 {
5489 int i;
5490
5491 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
5492 host_to_target_signalfd_siginfo(buf + i, buf + i);
5493 }
5494
5495 return len;
5496 }
5497
5498 static TargetFdTrans target_signalfd_trans = {
5499 .host_to_target_data = host_to_target_data_signalfd,
5500 };
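
/*
 * This TargetFdTrans is what keeps signalfd working after do_signalfd4()
 * returns: fd_trans_register(ret, &target_signalfd_trans) below ties the
 * descriptor to host_to_target_data_signalfd(), so that -- assuming the
 * read() emulation consults the registered host_to_target_data hook, as the
 * fd_trans machinery in this file is meant to be used -- every
 * struct signalfd_siginfo the guest reads is byteswapped and
 * signal-renumbered in place before being copied out.
 */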
5501
5502 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
5503 {
5504 int host_flags;
5505 target_sigset_t *target_mask;
5506 sigset_t host_mask;
5507 abi_long ret;
5508
5509 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
5510 return -TARGET_EINVAL;
5511 }
5512 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
5513 return -TARGET_EFAULT;
5514 }
5515
5516 target_to_host_sigset(&host_mask, target_mask);
5517
5518 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
5519
5520 ret = get_errno(signalfd(fd, &host_mask, host_flags));
5521 if (ret >= 0) {
5522 fd_trans_register(ret, &target_signalfd_trans);
5523 }
5524
5525 unlock_user_struct(target_mask, mask, 0);
5526
5527 return ret;
5528 }
5529 #endif
5530
5531 /* Map host to target signal numbers for the wait family of syscalls.
5532 Assume all other status bits are the same. */
5533 int host_to_target_waitstatus(int status)
5534 {
5535 if (WIFSIGNALED(status)) {
5536 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5537 }
5538 if (WIFSTOPPED(status)) {
5539 return (host_to_target_signal(WSTOPSIG(status)) << 8)
5540 | (status & 0xff);
5541 }
5542 return status;
5543 }
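
/*
 * Worked example: for a stopped child the host status encodes the signal in
 * bits 8-15 with 0x7f in the low byte, so a child stopped by SIGSTOP (19 on
 * a typical host) arrives as 0x137f.  On a target whose SIGSTOP differs --
 * MIPS, for instance, where it is 23 (an assumption about that ABI, not
 * something derived from this file) -- host_to_target_waitstatus() rewrites
 * the status to (23 << 8) | 0x7f == 0x177f while leaving the WIFSTOPPED
 * framing bits untouched, as the comment above promises.
 */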
5544
5545 static int open_self_cmdline(void *cpu_env, int fd)
5546 {
5547 int fd_orig = -1;
5548 bool word_skipped = false;
5549
5550 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5551 if (fd_orig < 0) {
5552 return fd_orig;
5553 }
5554
5555 while (true) {
5556 ssize_t nb_read;
5557 char buf[128];
5558 char *cp_buf = buf;
5559
5560 nb_read = read(fd_orig, buf, sizeof(buf));
5561 if (nb_read < 0) {
5562 fd_orig = close(fd_orig);
5563 return -1;
5564 } else if (nb_read == 0) {
5565 break;
5566 }
5567
5568 if (!word_skipped) {
5569 /* Skip the first string, which is the path to qemu-*-static
5570 instead of the actual command. */
5571 cp_buf = memchr(buf, 0, sizeof(buf));
5572 if (cp_buf) {
5573 /* Null byte found, skip one string */
5574 cp_buf++;
5575 nb_read -= cp_buf - buf;
5576 word_skipped = true;
5577 }
5578 }
5579
5580 if (word_skipped) {
5581 if (write(fd, cp_buf, nb_read) != nb_read) {
5582 close(fd_orig);
5583 return -1;
5584 }
5585 }
5586 }
5587
5588 return close(fd_orig);
5589 }
5590
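/* Fake /proc/self/maps for the guest: walk the host's own maps, keep only
   the ranges that translate back into guest address space, print them in
   guest terms, and tag the guest stack with "[stack]". */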
5591 static int open_self_maps(void *cpu_env, int fd)
5592 {
5593 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5594 TaskState *ts = cpu->opaque;
5595 FILE *fp;
5596 char *line = NULL;
5597 size_t len = 0;
5598 ssize_t read;
5599
5600 fp = fopen("/proc/self/maps", "r");
5601 if (fp == NULL) {
5602 return -EACCES;
5603 }
5604
5605 while ((read = getline(&line, &len, fp)) != -1) {
5606 int fields, dev_maj, dev_min, inode;
5607 uint64_t min, max, offset;
5608 char flag_r, flag_w, flag_x, flag_p;
5609 char path[512] = "";
5610 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5611 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5612 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5613
5614 if ((fields < 10) || (fields > 11)) {
5615 continue;
5616 }
5617 if (h2g_valid(min)) {
5618 int flags = page_get_flags(h2g(min));
5619 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5620 if (page_check_range(h2g(min), max - min, flags) == -1) {
5621 continue;
5622 }
5623 if (h2g(min) == ts->info->stack_limit) {
5624 pstrcpy(path, sizeof(path), " [stack]");
5625 }
5626 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5627 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5628 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5629 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5630 path[0] ? " " : "", path);
5631 }
5632 }
5633
5634 free(line);
5635 fclose(fp);
5636
5637 return 0;
5638 }
5639
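/* Fake /proc/self/stat: only the fields a guest is likely to inspect are
   filled in - pid (field 1), comm (field 2) and start of stack (field 28);
   everything else is reported as 0. */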
5640 static int open_self_stat(void *cpu_env, int fd)
5641 {
5642 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5643 TaskState *ts = cpu->opaque;
5644 abi_ulong start_stack = ts->info->start_stack;
5645 int i;
5646
5647 for (i = 0; i < 44; i++) {
5648 char buf[128];
5649 int len;
5650 uint64_t val = 0;
5651
5652 if (i == 0) {
5653 /* pid */
5654 val = getpid();
5655 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5656 } else if (i == 1) {
5657 /* app name */
5658 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5659 } else if (i == 27) {
5660 /* stack bottom */
5661 val = start_stack;
5662 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5663 } else {
5664 /* for the rest, there is MasterCard: report 0 */
5665 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5666 }
5667
5668 len = strlen(buf);
5669 if (write(fd, buf, len) != len) {
5670 return -1;
5671 }
5672 }
5673
5674 return 0;
5675 }
5676
5677 static int open_self_auxv(void *cpu_env, int fd)
5678 {
5679 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5680 TaskState *ts = cpu->opaque;
5681 abi_ulong auxv = ts->info->saved_auxv;
5682 abi_ulong len = ts->info->auxv_len;
5683 char *ptr;
5684
5685 /*
5686 * The auxiliary vector is stored on the target process stack.
5687 * Read in the whole auxv vector and copy it to the file.
5688 */
5689 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5690 if (ptr != NULL) {
5691 while (len > 0) {
5692 ssize_t r;
5693 r = write(fd, ptr, len);
5694 if (r <= 0) {
5695 break;
5696 }
5697 len -= r;
5698 ptr += r;
5699 }
5700 lseek(fd, 0, SEEK_SET);
5701 unlock_user(ptr, auxv, len);
5702 }
5703
5704 return 0;
5705 }
5706
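/* Return nonzero if filename names the given entry of our own process in
   procfs, via either /proc/self/ or /proc/<pid>/ with our pid.  For
   instance, is_proc_myself("/proc/self/maps", "maps") is true, while
   "/proc/1/maps" only matches when we really are pid 1. */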
5707 static int is_proc_myself(const char *filename, const char *entry)
5708 {
5709 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5710 filename += strlen("/proc/");
5711 if (!strncmp(filename, "self/", strlen("self/"))) {
5712 filename += strlen("self/");
5713 } else if (*filename >= '1' && *filename <= '9') {
5714 char myself[80];
5715 snprintf(myself, sizeof(myself), "%d/", getpid());
5716 if (!strncmp(filename, myself, strlen(myself))) {
5717 filename += strlen(myself);
5718 } else {
5719 return 0;
5720 }
5721 } else {
5722 return 0;
5723 }
5724 if (!strcmp(filename, entry)) {
5725 return 1;
5726 }
5727 }
5728 return 0;
5729 }
5730
5731 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5732 static int is_proc(const char *filename, const char *entry)
5733 {
5734 return strcmp(filename, entry) == 0;
5735 }
5736
5737 static int open_net_route(void *cpu_env, int fd)
5738 {
5739 FILE *fp;
5740 char *line = NULL;
5741 size_t len = 0;
5742 ssize_t read;
5743
5744 fp = fopen("/proc/net/route", "r");
5745 if (fp == NULL) {
5746 return -EACCES;
5747 }
5748
5749 /* read header */
5750
5751 read = getline(&line, &len, fp);
5752 dprintf(fd, "%s", line);
5753
5754 /* read routes */
5755
5756 while ((read = getline(&line, &len, fp)) != -1) {
5757 char iface[16];
5758 uint32_t dest, gw, mask;
5759 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5760 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5761 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5762 &mask, &mtu, &window, &irtt);
5763 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5764 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5765 metric, tswap32(mask), mtu, window, irtt);
5766 }
5767
5768 free(line);
5769 fclose(fp);
5770
5771 return 0;
5772 }
5773 #endif
5774
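/* openat() with interception of the /proc files that have to be faked for
   the guest.  "exe" is redirected to the binary being emulated; the other
   entries are synthesised into an unlinked temporary file by the matching
   fill() callback, so the guest simply sees an ordinary readable fd. */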
5775 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5776 {
5777 struct fake_open {
5778 const char *filename;
5779 int (*fill)(void *cpu_env, int fd);
5780 int (*cmp)(const char *s1, const char *s2);
5781 };
5782 const struct fake_open *fake_open;
5783 static const struct fake_open fakes[] = {
5784 { "maps", open_self_maps, is_proc_myself },
5785 { "stat", open_self_stat, is_proc_myself },
5786 { "auxv", open_self_auxv, is_proc_myself },
5787 { "cmdline", open_self_cmdline, is_proc_myself },
5788 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5789 { "/proc/net/route", open_net_route, is_proc },
5790 #endif
5791 { NULL, NULL, NULL }
5792 };
5793
5794 if (is_proc_myself(pathname, "exe")) {
5795 int execfd = qemu_getauxval(AT_EXECFD);
5796 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5797 }
5798
5799 for (fake_open = fakes; fake_open->filename; fake_open++) {
5800 if (fake_open->cmp(pathname, fake_open->filename)) {
5801 break;
5802 }
5803 }
5804
5805 if (fake_open->filename) {
5806 const char *tmpdir;
5807 char filename[PATH_MAX];
5808 int fd, r;
5809
5810 /* create temporary file to map stat to */
5811 tmpdir = getenv("TMPDIR");
5812 if (!tmpdir)
5813 tmpdir = "/tmp";
5814 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5815 fd = mkstemp(filename);
5816 if (fd < 0) {
5817 return fd;
5818 }
5819 unlink(filename);
5820
5821 if ((r = fake_open->fill(cpu_env, fd))) {
5822 close(fd);
5823 return r;
5824 }
5825 lseek(fd, 0, SEEK_SET);
5826
5827 return fd;
5828 }
5829
5830 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5831 }
5832
5833 #define TIMER_MAGIC 0x0caf0000
5834 #define TIMER_MAGIC_MASK 0xffff0000
5835
5836 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
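/* For example, the guest-visible ID of the timer in slot 3 is 0x0caf0003:
   get_timer_id() strips the TIMER_MAGIC tag and returns 3, and any value
   without the tag, or beyond g_posix_timers[], yields -TARGET_EINVAL. */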
5837 static target_timer_t get_timer_id(abi_long arg)
5838 {
5839 target_timer_t timerid = arg;
5840
5841 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5842 return -TARGET_EINVAL;
5843 }
5844
5845 timerid &= 0xffff;
5846
5847 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5848 return -TARGET_EINVAL;
5849 }
5850
5851 return timerid;
5852 }
5853
5854 /* do_syscall() should always have a single exit point at the end so
5855 that actions, such as logging of syscall results, can be performed.
5856 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5857 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5858 abi_long arg2, abi_long arg3, abi_long arg4,
5859 abi_long arg5, abi_long arg6, abi_long arg7,
5860 abi_long arg8)
5861 {
5862 CPUState *cpu = ENV_GET_CPU(cpu_env);
5863 abi_long ret;
5864 struct stat st;
5865 struct statfs stfs;
5866 void *p;
5867
5868 #ifdef DEBUG
5869 gemu_log("syscall %d", num);
5870 #endif
5871 if(do_strace)
5872 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5873
5874 switch(num) {
5875 case TARGET_NR_exit:
5876 /* In old applications this may be used to implement _exit(2).
5877 However in threaded applications it is used for thread termination,
5878 and _exit_group is used for application termination.
5879 Do thread termination if we have more than one thread. */
5880 /* FIXME: This probably breaks if a signal arrives. We should probably
5881 be disabling signals. */
5882 if (CPU_NEXT(first_cpu)) {
5883 TaskState *ts;
5884
5885 cpu_list_lock();
5886 /* Remove the CPU from the list. */
5887 QTAILQ_REMOVE(&cpus, cpu, node);
5888 cpu_list_unlock();
5889 ts = cpu->opaque;
5890 if (ts->child_tidptr) {
5891 put_user_u32(0, ts->child_tidptr);
5892 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5893 NULL, NULL, 0);
5894 }
5895 thread_cpu = NULL;
5896 object_unref(OBJECT(cpu));
5897 g_free(ts);
5898 rcu_unregister_thread();
5899 pthread_exit(NULL);
5900 }
5901 #ifdef TARGET_GPROF
5902 _mcleanup();
5903 #endif
5904 gdb_exit(cpu_env, arg1);
5905 _exit(arg1);
5906 ret = 0; /* avoid warning */
5907 break;
5908 case TARGET_NR_read:
5909 if (arg3 == 0)
5910 ret = 0;
5911 else {
5912 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5913 goto efault;
5914 ret = get_errno(read(arg1, p, arg3));
5915 if (ret >= 0 &&
5916 fd_trans_host_to_target_data(arg1)) {
5917 ret = fd_trans_host_to_target_data(arg1)(p, ret);
5918 }
5919 unlock_user(p, arg2, ret);
5920 }
5921 break;
5922 case TARGET_NR_write:
5923 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5924 goto efault;
5925 ret = get_errno(write(arg1, p, arg3));
5926 unlock_user(p, arg2, 0);
5927 break;
5928 #ifdef TARGET_NR_open
5929 case TARGET_NR_open:
5930 if (!(p = lock_user_string(arg1)))
5931 goto efault;
5932 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5933 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5934 arg3));
5935 fd_trans_unregister(ret);
5936 unlock_user(p, arg1, 0);
5937 break;
5938 #endif
5939 case TARGET_NR_openat:
5940 if (!(p = lock_user_string(arg2)))
5941 goto efault;
5942 ret = get_errno(do_openat(cpu_env, arg1, p,
5943 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5944 arg4));
5945 fd_trans_unregister(ret);
5946 unlock_user(p, arg2, 0);
5947 break;
5948 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5949 case TARGET_NR_name_to_handle_at:
5950 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
5951 break;
5952 #endif
5953 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5954 case TARGET_NR_open_by_handle_at:
5955 ret = do_open_by_handle_at(arg1, arg2, arg3);
5956 fd_trans_unregister(ret);
5957 break;
5958 #endif
5959 case TARGET_NR_close:
5960 fd_trans_unregister(arg1);
5961 ret = get_errno(close(arg1));
5962 break;
5963 case TARGET_NR_brk:
5964 ret = do_brk(arg1);
5965 break;
5966 #ifdef TARGET_NR_fork
5967 case TARGET_NR_fork:
5968 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5969 break;
5970 #endif
5971 #ifdef TARGET_NR_waitpid
5972 case TARGET_NR_waitpid:
5973 {
5974 int status;
5975 ret = get_errno(waitpid(arg1, &status, arg3));
5976 if (!is_error(ret) && arg2 && ret
5977 && put_user_s32(host_to_target_waitstatus(status), arg2))
5978 goto efault;
5979 }
5980 break;
5981 #endif
5982 #ifdef TARGET_NR_waitid
5983 case TARGET_NR_waitid:
5984 {
5985 siginfo_t info;
5986 info.si_pid = 0;
5987 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5988 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5989 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5990 goto efault;
5991 host_to_target_siginfo(p, &info);
5992 unlock_user(p, arg3, sizeof(target_siginfo_t));
5993 }
5994 }
5995 break;
5996 #endif
5997 #ifdef TARGET_NR_creat /* not on alpha */
5998 case TARGET_NR_creat:
5999 if (!(p = lock_user_string(arg1)))
6000 goto efault;
6001 ret = get_errno(creat(p, arg2));
6002 fd_trans_unregister(ret);
6003 unlock_user(p, arg1, 0);
6004 break;
6005 #endif
6006 #ifdef TARGET_NR_link
6007 case TARGET_NR_link:
6008 {
6009 void * p2;
6010 p = lock_user_string(arg1);
6011 p2 = lock_user_string(arg2);
6012 if (!p || !p2)
6013 ret = -TARGET_EFAULT;
6014 else
6015 ret = get_errno(link(p, p2));
6016 unlock_user(p2, arg2, 0);
6017 unlock_user(p, arg1, 0);
6018 }
6019 break;
6020 #endif
6021 #if defined(TARGET_NR_linkat)
6022 case TARGET_NR_linkat:
6023 {
6024 void * p2 = NULL;
6025 if (!arg2 || !arg4)
6026 goto efault;
6027 p = lock_user_string(arg2);
6028 p2 = lock_user_string(arg4);
6029 if (!p || !p2)
6030 ret = -TARGET_EFAULT;
6031 else
6032 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6033 unlock_user(p, arg2, 0);
6034 unlock_user(p2, arg4, 0);
6035 }
6036 break;
6037 #endif
6038 #ifdef TARGET_NR_unlink
6039 case TARGET_NR_unlink:
6040 if (!(p = lock_user_string(arg1)))
6041 goto efault;
6042 ret = get_errno(unlink(p));
6043 unlock_user(p, arg1, 0);
6044 break;
6045 #endif
6046 #if defined(TARGET_NR_unlinkat)
6047 case TARGET_NR_unlinkat:
6048 if (!(p = lock_user_string(arg2)))
6049 goto efault;
6050 ret = get_errno(unlinkat(arg1, p, arg3));
6051 unlock_user(p, arg2, 0);
6052 break;
6053 #endif
6054 case TARGET_NR_execve:
6055 {
6056 char **argp, **envp;
6057 int argc, envc;
6058 abi_ulong gp;
6059 abi_ulong guest_argp;
6060 abi_ulong guest_envp;
6061 abi_ulong addr;
6062 char **q;
6063 int total_size = 0;
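/* Two passes over the guest argv/envp: first count the entries, then lock
   each guest string so that the host execve() below is handed arrays of
   host pointers.  Everything is unlocked again at execve_end, whether or
   not the exec succeeded. */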
6064
6065 argc = 0;
6066 guest_argp = arg2;
6067 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6068 if (get_user_ual(addr, gp))
6069 goto efault;
6070 if (!addr)
6071 break;
6072 argc++;
6073 }
6074 envc = 0;
6075 guest_envp = arg3;
6076 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6077 if (get_user_ual(addr, gp))
6078 goto efault;
6079 if (!addr)
6080 break;
6081 envc++;
6082 }
6083
6084 argp = alloca((argc + 1) * sizeof(void *));
6085 envp = alloca((envc + 1) * sizeof(void *));
6086
6087 for (gp = guest_argp, q = argp; gp;
6088 gp += sizeof(abi_ulong), q++) {
6089 if (get_user_ual(addr, gp))
6090 goto execve_efault;
6091 if (!addr)
6092 break;
6093 if (!(*q = lock_user_string(addr)))
6094 goto execve_efault;
6095 total_size += strlen(*q) + 1;
6096 }
6097 *q = NULL;
6098
6099 for (gp = guest_envp, q = envp; gp;
6100 gp += sizeof(abi_ulong), q++) {
6101 if (get_user_ual(addr, gp))
6102 goto execve_efault;
6103 if (!addr)
6104 break;
6105 if (!(*q = lock_user_string(addr)))
6106 goto execve_efault;
6107 total_size += strlen(*q) + 1;
6108 }
6109 *q = NULL;
6110
6111 if (!(p = lock_user_string(arg1)))
6112 goto execve_efault;
6113 ret = get_errno(execve(p, argp, envp));
6114 unlock_user(p, arg1, 0);
6115
6116 goto execve_end;
6117
6118 execve_efault:
6119 ret = -TARGET_EFAULT;
6120
6121 execve_end:
6122 for (gp = guest_argp, q = argp; *q;
6123 gp += sizeof(abi_ulong), q++) {
6124 if (get_user_ual(addr, gp)
6125 || !addr)
6126 break;
6127 unlock_user(*q, addr, 0);
6128 }
6129 for (gp = guest_envp, q = envp; *q;
6130 gp += sizeof(abi_ulong), q++) {
6131 if (get_user_ual(addr, gp)
6132 || !addr)
6133 break;
6134 unlock_user(*q, addr, 0);
6135 }
6136 }
6137 break;
6138 case TARGET_NR_chdir:
6139 if (!(p = lock_user_string(arg1)))
6140 goto efault;
6141 ret = get_errno(chdir(p));
6142 unlock_user(p, arg1, 0);
6143 break;
6144 #ifdef TARGET_NR_time
6145 case TARGET_NR_time:
6146 {
6147 time_t host_time;
6148 ret = get_errno(time(&host_time));
6149 if (!is_error(ret)
6150 && arg1
6151 && put_user_sal(host_time, arg1))
6152 goto efault;
6153 }
6154 break;
6155 #endif
6156 #ifdef TARGET_NR_mknod
6157 case TARGET_NR_mknod:
6158 if (!(p = lock_user_string(arg1)))
6159 goto efault;
6160 ret = get_errno(mknod(p, arg2, arg3));
6161 unlock_user(p, arg1, 0);
6162 break;
6163 #endif
6164 #if defined(TARGET_NR_mknodat)
6165 case TARGET_NR_mknodat:
6166 if (!(p = lock_user_string(arg2)))
6167 goto efault;
6168 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6169 unlock_user(p, arg2, 0);
6170 break;
6171 #endif
6172 #ifdef TARGET_NR_chmod
6173 case TARGET_NR_chmod:
6174 if (!(p = lock_user_string(arg1)))
6175 goto efault;
6176 ret = get_errno(chmod(p, arg2));
6177 unlock_user(p, arg1, 0);
6178 break;
6179 #endif
6180 #ifdef TARGET_NR_break
6181 case TARGET_NR_break:
6182 goto unimplemented;
6183 #endif
6184 #ifdef TARGET_NR_oldstat
6185 case TARGET_NR_oldstat:
6186 goto unimplemented;
6187 #endif
6188 case TARGET_NR_lseek:
6189 ret = get_errno(lseek(arg1, arg2, arg3));
6190 break;
6191 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6192 /* Alpha specific */
6193 case TARGET_NR_getxpid:
6194 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6195 ret = get_errno(getpid());
6196 break;
6197 #endif
6198 #ifdef TARGET_NR_getpid
6199 case TARGET_NR_getpid:
6200 ret = get_errno(getpid());
6201 break;
6202 #endif
6203 case TARGET_NR_mount:
6204 {
6205 /* need to look at the data field */
6206 void *p2, *p3;
6207
6208 if (arg1) {
6209 p = lock_user_string(arg1);
6210 if (!p) {
6211 goto efault;
6212 }
6213 } else {
6214 p = NULL;
6215 }
6216
6217 p2 = lock_user_string(arg2);
6218 if (!p2) {
6219 if (arg1) {
6220 unlock_user(p, arg1, 0);
6221 }
6222 goto efault;
6223 }
6224
6225 if (arg3) {
6226 p3 = lock_user_string(arg3);
6227 if (!p3) {
6228 if (arg1) {
6229 unlock_user(p, arg1, 0);
6230 }
6231 unlock_user(p2, arg2, 0);
6232 goto efault;
6233 }
6234 } else {
6235 p3 = NULL;
6236 }
6237
6238 /* FIXME - arg5 should be locked, but it isn't clear how to
6239 * do that since it's not guaranteed to be a NULL-terminated
6240 * string.
6241 */
6242 if (!arg5) {
6243 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6244 } else {
6245 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6246 }
6247 ret = get_errno(ret);
6248
6249 if (arg1) {
6250 unlock_user(p, arg1, 0);
6251 }
6252 unlock_user(p2, arg2, 0);
6253 if (arg3) {
6254 unlock_user(p3, arg3, 0);
6255 }
6256 }
6257 break;
6258 #ifdef TARGET_NR_umount
6259 case TARGET_NR_umount:
6260 if (!(p = lock_user_string(arg1)))
6261 goto efault;
6262 ret = get_errno(umount(p));
6263 unlock_user(p, arg1, 0);
6264 break;
6265 #endif
6266 #ifdef TARGET_NR_stime /* not on alpha */
6267 case TARGET_NR_stime:
6268 {
6269 time_t host_time;
6270 if (get_user_sal(host_time, arg1))
6271 goto efault;
6272 ret = get_errno(stime(&host_time));
6273 }
6274 break;
6275 #endif
6276 case TARGET_NR_ptrace:
6277 goto unimplemented;
6278 #ifdef TARGET_NR_alarm /* not on alpha */
6279 case TARGET_NR_alarm:
6280 ret = alarm(arg1);
6281 break;
6282 #endif
6283 #ifdef TARGET_NR_oldfstat
6284 case TARGET_NR_oldfstat:
6285 goto unimplemented;
6286 #endif
6287 #ifdef TARGET_NR_pause /* not on alpha */
6288 case TARGET_NR_pause:
6289 ret = get_errno(pause());
6290 break;
6291 #endif
6292 #ifdef TARGET_NR_utime
6293 case TARGET_NR_utime:
6294 {
6295 struct utimbuf tbuf, *host_tbuf;
6296 struct target_utimbuf *target_tbuf;
6297 if (arg2) {
6298 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6299 goto efault;
6300 tbuf.actime = tswapal(target_tbuf->actime);
6301 tbuf.modtime = tswapal(target_tbuf->modtime);
6302 unlock_user_struct(target_tbuf, arg2, 0);
6303 host_tbuf = &tbuf;
6304 } else {
6305 host_tbuf = NULL;
6306 }
6307 if (!(p = lock_user_string(arg1)))
6308 goto efault;
6309 ret = get_errno(utime(p, host_tbuf));
6310 unlock_user(p, arg1, 0);
6311 }
6312 break;
6313 #endif
6314 #ifdef TARGET_NR_utimes
6315 case TARGET_NR_utimes:
6316 {
6317 struct timeval *tvp, tv[2];
6318 if (arg2) {
6319 if (copy_from_user_timeval(&tv[0], arg2)
6320 || copy_from_user_timeval(&tv[1],
6321 arg2 + sizeof(struct target_timeval)))
6322 goto efault;
6323 tvp = tv;
6324 } else {
6325 tvp = NULL;
6326 }
6327 if (!(p = lock_user_string(arg1)))
6328 goto efault;
6329 ret = get_errno(utimes(p, tvp));
6330 unlock_user(p, arg1, 0);
6331 }
6332 break;
6333 #endif
6334 #if defined(TARGET_NR_futimesat)
6335 case TARGET_NR_futimesat:
6336 {
6337 struct timeval *tvp, tv[2];
6338 if (arg3) {
6339 if (copy_from_user_timeval(&tv[0], arg3)
6340 || copy_from_user_timeval(&tv[1],
6341 arg3 + sizeof(struct target_timeval)))
6342 goto efault;
6343 tvp = tv;
6344 } else {
6345 tvp = NULL;
6346 }
6347 if (!(p = lock_user_string(arg2)))
6348 goto efault;
6349 ret = get_errno(futimesat(arg1, path(p), tvp));
6350 unlock_user(p, arg2, 0);
6351 }
6352 break;
6353 #endif
6354 #ifdef TARGET_NR_stty
6355 case TARGET_NR_stty:
6356 goto unimplemented;
6357 #endif
6358 #ifdef TARGET_NR_gtty
6359 case TARGET_NR_gtty:
6360 goto unimplemented;
6361 #endif
6362 #ifdef TARGET_NR_access
6363 case TARGET_NR_access:
6364 if (!(p = lock_user_string(arg1)))
6365 goto efault;
6366 ret = get_errno(access(path(p), arg2));
6367 unlock_user(p, arg1, 0);
6368 break;
6369 #endif
6370 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6371 case TARGET_NR_faccessat:
6372 if (!(p = lock_user_string(arg2)))
6373 goto efault;
6374 ret = get_errno(faccessat(arg1, p, arg3, 0));
6375 unlock_user(p, arg2, 0);
6376 break;
6377 #endif
6378 #ifdef TARGET_NR_nice /* not on alpha */
6379 case TARGET_NR_nice:
6380 ret = get_errno(nice(arg1));
6381 break;
6382 #endif
6383 #ifdef TARGET_NR_ftime
6384 case TARGET_NR_ftime:
6385 goto unimplemented;
6386 #endif
6387 case TARGET_NR_sync:
6388 sync();
6389 ret = 0;
6390 break;
6391 case TARGET_NR_kill:
6392 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6393 break;
6394 #ifdef TARGET_NR_rename
6395 case TARGET_NR_rename:
6396 {
6397 void *p2;
6398 p = lock_user_string(arg1);
6399 p2 = lock_user_string(arg2);
6400 if (!p || !p2)
6401 ret = -TARGET_EFAULT;
6402 else
6403 ret = get_errno(rename(p, p2));
6404 unlock_user(p2, arg2, 0);
6405 unlock_user(p, arg1, 0);
6406 }
6407 break;
6408 #endif
6409 #if defined(TARGET_NR_renameat)
6410 case TARGET_NR_renameat:
6411 {
6412 void *p2;
6413 p = lock_user_string(arg2);
6414 p2 = lock_user_string(arg4);
6415 if (!p || !p2)
6416 ret = -TARGET_EFAULT;
6417 else
6418 ret = get_errno(renameat(arg1, p, arg3, p2));
6419 unlock_user(p2, arg4, 0);
6420 unlock_user(p, arg2, 0);
6421 }
6422 break;
6423 #endif
6424 #ifdef TARGET_NR_mkdir
6425 case TARGET_NR_mkdir:
6426 if (!(p = lock_user_string(arg1)))
6427 goto efault;
6428 ret = get_errno(mkdir(p, arg2));
6429 unlock_user(p, arg1, 0);
6430 break;
6431 #endif
6432 #if defined(TARGET_NR_mkdirat)
6433 case TARGET_NR_mkdirat:
6434 if (!(p = lock_user_string(arg2)))
6435 goto efault;
6436 ret = get_errno(mkdirat(arg1, p, arg3));
6437 unlock_user(p, arg2, 0);
6438 break;
6439 #endif
6440 #ifdef TARGET_NR_rmdir
6441 case TARGET_NR_rmdir:
6442 if (!(p = lock_user_string(arg1)))
6443 goto efault;
6444 ret = get_errno(rmdir(p));
6445 unlock_user(p, arg1, 0);
6446 break;
6447 #endif
6448 case TARGET_NR_dup:
6449 ret = get_errno(dup(arg1));
6450 if (ret >= 0) {
6451 fd_trans_dup(arg1, ret);
6452 }
6453 break;
6454 #ifdef TARGET_NR_pipe
6455 case TARGET_NR_pipe:
6456 ret = do_pipe(cpu_env, arg1, 0, 0);
6457 break;
6458 #endif
6459 #ifdef TARGET_NR_pipe2
6460 case TARGET_NR_pipe2:
6461 ret = do_pipe(cpu_env, arg1,
6462 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6463 break;
6464 #endif
6465 case TARGET_NR_times:
6466 {
6467 struct target_tms *tmsp;
6468 struct tms tms;
6469 ret = get_errno(times(&tms));
6470 if (arg1) {
6471 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6472 if (!tmsp)
6473 goto efault;
6474 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6475 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6476 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6477 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6478 }
6479 if (!is_error(ret))
6480 ret = host_to_target_clock_t(ret);
6481 }
6482 break;
6483 #ifdef TARGET_NR_prof
6484 case TARGET_NR_prof:
6485 goto unimplemented;
6486 #endif
6487 #ifdef TARGET_NR_signal
6488 case TARGET_NR_signal:
6489 goto unimplemented;
6490 #endif
6491 case TARGET_NR_acct:
6492 if (arg1 == 0) {
6493 ret = get_errno(acct(NULL));
6494 } else {
6495 if (!(p = lock_user_string(arg1)))
6496 goto efault;
6497 ret = get_errno(acct(path(p)));
6498 unlock_user(p, arg1, 0);
6499 }
6500 break;
6501 #ifdef TARGET_NR_umount2
6502 case TARGET_NR_umount2:
6503 if (!(p = lock_user_string(arg1)))
6504 goto efault;
6505 ret = get_errno(umount2(p, arg2));
6506 unlock_user(p, arg1, 0);
6507 break;
6508 #endif
6509 #ifdef TARGET_NR_lock
6510 case TARGET_NR_lock:
6511 goto unimplemented;
6512 #endif
6513 case TARGET_NR_ioctl:
6514 ret = do_ioctl(arg1, arg2, arg3);
6515 break;
6516 case TARGET_NR_fcntl:
6517 ret = do_fcntl(arg1, arg2, arg3);
6518 break;
6519 #ifdef TARGET_NR_mpx
6520 case TARGET_NR_mpx:
6521 goto unimplemented;
6522 #endif
6523 case TARGET_NR_setpgid:
6524 ret = get_errno(setpgid(arg1, arg2));
6525 break;
6526 #ifdef TARGET_NR_ulimit
6527 case TARGET_NR_ulimit:
6528 goto unimplemented;
6529 #endif
6530 #ifdef TARGET_NR_oldolduname
6531 case TARGET_NR_oldolduname:
6532 goto unimplemented;
6533 #endif
6534 case TARGET_NR_umask:
6535 ret = get_errno(umask(arg1));
6536 break;
6537 case TARGET_NR_chroot:
6538 if (!(p = lock_user_string(arg1)))
6539 goto efault;
6540 ret = get_errno(chroot(p));
6541 unlock_user(p, arg1, 0);
6542 break;
6543 #ifdef TARGET_NR_ustat
6544 case TARGET_NR_ustat:
6545 goto unimplemented;
6546 #endif
6547 #ifdef TARGET_NR_dup2
6548 case TARGET_NR_dup2:
6549 ret = get_errno(dup2(arg1, arg2));
6550 if (ret >= 0) {
6551 fd_trans_dup(arg1, arg2);
6552 }
6553 break;
6554 #endif
6555 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6556 case TARGET_NR_dup3:
6557 ret = get_errno(dup3(arg1, arg2, arg3));
6558 if (ret >= 0) {
6559 fd_trans_dup(arg1, arg2);
6560 }
6561 break;
6562 #endif
6563 #ifdef TARGET_NR_getppid /* not on alpha */
6564 case TARGET_NR_getppid:
6565 ret = get_errno(getppid());
6566 break;
6567 #endif
6568 #ifdef TARGET_NR_getpgrp
6569 case TARGET_NR_getpgrp:
6570 ret = get_errno(getpgrp());
6571 break;
6572 #endif
6573 case TARGET_NR_setsid:
6574 ret = get_errno(setsid());
6575 break;
6576 #ifdef TARGET_NR_sigaction
6577 case TARGET_NR_sigaction:
6578 {
6579 #if defined(TARGET_ALPHA)
6580 struct target_sigaction act, oact, *pact = 0;
6581 struct target_old_sigaction *old_act;
6582 if (arg2) {
6583 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6584 goto efault;
6585 act._sa_handler = old_act->_sa_handler;
6586 target_siginitset(&act.sa_mask, old_act->sa_mask);
6587 act.sa_flags = old_act->sa_flags;
6588 act.sa_restorer = 0;
6589 unlock_user_struct(old_act, arg2, 0);
6590 pact = &act;
6591 }
6592 ret = get_errno(do_sigaction(arg1, pact, &oact));
6593 if (!is_error(ret) && arg3) {
6594 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6595 goto efault;
6596 old_act->_sa_handler = oact._sa_handler;
6597 old_act->sa_mask = oact.sa_mask.sig[0];
6598 old_act->sa_flags = oact.sa_flags;
6599 unlock_user_struct(old_act, arg3, 1);
6600 }
6601 #elif defined(TARGET_MIPS)
6602 struct target_sigaction act, oact, *pact, *old_act;
6603
6604 if (arg2) {
6605 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6606 goto efault;
6607 act._sa_handler = old_act->_sa_handler;
6608 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6609 act.sa_flags = old_act->sa_flags;
6610 unlock_user_struct(old_act, arg2, 0);
6611 pact = &act;
6612 } else {
6613 pact = NULL;
6614 }
6615
6616 ret = get_errno(do_sigaction(arg1, pact, &oact));
6617
6618 if (!is_error(ret) && arg3) {
6619 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6620 goto efault;
6621 old_act->_sa_handler = oact._sa_handler;
6622 old_act->sa_flags = oact.sa_flags;
6623 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6624 old_act->sa_mask.sig[1] = 0;
6625 old_act->sa_mask.sig[2] = 0;
6626 old_act->sa_mask.sig[3] = 0;
6627 unlock_user_struct(old_act, arg3, 1);
6628 }
6629 #else
6630 struct target_old_sigaction *old_act;
6631 struct target_sigaction act, oact, *pact;
6632 if (arg2) {
6633 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6634 goto efault;
6635 act._sa_handler = old_act->_sa_handler;
6636 target_siginitset(&act.sa_mask, old_act->sa_mask);
6637 act.sa_flags = old_act->sa_flags;
6638 act.sa_restorer = old_act->sa_restorer;
6639 unlock_user_struct(old_act, arg2, 0);
6640 pact = &act;
6641 } else {
6642 pact = NULL;
6643 }
6644 ret = get_errno(do_sigaction(arg1, pact, &oact));
6645 if (!is_error(ret) && arg3) {
6646 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6647 goto efault;
6648 old_act->_sa_handler = oact._sa_handler;
6649 old_act->sa_mask = oact.sa_mask.sig[0];
6650 old_act->sa_flags = oact.sa_flags;
6651 old_act->sa_restorer = oact.sa_restorer;
6652 unlock_user_struct(old_act, arg3, 1);
6653 }
6654 #endif
6655 }
6656 break;
6657 #endif
6658 case TARGET_NR_rt_sigaction:
6659 {
6660 #if defined(TARGET_ALPHA)
6661 struct target_sigaction act, oact, *pact = 0;
6662 struct target_rt_sigaction *rt_act;
6663 /* ??? arg4 == sizeof(sigset_t). */
6664 if (arg2) {
6665 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6666 goto efault;
6667 act._sa_handler = rt_act->_sa_handler;
6668 act.sa_mask = rt_act->sa_mask;
6669 act.sa_flags = rt_act->sa_flags;
6670 act.sa_restorer = arg5;
6671 unlock_user_struct(rt_act, arg2, 0);
6672 pact = &act;
6673 }
6674 ret = get_errno(do_sigaction(arg1, pact, &oact));
6675 if (!is_error(ret) && arg3) {
6676 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6677 goto efault;
6678 rt_act->_sa_handler = oact._sa_handler;
6679 rt_act->sa_mask = oact.sa_mask;
6680 rt_act->sa_flags = oact.sa_flags;
6681 unlock_user_struct(rt_act, arg3, 1);
6682 }
6683 #else
6684 struct target_sigaction *act;
6685 struct target_sigaction *oact;
6686
6687 if (arg2) {
6688 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6689 goto efault;
6690 } else
6691 act = NULL;
6692 if (arg3) {
6693 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6694 ret = -TARGET_EFAULT;
6695 goto rt_sigaction_fail;
6696 }
6697 } else
6698 oact = NULL;
6699 ret = get_errno(do_sigaction(arg1, act, oact));
6700 rt_sigaction_fail:
6701 if (act)
6702 unlock_user_struct(act, arg2, 0);
6703 if (oact)
6704 unlock_user_struct(oact, arg3, 1);
6705 #endif
6706 }
6707 break;
6708 #ifdef TARGET_NR_sgetmask /* not on alpha */
6709 case TARGET_NR_sgetmask:
6710 {
6711 sigset_t cur_set;
6712 abi_ulong target_set;
6713 do_sigprocmask(0, NULL, &cur_set);
6714 host_to_target_old_sigset(&target_set, &cur_set);
6715 ret = target_set;
6716 }
6717 break;
6718 #endif
6719 #ifdef TARGET_NR_ssetmask /* not on alpha */
6720 case TARGET_NR_ssetmask:
6721 {
6722 sigset_t set, oset, cur_set;
6723 abi_ulong target_set = arg1;
6724 do_sigprocmask(0, NULL, &cur_set);
6725 target_to_host_old_sigset(&set, &target_set);
6726 sigorset(&set, &set, &cur_set);
6727 do_sigprocmask(SIG_SETMASK, &set, &oset);
6728 host_to_target_old_sigset(&target_set, &oset);
6729 ret = target_set;
6730 }
6731 break;
6732 #endif
6733 #ifdef TARGET_NR_sigprocmask
6734 case TARGET_NR_sigprocmask:
6735 {
6736 #if defined(TARGET_ALPHA)
6737 sigset_t set, oldset;
6738 abi_ulong mask;
6739 int how;
6740
6741 switch (arg1) {
6742 case TARGET_SIG_BLOCK:
6743 how = SIG_BLOCK;
6744 break;
6745 case TARGET_SIG_UNBLOCK:
6746 how = SIG_UNBLOCK;
6747 break;
6748 case TARGET_SIG_SETMASK:
6749 how = SIG_SETMASK;
6750 break;
6751 default:
6752 ret = -TARGET_EINVAL;
6753 goto fail;
6754 }
6755 mask = arg2;
6756 target_to_host_old_sigset(&set, &mask);
6757
6758 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6759 if (!is_error(ret)) {
6760 host_to_target_old_sigset(&mask, &oldset);
6761 ret = mask;
6762 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6763 }
6764 #else
6765 sigset_t set, oldset, *set_ptr;
6766 int how;
6767
6768 if (arg2) {
6769 switch (arg1) {
6770 case TARGET_SIG_BLOCK:
6771 how = SIG_BLOCK;
6772 break;
6773 case TARGET_SIG_UNBLOCK:
6774 how = SIG_UNBLOCK;
6775 break;
6776 case TARGET_SIG_SETMASK:
6777 how = SIG_SETMASK;
6778 break;
6779 default:
6780 ret = -TARGET_EINVAL;
6781 goto fail;
6782 }
6783 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6784 goto efault;
6785 target_to_host_old_sigset(&set, p);
6786 unlock_user(p, arg2, 0);
6787 set_ptr = &set;
6788 } else {
6789 how = 0;
6790 set_ptr = NULL;
6791 }
6792 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6793 if (!is_error(ret) && arg3) {
6794 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6795 goto efault;
6796 host_to_target_old_sigset(p, &oldset);
6797 unlock_user(p, arg3, sizeof(target_sigset_t));
6798 }
6799 #endif
6800 }
6801 break;
6802 #endif
6803 case TARGET_NR_rt_sigprocmask:
6804 {
6805 int how = arg1;
6806 sigset_t set, oldset, *set_ptr;
6807
6808 if (arg2) {
6809 switch(how) {
6810 case TARGET_SIG_BLOCK:
6811 how = SIG_BLOCK;
6812 break;
6813 case TARGET_SIG_UNBLOCK:
6814 how = SIG_UNBLOCK;
6815 break;
6816 case TARGET_SIG_SETMASK:
6817 how = SIG_SETMASK;
6818 break;
6819 default:
6820 ret = -TARGET_EINVAL;
6821 goto fail;
6822 }
6823 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6824 goto efault;
6825 target_to_host_sigset(&set, p);
6826 unlock_user(p, arg2, 0);
6827 set_ptr = &set;
6828 } else {
6829 how = 0;
6830 set_ptr = NULL;
6831 }
6832 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6833 if (!is_error(ret) && arg3) {
6834 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6835 goto efault;
6836 host_to_target_sigset(p, &oldset);
6837 unlock_user(p, arg3, sizeof(target_sigset_t));
6838 }
6839 }
6840 break;
6841 #ifdef TARGET_NR_sigpending
6842 case TARGET_NR_sigpending:
6843 {
6844 sigset_t set;
6845 ret = get_errno(sigpending(&set));
6846 if (!is_error(ret)) {
6847 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6848 goto efault;
6849 host_to_target_old_sigset(p, &set);
6850 unlock_user(p, arg1, sizeof(target_sigset_t));
6851 }
6852 }
6853 break;
6854 #endif
6855 case TARGET_NR_rt_sigpending:
6856 {
6857 sigset_t set;
6858 ret = get_errno(sigpending(&set));
6859 if (!is_error(ret)) {
6860 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6861 goto efault;
6862 host_to_target_sigset(p, &set);
6863 unlock_user(p, arg1, sizeof(target_sigset_t));
6864 }
6865 }
6866 break;
6867 #ifdef TARGET_NR_sigsuspend
6868 case TARGET_NR_sigsuspend:
6869 {
6870 sigset_t set;
6871 #if defined(TARGET_ALPHA)
6872 abi_ulong mask = arg1;
6873 target_to_host_old_sigset(&set, &mask);
6874 #else
6875 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6876 goto efault;
6877 target_to_host_old_sigset(&set, p);
6878 unlock_user(p, arg1, 0);
6879 #endif
6880 ret = get_errno(sigsuspend(&set));
6881 }
6882 break;
6883 #endif
6884 case TARGET_NR_rt_sigsuspend:
6885 {
6886 sigset_t set;
6887 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6888 goto efault;
6889 target_to_host_sigset(&set, p);
6890 unlock_user(p, arg1, 0);
6891 ret = get_errno(sigsuspend(&set));
6892 }
6893 break;
6894 case TARGET_NR_rt_sigtimedwait:
6895 {
6896 sigset_t set;
6897 struct timespec uts, *puts;
6898 siginfo_t uinfo;
6899
6900 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6901 goto efault;
6902 target_to_host_sigset(&set, p);
6903 unlock_user(p, arg1, 0);
6904 if (arg3) {
6905 puts = &uts;
6906 target_to_host_timespec(puts, arg3);
6907 } else {
6908 puts = NULL;
6909 }
6910 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6911 if (!is_error(ret)) {
6912 if (arg2) {
6913 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6914 0);
6915 if (!p) {
6916 goto efault;
6917 }
6918 host_to_target_siginfo(p, &uinfo);
6919 unlock_user(p, arg2, sizeof(target_siginfo_t));
6920 }
6921 ret = host_to_target_signal(ret);
6922 }
6923 }
6924 break;
6925 case TARGET_NR_rt_sigqueueinfo:
6926 {
6927 siginfo_t uinfo;
6928 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6929 goto efault;
6930 target_to_host_siginfo(&uinfo, p);
6931 unlock_user(p, arg1, 0);
6932 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6933 }
6934 break;
6935 #ifdef TARGET_NR_sigreturn
6936 case TARGET_NR_sigreturn:
6937 /* NOTE: ret is eax, so not transcoding must be done */
6938 ret = do_sigreturn(cpu_env);
6939 break;
6940 #endif
6941 case TARGET_NR_rt_sigreturn:
6942 /* NOTE: ret is eax, so not transcoding must be done */
6943 ret = do_rt_sigreturn(cpu_env);
6944 break;
6945 case TARGET_NR_sethostname:
6946 if (!(p = lock_user_string(arg1)))
6947 goto efault;
6948 ret = get_errno(sethostname(p, arg2));
6949 unlock_user(p, arg1, 0);
6950 break;
6951 case TARGET_NR_setrlimit:
6952 {
6953 int resource = target_to_host_resource(arg1);
6954 struct target_rlimit *target_rlim;
6955 struct rlimit rlim;
6956 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6957 goto efault;
6958 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6959 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6960 unlock_user_struct(target_rlim, arg2, 0);
6961 ret = get_errno(setrlimit(resource, &rlim));
6962 }
6963 break;
6964 case TARGET_NR_getrlimit:
6965 {
6966 int resource = target_to_host_resource(arg1);
6967 struct target_rlimit *target_rlim;
6968 struct rlimit rlim;
6969
6970 ret = get_errno(getrlimit(resource, &rlim));
6971 if (!is_error(ret)) {
6972 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6973 goto efault;
6974 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6975 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6976 unlock_user_struct(target_rlim, arg2, 1);
6977 }
6978 }
6979 break;
6980 case TARGET_NR_getrusage:
6981 {
6982 struct rusage rusage;
6983 ret = get_errno(getrusage(arg1, &rusage));
6984 if (!is_error(ret)) {
6985 ret = host_to_target_rusage(arg2, &rusage);
6986 }
6987 }
6988 break;
6989 case TARGET_NR_gettimeofday:
6990 {
6991 struct timeval tv;
6992 ret = get_errno(gettimeofday(&tv, NULL));
6993 if (!is_error(ret)) {
6994 if (copy_to_user_timeval(arg1, &tv))
6995 goto efault;
6996 }
6997 }
6998 break;
6999 case TARGET_NR_settimeofday:
7000 {
7001 struct timeval tv, *ptv = NULL;
7002 struct timezone tz, *ptz = NULL;
7003
7004 if (arg1) {
7005 if (copy_from_user_timeval(&tv, arg1)) {
7006 goto efault;
7007 }
7008 ptv = &tv;
7009 }
7010
7011 if (arg2) {
7012 if (copy_from_user_timezone(&tz, arg2)) {
7013 goto efault;
7014 }
7015 ptz = &tz;
7016 }
7017
7018 ret = get_errno(settimeofday(ptv, ptz));
7019 }
7020 break;
7021 #if defined(TARGET_NR_select)
7022 case TARGET_NR_select:
7023 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7024 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7025 #else
7026 {
7027 struct target_sel_arg_struct *sel;
7028 abi_ulong inp, outp, exp, tvp;
7029 long nsel;
7030
7031 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7032 goto efault;
7033 nsel = tswapal(sel->n);
7034 inp = tswapal(sel->inp);
7035 outp = tswapal(sel->outp);
7036 exp = tswapal(sel->exp);
7037 tvp = tswapal(sel->tvp);
7038 unlock_user_struct(sel, arg1, 0);
7039 ret = do_select(nsel, inp, outp, exp, tvp);
7040 }
7041 #endif
7042 break;
7043 #endif
7044 #ifdef TARGET_NR_pselect6
7045 case TARGET_NR_pselect6:
7046 {
7047 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7048 fd_set rfds, wfds, efds;
7049 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7050 struct timespec ts, *ts_ptr;
7051
7052 /*
7053 * The 6th arg is actually two args smashed together,
7054 * so we cannot use the C library.
7055 */
7056 sigset_t set;
7057 struct {
7058 sigset_t *set;
7059 size_t size;
7060 } sig, *sig_ptr;
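/* The packed sixth argument is a guest-side pair of
   { sigset pointer, sigset size }; both words are read here and the
   sigset itself is then fetched and converted separately. */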
7061
7062 abi_ulong arg_sigset, arg_sigsize, *arg7;
7063 target_sigset_t *target_sigset;
7064
7065 n = arg1;
7066 rfd_addr = arg2;
7067 wfd_addr = arg3;
7068 efd_addr = arg4;
7069 ts_addr = arg5;
7070
7071 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7072 if (ret) {
7073 goto fail;
7074 }
7075 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7076 if (ret) {
7077 goto fail;
7078 }
7079 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7080 if (ret) {
7081 goto fail;
7082 }
7083
7084 /*
7085 * This takes a timespec, and not a timeval, so we cannot
7086 * use the do_select() helper ...
7087 */
7088 if (ts_addr) {
7089 if (target_to_host_timespec(&ts, ts_addr)) {
7090 goto efault;
7091 }
7092 ts_ptr = &ts;
7093 } else {
7094 ts_ptr = NULL;
7095 }
7096
7097 /* Extract the two packed args for the sigset */
7098 if (arg6) {
7099 sig_ptr = &sig;
7100 sig.size = _NSIG / 8;
7101
7102 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7103 if (!arg7) {
7104 goto efault;
7105 }
7106 arg_sigset = tswapal(arg7[0]);
7107 arg_sigsize = tswapal(arg7[1]);
7108 unlock_user(arg7, arg6, 0);
7109
7110 if (arg_sigset) {
7111 sig.set = &set;
7112 if (arg_sigsize != sizeof(*target_sigset)) {
7113 /* Like the kernel, we enforce correct size sigsets */
7114 ret = -TARGET_EINVAL;
7115 goto fail;
7116 }
7117 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7118 sizeof(*target_sigset), 1);
7119 if (!target_sigset) {
7120 goto efault;
7121 }
7122 target_to_host_sigset(&set, target_sigset);
7123 unlock_user(target_sigset, arg_sigset, 0);
7124 } else {
7125 sig.set = NULL;
7126 }
7127 } else {
7128 sig_ptr = NULL;
7129 }
7130
7131 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7132 ts_ptr, sig_ptr));
7133
7134 if (!is_error(ret)) {
7135 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7136 goto efault;
7137 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7138 goto efault;
7139 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7140 goto efault;
7141
7142 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7143 goto efault;
7144 }
7145 }
7146 break;
7147 #endif
7148 #ifdef TARGET_NR_symlink
7149 case TARGET_NR_symlink:
7150 {
7151 void *p2;
7152 p = lock_user_string(arg1);
7153 p2 = lock_user_string(arg2);
7154 if (!p || !p2)
7155 ret = -TARGET_EFAULT;
7156 else
7157 ret = get_errno(symlink(p, p2));
7158 unlock_user(p2, arg2, 0);
7159 unlock_user(p, arg1, 0);
7160 }
7161 break;
7162 #endif
7163 #if defined(TARGET_NR_symlinkat)
7164 case TARGET_NR_symlinkat:
7165 {
7166 void *p2;
7167 p = lock_user_string(arg1);
7168 p2 = lock_user_string(arg3);
7169 if (!p || !p2)
7170 ret = -TARGET_EFAULT;
7171 else
7172 ret = get_errno(symlinkat(p, arg2, p2));
7173 unlock_user(p2, arg3, 0);
7174 unlock_user(p, arg1, 0);
7175 }
7176 break;
7177 #endif
7178 #ifdef TARGET_NR_oldlstat
7179 case TARGET_NR_oldlstat:
7180 goto unimplemented;
7181 #endif
7182 #ifdef TARGET_NR_readlink
7183 case TARGET_NR_readlink:
7184 {
7185 void *p2;
7186 p = lock_user_string(arg1);
7187 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7188 if (!p || !p2) {
7189 ret = -TARGET_EFAULT;
7190 } else if (!arg3) {
7191 /* Short circuit this for the magic exe check. */
7192 ret = -TARGET_EINVAL;
7193 } else if (is_proc_myself((const char *)p, "exe")) {
7194 char real[PATH_MAX], *temp;
7195 temp = realpath(exec_path, real);
7196 /* Return value is # of bytes that we wrote to the buffer. */
7197 if (temp == NULL) {
7198 ret = get_errno(-1);
7199 } else {
7200 /* Don't worry about sign mismatch as earlier mapping
7201 * logic would have thrown a bad address error. */
7202 ret = MIN(strlen(real), arg3);
7203 /* We cannot NUL terminate the string. */
7204 memcpy(p2, real, ret);
7205 }
7206 } else {
7207 ret = get_errno(readlink(path(p), p2, arg3));
7208 }
7209 unlock_user(p2, arg2, ret);
7210 unlock_user(p, arg1, 0);
7211 }
7212 break;
7213 #endif
7214 #if defined(TARGET_NR_readlinkat)
7215 case TARGET_NR_readlinkat:
7216 {
7217 void *p2;
7218 p = lock_user_string(arg2);
7219 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7220 if (!p || !p2) {
7221 ret = -TARGET_EFAULT;
7222 } else if (is_proc_myself((const char *)p, "exe")) {
7223 char real[PATH_MAX], *temp;
7224 temp = realpath(exec_path, real);
7225 ret = temp == NULL ? get_errno(-1) : strlen(real);
7226 snprintf((char *)p2, arg4, "%s", real);
7227 } else {
7228 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
7229 }
7230 unlock_user(p2, arg3, ret);
7231 unlock_user(p, arg2, 0);
7232 }
7233 break;
7234 #endif
7235 #ifdef TARGET_NR_uselib
7236 case TARGET_NR_uselib:
7237 goto unimplemented;
7238 #endif
7239 #ifdef TARGET_NR_swapon
7240 case TARGET_NR_swapon:
7241 if (!(p = lock_user_string(arg1)))
7242 goto efault;
7243 ret = get_errno(swapon(p, arg2));
7244 unlock_user(p, arg1, 0);
7245 break;
7246 #endif
7247 case TARGET_NR_reboot:
7248 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7249 /* arg4 (the restart command string) is only used here; it must be ignored in all other cases */
7250 p = lock_user_string(arg4);
7251 if (!p) {
7252 goto efault;
7253 }
7254 ret = get_errno(reboot(arg1, arg2, arg3, p));
7255 unlock_user(p, arg4, 0);
7256 } else {
7257 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7258 }
7259 break;
7260 #ifdef TARGET_NR_readdir
7261 case TARGET_NR_readdir:
7262 goto unimplemented;
7263 #endif
7264 #ifdef TARGET_NR_mmap
7265 case TARGET_NR_mmap:
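/* On the 32-bit targets listed below, the old mmap() calling convention
   passes a single guest pointer to a block of six words
   (addr, len, prot, flags, fd, offset), so that block is unpacked first;
   elsewhere the six values arrive as ordinary syscall arguments. */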
7266 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7267 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7268 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7269 || defined(TARGET_S390X)
7270 {
7271 abi_ulong *v;
7272 abi_ulong v1, v2, v3, v4, v5, v6;
7273 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7274 goto efault;
7275 v1 = tswapal(v[0]);
7276 v2 = tswapal(v[1]);
7277 v3 = tswapal(v[2]);
7278 v4 = tswapal(v[3]);
7279 v5 = tswapal(v[4]);
7280 v6 = tswapal(v[5]);
7281 unlock_user(v, arg1, 0);
7282 ret = get_errno(target_mmap(v1, v2, v3,
7283 target_to_host_bitmask(v4, mmap_flags_tbl),
7284 v5, v6));
7285 }
7286 #else
7287 ret = get_errno(target_mmap(arg1, arg2, arg3,
7288 target_to_host_bitmask(arg4, mmap_flags_tbl),
7289 arg5,
7290 arg6));
7291 #endif
7292 break;
7293 #endif
7294 #ifdef TARGET_NR_mmap2
7295 case TARGET_NR_mmap2:
7296 #ifndef MMAP_SHIFT
7297 #define MMAP_SHIFT 12
7298 #endif
7299 ret = get_errno(target_mmap(arg1, arg2, arg3,
7300 target_to_host_bitmask(arg4, mmap_flags_tbl),
7301 arg5,
7302 arg6 << MMAP_SHIFT));
7303 break;
7304 #endif
7305 case TARGET_NR_munmap:
7306 ret = get_errno(target_munmap(arg1, arg2));
7307 break;
7308 case TARGET_NR_mprotect:
7309 {
7310 TaskState *ts = cpu->opaque;
7311 /* Special hack to detect libc making the stack executable. */
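/* The guest stack is already mapped in full, so PROT_GROWSDOWN is dropped
   and the request widened down to the stack limit, mimicking what the
   kernel would do for a real grows-down stack mapping. */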
7312 if ((arg3 & PROT_GROWSDOWN)
7313 && arg1 >= ts->info->stack_limit
7314 && arg1 <= ts->info->start_stack) {
7315 arg3 &= ~PROT_GROWSDOWN;
7316 arg2 = arg2 + arg1 - ts->info->stack_limit;
7317 arg1 = ts->info->stack_limit;
7318 }
7319 }
7320 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7321 break;
7322 #ifdef TARGET_NR_mremap
7323 case TARGET_NR_mremap:
7324 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7325 break;
7326 #endif
7327 /* ??? msync/mlock/munlock are broken for softmmu. */
7328 #ifdef TARGET_NR_msync
7329 case TARGET_NR_msync:
7330 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7331 break;
7332 #endif
7333 #ifdef TARGET_NR_mlock
7334 case TARGET_NR_mlock:
7335 ret = get_errno(mlock(g2h(arg1), arg2));
7336 break;
7337 #endif
7338 #ifdef TARGET_NR_munlock
7339 case TARGET_NR_munlock:
7340 ret = get_errno(munlock(g2h(arg1), arg2));
7341 break;
7342 #endif
7343 #ifdef TARGET_NR_mlockall
7344 case TARGET_NR_mlockall:
7345 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7346 break;
7347 #endif
7348 #ifdef TARGET_NR_munlockall
7349 case TARGET_NR_munlockall:
7350 ret = get_errno(munlockall());
7351 break;
7352 #endif
7353 case TARGET_NR_truncate:
7354 if (!(p = lock_user_string(arg1)))
7355 goto efault;
7356 ret = get_errno(truncate(p, arg2));
7357 unlock_user(p, arg1, 0);
7358 break;
7359 case TARGET_NR_ftruncate:
7360 ret = get_errno(ftruncate(arg1, arg2));
7361 break;
7362 case TARGET_NR_fchmod:
7363 ret = get_errno(fchmod(arg1, arg2));
7364 break;
7365 #if defined(TARGET_NR_fchmodat)
7366 case TARGET_NR_fchmodat:
7367 if (!(p = lock_user_string(arg2)))
7368 goto efault;
7369 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7370 unlock_user(p, arg2, 0);
7371 break;
7372 #endif
7373 case TARGET_NR_getpriority:
7374 /* Note that negative values are valid for getpriority, so we must
7375 differentiate based on errno settings. */
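/* As with the kernel's own getpriority(), the value handed back to the
   guest (except on Alpha) is biased as 20 - nice, so a host nice value of
   -5 comes back as 25 and the guest libc un-biases it again. */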
7376 errno = 0;
7377 ret = getpriority(arg1, arg2);
7378 if (ret == -1 && errno != 0) {
7379 ret = -host_to_target_errno(errno);
7380 break;
7381 }
7382 #ifdef TARGET_ALPHA
7383 /* Return value is the unbiased priority. Signal no error. */
7384 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7385 #else
7386 /* Return value is a biased priority to avoid negative numbers. */
7387 ret = 20 - ret;
7388 #endif
7389 break;
7390 case TARGET_NR_setpriority:
7391 ret = get_errno(setpriority(arg1, arg2, arg3));
7392 break;
7393 #ifdef TARGET_NR_profil
7394 case TARGET_NR_profil:
7395 goto unimplemented;
7396 #endif
7397 case TARGET_NR_statfs:
7398 if (!(p = lock_user_string(arg1)))
7399 goto efault;
7400 ret = get_errno(statfs(path(p), &stfs));
7401 unlock_user(p, arg1, 0);
7402 convert_statfs:
7403 if (!is_error(ret)) {
7404 struct target_statfs *target_stfs;
7405
7406 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7407 goto efault;
7408 __put_user(stfs.f_type, &target_stfs->f_type);
7409 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7410 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7411 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7412 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7413 __put_user(stfs.f_files, &target_stfs->f_files);
7414 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7415 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7416 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7417 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7418 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7419 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7420 unlock_user_struct(target_stfs, arg2, 1);
7421 }
7422 break;
7423 case TARGET_NR_fstatfs:
7424 ret = get_errno(fstatfs(arg1, &stfs));
7425 goto convert_statfs;
7426 #ifdef TARGET_NR_statfs64
7427 case TARGET_NR_statfs64:
7428 if (!(p = lock_user_string(arg1)))
7429 goto efault;
7430 ret = get_errno(statfs(path(p), &stfs));
7431 unlock_user(p, arg1, 0);
7432 convert_statfs64:
7433 if (!is_error(ret)) {
7434 struct target_statfs64 *target_stfs;
7435
7436 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7437 goto efault;
7438 __put_user(stfs.f_type, &target_stfs->f_type);
7439 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7440 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7441 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7442 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7443 __put_user(stfs.f_files, &target_stfs->f_files);
7444 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7445 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7446 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7447 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7448 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7449 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7450 unlock_user_struct(target_stfs, arg3, 1);
7451 }
7452 break;
7453 case TARGET_NR_fstatfs64:
7454 ret = get_errno(fstatfs(arg1, &stfs));
7455 goto convert_statfs64;
7456 #endif
7457 #ifdef TARGET_NR_ioperm
7458 case TARGET_NR_ioperm:
7459 goto unimplemented;
7460 #endif
7461 #ifdef TARGET_NR_socketcall
7462 case TARGET_NR_socketcall:
7463 ret = do_socketcall(arg1, arg2);
7464 break;
7465 #endif
7466 #ifdef TARGET_NR_accept
7467 case TARGET_NR_accept:
7468 ret = do_accept4(arg1, arg2, arg3, 0);
7469 break;
7470 #endif
7471 #ifdef TARGET_NR_accept4
7472 case TARGET_NR_accept4:
7473 #ifdef CONFIG_ACCEPT4
7474 ret = do_accept4(arg1, arg2, arg3, arg4);
7475 #else
7476 goto unimplemented;
7477 #endif
7478 break;
7479 #endif
7480 #ifdef TARGET_NR_bind
7481 case TARGET_NR_bind:
7482 ret = do_bind(arg1, arg2, arg3);
7483 break;
7484 #endif
7485 #ifdef TARGET_NR_connect
7486 case TARGET_NR_connect:
7487 ret = do_connect(arg1, arg2, arg3);
7488 break;
7489 #endif
7490 #ifdef TARGET_NR_getpeername
7491 case TARGET_NR_getpeername:
7492 ret = do_getpeername(arg1, arg2, arg3);
7493 break;
7494 #endif
7495 #ifdef TARGET_NR_getsockname
7496 case TARGET_NR_getsockname:
7497 ret = do_getsockname(arg1, arg2, arg3);
7498 break;
7499 #endif
7500 #ifdef TARGET_NR_getsockopt
7501 case TARGET_NR_getsockopt:
7502 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7503 break;
7504 #endif
7505 #ifdef TARGET_NR_listen
7506 case TARGET_NR_listen:
7507 ret = get_errno(listen(arg1, arg2));
7508 break;
7509 #endif
7510 #ifdef TARGET_NR_recv
7511 case TARGET_NR_recv:
7512 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7513 break;
7514 #endif
7515 #ifdef TARGET_NR_recvfrom
7516 case TARGET_NR_recvfrom:
7517 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7518 break;
7519 #endif
7520 #ifdef TARGET_NR_recvmsg
7521 case TARGET_NR_recvmsg:
7522 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7523 break;
7524 #endif
7525 #ifdef TARGET_NR_send
7526 case TARGET_NR_send:
7527 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7528 break;
7529 #endif
7530 #ifdef TARGET_NR_sendmsg
7531 case TARGET_NR_sendmsg:
7532 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7533 break;
7534 #endif
7535 #ifdef TARGET_NR_sendmmsg
7536 case TARGET_NR_sendmmsg:
7537 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7538 break;
7539 case TARGET_NR_recvmmsg:
7540 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7541 break;
7542 #endif
7543 #ifdef TARGET_NR_sendto
7544 case TARGET_NR_sendto:
7545 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7546 break;
7547 #endif
7548 #ifdef TARGET_NR_shutdown
7549 case TARGET_NR_shutdown:
7550 ret = get_errno(shutdown(arg1, arg2));
7551 break;
7552 #endif
7553 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7554 case TARGET_NR_getrandom:
7555 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
7556 if (!p) {
7557 goto efault;
7558 }
7559 ret = get_errno(getrandom(p, arg2, arg3));
7560 unlock_user(p, arg1, ret);
7561 break;
7562 #endif
7563 #ifdef TARGET_NR_socket
7564 case TARGET_NR_socket:
7565 ret = do_socket(arg1, arg2, arg3);
7566 fd_trans_unregister(ret);
7567 break;
7568 #endif
7569 #ifdef TARGET_NR_socketpair
7570 case TARGET_NR_socketpair:
7571 ret = do_socketpair(arg1, arg2, arg3, arg4);
7572 break;
7573 #endif
7574 #ifdef TARGET_NR_setsockopt
7575 case TARGET_NR_setsockopt:
7576 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7577 break;
7578 #endif
7579
7580 case TARGET_NR_syslog:
7581 if (!(p = lock_user_string(arg2)))
7582 goto efault;
7583 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7584 unlock_user(p, arg2, 0);
7585 break;
7586
7587 case TARGET_NR_setitimer:
7588 {
7589 struct itimerval value, ovalue, *pvalue;
7590
7591 if (arg2) {
7592 pvalue = &value;
7593 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7594 || copy_from_user_timeval(&pvalue->it_value,
7595 arg2 + sizeof(struct target_timeval)))
7596 goto efault;
7597 } else {
7598 pvalue = NULL;
7599 }
7600 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7601 if (!is_error(ret) && arg3) {
7602 if (copy_to_user_timeval(arg3,
7603 &ovalue.it_interval)
7604 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7605 &ovalue.it_value))
7606 goto efault;
7607 }
7608 }
7609 break;
7610 case TARGET_NR_getitimer:
7611 {
7612 struct itimerval value;
7613
7614 ret = get_errno(getitimer(arg1, &value));
7615 if (!is_error(ret) && arg2) {
7616 if (copy_to_user_timeval(arg2,
7617 &value.it_interval)
7618 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7619 &value.it_value))
7620 goto efault;
7621 }
7622 }
7623 break;
7624 #ifdef TARGET_NR_stat
7625 case TARGET_NR_stat:
7626 if (!(p = lock_user_string(arg1)))
7627 goto efault;
7628 ret = get_errno(stat(path(p), &st));
7629 unlock_user(p, arg1, 0);
7630 goto do_stat;
7631 #endif
7632 #ifdef TARGET_NR_lstat
7633 case TARGET_NR_lstat:
7634 if (!(p = lock_user_string(arg1)))
7635 goto efault;
7636 ret = get_errno(lstat(path(p), &st));
7637 unlock_user(p, arg1, 0);
7638 goto do_stat;
7639 #endif
7640 case TARGET_NR_fstat:
7641 {
7642 ret = get_errno(fstat(arg1, &st));
7643 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7644 do_stat:
7645 #endif
7646 if (!is_error(ret)) {
7647 struct target_stat *target_st;
7648
7649 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7650 goto efault;
7651 memset(target_st, 0, sizeof(*target_st));
7652 __put_user(st.st_dev, &target_st->st_dev);
7653 __put_user(st.st_ino, &target_st->st_ino);
7654 __put_user(st.st_mode, &target_st->st_mode);
7655 __put_user(st.st_uid, &target_st->st_uid);
7656 __put_user(st.st_gid, &target_st->st_gid);
7657 __put_user(st.st_nlink, &target_st->st_nlink);
7658 __put_user(st.st_rdev, &target_st->st_rdev);
7659 __put_user(st.st_size, &target_st->st_size);
7660 __put_user(st.st_blksize, &target_st->st_blksize);
7661 __put_user(st.st_blocks, &target_st->st_blocks);
7662 __put_user(st.st_atime, &target_st->target_st_atime);
7663 __put_user(st.st_mtime, &target_st->target_st_mtime);
7664 __put_user(st.st_ctime, &target_st->target_st_ctime);
7665 unlock_user_struct(target_st, arg2, 1);
7666 }
7667 }
7668 break;
7669 #ifdef TARGET_NR_olduname
7670 case TARGET_NR_olduname:
7671 goto unimplemented;
7672 #endif
7673 #ifdef TARGET_NR_iopl
7674 case TARGET_NR_iopl:
7675 goto unimplemented;
7676 #endif
7677 case TARGET_NR_vhangup:
7678 ret = get_errno(vhangup());
7679 break;
7680 #ifdef TARGET_NR_idle
7681 case TARGET_NR_idle:
7682 goto unimplemented;
7683 #endif
7684 #ifdef TARGET_NR_syscall
7685 case TARGET_NR_syscall:
7686 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7687 arg6, arg7, arg8, 0);
7688 break;
7689 #endif
7690 case TARGET_NR_wait4:
7691 {
7692 int status;
7693 abi_long status_ptr = arg2;
7694 struct rusage rusage, *rusage_ptr;
7695 abi_ulong target_rusage = arg4;
7696 abi_long rusage_err;
7697 if (target_rusage)
7698 rusage_ptr = &rusage;
7699 else
7700 rusage_ptr = NULL;
7701 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7702 if (!is_error(ret)) {
7703 if (status_ptr && ret) {
7704 status = host_to_target_waitstatus(status);
7705 if (put_user_s32(status, status_ptr))
7706 goto efault;
7707 }
7708 if (target_rusage) {
7709 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7710 if (rusage_err) {
7711 ret = rusage_err;
7712 }
7713 }
7714 }
7715 }
7716 break;
7717 #ifdef TARGET_NR_swapoff
7718 case TARGET_NR_swapoff:
7719 if (!(p = lock_user_string(arg1)))
7720 goto efault;
7721 ret = get_errno(swapoff(p));
7722 unlock_user(p, arg1, 0);
7723 break;
7724 #endif
7725 case TARGET_NR_sysinfo:
7726 {
7727 struct target_sysinfo *target_value;
7728 struct sysinfo value;
7729 ret = get_errno(sysinfo(&value));
7730 if (!is_error(ret) && arg1)
7731 {
7732 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7733 goto efault;
7734 __put_user(value.uptime, &target_value->uptime);
7735 __put_user(value.loads[0], &target_value->loads[0]);
7736 __put_user(value.loads[1], &target_value->loads[1]);
7737 __put_user(value.loads[2], &target_value->loads[2]);
7738 __put_user(value.totalram, &target_value->totalram);
7739 __put_user(value.freeram, &target_value->freeram);
7740 __put_user(value.sharedram, &target_value->sharedram);
7741 __put_user(value.bufferram, &target_value->bufferram);
7742 __put_user(value.totalswap, &target_value->totalswap);
7743 __put_user(value.freeswap, &target_value->freeswap);
7744 __put_user(value.procs, &target_value->procs);
7745 __put_user(value.totalhigh, &target_value->totalhigh);
7746 __put_user(value.freehigh, &target_value->freehigh);
7747 __put_user(value.mem_unit, &target_value->mem_unit);
7748 unlock_user_struct(target_value, arg1, 1);
7749 }
7750 }
7751 break;
7752 #ifdef TARGET_NR_ipc
7753 case TARGET_NR_ipc:
7754 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7755 break;
7756 #endif
7757 #ifdef TARGET_NR_semget
7758 case TARGET_NR_semget:
7759 ret = get_errno(semget(arg1, arg2, arg3));
7760 break;
7761 #endif
7762 #ifdef TARGET_NR_semop
7763 case TARGET_NR_semop:
7764 ret = do_semop(arg1, arg2, arg3);
7765 break;
7766 #endif
7767 #ifdef TARGET_NR_semctl
7768 case TARGET_NR_semctl:
7769 ret = do_semctl(arg1, arg2, arg3, arg4);
7770 break;
7771 #endif
7772 #ifdef TARGET_NR_msgctl
7773 case TARGET_NR_msgctl:
7774 ret = do_msgctl(arg1, arg2, arg3);
7775 break;
7776 #endif
7777 #ifdef TARGET_NR_msgget
7778 case TARGET_NR_msgget:
7779 ret = get_errno(msgget(arg1, arg2));
7780 break;
7781 #endif
7782 #ifdef TARGET_NR_msgrcv
7783 case TARGET_NR_msgrcv:
7784 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7785 break;
7786 #endif
7787 #ifdef TARGET_NR_msgsnd
7788 case TARGET_NR_msgsnd:
7789 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7790 break;
7791 #endif
7792 #ifdef TARGET_NR_shmget
7793 case TARGET_NR_shmget:
7794 ret = get_errno(shmget(arg1, arg2, arg3));
7795 break;
7796 #endif
7797 #ifdef TARGET_NR_shmctl
7798 case TARGET_NR_shmctl:
7799 ret = do_shmctl(arg1, arg2, arg3);
7800 break;
7801 #endif
7802 #ifdef TARGET_NR_shmat
7803 case TARGET_NR_shmat:
7804 ret = do_shmat(arg1, arg2, arg3);
7805 break;
7806 #endif
7807 #ifdef TARGET_NR_shmdt
7808 case TARGET_NR_shmdt:
7809 ret = do_shmdt(arg1);
7810 break;
7811 #endif
7812 case TARGET_NR_fsync:
7813 ret = get_errno(fsync(arg1));
7814 break;
7815 case TARGET_NR_clone:
7816 /* Linux manages to have three different orderings for its
7817 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7818 * match the kernel's CONFIG_CLONE_* settings.
7819 * Microblaze is further special in that it uses a sixth
7820 * implicit argument to clone for the TLS pointer.
7821 */
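/* The reorderings below shuffle each guest convention into the
 * single fixed argument order that the do_fork() helper expects.
 */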
7822 #if defined(TARGET_MICROBLAZE)
7823 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7824 #elif defined(TARGET_CLONE_BACKWARDS)
7825 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7826 #elif defined(TARGET_CLONE_BACKWARDS2)
7827 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7828 #else
7829 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7830 #endif
7831 break;
7832 #ifdef __NR_exit_group
7833 /* new thread calls */
7834 case TARGET_NR_exit_group:
7835 #ifdef TARGET_GPROF
7836 _mcleanup();
7837 #endif
7838 gdb_exit(cpu_env, arg1);
7839 ret = get_errno(exit_group(arg1));
7840 break;
7841 #endif
7842 case TARGET_NR_setdomainname:
7843 if (!(p = lock_user_string(arg1)))
7844 goto efault;
7845 ret = get_errno(setdomainname(p, arg2));
7846 unlock_user(p, arg1, 0);
7847 break;
7848 case TARGET_NR_uname:
7849 /* no need to transcode because we use the linux syscall */
7850 {
7851 struct new_utsname * buf;
7852
7853 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7854 goto efault;
7855 ret = get_errno(sys_uname(buf));
7856 if (!is_error(ret)) {
7857 /* Overwrite the native machine name with whatever is being
7858 emulated. */
7859 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7860 /* Allow the user to override the reported release. */
7861 if (qemu_uname_release && *qemu_uname_release)
7862 strcpy (buf->release, qemu_uname_release);
7863 }
7864 unlock_user_struct(buf, arg1, 1);
7865 }
7866 break;
7867 #ifdef TARGET_I386
7868 case TARGET_NR_modify_ldt:
7869 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7870 break;
7871 #if !defined(TARGET_X86_64)
7872 case TARGET_NR_vm86old:
7873 goto unimplemented;
7874 case TARGET_NR_vm86:
7875 ret = do_vm86(cpu_env, arg1, arg2);
7876 break;
7877 #endif
7878 #endif
7879 case TARGET_NR_adjtimex:
7880 goto unimplemented;
7881 #ifdef TARGET_NR_create_module
7882 case TARGET_NR_create_module:
7883 #endif
7884 case TARGET_NR_init_module:
7885 case TARGET_NR_delete_module:
7886 #ifdef TARGET_NR_get_kernel_syms
7887 case TARGET_NR_get_kernel_syms:
7888 #endif
7889 goto unimplemented;
7890 case TARGET_NR_quotactl:
7891 goto unimplemented;
7892 case TARGET_NR_getpgid:
7893 ret = get_errno(getpgid(arg1));
7894 break;
7895 case TARGET_NR_fchdir:
7896 ret = get_errno(fchdir(arg1));
7897 break;
7898 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7899 case TARGET_NR_bdflush:
7900 goto unimplemented;
7901 #endif
7902 #ifdef TARGET_NR_sysfs
7903 case TARGET_NR_sysfs:
7904 goto unimplemented;
7905 #endif
7906 case TARGET_NR_personality:
7907 ret = get_errno(personality(arg1));
7908 break;
7909 #ifdef TARGET_NR_afs_syscall
7910 case TARGET_NR_afs_syscall:
7911 goto unimplemented;
7912 #endif
7913 #ifdef TARGET_NR__llseek /* Not on alpha */
7914 case TARGET_NR__llseek:
7915 {
7916 int64_t res;
7917 #if !defined(__NR_llseek)
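/* Hosts that lack a separate llseek syscall (typically 64-bit hosts)
 * can do the same job with a plain lseek() on the reassembled
 * 64-bit offset.
 */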
7918 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7919 if (res == -1) {
7920 ret = get_errno(res);
7921 } else {
7922 ret = 0;
7923 }
7924 #else
7925 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7926 #endif
7927 if ((ret == 0) && put_user_s64(res, arg4)) {
7928 goto efault;
7929 }
7930 }
7931 break;
7932 #endif
7933 #ifdef TARGET_NR_getdents
7934 case TARGET_NR_getdents:
7935 #ifdef __NR_getdents
7936 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7937 {
7938 struct target_dirent *target_dirp;
7939 struct linux_dirent *dirp;
7940 abi_long count = arg3;
7941
7942 dirp = g_try_malloc(count);
7943 if (!dirp) {
7944 ret = -TARGET_ENOMEM;
7945 goto fail;
7946 }
7947
7948 ret = get_errno(sys_getdents(arg1, dirp, count));
7949 if (!is_error(ret)) {
7950 struct linux_dirent *de;
7951 struct target_dirent *tde;
7952 int len = ret;
7953 int reclen, treclen;
7954 int count1, tnamelen;
7955
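/* Repack the host linux_dirent records as target_dirent records:
 * keep the name bytes, recompute d_reclen for the target header
 * size, and byteswap d_ino/d_off into the target byte order.
 */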
7956 count1 = 0;
7957 de = dirp;
7958 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7959 goto efault;
7960 tde = target_dirp;
7961 while (len > 0) {
7962 reclen = de->d_reclen;
7963 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7964 assert(tnamelen >= 0);
7965 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7966 assert(count1 + treclen <= count);
7967 tde->d_reclen = tswap16(treclen);
7968 tde->d_ino = tswapal(de->d_ino);
7969 tde->d_off = tswapal(de->d_off);
7970 memcpy(tde->d_name, de->d_name, tnamelen);
7971 de = (struct linux_dirent *)((char *)de + reclen);
7972 len -= reclen;
7973 tde = (struct target_dirent *)((char *)tde + treclen);
7974 count1 += treclen;
7975 }
7976 ret = count1;
7977 unlock_user(target_dirp, arg2, ret);
7978 }
7979 g_free(dirp);
7980 }
7981 #else
7982 {
7983 struct linux_dirent *dirp;
7984 abi_long count = arg3;
7985
7986 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7987 goto efault;
7988 ret = get_errno(sys_getdents(arg1, dirp, count));
7989 if (!is_error(ret)) {
7990 struct linux_dirent *de;
7991 int len = ret;
7992 int reclen;
7993 de = dirp;
7994 while (len > 0) {
7995 reclen = de->d_reclen;
7996 if (reclen > len)
7997 break;
7998 de->d_reclen = tswap16(reclen);
7999 tswapls(&de->d_ino);
8000 tswapls(&de->d_off);
8001 de = (struct linux_dirent *)((char *)de + reclen);
8002 len -= reclen;
8003 }
8004 }
8005 unlock_user(dirp, arg2, ret);
8006 }
8007 #endif
8008 #else
8009 /* Implement getdents in terms of getdents64 */
8010 {
8011 struct linux_dirent64 *dirp;
8012 abi_long count = arg3;
8013
8014 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8015 if (!dirp) {
8016 goto efault;
8017 }
8018 ret = get_errno(sys_getdents64(arg1, dirp, count));
8019 if (!is_error(ret)) {
8020 /* Convert the dirent64 structs to target dirent. We do this
8021 * in-place, since we can guarantee that a target_dirent is no
8022 * larger than a dirent64; however this means we have to be
8023 * careful to read everything before writing in the new format.
8024 */
8025 struct linux_dirent64 *de;
8026 struct target_dirent *tde;
8027 int len = ret;
8028 int tlen = 0;
8029
8030 de = dirp;
8031 tde = (struct target_dirent *)dirp;
8032 while (len > 0) {
8033 int namelen, treclen;
8034 int reclen = de->d_reclen;
8035 uint64_t ino = de->d_ino;
8036 int64_t off = de->d_off;
8037 uint8_t type = de->d_type;
8038
8039 namelen = strlen(de->d_name);
8040 treclen = offsetof(struct target_dirent, d_name)
8041 + namelen + 2;
8042 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8043
8044 memmove(tde->d_name, de->d_name, namelen + 1);
8045 tde->d_ino = tswapal(ino);
8046 tde->d_off = tswapal(off);
8047 tde->d_reclen = tswap16(treclen);
8048 /* The target_dirent type is in what was formerly a padding
8049 * byte at the end of the structure:
8050 */
8051 *(((char *)tde) + treclen - 1) = type;
8052
8053 de = (struct linux_dirent64 *)((char *)de + reclen);
8054 tde = (struct target_dirent *)((char *)tde + treclen);
8055 len -= reclen;
8056 tlen += treclen;
8057 }
8058 ret = tlen;
8059 }
8060 unlock_user(dirp, arg2, ret);
8061 }
8062 #endif
8063 break;
8064 #endif /* TARGET_NR_getdents */
8065 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8066 case TARGET_NR_getdents64:
8067 {
8068 struct linux_dirent64 *dirp;
8069 abi_long count = arg3;
8070 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8071 goto efault;
8072 ret = get_errno(sys_getdents64(arg1, dirp, count));
8073 if (!is_error(ret)) {
8074 struct linux_dirent64 *de;
8075 int len = ret;
8076 int reclen;
8077 de = dirp;
8078 while (len > 0) {
8079 reclen = de->d_reclen;
8080 if (reclen > len)
8081 break;
8082 de->d_reclen = tswap16(reclen);
8083 tswap64s((uint64_t *)&de->d_ino);
8084 tswap64s((uint64_t *)&de->d_off);
8085 de = (struct linux_dirent64 *)((char *)de + reclen);
8086 len -= reclen;
8087 }
8088 }
8089 unlock_user(dirp, arg2, ret);
8090 }
8091 break;
8092 #endif /* TARGET_NR_getdents64 */
8093 #if defined(TARGET_NR__newselect)
8094 case TARGET_NR__newselect:
8095 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8096 break;
8097 #endif
8098 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8099 # ifdef TARGET_NR_poll
8100 case TARGET_NR_poll:
8101 # endif
8102 # ifdef TARGET_NR_ppoll
8103 case TARGET_NR_ppoll:
8104 # endif
8105 {
8106 struct target_pollfd *target_pfd;
8107 unsigned int nfds = arg2;
8108 int timeout = arg3;
8109 struct pollfd *pfd;
8110 unsigned int i;
8111
8112 pfd = NULL;
8113 target_pfd = NULL;
8114 if (nfds) {
8115 target_pfd = lock_user(VERIFY_WRITE, arg1,
8116 sizeof(struct target_pollfd) * nfds, 1);
8117 if (!target_pfd) {
8118 goto efault;
8119 }
8120
8121 pfd = alloca(sizeof(struct pollfd) * nfds);
8122 for (i = 0; i < nfds; i++) {
8123 pfd[i].fd = tswap32(target_pfd[i].fd);
8124 pfd[i].events = tswap16(target_pfd[i].events);
8125 }
8126 }
8127
8128 # ifdef TARGET_NR_ppoll
8129 if (num == TARGET_NR_ppoll) {
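/* ppoll additionally takes a timespec timeout and a signal mask,
 * both converted from the target layout here; the trailing _NSIG/8
 * is the sigset size in bytes that the host kernel expects.
 */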
8130 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8131 target_sigset_t *target_set;
8132 sigset_t _set, *set = &_set;
8133
8134 if (arg3) {
8135 if (target_to_host_timespec(timeout_ts, arg3)) {
8136 unlock_user(target_pfd, arg1, 0);
8137 goto efault;
8138 }
8139 } else {
8140 timeout_ts = NULL;
8141 }
8142
8143 if (arg4) {
8144 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8145 if (!target_set) {
8146 unlock_user(target_pfd, arg1, 0);
8147 goto efault;
8148 }
8149 target_to_host_sigset(set, target_set);
8150 } else {
8151 set = NULL;
8152 }
8153
8154 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
8155
8156 if (!is_error(ret) && arg3) {
8157 host_to_target_timespec(arg3, timeout_ts);
8158 }
8159 if (arg4) {
8160 unlock_user(target_set, arg4, 0);
8161 }
8162 } else
8163 # endif
8164 ret = get_errno(poll(pfd, nfds, timeout));
8165
8166 if (!is_error(ret)) {
8167 for(i = 0; i < nfds; i++) {
8168 target_pfd[i].revents = tswap16(pfd[i].revents);
8169 }
8170 }
8171 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8172 }
8173 break;
8174 #endif
8175 case TARGET_NR_flock:
8176 /* NOTE: the flock constant seems to be the same for every
8177 Linux platform */
8178 ret = get_errno(flock(arg1, arg2));
8179 break;
8180 case TARGET_NR_readv:
8181 {
8182 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8183 if (vec != NULL) {
8184 ret = get_errno(readv(arg1, vec, arg3));
8185 unlock_iovec(vec, arg2, arg3, 1);
8186 } else {
8187 ret = -host_to_target_errno(errno);
8188 }
8189 }
8190 break;
8191 case TARGET_NR_writev:
8192 {
8193 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8194 if (vec != NULL) {
8195 ret = get_errno(writev(arg1, vec, arg3));
8196 unlock_iovec(vec, arg2, arg3, 0);
8197 } else {
8198 ret = -host_to_target_errno(errno);
8199 }
8200 }
8201 break;
8202 case TARGET_NR_getsid:
8203 ret = get_errno(getsid(arg1));
8204 break;
8205 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8206 case TARGET_NR_fdatasync:
8207 ret = get_errno(fdatasync(arg1));
8208 break;
8209 #endif
8210 #ifdef TARGET_NR__sysctl
8211 case TARGET_NR__sysctl:
8212 /* We don't implement this, but ENOTDIR is always a safe
8213 return value. */
8214 ret = -TARGET_ENOTDIR;
8215 break;
8216 #endif
8217 case TARGET_NR_sched_getaffinity:
8218 {
8219 unsigned int mask_size;
8220 unsigned long *mask;
8221
8222 /*
8223 * sched_getaffinity needs multiples of ulong, so need to take
8224 * care of mismatches between target ulong and host ulong sizes.
8225 */
8226 if (arg2 & (sizeof(abi_ulong) - 1)) {
8227 ret = -TARGET_EINVAL;
8228 break;
8229 }
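/* Round the requested length up to a whole number of host
 * unsigned longs, which is the granularity the host kernel
 * works in.
 */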
8230 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8231
8232 mask = alloca(mask_size);
8233 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8234
8235 if (!is_error(ret)) {
8236 if (ret > arg2) {
8237 /* More data returned than the caller's buffer will fit.
8238 * This only happens if sizeof(abi_long) < sizeof(long)
8239 * and the caller passed us a buffer holding an odd number
8240 * of abi_longs. If the host kernel is actually using the
8241 * extra 4 bytes then fail EINVAL; otherwise we can just
8242 * ignore them and only copy the interesting part.
8243 */
8244 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8245 if (numcpus > arg2 * 8) {
8246 ret = -TARGET_EINVAL;
8247 break;
8248 }
8249 ret = arg2;
8250 }
8251
8252 if (copy_to_user(arg3, mask, ret)) {
8253 goto efault;
8254 }
8255 }
8256 }
8257 break;
8258 case TARGET_NR_sched_setaffinity:
8259 {
8260 unsigned int mask_size;
8261 unsigned long *mask;
8262
8263 /*
8264 * sched_setaffinity needs multiples of ulong, so need to take
8265 * care of mismatches between target ulong and host ulong sizes.
8266 */
8267 if (arg2 & (sizeof(abi_ulong) - 1)) {
8268 ret = -TARGET_EINVAL;
8269 break;
8270 }
8271 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8272
8273 mask = alloca(mask_size);
8274 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8275 goto efault;
8276 }
8277 memcpy(mask, p, arg2);
8278 unlock_user_struct(p, arg3, 0);
8279
8280 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8281 }
8282 break;
8283 case TARGET_NR_sched_setparam:
8284 {
8285 struct sched_param *target_schp;
8286 struct sched_param schp;
8287
8288 if (arg2 == 0) {
8289 return -TARGET_EINVAL;
8290 }
8291 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8292 goto efault;
8293 schp.sched_priority = tswap32(target_schp->sched_priority);
8294 unlock_user_struct(target_schp, arg2, 0);
8295 ret = get_errno(sched_setparam(arg1, &schp));
8296 }
8297 break;
8298 case TARGET_NR_sched_getparam:
8299 {
8300 struct sched_param *target_schp;
8301 struct sched_param schp;
8302
8303 if (arg2 == 0) {
8304 return -TARGET_EINVAL;
8305 }
8306 ret = get_errno(sched_getparam(arg1, &schp));
8307 if (!is_error(ret)) {
8308 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8309 goto efault;
8310 target_schp->sched_priority = tswap32(schp.sched_priority);
8311 unlock_user_struct(target_schp, arg2, 1);
8312 }
8313 }
8314 break;
8315 case TARGET_NR_sched_setscheduler:
8316 {
8317 struct sched_param *target_schp;
8318 struct sched_param schp;
8319 if (arg3 == 0) {
8320 return -TARGET_EINVAL;
8321 }
8322 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8323 goto efault;
8324 schp.sched_priority = tswap32(target_schp->sched_priority);
8325 unlock_user_struct(target_schp, arg3, 0);
8326 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8327 }
8328 break;
8329 case TARGET_NR_sched_getscheduler:
8330 ret = get_errno(sched_getscheduler(arg1));
8331 break;
8332 case TARGET_NR_sched_yield:
8333 ret = get_errno(sched_yield());
8334 break;
8335 case TARGET_NR_sched_get_priority_max:
8336 ret = get_errno(sched_get_priority_max(arg1));
8337 break;
8338 case TARGET_NR_sched_get_priority_min:
8339 ret = get_errno(sched_get_priority_min(arg1));
8340 break;
8341 case TARGET_NR_sched_rr_get_interval:
8342 {
8343 struct timespec ts;
8344 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8345 if (!is_error(ret)) {
8346 ret = host_to_target_timespec(arg2, &ts);
8347 }
8348 }
8349 break;
8350 case TARGET_NR_nanosleep:
8351 {
8352 struct timespec req, rem;
8353 target_to_host_timespec(&req, arg1);
8354 ret = get_errno(nanosleep(&req, &rem));
8355 if (is_error(ret) && arg2) {
8356 host_to_target_timespec(arg2, &rem);
8357 }
8358 }
8359 break;
8360 #ifdef TARGET_NR_query_module
8361 case TARGET_NR_query_module:
8362 goto unimplemented;
8363 #endif
8364 #ifdef TARGET_NR_nfsservctl
8365 case TARGET_NR_nfsservctl:
8366 goto unimplemented;
8367 #endif
8368 case TARGET_NR_prctl:
8369 switch (arg1) {
8370 case PR_GET_PDEATHSIG:
8371 {
8372 int deathsig;
8373 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8374 if (!is_error(ret) && arg2
8375 && put_user_ual(deathsig, arg2)) {
8376 goto efault;
8377 }
8378 break;
8379 }
8380 #ifdef PR_GET_NAME
8381 case PR_GET_NAME:
8382 {
8383 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8384 if (!name) {
8385 goto efault;
8386 }
8387 ret = get_errno(prctl(arg1, (unsigned long)name,
8388 arg3, arg4, arg5));
8389 unlock_user(name, arg2, 16);
8390 break;
8391 }
8392 case PR_SET_NAME:
8393 {
8394 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8395 if (!name) {
8396 goto efault;
8397 }
8398 ret = get_errno(prctl(arg1, (unsigned long)name,
8399 arg3, arg4, arg5));
8400 unlock_user(name, arg2, 0);
8401 break;
8402 }
8403 #endif
8404 default:
8405 /* Most prctl options have no pointer arguments */
8406 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8407 break;
8408 }
8409 break;
8410 #ifdef TARGET_NR_arch_prctl
8411 case TARGET_NR_arch_prctl:
8412 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8413 ret = do_arch_prctl(cpu_env, arg1, arg2);
8414 break;
8415 #else
8416 goto unimplemented;
8417 #endif
8418 #endif
8419 #ifdef TARGET_NR_pread64
8420 case TARGET_NR_pread64:
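/* On ABIs where 64-bit arguments must start in an even register
 * pair, the offset halves arrive one slot later, so shift them
 * down before reassembling the offset.
 */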
8421 if (regpairs_aligned(cpu_env)) {
8422 arg4 = arg5;
8423 arg5 = arg6;
8424 }
8425 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8426 goto efault;
8427 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8428 unlock_user(p, arg2, ret);
8429 break;
8430 case TARGET_NR_pwrite64:
8431 if (regpairs_aligned(cpu_env)) {
8432 arg4 = arg5;
8433 arg5 = arg6;
8434 }
8435 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8436 goto efault;
8437 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8438 unlock_user(p, arg2, 0);
8439 break;
8440 #endif
8441 case TARGET_NR_getcwd:
8442 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8443 goto efault;
8444 ret = get_errno(sys_getcwd1(p, arg2));
8445 unlock_user(p, arg1, ret);
8446 break;
8447 case TARGET_NR_capget:
8448 case TARGET_NR_capset:
8449 {
8450 struct target_user_cap_header *target_header;
8451 struct target_user_cap_data *target_data = NULL;
8452 struct __user_cap_header_struct header;
8453 struct __user_cap_data_struct data[2];
8454 struct __user_cap_data_struct *dataptr = NULL;
8455 int i, target_datalen;
8456 int data_items = 1;
8457
8458 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8459 goto efault;
8460 }
8461 header.version = tswap32(target_header->version);
8462 header.pid = tswap32(target_header->pid);
8463
8464 if (header.version != _LINUX_CAPABILITY_VERSION) {
8465 /* Version 2 and up takes pointer to two user_data structs */
8466 data_items = 2;
8467 }
8468
8469 target_datalen = sizeof(*target_data) * data_items;
8470
8471 if (arg2) {
8472 if (num == TARGET_NR_capget) {
8473 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8474 } else {
8475 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8476 }
8477 if (!target_data) {
8478 unlock_user_struct(target_header, arg1, 0);
8479 goto efault;
8480 }
8481
8482 if (num == TARGET_NR_capset) {
8483 for (i = 0; i < data_items; i++) {
8484 data[i].effective = tswap32(target_data[i].effective);
8485 data[i].permitted = tswap32(target_data[i].permitted);
8486 data[i].inheritable = tswap32(target_data[i].inheritable);
8487 }
8488 }
8489
8490 dataptr = data;
8491 }
8492
8493 if (num == TARGET_NR_capget) {
8494 ret = get_errno(capget(&header, dataptr));
8495 } else {
8496 ret = get_errno(capset(&header, dataptr));
8497 }
8498
8499 /* The kernel always updates version for both capget and capset */
8500 target_header->version = tswap32(header.version);
8501 unlock_user_struct(target_header, arg1, 1);
8502
8503 if (arg2) {
8504 if (num == TARGET_NR_capget) {
8505 for (i = 0; i < data_items; i++) {
8506 target_data[i].effective = tswap32(data[i].effective);
8507 target_data[i].permitted = tswap32(data[i].permitted);
8508 target_data[i].inheritable = tswap32(data[i].inheritable);
8509 }
8510 unlock_user(target_data, arg2, target_datalen);
8511 } else {
8512 unlock_user(target_data, arg2, 0);
8513 }
8514 }
8515 break;
8516 }
8517 case TARGET_NR_sigaltstack:
8518 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8519 break;
8520
8521 #ifdef CONFIG_SENDFILE
8522 case TARGET_NR_sendfile:
8523 {
8524 off_t *offp = NULL;
8525 off_t off;
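/* sendfile takes a target-long-sized offset whereas sendfile64
 * below always uses a 64-bit one, hence the *_sal accessors here
 * and the *_s64 accessors there.
 */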
8526 if (arg3) {
8527 ret = get_user_sal(off, arg3);
8528 if (is_error(ret)) {
8529 break;
8530 }
8531 offp = &off;
8532 }
8533 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8534 if (!is_error(ret) && arg3) {
8535 abi_long ret2 = put_user_sal(off, arg3);
8536 if (is_error(ret2)) {
8537 ret = ret2;
8538 }
8539 }
8540 break;
8541 }
8542 #ifdef TARGET_NR_sendfile64
8543 case TARGET_NR_sendfile64:
8544 {
8545 off_t *offp = NULL;
8546 off_t off;
8547 if (arg3) {
8548 ret = get_user_s64(off, arg3);
8549 if (is_error(ret)) {
8550 break;
8551 }
8552 offp = &off;
8553 }
8554 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8555 if (!is_error(ret) && arg3) {
8556 abi_long ret2 = put_user_s64(off, arg3);
8557 if (is_error(ret2)) {
8558 ret = ret2;
8559 }
8560 }
8561 break;
8562 }
8563 #endif
8564 #else
8565 case TARGET_NR_sendfile:
8566 #ifdef TARGET_NR_sendfile64
8567 case TARGET_NR_sendfile64:
8568 #endif
8569 goto unimplemented;
8570 #endif
8571
8572 #ifdef TARGET_NR_getpmsg
8573 case TARGET_NR_getpmsg:
8574 goto unimplemented;
8575 #endif
8576 #ifdef TARGET_NR_putpmsg
8577 case TARGET_NR_putpmsg:
8578 goto unimplemented;
8579 #endif
8580 #ifdef TARGET_NR_vfork
8581 case TARGET_NR_vfork:
8582 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8583 0, 0, 0, 0));
8584 break;
8585 #endif
8586 #ifdef TARGET_NR_ugetrlimit
8587 case TARGET_NR_ugetrlimit:
8588 {
8589 struct rlimit rlim;
8590 int resource = target_to_host_resource(arg1);
8591 ret = get_errno(getrlimit(resource, &rlim));
8592 if (!is_error(ret)) {
8593 struct target_rlimit *target_rlim;
8594 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8595 goto efault;
8596 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8597 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8598 unlock_user_struct(target_rlim, arg2, 1);
8599 }
8600 break;
8601 }
8602 #endif
8603 #ifdef TARGET_NR_truncate64
8604 case TARGET_NR_truncate64:
8605 if (!(p = lock_user_string(arg1)))
8606 goto efault;
8607 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8608 unlock_user(p, arg1, 0);
8609 break;
8610 #endif
8611 #ifdef TARGET_NR_ftruncate64
8612 case TARGET_NR_ftruncate64:
8613 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8614 break;
8615 #endif
8616 #ifdef TARGET_NR_stat64
8617 case TARGET_NR_stat64:
8618 if (!(p = lock_user_string(arg1)))
8619 goto efault;
8620 ret = get_errno(stat(path(p), &st));
8621 unlock_user(p, arg1, 0);
8622 if (!is_error(ret))
8623 ret = host_to_target_stat64(cpu_env, arg2, &st);
8624 break;
8625 #endif
8626 #ifdef TARGET_NR_lstat64
8627 case TARGET_NR_lstat64:
8628 if (!(p = lock_user_string(arg1)))
8629 goto efault;
8630 ret = get_errno(lstat(path(p), &st));
8631 unlock_user(p, arg1, 0);
8632 if (!is_error(ret))
8633 ret = host_to_target_stat64(cpu_env, arg2, &st);
8634 break;
8635 #endif
8636 #ifdef TARGET_NR_fstat64
8637 case TARGET_NR_fstat64:
8638 ret = get_errno(fstat(arg1, &st));
8639 if (!is_error(ret))
8640 ret = host_to_target_stat64(cpu_env, arg2, &st);
8641 break;
8642 #endif
8643 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8644 #ifdef TARGET_NR_fstatat64
8645 case TARGET_NR_fstatat64:
8646 #endif
8647 #ifdef TARGET_NR_newfstatat
8648 case TARGET_NR_newfstatat:
8649 #endif
8650 if (!(p = lock_user_string(arg2)))
8651 goto efault;
8652 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8653 if (!is_error(ret))
8654 ret = host_to_target_stat64(cpu_env, arg3, &st);
8655 break;
8656 #endif
8657 #ifdef TARGET_NR_lchown
8658 case TARGET_NR_lchown:
8659 if (!(p = lock_user_string(arg1)))
8660 goto efault;
8661 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8662 unlock_user(p, arg1, 0);
8663 break;
8664 #endif
8665 #ifdef TARGET_NR_getuid
8666 case TARGET_NR_getuid:
8667 ret = get_errno(high2lowuid(getuid()));
8668 break;
8669 #endif
8670 #ifdef TARGET_NR_getgid
8671 case TARGET_NR_getgid:
8672 ret = get_errno(high2lowgid(getgid()));
8673 break;
8674 #endif
8675 #ifdef TARGET_NR_geteuid
8676 case TARGET_NR_geteuid:
8677 ret = get_errno(high2lowuid(geteuid()));
8678 break;
8679 #endif
8680 #ifdef TARGET_NR_getegid
8681 case TARGET_NR_getegid:
8682 ret = get_errno(high2lowgid(getegid()));
8683 break;
8684 #endif
8685 case TARGET_NR_setreuid:
8686 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8687 break;
8688 case TARGET_NR_setregid:
8689 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8690 break;
8691 case TARGET_NR_getgroups:
8692 {
8693 int gidsetsize = arg1;
8694 target_id *target_grouplist;
8695 gid_t *grouplist;
8696 int i;
8697
8698 grouplist = alloca(gidsetsize * sizeof(gid_t));
8699 ret = get_errno(getgroups(gidsetsize, grouplist));
8700 if (gidsetsize == 0)
8701 break;
8702 if (!is_error(ret)) {
8703 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8704 if (!target_grouplist)
8705 goto efault;
8706 for(i = 0;i < ret; i++)
8707 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8708 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8709 }
8710 }
8711 break;
8712 case TARGET_NR_setgroups:
8713 {
8714 int gidsetsize = arg1;
8715 target_id *target_grouplist;
8716 gid_t *grouplist = NULL;
8717 int i;
8718 if (gidsetsize) {
8719 grouplist = alloca(gidsetsize * sizeof(gid_t));
8720 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8721 if (!target_grouplist) {
8722 ret = -TARGET_EFAULT;
8723 goto fail;
8724 }
8725 for (i = 0; i < gidsetsize; i++) {
8726 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8727 }
8728 unlock_user(target_grouplist, arg2, 0);
8729 }
8730 ret = get_errno(setgroups(gidsetsize, grouplist));
8731 }
8732 break;
8733 case TARGET_NR_fchown:
8734 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8735 break;
8736 #if defined(TARGET_NR_fchownat)
8737 case TARGET_NR_fchownat:
8738 if (!(p = lock_user_string(arg2)))
8739 goto efault;
8740 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8741 low2highgid(arg4), arg5));
8742 unlock_user(p, arg2, 0);
8743 break;
8744 #endif
8745 #ifdef TARGET_NR_setresuid
8746 case TARGET_NR_setresuid:
8747 ret = get_errno(setresuid(low2highuid(arg1),
8748 low2highuid(arg2),
8749 low2highuid(arg3)));
8750 break;
8751 #endif
8752 #ifdef TARGET_NR_getresuid
8753 case TARGET_NR_getresuid:
8754 {
8755 uid_t ruid, euid, suid;
8756 ret = get_errno(getresuid(&ruid, &euid, &suid));
8757 if (!is_error(ret)) {
8758 if (put_user_id(high2lowuid(ruid), arg1)
8759 || put_user_id(high2lowuid(euid), arg2)
8760 || put_user_id(high2lowuid(suid), arg3))
8761 goto efault;
8762 }
8763 }
8764 break;
8765 #endif
8766 #ifdef TARGET_NR_setresgid
8767 case TARGET_NR_setresgid:
8768 ret = get_errno(setresgid(low2highgid(arg1),
8769 low2highgid(arg2),
8770 low2highgid(arg3)));
8771 break;
8772 #endif
8773 #ifdef TARGET_NR_getresgid
8774 case TARGET_NR_getresgid:
8775 {
8776 gid_t rgid, egid, sgid;
8777 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8778 if (!is_error(ret)) {
8779 if (put_user_id(high2lowgid(rgid), arg1)
8780 || put_user_id(high2lowgid(egid), arg2)
8781 || put_user_id(high2lowgid(sgid), arg3))
8782 goto efault;
8783 }
8784 }
8785 break;
8786 #endif
8787 #ifdef TARGET_NR_chown
8788 case TARGET_NR_chown:
8789 if (!(p = lock_user_string(arg1)))
8790 goto efault;
8791 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8792 unlock_user(p, arg1, 0);
8793 break;
8794 #endif
8795 case TARGET_NR_setuid:
8796 ret = get_errno(setuid(low2highuid(arg1)));
8797 break;
8798 case TARGET_NR_setgid:
8799 ret = get_errno(setgid(low2highgid(arg1)));
8800 break;
8801 case TARGET_NR_setfsuid:
8802 ret = get_errno(setfsuid(arg1));
8803 break;
8804 case TARGET_NR_setfsgid:
8805 ret = get_errno(setfsgid(arg1));
8806 break;
8807
8808 #ifdef TARGET_NR_lchown32
8809 case TARGET_NR_lchown32:
8810 if (!(p = lock_user_string(arg1)))
8811 goto efault;
8812 ret = get_errno(lchown(p, arg2, arg3));
8813 unlock_user(p, arg1, 0);
8814 break;
8815 #endif
8816 #ifdef TARGET_NR_getuid32
8817 case TARGET_NR_getuid32:
8818 ret = get_errno(getuid());
8819 break;
8820 #endif
8821
8822 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8823 /* Alpha specific */
8824 case TARGET_NR_getxuid:
8825 {
8826 uid_t euid;
8827 euid=geteuid();
8828 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8829 }
8830 ret = get_errno(getuid());
8831 break;
8832 #endif
8833 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8834 /* Alpha specific */
8835 case TARGET_NR_getxgid:
8836 {
8837 gid_t egid;
8838 egid=getegid();
8839 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8840 }
8841 ret = get_errno(getgid());
8842 break;
8843 #endif
8844 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8845 /* Alpha specific */
8846 case TARGET_NR_osf_getsysinfo:
8847 ret = -TARGET_EOPNOTSUPP;
8848 switch (arg1) {
8849 case TARGET_GSI_IEEE_FP_CONTROL:
8850 {
8851 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8852
8853 /* Copied from linux ieee_fpcr_to_swcr. */
8854 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8855 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8856 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8857 | SWCR_TRAP_ENABLE_DZE
8858 | SWCR_TRAP_ENABLE_OVF);
8859 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8860 | SWCR_TRAP_ENABLE_INE);
8861 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8862 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8863
8864 if (put_user_u64 (swcr, arg2))
8865 goto efault;
8866 ret = 0;
8867 }
8868 break;
8869
8870 /* case GSI_IEEE_STATE_AT_SIGNAL:
8871 -- Not implemented in linux kernel.
8872 case GSI_UACPROC:
8873 -- Retrieves current unaligned access state; not much used.
8874 case GSI_PROC_TYPE:
8875 -- Retrieves implver information; surely not used.
8876 case GSI_GET_HWRPB:
8877 -- Grabs a copy of the HWRPB; surely not used.
8878 */
8879 }
8880 break;
8881 #endif
8882 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8883 /* Alpha specific */
8884 case TARGET_NR_osf_setsysinfo:
8885 ret = -TARGET_EOPNOTSUPP;
8886 switch (arg1) {
8887 case TARGET_SSI_IEEE_FP_CONTROL:
8888 {
8889 uint64_t swcr, fpcr, orig_fpcr;
8890
8891 if (get_user_u64 (swcr, arg2)) {
8892 goto efault;
8893 }
8894 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8895 fpcr = orig_fpcr & FPCR_DYN_MASK;
8896
8897 /* Copied from linux ieee_swcr_to_fpcr. */
8898 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8899 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8900 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8901 | SWCR_TRAP_ENABLE_DZE
8902 | SWCR_TRAP_ENABLE_OVF)) << 48;
8903 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8904 | SWCR_TRAP_ENABLE_INE)) << 57;
8905 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8906 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8907
8908 cpu_alpha_store_fpcr(cpu_env, fpcr);
8909 ret = 0;
8910 }
8911 break;
8912
8913 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8914 {
8915 uint64_t exc, fpcr, orig_fpcr;
8916 int si_code;
8917
8918 if (get_user_u64(exc, arg2)) {
8919 goto efault;
8920 }
8921
8922 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8923
8924 /* We only add to the exception status here. */
8925 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8926
8927 cpu_alpha_store_fpcr(cpu_env, fpcr);
8928 ret = 0;
8929
8930 /* Old exceptions are not signaled. */
8931 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8932
8933 /* If any exceptions set by this call,
8934 and are unmasked, send a signal. */
8935 si_code = 0;
8936 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8937 si_code = TARGET_FPE_FLTRES;
8938 }
8939 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8940 si_code = TARGET_FPE_FLTUND;
8941 }
8942 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8943 si_code = TARGET_FPE_FLTOVF;
8944 }
8945 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8946 si_code = TARGET_FPE_FLTDIV;
8947 }
8948 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8949 si_code = TARGET_FPE_FLTINV;
8950 }
8951 if (si_code != 0) {
8952 target_siginfo_t info;
8953 info.si_signo = SIGFPE;
8954 info.si_errno = 0;
8955 info.si_code = si_code;
8956 info._sifields._sigfault._addr
8957 = ((CPUArchState *)cpu_env)->pc;
8958 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8959 }
8960 }
8961 break;
8962
8963 /* case SSI_NVPAIRS:
8964 -- Used with SSIN_UACPROC to enable unaligned accesses.
8965 case SSI_IEEE_STATE_AT_SIGNAL:
8966 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8967 -- Not implemented in linux kernel
8968 */
8969 }
8970 break;
8971 #endif
8972 #ifdef TARGET_NR_osf_sigprocmask
8973 /* Alpha specific. */
8974 case TARGET_NR_osf_sigprocmask:
8975 {
8976 abi_ulong mask;
8977 int how;
8978 sigset_t set, oldset;
8979
8980 switch(arg1) {
8981 case TARGET_SIG_BLOCK:
8982 how = SIG_BLOCK;
8983 break;
8984 case TARGET_SIG_UNBLOCK:
8985 how = SIG_UNBLOCK;
8986 break;
8987 case TARGET_SIG_SETMASK:
8988 how = SIG_SETMASK;
8989 break;
8990 default:
8991 ret = -TARGET_EINVAL;
8992 goto fail;
8993 }
8994 mask = arg2;
8995 target_to_host_old_sigset(&set, &mask);
8996 do_sigprocmask(how, &set, &oldset);
8997 host_to_target_old_sigset(&mask, &oldset);
8998 ret = mask;
8999 }
9000 break;
9001 #endif
9002
9003 #ifdef TARGET_NR_getgid32
9004 case TARGET_NR_getgid32:
9005 ret = get_errno(getgid());
9006 break;
9007 #endif
9008 #ifdef TARGET_NR_geteuid32
9009 case TARGET_NR_geteuid32:
9010 ret = get_errno(geteuid());
9011 break;
9012 #endif
9013 #ifdef TARGET_NR_getegid32
9014 case TARGET_NR_getegid32:
9015 ret = get_errno(getegid());
9016 break;
9017 #endif
9018 #ifdef TARGET_NR_setreuid32
9019 case TARGET_NR_setreuid32:
9020 ret = get_errno(setreuid(arg1, arg2));
9021 break;
9022 #endif
9023 #ifdef TARGET_NR_setregid32
9024 case TARGET_NR_setregid32:
9025 ret = get_errno(setregid(arg1, arg2));
9026 break;
9027 #endif
9028 #ifdef TARGET_NR_getgroups32
9029 case TARGET_NR_getgroups32:
9030 {
9031 int gidsetsize = arg1;
9032 uint32_t *target_grouplist;
9033 gid_t *grouplist;
9034 int i;
9035
9036 grouplist = alloca(gidsetsize * sizeof(gid_t));
9037 ret = get_errno(getgroups(gidsetsize, grouplist));
9038 if (gidsetsize == 0)
9039 break;
9040 if (!is_error(ret)) {
9041 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9042 if (!target_grouplist) {
9043 ret = -TARGET_EFAULT;
9044 goto fail;
9045 }
9046 for(i = 0;i < ret; i++)
9047 target_grouplist[i] = tswap32(grouplist[i]);
9048 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9049 }
9050 }
9051 break;
9052 #endif
9053 #ifdef TARGET_NR_setgroups32
9054 case TARGET_NR_setgroups32:
9055 {
9056 int gidsetsize = arg1;
9057 uint32_t *target_grouplist;
9058 gid_t *grouplist;
9059 int i;
9060
9061 grouplist = alloca(gidsetsize * sizeof(gid_t));
9062 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9063 if (!target_grouplist) {
9064 ret = -TARGET_EFAULT;
9065 goto fail;
9066 }
9067 for(i = 0;i < gidsetsize; i++)
9068 grouplist[i] = tswap32(target_grouplist[i]);
9069 unlock_user(target_grouplist, arg2, 0);
9070 ret = get_errno(setgroups(gidsetsize, grouplist));
9071 }
9072 break;
9073 #endif
9074 #ifdef TARGET_NR_fchown32
9075 case TARGET_NR_fchown32:
9076 ret = get_errno(fchown(arg1, arg2, arg3));
9077 break;
9078 #endif
9079 #ifdef TARGET_NR_setresuid32
9080 case TARGET_NR_setresuid32:
9081 ret = get_errno(setresuid(arg1, arg2, arg3));
9082 break;
9083 #endif
9084 #ifdef TARGET_NR_getresuid32
9085 case TARGET_NR_getresuid32:
9086 {
9087 uid_t ruid, euid, suid;
9088 ret = get_errno(getresuid(&ruid, &euid, &suid));
9089 if (!is_error(ret)) {
9090 if (put_user_u32(ruid, arg1)
9091 || put_user_u32(euid, arg2)
9092 || put_user_u32(suid, arg3))
9093 goto efault;
9094 }
9095 }
9096 break;
9097 #endif
9098 #ifdef TARGET_NR_setresgid32
9099 case TARGET_NR_setresgid32:
9100 ret = get_errno(setresgid(arg1, arg2, arg3));
9101 break;
9102 #endif
9103 #ifdef TARGET_NR_getresgid32
9104 case TARGET_NR_getresgid32:
9105 {
9106 gid_t rgid, egid, sgid;
9107 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9108 if (!is_error(ret)) {
9109 if (put_user_u32(rgid, arg1)
9110 || put_user_u32(egid, arg2)
9111 || put_user_u32(sgid, arg3))
9112 goto efault;
9113 }
9114 }
9115 break;
9116 #endif
9117 #ifdef TARGET_NR_chown32
9118 case TARGET_NR_chown32:
9119 if (!(p = lock_user_string(arg1)))
9120 goto efault;
9121 ret = get_errno(chown(p, arg2, arg3));
9122 unlock_user(p, arg1, 0);
9123 break;
9124 #endif
9125 #ifdef TARGET_NR_setuid32
9126 case TARGET_NR_setuid32:
9127 ret = get_errno(setuid(arg1));
9128 break;
9129 #endif
9130 #ifdef TARGET_NR_setgid32
9131 case TARGET_NR_setgid32:
9132 ret = get_errno(setgid(arg1));
9133 break;
9134 #endif
9135 #ifdef TARGET_NR_setfsuid32
9136 case TARGET_NR_setfsuid32:
9137 ret = get_errno(setfsuid(arg1));
9138 break;
9139 #endif
9140 #ifdef TARGET_NR_setfsgid32
9141 case TARGET_NR_setfsgid32:
9142 ret = get_errno(setfsgid(arg1));
9143 break;
9144 #endif
9145
9146 case TARGET_NR_pivot_root:
9147 goto unimplemented;
9148 #ifdef TARGET_NR_mincore
9149 case TARGET_NR_mincore:
9150 {
9151 void *a;
9152 ret = -TARGET_EFAULT;
9153 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
9154 goto efault;
9155 if (!(p = lock_user_string(arg3)))
9156 goto mincore_fail;
9157 ret = get_errno(mincore(a, arg2, p));
9158 unlock_user(p, arg3, ret);
9159 mincore_fail:
9160 unlock_user(a, arg1, 0);
9161 }
9162 break;
9163 #endif
9164 #ifdef TARGET_NR_arm_fadvise64_64
9165 case TARGET_NR_arm_fadvise64_64:
9166 {
9167 /*
9168 * arm_fadvise64_64 looks like fadvise64_64 but
9169 * with different argument order
9170 */
9171 abi_long temp;
9172 temp = arg3;
9173 arg3 = arg4;
9174 arg4 = temp;
9175 }
9176 #endif
9177 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9178 #ifdef TARGET_NR_fadvise64_64
9179 case TARGET_NR_fadvise64_64:
9180 #endif
9181 #ifdef TARGET_NR_fadvise64
9182 case TARGET_NR_fadvise64:
9183 #endif
9184 #ifdef TARGET_S390X
9185 switch (arg4) {
9186 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9187 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9188 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9189 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9190 default: break;
9191 }
9192 #endif
9193 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9194 break;
9195 #endif
9196 #ifdef TARGET_NR_madvise
9197 case TARGET_NR_madvise:
9198 /* A straight passthrough may not be safe because qemu sometimes
9199 turns private file-backed mappings into anonymous mappings.
9200 This will break MADV_DONTNEED.
9201 This is a hint, so ignoring and returning success is ok. */
9202 ret = get_errno(0);
9203 break;
9204 #endif
9205 #if TARGET_ABI_BITS == 32
9206 case TARGET_NR_fcntl64:
9207 {
9208 int cmd;
9209 struct flock64 fl;
9210 struct target_flock64 *target_fl;
9211 #ifdef TARGET_ARM
9212 struct target_eabi_flock64 *target_efl;
9213 #endif
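/* Old-ABI and EABI ARM pad the 64-bit flock structure differently,
 * so the EABI layout is converted via target_eabi_flock64 when the
 * emulated CPU is running in EABI mode.
 */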
9214
9215 cmd = target_to_host_fcntl_cmd(arg2);
9216 if (cmd == -TARGET_EINVAL) {
9217 ret = cmd;
9218 break;
9219 }
9220
9221 switch(arg2) {
9222 case TARGET_F_GETLK64:
9223 #ifdef TARGET_ARM
9224 if (((CPUARMState *)cpu_env)->eabi) {
9225 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9226 goto efault;
9227 fl.l_type = tswap16(target_efl->l_type);
9228 fl.l_whence = tswap16(target_efl->l_whence);
9229 fl.l_start = tswap64(target_efl->l_start);
9230 fl.l_len = tswap64(target_efl->l_len);
9231 fl.l_pid = tswap32(target_efl->l_pid);
9232 unlock_user_struct(target_efl, arg3, 0);
9233 } else
9234 #endif
9235 {
9236 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9237 goto efault;
9238 fl.l_type = tswap16(target_fl->l_type);
9239 fl.l_whence = tswap16(target_fl->l_whence);
9240 fl.l_start = tswap64(target_fl->l_start);
9241 fl.l_len = tswap64(target_fl->l_len);
9242 fl.l_pid = tswap32(target_fl->l_pid);
9243 unlock_user_struct(target_fl, arg3, 0);
9244 }
9245 ret = get_errno(fcntl(arg1, cmd, &fl));
9246 if (ret == 0) {
9247 #ifdef TARGET_ARM
9248 if (((CPUARMState *)cpu_env)->eabi) {
9249 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9250 goto efault;
9251 target_efl->l_type = tswap16(fl.l_type);
9252 target_efl->l_whence = tswap16(fl.l_whence);
9253 target_efl->l_start = tswap64(fl.l_start);
9254 target_efl->l_len = tswap64(fl.l_len);
9255 target_efl->l_pid = tswap32(fl.l_pid);
9256 unlock_user_struct(target_efl, arg3, 1);
9257 } else
9258 #endif
9259 {
9260 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9261 goto efault;
9262 target_fl->l_type = tswap16(fl.l_type);
9263 target_fl->l_whence = tswap16(fl.l_whence);
9264 target_fl->l_start = tswap64(fl.l_start);
9265 target_fl->l_len = tswap64(fl.l_len);
9266 target_fl->l_pid = tswap32(fl.l_pid);
9267 unlock_user_struct(target_fl, arg3, 1);
9268 }
9269 }
9270 break;
9271
9272 case TARGET_F_SETLK64:
9273 case TARGET_F_SETLKW64:
9274 #ifdef TARGET_ARM
9275 if (((CPUARMState *)cpu_env)->eabi) {
9276 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9277 goto efault;
9278 fl.l_type = tswap16(target_efl->l_type);
9279 fl.l_whence = tswap16(target_efl->l_whence);
9280 fl.l_start = tswap64(target_efl->l_start);
9281 fl.l_len = tswap64(target_efl->l_len);
9282 fl.l_pid = tswap32(target_efl->l_pid);
9283 unlock_user_struct(target_efl, arg3, 0);
9284 } else
9285 #endif
9286 {
9287 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9288 goto efault;
9289 fl.l_type = tswap16(target_fl->l_type);
9290 fl.l_whence = tswap16(target_fl->l_whence);
9291 fl.l_start = tswap64(target_fl->l_start);
9292 fl.l_len = tswap64(target_fl->l_len);
9293 fl.l_pid = tswap32(target_fl->l_pid);
9294 unlock_user_struct(target_fl, arg3, 0);
9295 }
9296 ret = get_errno(fcntl(arg1, cmd, &fl));
9297 break;
9298 default:
9299 ret = do_fcntl(arg1, arg2, arg3);
9300 break;
9301 }
9302 break;
9303 }
9304 #endif
9305 #ifdef TARGET_NR_cacheflush
9306 case TARGET_NR_cacheflush:
9307 /* self-modifying code is handled automatically, so nothing needed */
9308 ret = 0;
9309 break;
9310 #endif
9311 #ifdef TARGET_NR_security
9312 case TARGET_NR_security:
9313 goto unimplemented;
9314 #endif
9315 #ifdef TARGET_NR_getpagesize
9316 case TARGET_NR_getpagesize:
9317 ret = TARGET_PAGE_SIZE;
9318 break;
9319 #endif
9320 case TARGET_NR_gettid:
9321 ret = get_errno(gettid());
9322 break;
9323 #ifdef TARGET_NR_readahead
9324 case TARGET_NR_readahead:
9325 #if TARGET_ABI_BITS == 32
9326 if (regpairs_aligned(cpu_env)) {
9327 arg2 = arg3;
9328 arg3 = arg4;
9329 arg4 = arg5;
9330 }
9331 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9332 #else
9333 ret = get_errno(readahead(arg1, arg2, arg3));
9334 #endif
9335 break;
9336 #endif
9337 #ifdef CONFIG_ATTR
9338 #ifdef TARGET_NR_setxattr
9339 case TARGET_NR_listxattr:
9340 case TARGET_NR_llistxattr:
9341 {
9342 void *p, *b = 0;
9343 if (arg2) {
9344 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9345 if (!b) {
9346 ret = -TARGET_EFAULT;
9347 break;
9348 }
9349 }
9350 p = lock_user_string(arg1);
9351 if (p) {
9352 if (num == TARGET_NR_listxattr) {
9353 ret = get_errno(listxattr(p, b, arg3));
9354 } else {
9355 ret = get_errno(llistxattr(p, b, arg3));
9356 }
9357 } else {
9358 ret = -TARGET_EFAULT;
9359 }
9360 unlock_user(p, arg1, 0);
9361 unlock_user(b, arg2, arg3);
9362 break;
9363 }
9364 case TARGET_NR_flistxattr:
9365 {
9366 void *b = 0;
9367 if (arg2) {
9368 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9369 if (!b) {
9370 ret = -TARGET_EFAULT;
9371 break;
9372 }
9373 }
9374 ret = get_errno(flistxattr(arg1, b, arg3));
9375 unlock_user(b, arg2, arg3);
9376 break;
9377 }
9378 case TARGET_NR_setxattr:
9379 case TARGET_NR_lsetxattr:
9380 {
9381 void *p, *n, *v = 0;
9382 if (arg3) {
9383 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9384 if (!v) {
9385 ret = -TARGET_EFAULT;
9386 break;
9387 }
9388 }
9389 p = lock_user_string(arg1);
9390 n = lock_user_string(arg2);
9391 if (p && n) {
9392 if (num == TARGET_NR_setxattr) {
9393 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9394 } else {
9395 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9396 }
9397 } else {
9398 ret = -TARGET_EFAULT;
9399 }
9400 unlock_user(p, arg1, 0);
9401 unlock_user(n, arg2, 0);
9402 unlock_user(v, arg3, 0);
9403 }
9404 break;
9405 case TARGET_NR_fsetxattr:
9406 {
9407 void *n, *v = 0;
9408 if (arg3) {
9409 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9410 if (!v) {
9411 ret = -TARGET_EFAULT;
9412 break;
9413 }
9414 }
9415 n = lock_user_string(arg2);
9416 if (n) {
9417 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9418 } else {
9419 ret = -TARGET_EFAULT;
9420 }
9421 unlock_user(n, arg2, 0);
9422 unlock_user(v, arg3, 0);
9423 }
9424 break;
9425 case TARGET_NR_getxattr:
9426 case TARGET_NR_lgetxattr:
9427 {
9428 void *p, *n, *v = 0;
9429 if (arg3) {
9430 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9431 if (!v) {
9432 ret = -TARGET_EFAULT;
9433 break;
9434 }
9435 }
9436 p = lock_user_string(arg1);
9437 n = lock_user_string(arg2);
9438 if (p && n) {
9439 if (num == TARGET_NR_getxattr) {
9440 ret = get_errno(getxattr(p, n, v, arg4));
9441 } else {
9442 ret = get_errno(lgetxattr(p, n, v, arg4));
9443 }
9444 } else {
9445 ret = -TARGET_EFAULT;
9446 }
9447 unlock_user(p, arg1, 0);
9448 unlock_user(n, arg2, 0);
9449 unlock_user(v, arg3, arg4);
9450 }
9451 break;
9452 case TARGET_NR_fgetxattr:
9453 {
9454 void *n, *v = 0;
9455 if (arg3) {
9456 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9457 if (!v) {
9458 ret = -TARGET_EFAULT;
9459 break;
9460 }
9461 }
9462 n = lock_user_string(arg2);
9463 if (n) {
9464 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9465 } else {
9466 ret = -TARGET_EFAULT;
9467 }
9468 unlock_user(n, arg2, 0);
9469 unlock_user(v, arg3, arg4);
9470 }
9471 break;
9472 case TARGET_NR_removexattr:
9473 case TARGET_NR_lremovexattr:
9474 {
9475 void *p, *n;
9476 p = lock_user_string(arg1);
9477 n = lock_user_string(arg2);
9478 if (p && n) {
9479 if (num == TARGET_NR_removexattr) {
9480 ret = get_errno(removexattr(p, n));
9481 } else {
9482 ret = get_errno(lremovexattr(p, n));
9483 }
9484 } else {
9485 ret = -TARGET_EFAULT;
9486 }
9487 unlock_user(p, arg1, 0);
9488 unlock_user(n, arg2, 0);
9489 }
9490 break;
9491 case TARGET_NR_fremovexattr:
9492 {
9493 void *n;
9494 n = lock_user_string(arg2);
9495 if (n) {
9496 ret = get_errno(fremovexattr(arg1, n));
9497 } else {
9498 ret = -TARGET_EFAULT;
9499 }
9500 unlock_user(n, arg2, 0);
9501 }
9502 break;
9503 #endif
9504 #endif /* CONFIG_ATTR */
9505 #ifdef TARGET_NR_set_thread_area
9506 case TARGET_NR_set_thread_area:
9507 #if defined(TARGET_MIPS)
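/* On MIPS the TLS pointer is kept in the CP0 UserLocal register,
 * which guest code reads back with the rdhwr instruction.
 */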
9508 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9509 ret = 0;
9510 break;
9511 #elif defined(TARGET_CRIS)
9512 if (arg1 & 0xff)
9513 ret = -TARGET_EINVAL;
9514 else {
9515 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9516 ret = 0;
9517 }
9518 break;
9519 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9520 ret = do_set_thread_area(cpu_env, arg1);
9521 break;
9522 #elif defined(TARGET_M68K)
9523 {
9524 TaskState *ts = cpu->opaque;
9525 ts->tp_value = arg1;
9526 ret = 0;
9527 break;
9528 }
9529 #else
9530 goto unimplemented_nowarn;
9531 #endif
9532 #endif
9533 #ifdef TARGET_NR_get_thread_area
9534 case TARGET_NR_get_thread_area:
9535 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9536 ret = do_get_thread_area(cpu_env, arg1);
9537 break;
9538 #elif defined(TARGET_M68K)
9539 {
9540 TaskState *ts = cpu->opaque;
9541 ret = ts->tp_value;
9542 break;
9543 }
9544 #else
9545 goto unimplemented_nowarn;
9546 #endif
9547 #endif
9548 #ifdef TARGET_NR_getdomainname
9549 case TARGET_NR_getdomainname:
9550 goto unimplemented_nowarn;
9551 #endif
9552
9553 #ifdef TARGET_NR_clock_gettime
9554 case TARGET_NR_clock_gettime:
9555 {
9556 struct timespec ts;
9557 ret = get_errno(clock_gettime(arg1, &ts));
9558 if (!is_error(ret)) {
9559 host_to_target_timespec(arg2, &ts);
9560 }
9561 break;
9562 }
9563 #endif
9564 #ifdef TARGET_NR_clock_getres
9565 case TARGET_NR_clock_getres:
9566 {
9567 struct timespec ts;
9568 ret = get_errno(clock_getres(arg1, &ts));
9569 if (!is_error(ret)) {
9570 host_to_target_timespec(arg2, &ts);
9571 }
9572 break;
9573 }
9574 #endif
9575 #ifdef TARGET_NR_clock_nanosleep
9576 case TARGET_NR_clock_nanosleep:
9577 {
9578 struct timespec ts;
9579 target_to_host_timespec(&ts, arg3);
9580 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9581 if (arg4)
9582 host_to_target_timespec(arg4, &ts);
9583
9584 #if defined(TARGET_PPC)
9585 /* clock_nanosleep is odd in that it returns positive errno values.
9586 * On PPC, CR0 bit 3 should be set in such a situation. */
9587 if (ret) {
9588 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9589 }
9590 #endif
9591 break;
9592 }
9593 #endif
9594
9595 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9596 case TARGET_NR_set_tid_address:
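/* g2h() turns the guest address into the corresponding host
 * pointer, so the kernel's later clearing of the child TID word
 * lands in guest-visible memory.
 */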
9597 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9598 break;
9599 #endif
9600
9601 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9602 case TARGET_NR_tkill:
9603 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9604 break;
9605 #endif
9606
9607 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9608 case TARGET_NR_tgkill:
9609 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9610 target_to_host_signal(arg3)));
9611 break;
9612 #endif
9613
9614 #ifdef TARGET_NR_set_robust_list
9615 case TARGET_NR_set_robust_list:
9616 case TARGET_NR_get_robust_list:
9617 /* The ABI for supporting robust futexes has userspace pass
9618 * the kernel a pointer to a linked list which is updated by
9619 * userspace after the syscall; the list is walked by the kernel
9620 * when the thread exits. Since the linked list in QEMU guest
9621 * memory isn't a valid linked list for the host and we have
9622 * no way to reliably intercept the thread-death event, we can't
9623 * support these. Silently return ENOSYS so that guest userspace
9624 * falls back to a non-robust futex implementation (which should
9625 * be OK except in the corner case of the guest crashing while
9626 * holding a mutex that is shared with another process via
9627 * shared memory).
9628 */
9629 goto unimplemented_nowarn;
9630 #endif
9631
9632 #if defined(TARGET_NR_utimensat)
9633 case TARGET_NR_utimensat:
9634 {
9635 struct timespec *tsp, ts[2];
9636 if (!arg3) {
9637 tsp = NULL;
9638 } else {
9639 target_to_host_timespec(ts, arg3);
9640 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9641 tsp = ts;
9642 }
9643 if (!arg2)
9644 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9645 else {
9646 if (!(p = lock_user_string(arg2))) {
9647 ret = -TARGET_EFAULT;
9648 goto fail;
9649 }
9650 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9651 unlock_user(p, arg2, 0);
9652 }
9653 }
9654 break;
9655 #endif
9656 case TARGET_NR_futex:
9657 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9658 break;
9659 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9660 case TARGET_NR_inotify_init:
9661 ret = get_errno(sys_inotify_init());
9662 break;
9663 #endif
9664 #ifdef CONFIG_INOTIFY1
9665 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9666 case TARGET_NR_inotify_init1:
9667 ret = get_errno(sys_inotify_init1(arg1));
9668 break;
9669 #endif
9670 #endif
9671 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9672 case TARGET_NR_inotify_add_watch:
9673 p = lock_user_string(arg2);
9674 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9675 unlock_user(p, arg2, 0);
9676 break;
9677 #endif
9678 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9679 case TARGET_NR_inotify_rm_watch:
9680 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9681 break;
9682 #endif
9683
9684 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9685 case TARGET_NR_mq_open:
9686 {
9687 struct mq_attr posix_mq_attr, *attrp;
9688
9689 p = lock_user_string(arg1 - 1);
9690 if (arg4 != 0) {
9691 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9692 attrp = &posix_mq_attr;
9693 } else {
9694 attrp = 0;
9695 }
9696 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9697 unlock_user (p, arg1, 0);
9698 }
9699 break;
9700
9701 case TARGET_NR_mq_unlink:
9702 p = lock_user_string(arg1 - 1);
9703 ret = get_errno(mq_unlink(p));
9704 unlock_user (p, arg1, 0);
9705 break;
9706
9707 case TARGET_NR_mq_timedsend:
9708 {
9709 struct timespec ts;
9710
9711 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9712 if (arg5 != 0) {
9713 target_to_host_timespec(&ts, arg5);
9714 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9715 host_to_target_timespec(arg5, &ts);
9716 }
9717 else
9718 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9719 unlock_user (p, arg2, arg3);
9720 }
9721 break;
9722
9723 case TARGET_NR_mq_timedreceive:
9724 {
9725 struct timespec ts;
9726 unsigned int prio;
9727
9728 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9729 if (arg5 != 0) {
9730 target_to_host_timespec(&ts, arg5);
9731 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9732 host_to_target_timespec(arg5, &ts);
9733 }
9734 else
9735 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9736 unlock_user (p, arg2, arg3);
9737 if (arg4 != 0)
9738 put_user_u32(prio, arg4);
9739 }
9740 break;
9741
9742 /* Not implemented for now... */
9743 /* case TARGET_NR_mq_notify: */
9744 /* break; */
9745
9746 case TARGET_NR_mq_getsetattr:
9747 {
9748 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
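        /* mq_getsetattr: arg3 optionally receives the current attributes via
         * mq_getattr(), arg2 optionally carries new attributes to apply with
         * mq_setattr(); either guest pointer may be NULL.
         */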
9749 ret = 0;
9750 if (arg3 != 0) {
            ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
9752 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9753 }
9754 if (arg2 != 0) {
9755 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
            ret |= get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                        &posix_mq_attr_out));
9757 }
9758
9759 }
9760 break;
9761 #endif
9762
9763 #ifdef CONFIG_SPLICE
9764 #ifdef TARGET_NR_tee
9765 case TARGET_NR_tee:
9766 {
9767 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9768 }
9769 break;
9770 #endif
9771 #ifdef TARGET_NR_splice
9772 case TARGET_NR_splice:
9773 {
9774 loff_t loff_in, loff_out;
9775 loff_t *ploff_in = NULL, *ploff_out = NULL;
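        /* arg2 and arg4 are optional guest pointers to 64-bit in/out offsets;
         * they are copied into host loff_t variables before the splice() call
         * and written back afterwards so the guest sees the updated positions.
         */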
9776 if (arg2) {
9777 if (get_user_u64(loff_in, arg2)) {
9778 goto efault;
9779 }
9780 ploff_in = &loff_in;
9781 }
9782 if (arg4) {
9783 if (get_user_u64(loff_out, arg4)) {
9784 goto efault;
9785 }
9786 ploff_out = &loff_out;
9787 }
9788 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9789 if (arg2) {
9790 if (put_user_u64(loff_in, arg2)) {
9791 goto efault;
9792 }
9793 }
9794 if (arg4) {
9795 if (put_user_u64(loff_out, arg4)) {
9796 goto efault;
9797 }
9798 }
9799 }
9800 break;
9801 #endif
9802 #ifdef TARGET_NR_vmsplice
9803 case TARGET_NR_vmsplice:
9804 {
9805 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9806 if (vec != NULL) {
9807 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9808 unlock_iovec(vec, arg2, arg3, 0);
9809 } else {
9810 ret = -host_to_target_errno(errno);
9811 }
9812 }
9813 break;
9814 #endif
9815 #endif /* CONFIG_SPLICE */
9816 #ifdef CONFIG_EVENTFD
9817 #if defined(TARGET_NR_eventfd)
9818 case TARGET_NR_eventfd:
9819 ret = get_errno(eventfd(arg1, 0));
9820 fd_trans_unregister(ret);
9821 break;
9822 #endif
9823 #if defined(TARGET_NR_eventfd2)
9824 case TARGET_NR_eventfd2:
9825 {
9826 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
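        /* TARGET_O_NONBLOCK / TARGET_O_CLOEXEC may have different numeric
         * values from the host's O_NONBLOCK / O_CLOEXEC, so the target bits
         * are masked out above and re-added with the host values.
         */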
9827 if (arg2 & TARGET_O_NONBLOCK) {
9828 host_flags |= O_NONBLOCK;
9829 }
9830 if (arg2 & TARGET_O_CLOEXEC) {
9831 host_flags |= O_CLOEXEC;
9832 }
9833 ret = get_errno(eventfd(arg1, host_flags));
9834 fd_trans_unregister(ret);
9835 break;
9836 }
9837 #endif
9838 #endif /* CONFIG_EVENTFD */
9839 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9840 case TARGET_NR_fallocate:
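    /* On 32-bit ABIs the 64-bit offset and length arrive split across pairs
     * of registers and are reassembled by target_offset64() (roughly
     * ((uint64_t)high << 32) | low, with the word order depending on the
     * target's endianness).
     */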
9841 #if TARGET_ABI_BITS == 32
9842 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9843 target_offset64(arg5, arg6)));
9844 #else
9845 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9846 #endif
9847 break;
9848 #endif
9849 #if defined(CONFIG_SYNC_FILE_RANGE)
9850 #if defined(TARGET_NR_sync_file_range)
9851 case TARGET_NR_sync_file_range:
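    /* 32-bit MIPS (o32) passes 64-bit syscall arguments in aligned register
     * pairs, which shifts everything along: the offsets start at arg3 rather
     * than arg2 and the flags end up in arg7.
     */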
9852 #if TARGET_ABI_BITS == 32
9853 #if defined(TARGET_MIPS)
9854 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9855 target_offset64(arg5, arg6), arg7));
9856 #else
9857 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9858 target_offset64(arg4, arg5), arg6));
9859 #endif /* !TARGET_MIPS */
9860 #else
9861 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9862 #endif
9863 break;
9864 #endif
9865 #if defined(TARGET_NR_sync_file_range2)
9866 case TARGET_NR_sync_file_range2:
9867 /* This is like sync_file_range but the arguments are reordered */
9868 #if TARGET_ABI_BITS == 32
9869 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9870 target_offset64(arg5, arg6), arg2));
9871 #else
9872 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9873 #endif
9874 break;
9875 #endif
9876 #endif
9877 #if defined(TARGET_NR_signalfd4)
9878 case TARGET_NR_signalfd4:
9879 ret = do_signalfd4(arg1, arg2, arg4);
9880 break;
9881 #endif
9882 #if defined(TARGET_NR_signalfd)
9883 case TARGET_NR_signalfd:
9884 ret = do_signalfd4(arg1, arg2, 0);
9885 break;
9886 #endif
9887 #if defined(CONFIG_EPOLL)
9888 #if defined(TARGET_NR_epoll_create)
9889 case TARGET_NR_epoll_create:
9890 ret = get_errno(epoll_create(arg1));
9891 break;
9892 #endif
9893 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9894 case TARGET_NR_epoll_create1:
9895 ret = get_errno(epoll_create1(arg1));
9896 break;
9897 #endif
9898 #if defined(TARGET_NR_epoll_ctl)
9899 case TARGET_NR_epoll_ctl:
9900 {
9901 struct epoll_event ep;
9902 struct epoll_event *epp = 0;
9903 if (arg4) {
9904 struct target_epoll_event *target_ep;
9905 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9906 goto efault;
9907 }
9908 ep.events = tswap32(target_ep->events);
9909 /* The epoll_data_t union is just opaque data to the kernel,
9910 * so we transfer all 64 bits across and need not worry what
9911 * actual data type it is.
9912 */
9913 ep.data.u64 = tswap64(target_ep->data.u64);
9914 unlock_user_struct(target_ep, arg4, 0);
9915 epp = &ep;
9916 }
9917 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9918 break;
9919 }
9920 #endif
9921
9922 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9923 #define IMPLEMENT_EPOLL_PWAIT
9924 #endif
9925 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9926 #if defined(TARGET_NR_epoll_wait)
9927 case TARGET_NR_epoll_wait:
9928 #endif
9929 #if defined(IMPLEMENT_EPOLL_PWAIT)
9930 case TARGET_NR_epoll_pwait:
9931 #endif
9932 {
9933 struct target_epoll_event *target_ep;
9934 struct epoll_event *ep;
9935 int epfd = arg1;
9936 int maxevents = arg3;
9937 int timeout = arg4;
9938
9939 target_ep = lock_user(VERIFY_WRITE, arg2,
9940 maxevents * sizeof(struct target_epoll_event), 1);
9941 if (!target_ep) {
9942 goto efault;
9943 }
9944
9945 ep = alloca(maxevents * sizeof(struct epoll_event));
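        /* Note: maxevents comes straight from the guest; a hardened
         * implementation would want to bound it (or fall back to a heap
         * allocation) before handing it to alloca().
         */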
9946
9947 switch (num) {
9948 #if defined(IMPLEMENT_EPOLL_PWAIT)
9949 case TARGET_NR_epoll_pwait:
9950 {
9951 target_sigset_t *target_set;
9952 sigset_t _set, *set = &_set;
9953
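            /* arg5 is the guest signal mask to apply for the duration of the
             * wait (NULL keeps the current mask); it is converted to a host
             * sigset so the host epoll_pwait() can install it atomically.
             */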
9954 if (arg5) {
9955 target_set = lock_user(VERIFY_READ, arg5,
9956 sizeof(target_sigset_t), 1);
9957 if (!target_set) {
9958 unlock_user(target_ep, arg2, 0);
9959 goto efault;
9960 }
9961 target_to_host_sigset(set, target_set);
9962 unlock_user(target_set, arg5, 0);
9963 } else {
9964 set = NULL;
9965 }
9966
9967 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9968 break;
9969 }
9970 #endif
9971 #if defined(TARGET_NR_epoll_wait)
9972 case TARGET_NR_epoll_wait:
9973 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9974 break;
9975 #endif
9976 default:
9977 ret = -TARGET_ENOSYS;
9978 }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
9987 break;
9988 }
9989 #endif
9990 #endif
9991 #ifdef TARGET_NR_prlimit64
9992 case TARGET_NR_prlimit64:
9993 {
9994 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9995 struct target_rlimit64 *target_rnew, *target_rold;
9996 struct host_rlimit64 rnew, rold, *rnewp = 0;
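        /* host_rlimit64 (defined earlier in this file) is a pair of 64-bit
         * fields matching the kernel's struct rlimit64 layout, so only the
         * tswap64() byte-swaps are needed to convert between the guest and
         * host views.
         */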
9997 int resource = target_to_host_resource(arg2);
9998 if (arg3) {
9999 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10000 goto efault;
10001 }
10002 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10003 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10004 unlock_user_struct(target_rnew, arg3, 0);
10005 rnewp = &rnew;
10006 }
10007
10008 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10009 if (!is_error(ret) && arg4) {
10010 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10011 goto efault;
10012 }
10013 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10014 target_rold->rlim_max = tswap64(rold.rlim_max);
10015 unlock_user_struct(target_rold, arg4, 1);
10016 }
10017 break;
10018 }
10019 #endif
10020 #ifdef TARGET_NR_gethostname
10021 case TARGET_NR_gethostname:
10022 {
10023 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10024 if (name) {
10025 ret = get_errno(gethostname(name, arg2));
10026 unlock_user(name, arg1, arg2);
10027 } else {
10028 ret = -TARGET_EFAULT;
10029 }
10030 break;
10031 }
10032 #endif
10033 #ifdef TARGET_NR_atomic_cmpxchg_32
10034 case TARGET_NR_atomic_cmpxchg_32:
10035 {
10036 /* should use start_exclusive from main.c */
10037 abi_ulong mem_value;
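        /* Emulates the atomic_cmpxchg_32 syscall (as used by m68k guests):
         * read the guest word at arg6, compare it with the expected value in
         * arg2 and store arg1 on a match, returning the value that was found.
         * Without the start_exclusive machinery mentioned above this is not
         * actually atomic with respect to other guest CPUs.
         */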
10038 if (get_user_u32(mem_value, arg6)) {
10039 target_siginfo_t info;
10040 info.si_signo = SIGSEGV;
10041 info.si_errno = 0;
10042 info.si_code = TARGET_SEGV_MAPERR;
10043 info._sifields._sigfault._addr = arg6;
10044 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
            break;
        }
10048 if (mem_value == arg2)
10049 put_user_u32(arg1, arg6);
10050 ret = mem_value;
10051 break;
10052 }
10053 #endif
10054 #ifdef TARGET_NR_atomic_barrier
10055 case TARGET_NR_atomic_barrier:
10056 {
        /* Like the kernel implementation and the qemu arm barrier, treat this as a no-op. */
10058 ret = 0;
10059 break;
10060 }
10061 #endif
10062
10063 #ifdef TARGET_NR_timer_create
10064 case TARGET_NR_timer_create:
10065 {
10066 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10067
10068 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10069
10070 int clkid = arg1;
10071 int timer_index = next_free_host_timer();
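        /* Guest timer ids are encoded as TIMER_MAGIC | timer_index, where
         * timer_index selects a slot in the g_posix_timers array; the other
         * timer_* handlers below turn the guest id back into an index with
         * get_timer_id().
         */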
10072
10073 if (timer_index < 0) {
10074 ret = -TARGET_EAGAIN;
10075 } else {
10076 timer_t *phtimer = g_posix_timers + timer_index;
10077
10078 if (arg2) {
10079 phost_sevp = &host_sevp;
10080 ret = target_to_host_sigevent(phost_sevp, arg2);
10081 if (ret != 0) {
10082 break;
10083 }
10084 }
10085
10086 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10087 if (ret) {
10088 phtimer = NULL;
10089 } else {
10090 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10091 goto efault;
10092 }
10093 }
10094 }
10095 break;
10096 }
10097 #endif
10098
10099 #ifdef TARGET_NR_timer_settime
10100 case TARGET_NR_timer_settime:
10101 {
10102 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10103 * struct itimerspec * old_value */
10104 target_timer_t timerid = get_timer_id(arg1);
10105
10106 if (timerid < 0) {
10107 ret = timerid;
10108 } else if (arg3 == 0) {
10109 ret = -TARGET_EINVAL;
10110 } else {
10111 timer_t htimer = g_posix_timers[timerid];
10112 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
10113
            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                      timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
10118 }
10119 break;
10120 }
10121 #endif
10122
10123 #ifdef TARGET_NR_timer_gettime
10124 case TARGET_NR_timer_gettime:
10125 {
10126 /* args: timer_t timerid, struct itimerspec *curr_value */
10127 target_timer_t timerid = get_timer_id(arg1);
10128
10129 if (timerid < 0) {
10130 ret = timerid;
10131 } else if (!arg2) {
10132 ret = -TARGET_EFAULT;
10133 } else {
10134 timer_t htimer = g_posix_timers[timerid];
10135 struct itimerspec hspec;
10136 ret = get_errno(timer_gettime(htimer, &hspec));
10137
10138 if (host_to_target_itimerspec(arg2, &hspec)) {
10139 ret = -TARGET_EFAULT;
10140 }
10141 }
10142 break;
10143 }
10144 #endif
10145
10146 #ifdef TARGET_NR_timer_getoverrun
10147 case TARGET_NR_timer_getoverrun:
10148 {
10149 /* args: timer_t timerid */
10150 target_timer_t timerid = get_timer_id(arg1);
10151
10152 if (timerid < 0) {
10153 ret = timerid;
10154 } else {
10155 timer_t htimer = g_posix_timers[timerid];
10156 ret = get_errno(timer_getoverrun(htimer));
10157 }
10159 break;
10160 }
10161 #endif
10162
10163 #ifdef TARGET_NR_timer_delete
10164 case TARGET_NR_timer_delete:
10165 {
10166 /* args: timer_t timerid */
10167 target_timer_t timerid = get_timer_id(arg1);
10168
10169 if (timerid < 0) {
10170 ret = timerid;
10171 } else {
10172 timer_t htimer = g_posix_timers[timerid];
10173 ret = get_errno(timer_delete(htimer));
10174 g_posix_timers[timerid] = 0;
10175 }
10176 break;
10177 }
10178 #endif
10179
10180 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10181 case TARGET_NR_timerfd_create:
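    /* TFD_NONBLOCK and TFD_CLOEXEC are defined to the same values as
     * O_NONBLOCK and O_CLOEXEC, so the generic fcntl_flags_tbl can be reused
     * to translate the guest flags to host ones.
     */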
10182 ret = get_errno(timerfd_create(arg1,
10183 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
10184 break;
10185 #endif
10186
10187 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10188 case TARGET_NR_timerfd_gettime:
10189 {
10190 struct itimerspec its_curr;
10191
10192 ret = get_errno(timerfd_gettime(arg1, &its_curr));
10193
10194 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
10195 goto efault;
10196 }
10197 }
10198 break;
10199 #endif
10200
10201 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10202 case TARGET_NR_timerfd_settime:
10203 {
10204 struct itimerspec its_new, its_old, *p_new;
10205
10206 if (arg3) {
10207 if (target_to_host_itimerspec(&its_new, arg3)) {
10208 goto efault;
10209 }
10210 p_new = &its_new;
10211 } else {
10212 p_new = NULL;
10213 }
10214
10215 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
10216
10217 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
10218 goto efault;
10219 }
10220 }
10221 break;
10222 #endif
10223
10224 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10225 case TARGET_NR_ioprio_get:
10226 ret = get_errno(ioprio_get(arg1, arg2));
10227 break;
10228 #endif
10229
10230 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10231 case TARGET_NR_ioprio_set:
10232 ret = get_errno(ioprio_set(arg1, arg2, arg3));
10233 break;
10234 #endif
10235
10236 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10237 case TARGET_NR_setns:
10238 ret = get_errno(setns(arg1, arg2));
10239 break;
10240 #endif
10241 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10242 case TARGET_NR_unshare:
10243 ret = get_errno(unshare(arg1));
10244 break;
10245 #endif
10246
10247 default:
10248 unimplemented:
10249 gemu_log("qemu: Unsupported syscall: %d\n", num);
10250 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10251 unimplemented_nowarn:
10252 #endif
10253 ret = -TARGET_ENOSYS;
10254 break;
10255 }
10256 fail:
10257 #ifdef DEBUG
10258 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10259 #endif
10260 if(do_strace)
10261 print_syscall_ret(num, ret);
10262 return ret;
10263 efault:
10264 ret = -TARGET_EFAULT;
10265 goto fail;
10266 }