/*
 * linux-user/syscall.c (QEMU), as of commit
 * "linux-user: Use safe_syscall wrapper for flock".
 */
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/mman.h>
36 #include <sys/swap.h>
37 #include <linux/capability.h>
38 #include <sched.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <sys/poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
78
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
85
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #endif
108 #include <linux/audit.h>
109 #include "linux_loop.h"
110 #include "uname.h"
111
112 #include "qemu.h"
113
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
116
117 //#define DEBUG
118 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
119 * once. This exercises the codepaths for restart.
120 */
121 //#define DEBUG_ERESTARTSYS
122
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
126
127 /* This is the size of the host kernel's sigset_t, needed where we make
128 * direct system calls that take a sigset_t pointer and a size.
129 */
130 #define SIGSET_T_SIZE (_NSIG / 8)
131
132 #undef _syscall0
133 #undef _syscall1
134 #undef _syscall2
135 #undef _syscall3
136 #undef _syscall4
137 #undef _syscall5
138 #undef _syscall6
139
140 #define _syscall0(type,name) \
141 static type name (void) \
142 { \
143 return syscall(__NR_##name); \
144 }
145
146 #define _syscall1(type,name,type1,arg1) \
147 static type name (type1 arg1) \
148 { \
149 return syscall(__NR_##name, arg1); \
150 }
151
152 #define _syscall2(type,name,type1,arg1,type2,arg2) \
153 static type name (type1 arg1,type2 arg2) \
154 { \
155 return syscall(__NR_##name, arg1, arg2); \
156 }
157
158 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
159 static type name (type1 arg1,type2 arg2,type3 arg3) \
160 { \
161 return syscall(__NR_##name, arg1, arg2, arg3); \
162 }
163
164 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
166 { \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
168 }
169
170 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
173 { \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
175 }
176
177
178 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
179 type5,arg5,type6,arg6) \
180 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
181 type6 arg6) \
182 { \
183 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
184 }
185
186
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
198
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 defined(__s390x__)
201 #define __NR__llseek __NR_lseek
202 #endif
203
204 /* Newer kernel ports have llseek() instead of _llseek() */
205 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
206 #define TARGET_NR__llseek TARGET_NR_llseek
207 #endif
208
209 #ifdef __NR_gettid
210 _syscall0(int, gettid)
211 #else
212 /* This is a replacement for the host gettid() and must return a host
213 errno. */
static int gettid(void) {
    /* Fake failure: negative host errno, matching the convention the
     * callers of this host-side helper expect. */
    return -ENOSYS;
}
217 #endif
218 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
219 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
220 #endif
221 #if !defined(__NR_getdents) || \
222 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
223 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
224 #endif
225 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
226 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
227 loff_t *, res, uint, wh);
228 #endif
229 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
230 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
239 const struct timespec *,timeout,int *,uaddr2,int,val3)
240 #endif
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
243 unsigned long *, user_mask_ptr);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
246 unsigned long *, user_mask_ptr);
247 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
248 void *, arg);
249 _syscall2(int, capget, struct __user_cap_header_struct *, header,
250 struct __user_cap_data_struct *, data);
251 _syscall2(int, capset, struct __user_cap_header_struct *, header,
252 struct __user_cap_data_struct *, data);
253 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
254 _syscall2(int, ioprio_get, int, which, int, who)
255 #endif
256 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
257 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
258 #endif
259 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
260 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
261 #endif
262
263 static bitmask_transtbl fcntl_flags_tbl[] = {
264 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
265 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
266 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
267 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
268 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
269 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
270 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
271 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
272 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
273 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
274 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
275 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
276 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
277 #if defined(O_DIRECT)
278 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
279 #endif
280 #if defined(O_NOATIME)
281 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
282 #endif
283 #if defined(O_CLOEXEC)
284 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
285 #endif
286 #if defined(O_PATH)
287 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
288 #endif
289 /* Don't terminate the list prematurely on 64-bit host+guest. */
290 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
291 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
292 #endif
293 { 0, 0, 0, 0 }
294 };
295
296 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
297 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
298 typedef struct TargetFdTrans {
299 TargetFdDataFunc host_to_target_data;
300 TargetFdDataFunc target_to_host_data;
301 TargetFdAddrFunc target_to_host_addr;
302 } TargetFdTrans;
303
304 static TargetFdTrans **target_fd_trans;
305
306 static unsigned int target_fd_max;
307
308 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
309 {
310 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
311 return target_fd_trans[fd]->target_to_host_data;
312 }
313 return NULL;
314 }
315
316 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
317 {
318 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
319 return target_fd_trans[fd]->host_to_target_data;
320 }
321 return NULL;
322 }
323
324 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
325 {
326 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
327 return target_fd_trans[fd]->target_to_host_addr;
328 }
329 return NULL;
330 }
331
/* Associate @trans with file descriptor @fd, growing the lookup table
 * in 64-entry slices as needed.  Entries are pointers, not copies, so
 * @trans must outlive the registration.  Assumes fd >= 0 (callers pass
 * fds returned by successful host syscalls). */
static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;

    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        /* zero the newly added tail so lookups read "no translator" */
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}
346
347 static void fd_trans_unregister(int fd)
348 {
349 if (fd >= 0 && fd < target_fd_max) {
350 target_fd_trans[fd] = NULL;
351 }
352 }
353
354 static void fd_trans_dup(int oldfd, int newfd)
355 {
356 fd_trans_unregister(newfd);
357 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
358 fd_trans_register(newfd, target_fd_trans[oldfd]);
359 }
360 }
361
/* getcwd() wrapper matching the kernel syscall convention: returns the
 * path length *including* the NUL terminator on success, or -1 with
 * errno already set by getcwd() on failure. */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        return -1;
    }
    return strlen(cwd) + 1;
}
370
371 #ifdef TARGET_NR_utimensat
372 #ifdef CONFIG_UTIMENSAT
373 static int sys_utimensat(int dirfd, const char *pathname,
374 const struct timespec times[2], int flags)
375 {
376 if (pathname == NULL)
377 return futimens(dirfd, times);
378 else
379 return utimensat(dirfd, pathname, times, flags);
380 }
381 #elif defined(__NR_utimensat)
382 #define __NR_sys_utimensat __NR_utimensat
383 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
384 const struct timespec *,tsp,int,flags)
385 #else
/* Fallback when neither glibc nor the host kernel provides utimensat:
 * fail like an unimplemented syscall, with errno set for get_errno(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
392 #endif
393 #endif /* TARGET_NR_utimensat */
394
395 #ifdef CONFIG_INOTIFY
396 #include <sys/inotify.h>
397
398 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Host fallback for the guest's inotify_init, via the libc helper. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
403 #endif
404 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Host fallback for the guest's inotify_add_watch, via the libc helper. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
409 #endif
410 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Host fallback for the guest's inotify_rm_watch, via the libc helper. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
415 #endif
416 #ifdef CONFIG_INOTIFY1
417 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Host fallback for the guest's inotify_init1, via the libc helper. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
422 #endif
423 #endif
424 #else
425 /* Userspace can usually survive runtime without inotify */
426 #undef TARGET_NR_inotify_init
427 #undef TARGET_NR_inotify_init1
428 #undef TARGET_NR_inotify_add_watch
429 #undef TARGET_NR_inotify_rm_watch
430 #endif /* CONFIG_INOTIFY */
431
432 #if defined(TARGET_NR_ppoll)
433 #ifndef __NR_ppoll
434 # define __NR_ppoll -1
435 #endif
436 #define __NR_sys_ppoll __NR_ppoll
437 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
438 struct timespec *, timeout, const sigset_t *, sigmask,
439 size_t, sigsetsize)
440 #endif
441
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
445 #endif
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64 {
449 uint64_t rlim_cur;
450 uint64_t rlim_max;
451 };
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453 const struct host_rlimit64 *, new_limit,
454 struct host_rlimit64 *, old_limit)
455 #endif
456
457
458 #if defined(TARGET_NR_timer_create)
459 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers[32] = { 0, } ;
461
/* Claim the first unused slot in g_posix_timers by storing a dummy
 * non-zero handle into it; returns the slot index, or -1 when every
 * slot is taken.  The caller replaces the placeholder with the real
 * timer_t from timer_create().
 * NOTE(review): the test-and-set below is not atomic, so two guest
 * threads could claim the same slot concurrently (see FIXME). */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
474 #endif
475
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
/* On ARM, alignment only applies when the guest uses the EABI. */
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
/* Default: 64-bit syscall arguments may start in any argument slot. */
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
491
492 #define ERRNO_TABLE_SIZE 1200
493
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
497 };
498
499 /*
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
502 */
503 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
504 [EAGAIN] = TARGET_EAGAIN,
505 [EIDRM] = TARGET_EIDRM,
506 [ECHRNG] = TARGET_ECHRNG,
507 [EL2NSYNC] = TARGET_EL2NSYNC,
508 [EL3HLT] = TARGET_EL3HLT,
509 [EL3RST] = TARGET_EL3RST,
510 [ELNRNG] = TARGET_ELNRNG,
511 [EUNATCH] = TARGET_EUNATCH,
512 [ENOCSI] = TARGET_ENOCSI,
513 [EL2HLT] = TARGET_EL2HLT,
514 [EDEADLK] = TARGET_EDEADLK,
515 [ENOLCK] = TARGET_ENOLCK,
516 [EBADE] = TARGET_EBADE,
517 [EBADR] = TARGET_EBADR,
518 [EXFULL] = TARGET_EXFULL,
519 [ENOANO] = TARGET_ENOANO,
520 [EBADRQC] = TARGET_EBADRQC,
521 [EBADSLT] = TARGET_EBADSLT,
522 [EBFONT] = TARGET_EBFONT,
523 [ENOSTR] = TARGET_ENOSTR,
524 [ENODATA] = TARGET_ENODATA,
525 [ETIME] = TARGET_ETIME,
526 [ENOSR] = TARGET_ENOSR,
527 [ENONET] = TARGET_ENONET,
528 [ENOPKG] = TARGET_ENOPKG,
529 [EREMOTE] = TARGET_EREMOTE,
530 [ENOLINK] = TARGET_ENOLINK,
531 [EADV] = TARGET_EADV,
532 [ESRMNT] = TARGET_ESRMNT,
533 [ECOMM] = TARGET_ECOMM,
534 [EPROTO] = TARGET_EPROTO,
535 [EDOTDOT] = TARGET_EDOTDOT,
536 [EMULTIHOP] = TARGET_EMULTIHOP,
537 [EBADMSG] = TARGET_EBADMSG,
538 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
539 [EOVERFLOW] = TARGET_EOVERFLOW,
540 [ENOTUNIQ] = TARGET_ENOTUNIQ,
541 [EBADFD] = TARGET_EBADFD,
542 [EREMCHG] = TARGET_EREMCHG,
543 [ELIBACC] = TARGET_ELIBACC,
544 [ELIBBAD] = TARGET_ELIBBAD,
545 [ELIBSCN] = TARGET_ELIBSCN,
546 [ELIBMAX] = TARGET_ELIBMAX,
547 [ELIBEXEC] = TARGET_ELIBEXEC,
548 [EILSEQ] = TARGET_EILSEQ,
549 [ENOSYS] = TARGET_ENOSYS,
550 [ELOOP] = TARGET_ELOOP,
551 [ERESTART] = TARGET_ERESTART,
552 [ESTRPIPE] = TARGET_ESTRPIPE,
553 [ENOTEMPTY] = TARGET_ENOTEMPTY,
554 [EUSERS] = TARGET_EUSERS,
555 [ENOTSOCK] = TARGET_ENOTSOCK,
556 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
557 [EMSGSIZE] = TARGET_EMSGSIZE,
558 [EPROTOTYPE] = TARGET_EPROTOTYPE,
559 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
560 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
561 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
562 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
563 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
564 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
565 [EADDRINUSE] = TARGET_EADDRINUSE,
566 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
567 [ENETDOWN] = TARGET_ENETDOWN,
568 [ENETUNREACH] = TARGET_ENETUNREACH,
569 [ENETRESET] = TARGET_ENETRESET,
570 [ECONNABORTED] = TARGET_ECONNABORTED,
571 [ECONNRESET] = TARGET_ECONNRESET,
572 [ENOBUFS] = TARGET_ENOBUFS,
573 [EISCONN] = TARGET_EISCONN,
574 [ENOTCONN] = TARGET_ENOTCONN,
575 [EUCLEAN] = TARGET_EUCLEAN,
576 [ENOTNAM] = TARGET_ENOTNAM,
577 [ENAVAIL] = TARGET_ENAVAIL,
578 [EISNAM] = TARGET_EISNAM,
579 [EREMOTEIO] = TARGET_EREMOTEIO,
580 [ESHUTDOWN] = TARGET_ESHUTDOWN,
581 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
582 [ETIMEDOUT] = TARGET_ETIMEDOUT,
583 [ECONNREFUSED] = TARGET_ECONNREFUSED,
584 [EHOSTDOWN] = TARGET_EHOSTDOWN,
585 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
586 [EALREADY] = TARGET_EALREADY,
587 [EINPROGRESS] = TARGET_EINPROGRESS,
588 [ESTALE] = TARGET_ESTALE,
589 [ECANCELED] = TARGET_ECANCELED,
590 [ENOMEDIUM] = TARGET_ENOMEDIUM,
591 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
592 #ifdef ENOKEY
593 [ENOKEY] = TARGET_ENOKEY,
594 #endif
595 #ifdef EKEYEXPIRED
596 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
597 #endif
598 #ifdef EKEYREVOKED
599 [EKEYREVOKED] = TARGET_EKEYREVOKED,
600 #endif
601 #ifdef EKEYREJECTED
602 [EKEYREJECTED] = TARGET_EKEYREJECTED,
603 #endif
604 #ifdef EOWNERDEAD
605 [EOWNERDEAD] = TARGET_EOWNERDEAD,
606 #endif
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
609 #endif
610 };
611
612 static inline int host_to_target_errno(int err)
613 {
614 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
615 host_to_target_errno_table[err]) {
616 return host_to_target_errno_table[err];
617 }
618 return err;
619 }
620
621 static inline int target_to_host_errno(int err)
622 {
623 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
624 target_to_host_errno_table[err]) {
625 return target_to_host_errno_table[err];
626 }
627 return err;
628 }
629
630 static inline abi_long get_errno(abi_long ret)
631 {
632 if (ret == -1)
633 return -host_to_target_errno(errno);
634 else
635 return ret;
636 }
637
/* True when @ret encodes an error: the kernel convention reserves the
 * range [-4096, -1] (as unsigned, the top 4096 values) for errnos. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
642
643 char *target_strerror(int err)
644 {
645 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
646 return NULL;
647 }
648 return strerror(target_to_host_errno(err));
649 }
650
651 #define safe_syscall0(type, name) \
652 static type safe_##name(void) \
653 { \
654 return safe_syscall(__NR_##name); \
655 }
656
657 #define safe_syscall1(type, name, type1, arg1) \
658 static type safe_##name(type1 arg1) \
659 { \
660 return safe_syscall(__NR_##name, arg1); \
661 }
662
663 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
664 static type safe_##name(type1 arg1, type2 arg2) \
665 { \
666 return safe_syscall(__NR_##name, arg1, arg2); \
667 }
668
669 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
671 { \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
673 }
674
675 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
676 type4, arg4) \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
678 { \
679 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
680 }
681
682 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
683 type4, arg4, type5, arg5) \
684 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
685 type5 arg5) \
686 { \
687 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
688 }
689
690 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
691 type4, arg4, type5, arg5, type6, arg6) \
692 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
693 type5 arg5, type6 arg6) \
694 { \
695 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
696 }
697
698 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
699 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
700 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
701 int, flags, mode_t, mode)
702 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
703 struct rusage *, rusage)
704 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
705 int, options, struct rusage *, rusage)
706 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
707 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
708 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
709 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
710 const struct timespec *,timeout,int *,uaddr2,int,val3)
711 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
712 safe_syscall2(int, kill, pid_t, pid, int, sig)
713 safe_syscall2(int, tkill, int, tid, int, sig)
714 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
715 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
716 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
717 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
718 socklen_t, addrlen)
719 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
720 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
721 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
722 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
723 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
724 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
725 safe_syscall2(int, flock, int, fd, int, operation)
726 #ifdef __NR_msgsnd
727 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
728 int, flags)
729 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
730 long, msgtype, int, flags)
731 #else
732 /* This host kernel architecture uses a single ipc syscall; fake up
733 * wrappers for the sub-operations to hide this implementation detail.
734 * Annoyingly we can't include linux/ipc.h to get the constant definitions
735 * for the call parameter because some structs in there conflict with the
736 * sys/ipc.h ones. So we just define them here, and rely on them being
737 * the same for all host architectures.
738 */
739 #define Q_MSGSND 11
740 #define Q_MSGRCV 12
741 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
742
743 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
744 void *, ptr, long, fifth)
/* msgsnd expressed through the host's multiplexed ipc syscall
 * (call version 0, per Q_IPCCALL's VERSION << 16 encoding). */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv expressed through the host's multiplexed ipc syscall; call
 * version 1 passes msgtype as the sixth argument directly. */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
753 #endif
754 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
755 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
756 size_t, len, unsigned, prio, const struct timespec *, timeout)
757 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
758 size_t, len, unsigned *, prio, const struct timespec *, timeout)
759 #endif
760
761 static inline int host_to_target_sock_type(int host_type)
762 {
763 int target_type;
764
765 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
766 case SOCK_DGRAM:
767 target_type = TARGET_SOCK_DGRAM;
768 break;
769 case SOCK_STREAM:
770 target_type = TARGET_SOCK_STREAM;
771 break;
772 default:
773 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
774 break;
775 }
776
777 #if defined(SOCK_CLOEXEC)
778 if (host_type & SOCK_CLOEXEC) {
779 target_type |= TARGET_SOCK_CLOEXEC;
780 }
781 #endif
782
783 #if defined(SOCK_NONBLOCK)
784 if (host_type & SOCK_NONBLOCK) {
785 target_type |= TARGET_SOCK_NONBLOCK;
786 }
787 #endif
788
789 return target_type;
790 }
791
792 static abi_ulong target_brk;
793 static abi_ulong target_original_brk;
794 static abi_ulong brk_page;
795
/* Record the guest's initial program break as established by the
 * loader.  Both the break and brk_page (the end of the host pages
 * already backing the heap) are host-page aligned so do_brk() can
 * grow the heap with whole-page mappings. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
801
802 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
803 #define DEBUGF_BRK(message, args...)
804
/* Implement the guest's brk(2).  do_brk() must return target values and
 * target errnos.  Heap pages are reserved via target_mmap() in whole
 * host pages; brk_page tracks the end of the reserved region. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break unchanged. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break; like the kernel, report
     * the (unchanged) current break rather than an error. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
882
/* Unpack a guest fd_set (stored as an array of abi_ulong bitmaps
 * covering @n descriptors) into the host fd_set @fds.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory at
 * @target_fds_addr cannot be read. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* number of abi_ulong words needed for n bits, rounded up */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
914
915 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
916 abi_ulong target_fds_addr,
917 int n)
918 {
919 if (target_fds_addr) {
920 if (copy_from_user_fdset(fds, target_fds_addr, n))
921 return -TARGET_EFAULT;
922 *fds_ptr = fds;
923 } else {
924 *fds_ptr = NULL;
925 }
926 return 0;
927 }
928
/* Pack the host fd_set @fds (covering @n descriptors) into the guest's
 * abi_ulong-bitmap representation at @target_fds_addr.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory cannot be
 * written. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    /* number of abi_ulong words needed for n bits, rounded up */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        /* rebuild one abi_ulong bitmap word bit by bit */
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
958
959 #if defined(__alpha__)
960 #define HOST_HZ 1024
961 #else
962 #define HOST_HZ 100
963 #endif
964
/* Convert a tick count from the host's clock rate (HOST_HZ) to the
 * guest's (TARGET_HZ).  When the rates match this is the identity;
 * otherwise the 64-bit intermediate avoids overflow in the rescale. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
973
/* Copy a host struct rusage into the guest's target_rusage at
 * @target_addr, byte-swapping every field for the guest's endianness.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1003
1004 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1005 {
1006 abi_ulong target_rlim_swap;
1007 rlim_t result;
1008
1009 target_rlim_swap = tswapal(target_rlim);
1010 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1011 return RLIM_INFINITY;
1012
1013 result = target_rlim_swap;
1014 if (target_rlim_swap != (rlim_t)result)
1015 return RLIM_INFINITY;
1016
1017 return result;
1018 }
1019
1020 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1021 {
1022 abi_ulong target_rlim_swap;
1023 abi_ulong result;
1024
1025 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1026 target_rlim_swap = TARGET_RLIM_INFINITY;
1027 else
1028 target_rlim_swap = rlim;
1029 result = tswapal(target_rlim_swap);
1030
1031 return result;
1032 }
1033
/* Map a guest RLIMIT_* resource code to the host's numbering.
 * Unknown codes are passed through unchanged and left for the host
 * syscall to reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1071
1072 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1073 abi_ulong target_tv_addr)
1074 {
1075 struct target_timeval *target_tv;
1076
1077 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1078 return -TARGET_EFAULT;
1079
1080 __get_user(tv->tv_sec, &target_tv->tv_sec);
1081 __get_user(tv->tv_usec, &target_tv->tv_usec);
1082
1083 unlock_user_struct(target_tv, target_tv_addr, 0);
1084
1085 return 0;
1086 }
1087
1088 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1089 const struct timeval *tv)
1090 {
1091 struct target_timeval *target_tv;
1092
1093 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1094 return -TARGET_EFAULT;
1095
1096 __put_user(tv->tv_sec, &target_tv->tv_sec);
1097 __put_user(tv->tv_usec, &target_tv->tv_usec);
1098
1099 unlock_user_struct(target_tv, target_tv_addr, 1);
1100
1101 return 0;
1102 }
1103
1104 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1105 abi_ulong target_tz_addr)
1106 {
1107 struct target_timezone *target_tz;
1108
1109 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1110 return -TARGET_EFAULT;
1111 }
1112
1113 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1114 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1115
1116 unlock_user_struct(target_tz, target_tz_addr, 0);
1117
1118 return 0;
1119 }
1120
1121 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1122 #include <mqueue.h>
1123
1124 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1125 abi_ulong target_mq_attr_addr)
1126 {
1127 struct target_mq_attr *target_mq_attr;
1128
1129 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1130 target_mq_attr_addr, 1))
1131 return -TARGET_EFAULT;
1132
1133 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1134 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1135 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1136 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1137
1138 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1139
1140 return 0;
1141 }
1142
1143 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1144 const struct mq_attr *attr)
1145 {
1146 struct target_mq_attr *target_mq_attr;
1147
1148 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1149 target_mq_attr_addr, 0))
1150 return -TARGET_EFAULT;
1151
1152 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1153 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1154 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1155 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1156
1157 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1158
1159 return 0;
1160 }
1161 #endif
1162
1163 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1164 /* do_select() must return target values and target errnos. */
/* Emulate select(2) for the guest: copy in the three optional fd_sets
 * and the optional timeout, run the host pselect6, then copy the
 * (kernel-modified) fd_sets and remaining timeout back out.
 * Must return target values and target errnos.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL fd_set pointer for the host. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select() uses a timeval but pselect6 wants a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux updates the timeout with the time remaining; mirror
         * that back into the guest's timeval.
         */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1220 #endif
1221
/* Thin wrapper around host pipe2(); returns -ENOSYS when the build
 * host has no pipe2 support so the caller can report it to the guest.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1230
/* Implement pipe()/pipe2() for the guest.  Uses pipe2() when flags are
 * given, otherwise plain pipe().  is_pipe2 distinguishes which guest
 * syscall was invoked, since some targets return the fds in registers
 * for pipe() but not for pipe2().
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd is returned in a second register; first fd is the
         * syscall return value.
         */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default ABI: write both fds into the guest's int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1264
1265 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1266 abi_ulong target_addr,
1267 socklen_t len)
1268 {
1269 struct target_ip_mreqn *target_smreqn;
1270
1271 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1272 if (!target_smreqn)
1273 return -TARGET_EFAULT;
1274 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1275 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1276 if (len == sizeof(struct target_ip_mreqn))
1277 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1278 unlock_user(target_smreqn, target_addr, 0);
1279
1280 return 0;
1281 }
1282
/* Convert a guest sockaddr of the given length to a host sockaddr.
 * Dispatches to a per-fd translator when one is registered (e.g. for
 * netlink sockets); otherwise copies the bytes and fixes up
 * family-specific fields.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Include the terminating NUL if the given length stopped
             * just short of it.
             */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    /* Fix up fields that are stored in host byte order in the kernel
     * ABI but arrived in guest byte order.
     */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1339
1340 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1341 struct sockaddr *addr,
1342 socklen_t len)
1343 {
1344 struct target_sockaddr *target_saddr;
1345
1346 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1347 if (!target_saddr)
1348 return -TARGET_EFAULT;
1349 memcpy(target_saddr, addr, len);
1350 target_saddr->sa_family = tswap16(addr->sa_family);
1351 if (addr->sa_family == AF_NETLINK) {
1352 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1353 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1354 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1355 }
1356 unlock_user(target_saddr, target_addr, len);
1357
1358 return 0;
1359 }
1360
/* Convert the guest's control-message (cmsg) buffer attached to
 * target_msgh into host form in msgh.  The host cmsg area in msgh must
 * already have been allocated by the caller.  Handles SCM_RIGHTS and
 * SCM_CREDENTIALS specially; other payload types are copied verbatim
 * with a warning.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest cmsg (header excluded). */
        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptors: each int is byte-swapped individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1442
/* Convert the host control-message (cmsg) buffer in msgh back into the
 * guest's buffer referenced by target_msgh.  Truncation caused by a
 * too-small guest buffer is reported to the guest via MSG_CTRUNC, as
 * the kernel does.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length (header excluded). */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        /* Not enough room left in the guest buffer: truncate. */
        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any excess guest
             * space so no host memory leaks to the guest.
             */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1586
/* Byte-swap every field of a netlink message header in place.
 * tswap* is its own inverse, so this works for both directions
 * (host-to-target and target-to-host).
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1595
/* Walk a buffer of host-order netlink messages, converting each payload
 * with host_to_target_nlmsg and then byte-swapping the header to guest
 * order.  Note the header must be swapped *after* the payload callback,
 * which needs host-order length/type fields.  Stops at NLMSG_DONE or
 * NLMSG_ERROR, or when a length field is inconsistent with the buffer.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Save the host-order length before the header is swapped below. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
1640
1641 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1642 size_t len,
1643 abi_long (*target_to_host_nlmsg)
1644 (struct nlmsghdr *))
1645 {
1646 int ret;
1647
1648 while (len > sizeof(struct nlmsghdr)) {
1649 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1650 tswap32(nlh->nlmsg_len) > len) {
1651 break;
1652 }
1653 tswap_nlmsghdr(nlh);
1654 switch (nlh->nlmsg_type) {
1655 case NLMSG_DONE:
1656 return 0;
1657 case NLMSG_NOOP:
1658 break;
1659 case NLMSG_ERROR:
1660 {
1661 struct nlmsgerr *e = NLMSG_DATA(nlh);
1662 e->error = tswap32(e->error);
1663 tswap_nlmsghdr(&e->msg);
1664 }
1665 default:
1666 ret = target_to_host_nlmsg(nlh);
1667 if (ret < 0) {
1668 return ret;
1669 }
1670 }
1671 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1672 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1673 }
1674 return 0;
1675 }
1676
1677 #ifdef CONFIG_RTNETLINK
/* Walk a chain of host-order rtattrs, converting each payload with the
 * callback (which needs the host-order header) and then byte-swapping
 * the header to guest order.  The host-order rta_len is saved first so
 * the chain can still be advanced after the swap.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
1703
/* Byte-swap the payload of one IFLA_* link attribute to guest order.
 * Attributes are grouped by payload type; byte streams, strings and
 * single bytes need no conversion.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;

    switch (rtattr->rta_type) {
    /* binary stream */
    case IFLA_ADDRESS:
    case IFLA_BROADCAST:
    /* string */
    case IFLA_IFNAME:
    case IFLA_QDISC:
        break;
    /* uint8_t */
    case IFLA_OPERSTATE:
    case IFLA_LINKMODE:
    case IFLA_CARRIER:
    case IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case IFLA_MTU:
    case IFLA_LINK:
    case IFLA_WEIGHT:
    case IFLA_TXQLEN:
    case IFLA_CARRIER_CHANGES:
    case IFLA_NUM_RX_QUEUES:
    case IFLA_NUM_TX_QUEUES:
    case IFLA_PROMISCUITY:
    case IFLA_EXT_MASK:
    case IFLA_LINK_NETNSID:
    case IFLA_GROUP:
    case IFLA_MASTER:
    case IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case IFLA_AF_SPEC:
    case IFLA_LINKINFO:
        /* FIXME: implement nested type */
        gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
        break;
    default:
        gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1828
/* Byte-swap the payload of one IFA_* address attribute to guest order.
 * Family-dependent binary payloads and strings are left untouched.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): for AF_INET, IFA_BROADCAST carries an address in
         * network byte order like IFA_ADDRESS; swapping it as a host u32
         * looks questionable -- confirm against the kernel's rtnetlink
         * encoding before relying on it.
         */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1862
/* Byte-swap the payload of one RTA_* route attribute to guest order.
 * Addresses (family-dependent binary data) are left untouched.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1885
/* Convert all IFLA_* attributes of a link (RTM_*LINK) message to guest order. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
1892
/* Convert all IFA_* attributes of an address (RTM_*ADDR) message to guest order. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
1899
/* Convert all RTA_* attributes of a route (RTM_*ROUTE) message to guest order. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
1906
/* Convert the payload of one host-order NETLINK_ROUTE message (link,
 * address or route family) to guest order, including its attached
 * rtattr chain.  Unknown message types yield -TARGET_EINVAL.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        host_to_target_link_rtattr(IFLA_RTA(ifi),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        host_to_target_addr_rtattr(IFA_RTA(ifa),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        host_to_target_route_rtattr(RTM_RTA(rtm),
                                    nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
1948
/* Convert a whole buffer of NETLINK_ROUTE messages from host to guest order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
1954
/* Walk a chain of guest-order rtattrs, byte-swapping each header to
 * host order before invoking the payload-conversion callback.  Stops
 * when a length field is inconsistent with the remaining buffer.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
1979
/* Convert the payload of one guest IFLA_* link attribute to host order.
 * No attribute types are converted yet; everything is logged as unknown.
 */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1989
/* Convert the payload of one guest IFA_* address attribute to host order.
 * Addresses are binary data (network byte order) and need no swap.
 */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2003
/* Convert the payload of one guest RTA_* route attribute to host order.
 * Addresses are binary data (network byte order) and need no swap.
 */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2024
/* Convert all IFLA_* attributes of a guest link message to host order. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
2031
/* Convert all IFA_* attributes of a guest address message to host order. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
2038
/* Convert all RTA_* attributes of a guest route message to host order. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
2045
/* Convert the body of one routing-netlink message (header already
 * swapped by the caller) from target to host byte order, including
 * the trailing rtattr list.  Returns 0, or -TARGET_EOPNOTSUPP for
 * message types we do not know how to convert. */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        /* GET requests carry no payload fields that need swapping */
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        /* attributes start right after the fixed ifinfomsg */
        target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                    NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2087
/* Convert a whole buffer of NETLINK_ROUTE messages, target -> host. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2092 #endif /* CONFIG_RTNETLINK */
2093
2094 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2095 {
2096 switch (nlh->nlmsg_type) {
2097 default:
2098 gemu_log("Unknown host audit message type %d\n",
2099 nlh->nlmsg_type);
2100 return -TARGET_EINVAL;
2101 }
2102 return 0;
2103 }
2104
/* Convert a whole buffer of NETLINK_AUDIT messages, host -> target. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2110
2111 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2112 {
2113 switch (nlh->nlmsg_type) {
2114 case AUDIT_USER:
2115 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2116 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2117 break;
2118 default:
2119 gemu_log("Unknown target audit message type %d\n",
2120 nlh->nlmsg_type);
2121 return -TARGET_EINVAL;
2122 }
2123
2124 return 0;
2125 }
2126
/* Convert a whole buffer of NETLINK_AUDIT messages, target -> host. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2131
2132 /* do_setsockopt() Must return target values and target errnos. */
2133 static abi_long do_setsockopt(int sockfd, int level, int optname,
2134 abi_ulong optval_addr, socklen_t optlen)
2135 {
2136 abi_long ret;
2137 int val;
2138 struct ip_mreqn *ip_mreq;
2139 struct ip_mreq_source *ip_mreq_source;
2140
2141 switch(level) {
2142 case SOL_TCP:
2143 /* TCP options all take an 'int' value. */
2144 if (optlen < sizeof(uint32_t))
2145 return -TARGET_EINVAL;
2146
2147 if (get_user_u32(val, optval_addr))
2148 return -TARGET_EFAULT;
2149 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2150 break;
2151 case SOL_IP:
2152 switch(optname) {
2153 case IP_TOS:
2154 case IP_TTL:
2155 case IP_HDRINCL:
2156 case IP_ROUTER_ALERT:
2157 case IP_RECVOPTS:
2158 case IP_RETOPTS:
2159 case IP_PKTINFO:
2160 case IP_MTU_DISCOVER:
2161 case IP_RECVERR:
2162 case IP_RECVTOS:
2163 #ifdef IP_FREEBIND
2164 case IP_FREEBIND:
2165 #endif
2166 case IP_MULTICAST_TTL:
2167 case IP_MULTICAST_LOOP:
2168 val = 0;
2169 if (optlen >= sizeof(uint32_t)) {
2170 if (get_user_u32(val, optval_addr))
2171 return -TARGET_EFAULT;
2172 } else if (optlen >= 1) {
2173 if (get_user_u8(val, optval_addr))
2174 return -TARGET_EFAULT;
2175 }
2176 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2177 break;
2178 case IP_ADD_MEMBERSHIP:
2179 case IP_DROP_MEMBERSHIP:
2180 if (optlen < sizeof (struct target_ip_mreq) ||
2181 optlen > sizeof (struct target_ip_mreqn))
2182 return -TARGET_EINVAL;
2183
2184 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2185 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2186 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2187 break;
2188
2189 case IP_BLOCK_SOURCE:
2190 case IP_UNBLOCK_SOURCE:
2191 case IP_ADD_SOURCE_MEMBERSHIP:
2192 case IP_DROP_SOURCE_MEMBERSHIP:
2193 if (optlen != sizeof (struct target_ip_mreq_source))
2194 return -TARGET_EINVAL;
2195
2196 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2197 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2198 unlock_user (ip_mreq_source, optval_addr, 0);
2199 break;
2200
2201 default:
2202 goto unimplemented;
2203 }
2204 break;
2205 case SOL_IPV6:
2206 switch (optname) {
2207 case IPV6_MTU_DISCOVER:
2208 case IPV6_MTU:
2209 case IPV6_V6ONLY:
2210 case IPV6_RECVPKTINFO:
2211 val = 0;
2212 if (optlen < sizeof(uint32_t)) {
2213 return -TARGET_EINVAL;
2214 }
2215 if (get_user_u32(val, optval_addr)) {
2216 return -TARGET_EFAULT;
2217 }
2218 ret = get_errno(setsockopt(sockfd, level, optname,
2219 &val, sizeof(val)));
2220 break;
2221 default:
2222 goto unimplemented;
2223 }
2224 break;
2225 case SOL_RAW:
2226 switch (optname) {
2227 case ICMP_FILTER:
2228 /* struct icmp_filter takes an u32 value */
2229 if (optlen < sizeof(uint32_t)) {
2230 return -TARGET_EINVAL;
2231 }
2232
2233 if (get_user_u32(val, optval_addr)) {
2234 return -TARGET_EFAULT;
2235 }
2236 ret = get_errno(setsockopt(sockfd, level, optname,
2237 &val, sizeof(val)));
2238 break;
2239
2240 default:
2241 goto unimplemented;
2242 }
2243 break;
2244 case TARGET_SOL_SOCKET:
2245 switch (optname) {
2246 case TARGET_SO_RCVTIMEO:
2247 {
2248 struct timeval tv;
2249
2250 optname = SO_RCVTIMEO;
2251
2252 set_timeout:
2253 if (optlen != sizeof(struct target_timeval)) {
2254 return -TARGET_EINVAL;
2255 }
2256
2257 if (copy_from_user_timeval(&tv, optval_addr)) {
2258 return -TARGET_EFAULT;
2259 }
2260
2261 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2262 &tv, sizeof(tv)));
2263 return ret;
2264 }
2265 case TARGET_SO_SNDTIMEO:
2266 optname = SO_SNDTIMEO;
2267 goto set_timeout;
2268 case TARGET_SO_ATTACH_FILTER:
2269 {
2270 struct target_sock_fprog *tfprog;
2271 struct target_sock_filter *tfilter;
2272 struct sock_fprog fprog;
2273 struct sock_filter *filter;
2274 int i;
2275
2276 if (optlen != sizeof(*tfprog)) {
2277 return -TARGET_EINVAL;
2278 }
2279 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2280 return -TARGET_EFAULT;
2281 }
2282 if (!lock_user_struct(VERIFY_READ, tfilter,
2283 tswapal(tfprog->filter), 0)) {
2284 unlock_user_struct(tfprog, optval_addr, 1);
2285 return -TARGET_EFAULT;
2286 }
2287
2288 fprog.len = tswap16(tfprog->len);
2289 filter = g_try_new(struct sock_filter, fprog.len);
2290 if (filter == NULL) {
2291 unlock_user_struct(tfilter, tfprog->filter, 1);
2292 unlock_user_struct(tfprog, optval_addr, 1);
2293 return -TARGET_ENOMEM;
2294 }
2295 for (i = 0; i < fprog.len; i++) {
2296 filter[i].code = tswap16(tfilter[i].code);
2297 filter[i].jt = tfilter[i].jt;
2298 filter[i].jf = tfilter[i].jf;
2299 filter[i].k = tswap32(tfilter[i].k);
2300 }
2301 fprog.filter = filter;
2302
2303 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2304 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2305 g_free(filter);
2306
2307 unlock_user_struct(tfilter, tfprog->filter, 1);
2308 unlock_user_struct(tfprog, optval_addr, 1);
2309 return ret;
2310 }
2311 case TARGET_SO_BINDTODEVICE:
2312 {
2313 char *dev_ifname, *addr_ifname;
2314
2315 if (optlen > IFNAMSIZ - 1) {
2316 optlen = IFNAMSIZ - 1;
2317 }
2318 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2319 if (!dev_ifname) {
2320 return -TARGET_EFAULT;
2321 }
2322 optname = SO_BINDTODEVICE;
2323 addr_ifname = alloca(IFNAMSIZ);
2324 memcpy(addr_ifname, dev_ifname, optlen);
2325 addr_ifname[optlen] = 0;
2326 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2327 addr_ifname, optlen));
2328 unlock_user (dev_ifname, optval_addr, 0);
2329 return ret;
2330 }
2331 /* Options with 'int' argument. */
2332 case TARGET_SO_DEBUG:
2333 optname = SO_DEBUG;
2334 break;
2335 case TARGET_SO_REUSEADDR:
2336 optname = SO_REUSEADDR;
2337 break;
2338 case TARGET_SO_TYPE:
2339 optname = SO_TYPE;
2340 break;
2341 case TARGET_SO_ERROR:
2342 optname = SO_ERROR;
2343 break;
2344 case TARGET_SO_DONTROUTE:
2345 optname = SO_DONTROUTE;
2346 break;
2347 case TARGET_SO_BROADCAST:
2348 optname = SO_BROADCAST;
2349 break;
2350 case TARGET_SO_SNDBUF:
2351 optname = SO_SNDBUF;
2352 break;
2353 case TARGET_SO_SNDBUFFORCE:
2354 optname = SO_SNDBUFFORCE;
2355 break;
2356 case TARGET_SO_RCVBUF:
2357 optname = SO_RCVBUF;
2358 break;
2359 case TARGET_SO_RCVBUFFORCE:
2360 optname = SO_RCVBUFFORCE;
2361 break;
2362 case TARGET_SO_KEEPALIVE:
2363 optname = SO_KEEPALIVE;
2364 break;
2365 case TARGET_SO_OOBINLINE:
2366 optname = SO_OOBINLINE;
2367 break;
2368 case TARGET_SO_NO_CHECK:
2369 optname = SO_NO_CHECK;
2370 break;
2371 case TARGET_SO_PRIORITY:
2372 optname = SO_PRIORITY;
2373 break;
2374 #ifdef SO_BSDCOMPAT
2375 case TARGET_SO_BSDCOMPAT:
2376 optname = SO_BSDCOMPAT;
2377 break;
2378 #endif
2379 case TARGET_SO_PASSCRED:
2380 optname = SO_PASSCRED;
2381 break;
2382 case TARGET_SO_PASSSEC:
2383 optname = SO_PASSSEC;
2384 break;
2385 case TARGET_SO_TIMESTAMP:
2386 optname = SO_TIMESTAMP;
2387 break;
2388 case TARGET_SO_RCVLOWAT:
2389 optname = SO_RCVLOWAT;
2390 break;
2391 break;
2392 default:
2393 goto unimplemented;
2394 }
2395 if (optlen < sizeof(uint32_t))
2396 return -TARGET_EINVAL;
2397
2398 if (get_user_u32(val, optval_addr))
2399 return -TARGET_EFAULT;
2400 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2401 break;
2402 default:
2403 unimplemented:
2404 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2405 ret = -TARGET_ENOPROTOOPT;
2406 }
2407 return ret;
2408 }
2409
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates the guest (level, optname) into a host getsockopt() call
 * and writes the result (and the updated length) back to guest memory.
 * Most options funnel through the shared 'int_case' path, which reads
 * an int from the kernel and stores either 4 bytes or 1 byte depending
 * on the guest-supplied length. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* copy at most what the guest asked for */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* NOTE(review): unknown target optnames fall through with
             * the unconverted value — presumably relying on target and
             * host numbering agreeing; confirm per target. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* lv is the in/out buffer size for the host call; socklen_t and
         * int have the same size here */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* small value and short guest buffer: store one byte */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2597
/* Lock a guest iovec array (count entries at target_addr) into host
 * memory and return a freshly allocated host iovec array; release with
 * unlock_iovec().  'type' is VERIFY_READ/VERIFY_WRITE, 'copy' controls
 * whether buffer contents are copied in.
 *
 * On failure returns NULL with host errno set.  Kernel-like semantics:
 * count == 0 succeeds (NULL, errno 0); a bad *first* buffer faults,
 * while later bad buffers yield a partial transfer via zero-length
 * entries; the total length is clamped to max_len. */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* clamp so the running total never exceeds max_len */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* unwind: release every buffer locked so far (nonzero lengths only) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2685
/* Release the buffers locked by lock_iovec() and free the host iovec
 * array.  When 'copy' is set the locked contents are written back to
 * guest memory (i.e. after a read-style operation). */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            /* a negative length is where lock_iovec() stopped */
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
2708
2709 static inline int target_to_host_sock_type(int *type)
2710 {
2711 int host_type = 0;
2712 int target_type = *type;
2713
2714 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2715 case TARGET_SOCK_DGRAM:
2716 host_type = SOCK_DGRAM;
2717 break;
2718 case TARGET_SOCK_STREAM:
2719 host_type = SOCK_STREAM;
2720 break;
2721 default:
2722 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2723 break;
2724 }
2725 if (target_type & TARGET_SOCK_CLOEXEC) {
2726 #if defined(SOCK_CLOEXEC)
2727 host_type |= SOCK_CLOEXEC;
2728 #else
2729 return -TARGET_EINVAL;
2730 #endif
2731 }
2732 if (target_type & TARGET_SOCK_NONBLOCK) {
2733 #if defined(SOCK_NONBLOCK)
2734 host_type |= SOCK_NONBLOCK;
2735 #elif !defined(O_NONBLOCK)
2736 return -TARGET_EINVAL;
2737 #endif
2738 }
2739 *type = host_type;
2740 return 0;
2741 }
2742
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK the non-blocking flag is applied with
 * fcntl(O_NONBLOCK) instead.  Returns fd on success; on failure the fd
 * is closed and -TARGET_EINVAL is returned.
 *
 * Fix: the F_GETFL result is now checked — previously a failing
 * F_GETFL (-1) was OR'd into the F_SETFL argument unchecked. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2757
2758 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2759 abi_ulong target_addr,
2760 socklen_t len)
2761 {
2762 struct sockaddr *addr = host_addr;
2763 struct target_sockaddr *target_saddr;
2764
2765 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2766 if (!target_saddr) {
2767 return -TARGET_EFAULT;
2768 }
2769
2770 memcpy(addr, target_saddr, len);
2771 addr->sa_family = tswap16(target_saddr->sa_family);
2772 /* spkt_protocol is big-endian */
2773
2774 unlock_user(target_saddr, target_addr, 0);
2775 return 0;
2776 }
2777
/* fd translator for SOCK_PACKET sockets: fixes up sockaddr byte order
 * on the way into the host. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
2781
2782 #ifdef CONFIG_RTNETLINK
/* Thin adaptor matching the TargetFdTrans data-callback signature. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}

/* Thin adaptor matching the TargetFdTrans data-callback signature. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}

/* fd translator for NETLINK_ROUTE sockets: byte-swaps messages in
 * both directions. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
2797 #endif /* CONFIG_RTNETLINK */
2798
/* Thin adaptor matching the TargetFdTrans data-callback signature. */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_audit(buf, len);
}

/* Thin adaptor matching the TargetFdTrans data-callback signature. */
static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_audit(buf, len);
}

/* fd translator for NETLINK_AUDIT sockets: byte-swaps messages in
 * both directions. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
2813
2814 /* do_socket() Must return target values and target errnos. */
2815 static abi_long do_socket(int domain, int type, int protocol)
2816 {
2817 int target_type = type;
2818 int ret;
2819
2820 ret = target_to_host_sock_type(&type);
2821 if (ret) {
2822 return ret;
2823 }
2824
2825 if (domain == PF_NETLINK && !(
2826 #ifdef CONFIG_RTNETLINK
2827 protocol == NETLINK_ROUTE ||
2828 #endif
2829 protocol == NETLINK_KOBJECT_UEVENT ||
2830 protocol == NETLINK_AUDIT)) {
2831 return -EPFNOSUPPORT;
2832 }
2833
2834 if (domain == AF_PACKET ||
2835 (domain == AF_INET && type == SOCK_PACKET)) {
2836 protocol = tswap16(protocol);
2837 }
2838
2839 ret = get_errno(socket(domain, type, protocol));
2840 if (ret >= 0) {
2841 ret = sock_flags_fixup(ret, target_type);
2842 if (type == SOCK_PACKET) {
2843 /* Manage an obsolete case :
2844 * if socket type is SOCK_PACKET, bind by name
2845 */
2846 fd_trans_register(ret, &target_packet_trans);
2847 } else if (domain == PF_NETLINK) {
2848 switch (protocol) {
2849 #ifdef CONFIG_RTNETLINK
2850 case NETLINK_ROUTE:
2851 fd_trans_register(ret, &target_netlink_route_trans);
2852 break;
2853 #endif
2854 case NETLINK_KOBJECT_UEVENT:
2855 /* nothing to do: messages are strings */
2856 break;
2857 case NETLINK_AUDIT:
2858 fd_trans_register(ret, &target_netlink_audit_trans);
2859 break;
2860 default:
2861 g_assert_not_reached();
2862 }
2863 }
2864 }
2865 return ret;
2866 }
2867
2868 /* do_bind() Must return target values and target errnos. */
2869 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2870 socklen_t addrlen)
2871 {
2872 void *addr;
2873 abi_long ret;
2874
2875 if ((int)addrlen < 0) {
2876 return -TARGET_EINVAL;
2877 }
2878
2879 addr = alloca(addrlen+1);
2880
2881 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2882 if (ret)
2883 return ret;
2884
2885 return get_errno(bind(sockfd, addr, addrlen));
2886 }
2887
2888 /* do_connect() Must return target values and target errnos. */
2889 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2890 socklen_t addrlen)
2891 {
2892 void *addr;
2893 abi_long ret;
2894
2895 if ((int)addrlen < 0) {
2896 return -TARGET_EINVAL;
2897 }
2898
2899 addr = alloca(addrlen+1);
2900
2901 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2902 if (ret)
2903 return ret;
2904
2905 return get_errno(safe_connect(sockfd, addr, addrlen));
2906 }
2907
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: msgp is the already-locked guest
 * msghdr.  Converts name, iovec and control data between guest and
 * host representations around a safe_sendmsg()/safe_recvmsg() call.
 * 'send' selects direction. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): host buffer is twice the guest control length —
     * presumably because host cmsgs can be larger than target ones;
     * confirm against target_to_host_cmsg()/host_to_target_cmsg(). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        /* lock_iovec() reports failure via host errno */
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* fd-specific payload conversion (e.g. netlink) */
            ret = fd_trans_target_to_host_data(fd)(msg.msg_iov->iov_base,
                                                   msg.msg_iov->iov_len);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
        }
        if (ret == 0) {
            ret = get_errno(safe_sendmsg(fd, &msg, flags));
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;    /* preserve the byte count across conversions */
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       msg.msg_iov->iov_len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2986
2987 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2988 int flags, int send)
2989 {
2990 abi_long ret;
2991 struct target_msghdr *msgp;
2992
2993 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2994 msgp,
2995 target_msg,
2996 send ? 1 : 0)) {
2997 return -TARGET_EFAULT;
2998 }
2999 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3000 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3001 return ret;
3002 }
3003
3004 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3005 * so it might not have this *mmsg-specific flag either.
3006 */
3007 #ifndef MSG_WAITFORONE
3008 #define MSG_WAITFORONE 0x10000
3009 #endif
3010
/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked().
 * Returns the number of messages transferred if any succeeded,
 * otherwise the (target) error of the first failure. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* silently cap the vector length, as the kernel does */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3050
3051 /* If we don't have a system accept4() then just call accept.
3052 * The callsites to do_accept4() will ensure that they don't
3053 * pass a non-zero flags argument in this config.
3054 */
3055 #ifndef CONFIG_ACCEPT4
/* Fallback for hosts without accept4(): flags must be zero here, as
 * guaranteed by the callers of do_accept4() in this configuration. */
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
3062 #endif
3063
/* do_accept4() Must return target values and target errnos.
 *
 * The EINVAL-vs-EFAULT choices below deliberately mirror the Linux
 * kernel's behavior for accept() with bad addrlen pointers. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* guest passed no address buffer: plain accept */
    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3100
3101 /* do_getpeername() Must return target values and target errnos. */
3102 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3103 abi_ulong target_addrlen_addr)
3104 {
3105 socklen_t addrlen;
3106 void *addr;
3107 abi_long ret;
3108
3109 if (get_user_u32(addrlen, target_addrlen_addr))
3110 return -TARGET_EFAULT;
3111
3112 if ((int)addrlen < 0) {
3113 return -TARGET_EINVAL;
3114 }
3115
3116 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3117 return -TARGET_EFAULT;
3118
3119 addr = alloca(addrlen);
3120
3121 ret = get_errno(getpeername(fd, addr, &addrlen));
3122 if (!is_error(ret)) {
3123 host_to_target_sockaddr(target_addr, addr, addrlen);
3124 if (put_user_u32(addrlen, target_addrlen_addr))
3125 ret = -TARGET_EFAULT;
3126 }
3127 return ret;
3128 }
3129
3130 /* do_getsockname() Must return target values and target errnos. */
3131 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3132 abi_ulong target_addrlen_addr)
3133 {
3134 socklen_t addrlen;
3135 void *addr;
3136 abi_long ret;
3137
3138 if (get_user_u32(addrlen, target_addrlen_addr))
3139 return -TARGET_EFAULT;
3140
3141 if ((int)addrlen < 0) {
3142 return -TARGET_EINVAL;
3143 }
3144
3145 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3146 return -TARGET_EFAULT;
3147
3148 addr = alloca(addrlen);
3149
3150 ret = get_errno(getsockname(fd, addr, &addrlen));
3151 if (!is_error(ret)) {
3152 host_to_target_sockaddr(target_addr, addr, addrlen);
3153 if (put_user_u32(addrlen, target_addrlen_addr))
3154 ret = -TARGET_EFAULT;
3155 }
3156 return ret;
3157 }
3158
3159 /* do_socketpair() Must return target values and target errnos. */
3160 static abi_long do_socketpair(int domain, int type, int protocol,
3161 abi_ulong target_tab_addr)
3162 {
3163 int tab[2];
3164 abi_long ret;
3165
3166 target_to_host_sock_type(&type);
3167
3168 ret = get_errno(socketpair(domain, type, protocol, tab));
3169 if (!is_error(ret)) {
3170 if (put_user_s32(tab[0], target_tab_addr)
3171 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3172 ret = -TARGET_EFAULT;
3173 }
3174 return ret;
3175 }
3176
/* do_sendto() Must return target values and target errnos.
 *
 * Locks the guest payload, applies any fd-specific data conversion
 * (e.g. netlink byte-swapping), converts the optional destination
 * address, and issues safe_sendto(). */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* fd-specific in-place payload conversion */
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
    }
    if (target_addr) {
        /* +1: slack for target_to_host_sockaddr() */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3213
/* do_recvfrom() Must return target values and target errnos.
 *
 * Locks a writable guest buffer, issues safe_recvfrom(), and copies
 * the source address (when requested) and data back to the guest. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* success: copy received bytes back to the guest */
        unlock_user(host_msg, msg, len);
    } else {
    fail:
        /* error: release the guest buffer without copying back */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3258
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * The socketcall(2) syscall multiplexes all socket operations through a
 * single entry point: "num" selects the operation and "vptr" points at
 * an array of abi_long arguments in guest memory.  We first copy in the
 * number of arguments the selected operation takes (per the ac[] table)
 * and then dispatch to the individual do_* helpers.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }
    /* If num was out of range, a[] stays uninitialized -- that is safe
     * because the switch below then hits the default case before any
     * element of a[] is read. */

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3346
#define N_SHM_REGIONS 32

/* Bookkeeping for SysV shared memory attachments made via do_shmat(),
 * so do_shmdt() can recover the segment size when clearing the guest
 * page flags.  Fixed-size table; slots are recycled via in_use. */
static struct shm_region {
    abi_ulong start;  /* guest address the segment is attached at */
    abi_ulong size;   /* segment size in bytes */
    bool in_use;
} shm_regions[N_SHM_REGIONS];
3354
/* Guest-ABI layout of struct semid_ds.  The __unusedN padding words
 * mirror the 32-bit kernel ABI; PPC64 lays the times out without
 * the extra padding. */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;          /* last semop() time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;          /* last change time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;          /* number of semaphores in the set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
3370
/* Convert a guest struct ipc_perm (embedded at the start of the guest
 * semid_ds at target_addr) into host format.  The mode and __seq fields
 * are 32-bit on some targets and 16-bit on others, hence the #ifdefs.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is
 * inaccessible.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3398
/* Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm
 * into the guest semid_ds at target_addr, byte-swapping each field to
 * guest order.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3426
3427 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3428 abi_ulong target_addr)
3429 {
3430 struct target_semid_ds *target_sd;
3431
3432 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3433 return -TARGET_EFAULT;
3434 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3435 return -TARGET_EFAULT;
3436 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3437 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3438 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3439 unlock_user_struct(target_sd, target_addr, 0);
3440 return 0;
3441 }
3442
3443 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3444 struct semid_ds *host_sd)
3445 {
3446 struct target_semid_ds *target_sd;
3447
3448 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3449 return -TARGET_EFAULT;
3450 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3451 return -TARGET_EFAULT;
3452 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3453 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3454 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3455 unlock_user_struct(target_sd, target_addr, 1);
3456 return 0;
3457 }
3458
/* Guest-ABI layout of struct seminfo, returned by semctl(IPC_INFO /
 * SEM_INFO).  All fields are plain ints on every target. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3471
/* Copy a host struct seminfo to the guest at target_addr, swapping each
 * field to guest byte order.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
3491
/* Host-side semctl() argument union (glibc does not export one). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view of the same union: every pointer member is just a
 * guest address (abi_ulong), so all four members alias one word. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3505
3506 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3507 abi_ulong target_addr)
3508 {
3509 int nsems;
3510 unsigned short *array;
3511 union semun semun;
3512 struct semid_ds semid_ds;
3513 int i, ret;
3514
3515 semun.buf = &semid_ds;
3516
3517 ret = semctl(semid, 0, IPC_STAT, semun);
3518 if (ret == -1)
3519 return get_errno(ret);
3520
3521 nsems = semid_ds.sem_nsems;
3522
3523 *host_array = g_try_new(unsigned short, nsems);
3524 if (!*host_array) {
3525 return -TARGET_ENOMEM;
3526 }
3527 array = lock_user(VERIFY_READ, target_addr,
3528 nsems*sizeof(unsigned short), 1);
3529 if (!array) {
3530 g_free(*host_array);
3531 return -TARGET_EFAULT;
3532 }
3533
3534 for(i=0; i<nsems; i++) {
3535 __get_user((*host_array)[i], &array[i]);
3536 }
3537 unlock_user(array, target_addr, 0);
3538
3539 return 0;
3540 }
3541
3542 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3543 unsigned short **host_array)
3544 {
3545 int nsems;
3546 unsigned short *array;
3547 union semun semun;
3548 struct semid_ds semid_ds;
3549 int i, ret;
3550
3551 semun.buf = &semid_ds;
3552
3553 ret = semctl(semid, 0, IPC_STAT, semun);
3554 if (ret == -1)
3555 return get_errno(ret);
3556
3557 nsems = semid_ds.sem_nsems;
3558
3559 array = lock_user(VERIFY_WRITE, target_addr,
3560 nsems*sizeof(unsigned short), 0);
3561 if (!array)
3562 return -TARGET_EFAULT;
3563
3564 for(i=0; i<nsems; i++) {
3565 __put_user((*host_array)[i], &array[i]);
3566 }
3567 g_free(*host_array);
3568 unlock_user(array, target_addr, 1);
3569
3570 return 0;
3571 }
3572
/* Emulate semctl(2).  target_arg is the raw fourth syscall argument,
 * interpreted through union target_semun according to cmd.  Must return
 * target values and target errnos.
 *
 * Note: the host IPC_64 variants are always used, so the guest's IPC_64
 * flag (bit 8) is stripped from cmd up front.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Copy the whole value array in, run the op, copy it back out.
         * host_to_target_semarray() also frees the host copy. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Struct-valued commands go through a host semid_ds bounce
         * buffer in both directions. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These ignore the fourth argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }
    /* Unknown cmds fall through with the initial -TARGET_EINVAL. */

    return ret;
}
3642
/* Guest-ABI layout of struct sembuf (one semop() operation).  Same
 * field widths on all targets; only byte order differs. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: +n, -n or 0 (wait-for-zero) */
    short sem_flg;           /* IPC_NOWAIT, SEM_UNDO */
};
3648
3649 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3650 abi_ulong target_addr,
3651 unsigned nsops)
3652 {
3653 struct target_sembuf *target_sembuf;
3654 int i;
3655
3656 target_sembuf = lock_user(VERIFY_READ, target_addr,
3657 nsops*sizeof(struct target_sembuf), 1);
3658 if (!target_sembuf)
3659 return -TARGET_EFAULT;
3660
3661 for(i=0; i<nsops; i++) {
3662 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3663 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3664 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3665 }
3666
3667 unlock_user(target_sembuf, target_addr, 0);
3668
3669 return 0;
3670 }
3671
3672 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3673 {
3674 struct sembuf sops[nsops];
3675
3676 if (target_to_host_sembuf(sops, ptr, nsops))
3677 return -TARGET_EFAULT;
3678
3679 return get_errno(semop(semid, sops, nsops));
3680 }
3681
/* Guest-ABI layout of struct msqid_ds.  On 32-bit targets the time
 * fields are each followed by a padding word, matching the kernel's
 * 32-bit IPC_64 layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current bytes on queue */
    abi_ulong msg_qnum;         /* messages on queue */
    abi_ulong msg_qbytes;       /* max bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd() */
    abi_ulong msg_lrpid;        /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3705
3706 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3707 abi_ulong target_addr)
3708 {
3709 struct target_msqid_ds *target_md;
3710
3711 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3712 return -TARGET_EFAULT;
3713 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3714 return -TARGET_EFAULT;
3715 host_md->msg_stime = tswapal(target_md->msg_stime);
3716 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3717 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3718 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3719 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3720 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3721 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3722 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3723 unlock_user_struct(target_md, target_addr, 0);
3724 return 0;
3725 }
3726
3727 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3728 struct msqid_ds *host_md)
3729 {
3730 struct target_msqid_ds *target_md;
3731
3732 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3733 return -TARGET_EFAULT;
3734 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3735 return -TARGET_EFAULT;
3736 target_md->msg_stime = tswapal(host_md->msg_stime);
3737 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3738 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3739 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3740 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3741 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3742 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3743 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3744 unlock_user_struct(target_md, target_addr, 1);
3745 return 0;
3746 }
3747
/* Guest-ABI layout of struct msginfo, returned by msgctl(IPC_INFO /
 * MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3758
/* Copy a host struct msginfo to the guest at target_addr, swapping each
 * field to guest byte order.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
3776
/* Emulate msgctl(2).  ptr is the guest address of the msqid_ds /
 * msginfo buffer, interpreted according to cmd.  Must return target
 * values and target errnos.  The guest's IPC_64 flag is stripped
 * because the host IPC_64 variants are always used. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Bounce through a host msqid_ds in both directions. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel's IPC_INFO/MSG_INFO actually fill a msginfo even
         * though the prototype takes msqid_ds*, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
3808
/* Guest-ABI layout of struct msgbuf: a type word followed by the
 * message text (mtext[1] is the pre-C99 flexible-array idiom; the real
 * payload extends past the declared size). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
3813
/* Emulate msgsnd(2): bounce the guest message through a host msgbuf
 * (host "long" mtype may be wider than the guest's abi_long) and issue
 * the syscall via the safe_ wrapper.  Must return target values and
 * target errnos. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    /* NOTE(review): lock_user_struct with copy=0 under VERIFY_READ,
     * yet target_mb is read below -- presumably fine because the
     * struct only covers the header, but verify under DEBUG_REMAP. */
    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
3840
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the type
 * word (byte-swapped) and the received text back into the guest's
 * msgbuf.  Must return target values and target errnos.
 *
 * On success ret is the number of text bytes received; the text is
 * copied to guest address msgp + sizeof(abi_ulong), i.e. just past the
 * guest mtype field. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* sizeof(long) covers the host mtype field preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        /* Lock the text region for writing (it extends beyond the
         * header struct locked above), then copy the received bytes. */
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (the lock failure above
     * returned early), so this check is purely defensive. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3883
3884 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3885 abi_ulong target_addr)
3886 {
3887 struct target_shmid_ds *target_sd;
3888
3889 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3890 return -TARGET_EFAULT;
3891 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3892 return -TARGET_EFAULT;
3893 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3894 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3895 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3896 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3897 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3898 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3899 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3900 unlock_user_struct(target_sd, target_addr, 0);
3901 return 0;
3902 }
3903
3904 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3905 struct shmid_ds *host_sd)
3906 {
3907 struct target_shmid_ds *target_sd;
3908
3909 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3910 return -TARGET_EFAULT;
3911 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3912 return -TARGET_EFAULT;
3913 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3914 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3915 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3916 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3917 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3918 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3919 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3920 unlock_user_struct(target_sd, target_addr, 1);
3921 return 0;
3922 }
3923
/* Guest-ABI layout of struct shminfo, returned by shmctl(IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;  /* max segment size */
    abi_ulong shmmin;  /* min segment size */
    abi_ulong shmmni;  /* max number of segments */
    abi_ulong shmseg;  /* max segments per process */
    abi_ulong shmall;  /* max total shared memory (pages) */
};
3931
/* Copy a host struct shminfo to the guest at target_addr, swapping each
 * field to guest byte order.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
3946
/* Guest-ABI layout of struct shm_info, returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;             /* currently existing segments */
    abi_ulong shm_tot;        /* total allocated shm (pages) */
    abi_ulong shm_rss;        /* resident shm (pages) */
    abi_ulong shm_swp;        /* swapped shm (pages) */
    abi_ulong swap_attempts;  /* unused since Linux 2.4 */
    abi_ulong swap_successes; /* unused since Linux 2.4 */
};
3955
/* Copy a host struct shm_info to the guest at target_addr, swapping
 * each field to guest byte order.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
3971
/* Emulate shmctl(2).  buf is the guest address of the shmid_ds /
 * shminfo / shm_info buffer, interpreted according to cmd.  Must return
 * target values and target errnos.  The guest's IPC_64 flag is
 * stripped because the host IPC_64 variants are always used. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Bounce through a host shmid_ds in both directions. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel fills a shminfo despite the shmid_ds* prototype. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, SHM_INFO fills a shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These ignore the buffer argument entirely. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4010
/* Emulate shmat(2).  If the guest supplied no address, pick a free
 * region of guest address space with mmap_find_vma() and force the
 * mapping there with SHM_REMAP.  On success the attachment is recorded
 * in shm_regions[] (for do_shmdt) and the guest page flags are set.
 * Returns the guest attach address, or a target errno. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Serialise against other guest address-space changes. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: we picked the address, so override whatever
             * host mapping is already there. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the attachment so do_shmdt() can find its size.  If the
     * table is full the attach still succeeds but is not tracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4064
4065 static inline abi_long do_shmdt(abi_ulong shmaddr)
4066 {
4067 int i;
4068
4069 for (i = 0; i < N_SHM_REGIONS; ++i) {
4070 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4071 shm_regions[i].in_use = false;
4072 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4073 break;
4074 }
4075 }
4076
4077 return get_errno(shmdt(g2h(shmaddr)));
4078 }
4079
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 *
 * The ipc(2) syscall multiplexes all SysV IPC operations: the low 16
 * bits of "call" select the operation and the high 16 bits carry an
 * ABI version used by a few operations (msgrcv, shmat).
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        /* Fix: the fault here was previously ignored, passing an
         * uninitialized value on to do_semctl(). */
        if (get_user_ual(atptr, ptr) != 0) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-ABI msgrcv passes msgp and msgtyp indirectly
                 * through a kludge struct in guest memory. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned indirectly via *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4185
4186 /* kernel structure types definitions */
4187
4188 #define STRUCT(name, ...) STRUCT_ ## name,
4189 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4190 enum {
4191 #include "syscall_types.h"
4192 STRUCT_MAX
4193 };
4194 #undef STRUCT
4195 #undef STRUCT_SPECIAL
4196
4197 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4198 #define STRUCT_SPECIAL(name)
4199 #include "syscall_types.h"
4200 #undef STRUCT
4201 #undef STRUCT_SPECIAL
4202
typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler hook for ioctls whose argument cannot be converted
 * generically from the thunk type description. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry in the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;            /* ioctl request number as the guest sees it */
    unsigned int host_cmd;     /* corresponding host request number */
    const char *name;          /* for logging / strace output */
    int access;                /* IOC_R / IOC_W / IOC_RW argument direction */
    do_ioctl_fn *do_ioctl;     /* non-NULL for special-cased ioctls */
    const argtype arg_type[5]; /* thunk description of the argument */
};
4216
4217 #define IOC_R 0x0001
4218 #define IOC_W 0x0002
4219 #define IOC_RW (IOC_R | IOC_W)
4220
4221 #define MAX_STRUCT_SIZE 4096
4222
4223 #ifdef CONFIG_FIEMAP
4224 /* So fiemap access checks don't overflow on 32 bit systems.
4225 * This is very slightly smaller than the limit imposed by
4226 * the underlying kernel.
4227 */
4228 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4229 / sizeof(struct fiemap_extent))
4230
/* Special-case handler for FS_IOC_FIEMAP.
 *
 * The parameter for this ioctl is a struct fiemap followed
 * by an array of struct fiemap_extent whose size is set
 * in fiemap->fm_extent_count.  The array is filled in by the
 * ioctl, so the output buffer must be sized from the guest's
 * requested extent count rather than the fixed thunk size.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the guest-supplied count so outbufsz below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4310 #endif
4311
4312 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4313 int fd, int cmd, abi_long arg)
4314 {
4315 const argtype *arg_type = ie->arg_type;
4316 int target_size;
4317 void *argptr;
4318 int ret;
4319 struct ifconf *host_ifconf;
4320 uint32_t outbufsz;
4321 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4322 int target_ifreq_size;
4323 int nb_ifreq;
4324 int free_buf = 0;
4325 int i;
4326 int target_ifc_len;
4327 abi_long target_ifc_buf;
4328 int host_ifc_len;
4329 char *host_ifc_buf;
4330
4331 assert(arg_type[0] == TYPE_PTR);
4332 assert(ie->access == IOC_RW);
4333
4334 arg_type++;
4335 target_size = thunk_type_size(arg_type, 0);
4336
4337 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4338 if (!argptr)
4339 return -TARGET_EFAULT;
4340 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4341 unlock_user(argptr, arg, 0);
4342
4343 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4344 target_ifc_len = host_ifconf->ifc_len;
4345 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4346
4347 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4348 nb_ifreq = target_ifc_len / target_ifreq_size;
4349 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4350
4351 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4352 if (outbufsz > MAX_STRUCT_SIZE) {
4353 /* We can't fit all the extents into the fixed size buffer.
4354 * Allocate one that is large enough and use it instead.
4355 */
4356 host_ifconf = malloc(outbufsz);
4357 if (!host_ifconf) {
4358 return -TARGET_ENOMEM;
4359 }
4360 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4361 free_buf = 1;
4362 }
4363 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4364
4365 host_ifconf->ifc_len = host_ifc_len;
4366 host_ifconf->ifc_buf = host_ifc_buf;
4367
4368 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
4369 if (!is_error(ret)) {
4370 /* convert host ifc_len to target ifc_len */
4371
4372 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4373 target_ifc_len = nb_ifreq * target_ifreq_size;
4374 host_ifconf->ifc_len = target_ifc_len;
4375
4376 /* restore target ifc_buf */
4377
4378 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4379
4380 /* copy struct ifconf to target user */
4381
4382 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4383 if (!argptr)
4384 return -TARGET_EFAULT;
4385 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4386 unlock_user(argptr, arg, target_size);
4387
4388 /* copy ifreq[] to target user */
4389
4390 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4391 for (i = 0; i < nb_ifreq ; i++) {
4392 thunk_convert(argptr + i * target_ifreq_size,
4393 host_ifc_buf + i * sizeof(struct ifreq),
4394 ifreq_arg_type, THUNK_TARGET);
4395 }
4396 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4397 }
4398
4399 if (free_buf) {
4400 free(host_ifconf);
4401 }
4402
4403 return ret;
4404 }
4405
/* Handle device-mapper ioctls (DM_*).  The argument is a struct
 * dm_ioctl header followed by a variable-size payload whose layout
 * depends on the specific command, so the generic thunk path cannot
 * cope: the header is thunk-converted, and the payload is copied and
 * converted by hand in each direction through a temporary buffer sized
 * from the guest-supplied data_size field.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed struct dm_ioctl header from guest format. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    /* NOTE(review): data_size is guest-controlled; data_size * 2 can
     * wrap and is not validated against data_start — worth hardening. */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    /* Locate the command payload in guest memory and in our buffer. */
    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -EINVAL;  /* NOTE(review): not -TARGET_EINVAL — confirm */
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* Copy in the command-specific payload.
     * NOTE(review): argptr is not NULL-checked before the memcpy()s
     * below; a bad guest pointer would fault — confirm upstream fix. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* leading 64-bit sector number needs byte-swapping */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* payload is a chain of struct dm_target_spec, each followed
         * by a NUL-terminated parameter string and located via 'next'
         */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            /* repack: host 'next' is relative to this spec */
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* Copy the command-specific results back out to the guest.
         * NOTE(review): this lock_user result is also unchecked. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* chain of struct dm_name_list + name, linked via 'next' */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* chain of struct dm_target_spec + status string */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                /* target 'next' is an offset from the payload start */
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* a count followed by an array of 64-bit device numbers */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* chain of struct dm_target_versions + name, via 'next' */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally convert the (possibly updated) header back out. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
4630
4631 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4632 int cmd, abi_long arg)
4633 {
4634 void *argptr;
4635 int target_size;
4636 const argtype *arg_type = ie->arg_type;
4637 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4638 abi_long ret;
4639
4640 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4641 struct blkpg_partition host_part;
4642
4643 /* Read and convert blkpg */
4644 arg_type++;
4645 target_size = thunk_type_size(arg_type, 0);
4646 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4647 if (!argptr) {
4648 ret = -TARGET_EFAULT;
4649 goto out;
4650 }
4651 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4652 unlock_user(argptr, arg, 0);
4653
4654 switch (host_blkpg->op) {
4655 case BLKPG_ADD_PARTITION:
4656 case BLKPG_DEL_PARTITION:
4657 /* payload is struct blkpg_partition */
4658 break;
4659 default:
4660 /* Unknown opcode */
4661 ret = -TARGET_EINVAL;
4662 goto out;
4663 }
4664
4665 /* Read and convert blkpg->data */
4666 arg = (abi_long)(uintptr_t)host_blkpg->data;
4667 target_size = thunk_type_size(part_arg_type, 0);
4668 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4669 if (!argptr) {
4670 ret = -TARGET_EFAULT;
4671 goto out;
4672 }
4673 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4674 unlock_user(argptr, arg, 0);
4675
4676 /* Swizzle the data pointer to our local copy and call! */
4677 host_blkpg->data = &host_part;
4678 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
4679
4680 out:
4681 return ret;
4682 }
4683
4684 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4685 int fd, int cmd, abi_long arg)
4686 {
4687 const argtype *arg_type = ie->arg_type;
4688 const StructEntry *se;
4689 const argtype *field_types;
4690 const int *dst_offsets, *src_offsets;
4691 int target_size;
4692 void *argptr;
4693 abi_ulong *target_rt_dev_ptr;
4694 unsigned long *host_rt_dev_ptr;
4695 abi_long ret;
4696 int i;
4697
4698 assert(ie->access == IOC_W);
4699 assert(*arg_type == TYPE_PTR);
4700 arg_type++;
4701 assert(*arg_type == TYPE_STRUCT);
4702 target_size = thunk_type_size(arg_type, 0);
4703 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4704 if (!argptr) {
4705 return -TARGET_EFAULT;
4706 }
4707 arg_type++;
4708 assert(*arg_type == (int)STRUCT_rtentry);
4709 se = struct_entries + *arg_type++;
4710 assert(se->convert[0] == NULL);
4711 /* convert struct here to be able to catch rt_dev string */
4712 field_types = se->field_types;
4713 dst_offsets = se->field_offsets[THUNK_HOST];
4714 src_offsets = se->field_offsets[THUNK_TARGET];
4715 for (i = 0; i < se->nb_fields; i++) {
4716 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4717 assert(*field_types == TYPE_PTRVOID);
4718 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4719 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4720 if (*target_rt_dev_ptr != 0) {
4721 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4722 tswapal(*target_rt_dev_ptr));
4723 if (!*host_rt_dev_ptr) {
4724 unlock_user(argptr, arg, 0);
4725 return -TARGET_EFAULT;
4726 }
4727 } else {
4728 *host_rt_dev_ptr = 0;
4729 }
4730 field_types++;
4731 continue;
4732 }
4733 field_types = thunk_convert(buf_temp + dst_offsets[i],
4734 argptr + src_offsets[i],
4735 field_types, THUNK_HOST);
4736 }
4737 unlock_user(argptr, arg, 0);
4738
4739 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4740 if (*host_rt_dev_ptr != 0) {
4741 unlock_user((void *)*host_rt_dev_ptr,
4742 *target_rt_dev_ptr, 0);
4743 }
4744 return ret;
4745 }
4746
4747 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4748 int fd, int cmd, abi_long arg)
4749 {
4750 int sig = target_to_host_signal(arg);
4751 return get_errno(ioctl(fd, ie->host_cmd, sig));
4752 }
4753
/* The ioctl translation table, generated from ioctls.h.  Plain IOCTL()
 * entries use the generic thunk conversion in do_ioctl();
 * IOCTL_SPECIAL() entries supply a custom do_ioctl_fn handler.
 * The table is terminated by an all-zero sentinel entry.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
4762
4763 /* ??? Implement proper locking for ioctls. */
4764 /* do_ioctl() Must return target values and target errnos. */
4765 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4766 {
4767 const IOCTLEntry *ie;
4768 const argtype *arg_type;
4769 abi_long ret;
4770 uint8_t buf_temp[MAX_STRUCT_SIZE];
4771 int target_size;
4772 void *argptr;
4773
4774 ie = ioctl_entries;
4775 for(;;) {
4776 if (ie->target_cmd == 0) {
4777 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4778 return -TARGET_ENOSYS;
4779 }
4780 if (ie->target_cmd == cmd)
4781 break;
4782 ie++;
4783 }
4784 arg_type = ie->arg_type;
4785 #if defined(DEBUG)
4786 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
4787 #endif
4788 if (ie->do_ioctl) {
4789 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4790 }
4791
4792 switch(arg_type[0]) {
4793 case TYPE_NULL:
4794 /* no argument */
4795 ret = get_errno(ioctl(fd, ie->host_cmd));
4796 break;
4797 case TYPE_PTRVOID:
4798 case TYPE_INT:
4799 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
4800 break;
4801 case TYPE_PTR:
4802 arg_type++;
4803 target_size = thunk_type_size(arg_type, 0);
4804 switch(ie->access) {
4805 case IOC_R:
4806 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4807 if (!is_error(ret)) {
4808 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4809 if (!argptr)
4810 return -TARGET_EFAULT;
4811 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4812 unlock_user(argptr, arg, target_size);
4813 }
4814 break;
4815 case IOC_W:
4816 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4817 if (!argptr)
4818 return -TARGET_EFAULT;
4819 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4820 unlock_user(argptr, arg, 0);
4821 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4822 break;
4823 default:
4824 case IOC_RW:
4825 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4826 if (!argptr)
4827 return -TARGET_EFAULT;
4828 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4829 unlock_user(argptr, arg, 0);
4830 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4831 if (!is_error(ret)) {
4832 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4833 if (!argptr)
4834 return -TARGET_EFAULT;
4835 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4836 unlock_user(argptr, arg, target_size);
4837 }
4838 break;
4839 }
4840 break;
4841 default:
4842 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4843 (long)cmd, arg_type[0]);
4844 ret = -TARGET_ENOSYS;
4845 break;
4846 }
4847 return ret;
4848 }
4849
/* Guest<->host translation table for termios c_iflag bits. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
4867
/* Guest<->host translation table for termios c_oflag bits; the delay
 * fields (NLDLY, CRDLY, ...) are multi-bit and listed per value.
 */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
4895
/* Guest<->host translation table for termios c_cflag bits; the CBAUD
 * and CSIZE fields are multi-bit and listed per value.
 */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
4930
/* Guest<->host translation table for termios c_lflag bits. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
4949
/* Convert a guest struct target_termios (src) into the host
 * representation (dst): flag words are byte-swapped and translated
 * through the bitmask tables above, and the control characters are
 * remapped from guest c_cc indices to host ones.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* zero first: host c_cc slots with no guest equivalent stay 0 */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
4984
/* Inverse of target_to_host_termios(): convert a host struct
 * host_termios (src) into the guest representation (dst).
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* zero first: guest c_cc slots with no host equivalent stay 0 */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
5019
/* Thunk descriptor for termios: conversion is handled entirely by the
 * two custom functions above rather than by generic field thunking.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5025
/* Guest<->host translation table for mmap(2) flag bits. */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
	  MAP_NORESERVE },
	{ 0, 0, 0, 0 }
};
5039
5040 #if defined(TARGET_I386)
5041
5042 /* NOTE: there is really one LDT for all the threads */
5043 static uint8_t *ldt_table;
5044
5045 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5046 {
5047 int size;
5048 void *p;
5049
5050 if (!ldt_table)
5051 return 0;
5052 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5053 if (size > bytecount)
5054 size = bytecount;
5055 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5056 if (!p)
5057 return -TARGET_EFAULT;
5058 /* ??? Should this by byteswapped? */
5059 memcpy(p, ldt_table, size);
5060 unlock_user(p, ptr, size);
5061 return size;
5062 }
5063
5064 /* XXX: add locking support */
/* XXX: add locking support */
/* Install or clear one guest LDT entry, mirroring the Linux kernel's
 * modify_ldt(2) write path.  'oldmode' selects the legacy (func == 1)
 * semantics, which reject some entry kinds and ignore the 'useable'
 * bit.  The LDT backing store is lazily allocated in guest memory on
 * first write.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    /* Fetch the guest's struct user_desc equivalent. */
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Decode the packed user_desc flag bits. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 would be a present conforming code segment; only
     * allowed in new mode and only when marked not-present. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of an x86 segment
     * descriptor (base/limit split across both words, type and
     * attribute bits in the high word). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5155
5156 /* specific and weird i386 syscalls */
5157 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5158 unsigned long bytecount)
5159 {
5160 abi_long ret;
5161
5162 switch (func) {
5163 case 0:
5164 ret = read_ldt(ptr, bytecount);
5165 break;
5166 case 1:
5167 ret = write_ldt(env, ptr, bytecount, 1);
5168 break;
5169 case 0x11:
5170 ret = write_ldt(env, ptr, bytecount, 0);
5171 break;
5172 default:
5173 ret = -TARGET_ENOSYS;
5174 break;
5175 }
5176 return ret;
5177 }
5178
5179 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS segment descriptor in the
 * guest GDT.  An entry_number of -1 asks for the first free TLS slot,
 * which is written back into the guest's struct.  The descriptor
 * packing below duplicates the logic in write_ldt() above.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "pick a free TLS slot"; report the choice back. */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Decode the packed user_desc flag bits (see write_ldt()). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of an x86 segment
     * descriptor, as in write_ldt() above. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5264
/* Emulate get_thread_area(2): decode the GDT descriptor selected by the
 * entry_number the guest stored at @ptr, and write the unpacked
 * base/limit/flags back into the guest's target_modify_ldt_ldt_s.
 * Returns 0 on success, -TARGET_EFAULT / -TARGET_EINVAL on error.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Inverse of the bit-packing done in do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5311 #endif /* TARGET_I386 && TARGET_ABI32 */
5312
5313 #ifndef TARGET_ABI32
5314 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5315 {
5316 abi_long ret = 0;
5317 abi_ulong val;
5318 int idx;
5319
5320 switch(code) {
5321 case TARGET_ARCH_SET_GS:
5322 case TARGET_ARCH_SET_FS:
5323 if (code == TARGET_ARCH_SET_GS)
5324 idx = R_GS;
5325 else
5326 idx = R_FS;
5327 cpu_x86_load_seg(env, idx, 0);
5328 env->segs[idx].base = addr;
5329 break;
5330 case TARGET_ARCH_GET_GS:
5331 case TARGET_ARCH_GET_FS:
5332 if (code == TARGET_ARCH_GET_GS)
5333 idx = R_GS;
5334 else
5335 idx = R_FS;
5336 val = env->segs[idx].base;
5337 if (put_user(val, addr, abi_ulong))
5338 ret = -TARGET_EFAULT;
5339 break;
5340 default:
5341 ret = -TARGET_EINVAL;
5342 break;
5343 }
5344 return ret;
5345 }
5346 #endif
5347
5348 #endif /* defined(TARGET_I386) */
5349
/* Host stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation so the parent can finish initializing
 * TLS state before the child starts running (see clone_func/do_fork). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the child's clone_func(). */
typedef struct {
    CPUArchState *env;          /* child's (copied) CPU state */
    pthread_mutex_t mutex;      /* protects the cond handshake below */
    pthread_cond_t cond;        /* child signals "ready" on this */
    pthread_t thread;
    uint32_t tid;               /* filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;           /* parent's signal mask to restore in child */
} new_thread_info;
5364
/* Start routine of a thread created by do_fork() for clone(CLONE_VM).
 * Registers the thread, publishes its TID to the requested guest
 * addresses, restores the parent's signal mask, signals the parent
 * that setup is complete, waits for clone_lock (held by the parent
 * until TLS init is done), then enters the CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finshed initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
5397
5398 /* do_fork() Must return host values and target errnos (unlike most
5399 do_*() functions). */
/* Implement clone()/fork()/vfork() for the guest.
 * CLONE_VM requests become host pthreads sharing this process's address
 * space; everything else becomes a host fork().  Returns the new TID/PID
 * on success, -1 or a -TARGET_E* value on failure (see comment above).
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        /* Keep the NPTL-specific flags for ourselves but don't pass
         * them on: the host thread creation below handles them. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed past its
         * handshake in clone_func(). */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
5519
5520 /* warning : doesn't handle linux specific flags... */
5521 static int target_to_host_fcntl_cmd(int cmd)
5522 {
5523 switch(cmd) {
5524 case TARGET_F_DUPFD:
5525 case TARGET_F_GETFD:
5526 case TARGET_F_SETFD:
5527 case TARGET_F_GETFL:
5528 case TARGET_F_SETFL:
5529 return cmd;
5530 case TARGET_F_GETLK:
5531 return F_GETLK;
5532 case TARGET_F_SETLK:
5533 return F_SETLK;
5534 case TARGET_F_SETLKW:
5535 return F_SETLKW;
5536 case TARGET_F_GETOWN:
5537 return F_GETOWN;
5538 case TARGET_F_SETOWN:
5539 return F_SETOWN;
5540 case TARGET_F_GETSIG:
5541 return F_GETSIG;
5542 case TARGET_F_SETSIG:
5543 return F_SETSIG;
5544 #if TARGET_ABI_BITS == 32
5545 case TARGET_F_GETLK64:
5546 return F_GETLK64;
5547 case TARGET_F_SETLK64:
5548 return F_SETLK64;
5549 case TARGET_F_SETLKW64:
5550 return F_SETLKW64;
5551 #endif
5552 case TARGET_F_SETLEASE:
5553 return F_SETLEASE;
5554 case TARGET_F_GETLEASE:
5555 return F_GETLEASE;
5556 #ifdef F_DUPFD_CLOEXEC
5557 case TARGET_F_DUPFD_CLOEXEC:
5558 return F_DUPFD_CLOEXEC;
5559 #endif
5560 case TARGET_F_NOTIFY:
5561 return F_NOTIFY;
5562 #ifdef F_GETOWN_EX
5563 case TARGET_F_GETOWN_EX:
5564 return F_GETOWN_EX;
5565 #endif
5566 #ifdef F_SETOWN_EX
5567 case TARGET_F_SETOWN_EX:
5568 return F_SETOWN_EX;
5569 #endif
5570 default:
5571 return -TARGET_EINVAL;
5572 }
5573 return -TARGET_EINVAL;
5574 }
5575
/* Translation table for struct flock l_type values: the F_RDLCK etc.
 * constants can differ between target and host ABIs, so l_type is run
 * through target_to_host_bitmask()/host_to_target_bitmask() using this
 * table.  Entry layout follows bitmask_transtbl: the -1 fields are the
 * match masks (all bits significant) — presumably so each lock type
 * matches exactly; confirm against the bitmask_transtbl definition. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
5585
/* Implement fcntl(2) for the guest: translate the command with
 * target_to_host_fcntl_cmd(), convert any pointed-to structures
 * (struct flock, struct flock64, struct f_owner_ex) between target and
 * host layout/byte order, call the host fcntl(), and convert results
 * back.  Returns the host result (converted where needed) or a
 * -TARGET_E* errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy the guest flock in, query, then copy the result back. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                          host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        /* Input-only: the kernel does not modify the flock here. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the ">> 1" on l_type in the 64-bit cases looks
         * like a compensation for how flock_tbl encodes the values —
         * confirm against the F_*LCK64 constant layout before touching. */
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                   host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* The O_* status flags need bitmask translation on the way out. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        /* ... and on the way in. */
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Plain integer argument; no conversion needed. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass the target number through unchanged. */
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
5728
5729 #ifdef USE_UID16
5730
/* Clamp a 32-bit uid to the 16-bit ABI: anything that does not fit is
 * reported as the overflow uid 65534, as Linux does. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
5738
/* Clamp a 32-bit gid to the 16-bit ABI: anything that does not fit is
 * reported as the overflow gid 65534, as Linux does. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
5746
/* Widen a 16-bit uid to 32 bits; the 16-bit "no change" sentinel -1
 * (0xffff) must map to the full-width -1, everything else passes
 * through unchanged. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
5754
/* Widen a 16-bit gid to 32 bits; the 16-bit sentinel -1 (0xffff) must
 * map to the full-width -1, everything else passes through unchanged. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* With USE_UID16, uids/gids cross the guest ABI as 16-bit values. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a uid/gid at a guest address using the 16-bit ABI width. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5768
5769 #else /* !USE_UID16 */
/* !USE_UID16: the guest ABI carries full 32-bit uids/gids, so the
 * high/low conversions are identity functions. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* Store a uid/gid at a guest address using the 32-bit ABI width. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5792
5793 #endif /* USE_UID16 */
5794
5795 /* We must do direct syscalls for setting UID/GID, because we want to
5796 * implement the Linux system call semantics of "change only for this thread",
5797 * not the libc/POSIX semantics of "change for all threads in process".
5798 * (See http://ewontfix.com/17/ for more details.)
5799 * We use the 32-bit version of the syscalls if present; if it is not
5800 * then either the host architecture supports 32-bit UIDs natively with
5801 * the standard syscall, or the 16-bit UID is the best we can do.
5802 */
/* Prefer the host's 32-bit uid/gid syscall variants where they exist;
 * otherwise fall back to the plain (possibly 16-bit) versions. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw syscall stubs: we must bypass libc so the change applies only to
 * the calling thread (see the comment block above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
5828
/* One-time initialization of the syscall emulation layer:
 *  - registers every struct-marshalling descriptor from syscall_types.h
 *    with the thunk machinery,
 *  - builds target_to_host_errno_table[] as the inverse of
 *    host_to_target_errno_table[],
 *  - patches ioctl numbers whose size field must be computed from the
 *    target-dependent argument layout at runtime.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            /* Splice the computed size into the ioctl number's size field. */
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
5880
5881 #if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit file offset,
 * honouring the guest's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t hi = word0, lo = word1;
#else
    uint64_t hi = word1, lo = word0;
#endif
    return (hi << 32) | lo;
}
5890 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset arrives whole in one register; the second
 * argument is unused padding. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
5895 #endif /* TARGET_ABI_BITS != 32 */
5896
5897 #ifdef TARGET_NR_truncate64
/* Implement truncate64 for 32-bit guests: reassemble the 64-bit length
 * from the register pair (arg2,arg3), shifting by one register first on
 * ABIs that require 64-bit values in aligned register pairs. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
5909 #endif
5910
5911 #ifdef TARGET_NR_ftruncate64
/* Implement ftruncate64 for 32-bit guests: same register-pair
 * reassembly as target_truncate64(), but on an fd instead of a path. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
5923 #endif
5924
/* Copy a timespec from guest memory at @target_addr into *host_ts,
 * byte-swapping as needed.  Returns 0, or -TARGET_EFAULT if the guest
 * address is unreadable. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* Read-only access: unlock without copy-back. */
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
5937
/* Copy *host_ts into guest memory at @target_addr, byte-swapping as
 * needed.  Returns 0, or -TARGET_EFAULT if the guest address is
 * unwritable. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* Write access: unlock with copy-back so the data reaches the guest. */
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
5950
5951 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5952 abi_ulong target_addr)
5953 {
5954 struct target_itimerspec *target_itspec;
5955
5956 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5957 return -TARGET_EFAULT;
5958 }
5959
5960 host_itspec->it_interval.tv_sec =
5961 tswapal(target_itspec->it_interval.tv_sec);
5962 host_itspec->it_interval.tv_nsec =
5963 tswapal(target_itspec->it_interval.tv_nsec);
5964 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5965 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5966
5967 unlock_user_struct(target_itspec, target_addr, 1);
5968 return 0;
5969 }
5970
5971 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5972 struct itimerspec *host_its)
5973 {
5974 struct target_itimerspec *target_itspec;
5975
5976 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5977 return -TARGET_EFAULT;
5978 }
5979
5980 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5981 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5982
5983 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5984 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5985
5986 unlock_user_struct(target_itspec, target_addr, 0);
5987 return 0;
5988 }
5989
/* Copy a sigevent from guest memory at @target_addr into *host_sevp.
 * Returns 0, or -TARGET_EFAULT if the guest address is unreadable.
 * NOTE(review): fields of *host_sevp other than the four assigned below
 * are left untouched — presumably callers pass a zeroed struct; confirm
 * at the call sites. */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
6015
6016 #if defined(TARGET_NR_mlockall)
6017 static inline int target_to_host_mlockall_arg(int arg)
6018 {
6019 int result = 0;
6020
6021 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6022 result |= MCL_CURRENT;
6023 }
6024 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6025 result |= MCL_FUTURE;
6026 }
6027 return result;
6028 }
6029 #endif
6030
/* Copy a host struct stat into the guest's stat64 layout at
 * @target_addr.  ARM EABI guests get their own layout
 * (target_eabi_stat64); everyone else uses target_stat64 or
 * target_stat depending on the target's configuration.
 * Returns 0, or -TARGET_EFAULT if the guest address is unwritable. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry the inode in a second, differently-sized
         * field as well. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6093
6094 /* ??? Using host futex calls even when target atomic operations
6095 are not really atomic probably breaks things. However implementing
6096 futexes locally would make futexes shared between multiple processes
6097 tricky. However they're probably useless because guest atomic
6098 operations won't work either. */
6099 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6100 target_ulong uaddr2, int val3)
6101 {
6102 struct timespec ts, *pts;
6103 int base_op;
6104
6105 /* ??? We assume FUTEX_* constants are the same on both host
6106 and target. */
6107 #ifdef FUTEX_CMD_MASK
6108 base_op = op & FUTEX_CMD_MASK;
6109 #else
6110 base_op = op;
6111 #endif
6112 switch (base_op) {
6113 case FUTEX_WAIT:
6114 case FUTEX_WAIT_BITSET:
6115 if (timeout) {
6116 pts = &ts;
6117 target_to_host_timespec(pts, timeout);
6118 } else {
6119 pts = NULL;
6120 }
6121 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6122 pts, NULL, val3));
6123 case FUTEX_WAKE:
6124 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6125 case FUTEX_FD:
6126 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6127 case FUTEX_REQUEUE:
6128 case FUTEX_CMP_REQUEUE:
6129 case FUTEX_WAKE_OP:
6130 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6131 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6132 But the prototype takes a `struct timespec *'; insert casts
6133 to satisfy the compiler. We do not need to tswap TIMEOUT
6134 since it's not compared to guest memory. */
6135 pts = (struct timespec *)(uintptr_t) timeout;
6136 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6137 g2h(uaddr2),
6138 (base_op == FUTEX_CMP_REQUEUE
6139 ? tswap32(val3)
6140 : val3)));
6141 default:
6142 return -TARGET_ENOSYS;
6143 }
6144 }
6145 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(2): read the guest's handle_bytes size,
 * call the host syscall with a host-side file_handle of that size, and
 * copy the resulting (opaque) handle and mount id back to the guest.
 * Returns the host result or -TARGET_EFAULT. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    /* Only the two header fields need byte-swapping; the payload is opaque. */
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
6197 #endif
6198
6199 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(2): copy the guest's file_handle into a
 * host buffer (fixing up the two byte-swapped header fields), translate
 * the open flags, and call the host syscall.
 * Returns the new fd or a -TARGET_E* errno. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
6231 #endif
6232
6233 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6234
6235 /* signalfd siginfo conversion */
6236
/* Convert one signalfd_siginfo record from host to target layout.
 * NOTE: the only caller (host_to_target_data_signalfd) passes the SAME
 * buffer as both @tinfo and @info, so the conversion is in place; the
 * reads of tinfo-> fields in the BUS_MCEERR block below therefore see
 * the not-yet-swapped host values.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Bug fix: this previously read tinfo->ssi_errno (the destination)
     * instead of the source; correct with distinct buffers and
     * equivalent under the in-place aliasing described above. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6274
6275 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6276 {
6277 int i;
6278
6279 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6280 host_to_target_signalfd_siginfo(buf + i, buf + i);
6281 }
6282
6283 return len;
6284 }
6285
/* fd translator for signalfd descriptors: converts each record read
 * from the host fd into the guest's byte order before it is returned. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
6289
6290 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6291 {
6292 int host_flags;
6293 target_sigset_t *target_mask;
6294 sigset_t host_mask;
6295 abi_long ret;
6296
6297 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6298 return -TARGET_EINVAL;
6299 }
6300 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6301 return -TARGET_EFAULT;
6302 }
6303
6304 target_to_host_sigset(&host_mask, target_mask);
6305
6306 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6307
6308 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6309 if (ret >= 0) {
6310 fd_trans_register(ret, &target_signalfd_trans);
6311 }
6312
6313 unlock_user_struct(target_mask, mask, 0);
6314
6315 return ret;
6316 }
6317 #endif
6318
6319 /* Map host to target signal numbers for the wait family of syscalls.
6320 Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        int tsig = host_to_target_signal(WTERMSIG(status));

        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        int tsig = host_to_target_signal(WSTOPSIG(status));

        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
6332
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    /* Copy /proc/self/cmdline into 'fd', dropping the first argv word:
     * the host's argv[0] is the qemu binary path, not the guest's. */
    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Bound the scan to the bytes actually read: scanning
             * sizeof(buf) could match a stale NUL past nb_read and
             * drive the residual count negative, turning the write()
             * below into a huge size_t request. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6382
6383 static int open_self_maps(void *cpu_env, int fd)
6384 {
6385 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6386 TaskState *ts = cpu->opaque;
6387 FILE *fp;
6388 char *line = NULL;
6389 size_t len = 0;
6390 ssize_t read;
6391
6392 fp = fopen("/proc/self/maps", "r");
6393 if (fp == NULL) {
6394 return -1;
6395 }
6396
6397 while ((read = getline(&line, &len, fp)) != -1) {
6398 int fields, dev_maj, dev_min, inode;
6399 uint64_t min, max, offset;
6400 char flag_r, flag_w, flag_x, flag_p;
6401 char path[512] = "";
6402 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6403 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6404 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6405
6406 if ((fields < 10) || (fields > 11)) {
6407 continue;
6408 }
6409 if (h2g_valid(min)) {
6410 int flags = page_get_flags(h2g(min));
6411 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6412 if (page_check_range(h2g(min), max - min, flags) == -1) {
6413 continue;
6414 }
6415 if (h2g(min) == ts->info->stack_limit) {
6416 pstrcpy(path, sizeof(path), " [stack]");
6417 }
6418 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6419 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6420 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6421 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6422 path[0] ? " " : "", path);
6423 }
6424 }
6425
6426 free(line);
6427 fclose(fp);
6428
6429 return 0;
6430 }
6431
6432 static int open_self_stat(void *cpu_env, int fd)
6433 {
6434 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6435 TaskState *ts = cpu->opaque;
6436 abi_ulong start_stack = ts->info->start_stack;
6437 int i;
6438
6439 for (i = 0; i < 44; i++) {
6440 char buf[128];
6441 int len;
6442 uint64_t val = 0;
6443
6444 if (i == 0) {
6445 /* pid */
6446 val = getpid();
6447 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6448 } else if (i == 1) {
6449 /* app name */
6450 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6451 } else if (i == 27) {
6452 /* stack bottom */
6453 val = start_stack;
6454 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6455 } else {
6456 /* for the rest, there is MasterCard */
6457 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6458 }
6459
6460 len = strlen(buf);
6461 if (write(fd, buf, len) != len) {
6462 return -1;
6463 }
6464 }
6465
6466 return 0;
6467 }
6468
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;  /* guest address of the auxv copy */
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                /* write error (or zero-length write): give up silently */
                break;
            }
            /* advance past the bytes already written */
            len -= r;
            ptr += r;
        }
        /* rewind so the caller reads the file from the start */
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced/decremented by the loop,
         * so this unlocks with the moved pointer and the *remaining*
         * length rather than the original values.  Presumably harmless
         * for a VERIFY_READ lock (nothing is copied back) — confirm
         * against lock_user/unlock_user's contract. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
6498
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;
    char pid_prefix[80];

    /* The path must live under /proc/ ... */
    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    /* ... under either "self/" or "<our-pid>/" ... */
    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    /* ... and the remainder must match the requested entry exactly. */
    return strcmp(rest, entry) == 0;
}
6522
6523 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int is_proc(const char *filename, const char *entry)
{
    /* Exact path match; no /proc/self vs /proc/<pid> aliasing here. */
    return !strcmp(filename, entry);
}
6528
6529 static int open_net_route(void *cpu_env, int fd)
6530 {
6531 FILE *fp;
6532 char *line = NULL;
6533 size_t len = 0;
6534 ssize_t read;
6535
6536 fp = fopen("/proc/net/route", "r");
6537 if (fp == NULL) {
6538 return -1;
6539 }
6540
6541 /* read header */
6542
6543 read = getline(&line, &len, fp);
6544 dprintf(fd, "%s", line);
6545
6546 /* read routes */
6547
6548 while ((read = getline(&line, &len, fp)) != -1) {
6549 char iface[16];
6550 uint32_t dest, gw, mask;
6551 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6552 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6553 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6554 &mask, &mtu, &window, &irtt);
6555 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6556 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6557 metric, tswap32(mask), mtu, window, irtt);
6558 }
6559
6560 free(line);
6561 fclose(fp);
6562
6563 return 0;
6564 }
6565 #endif
6566
6567 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6568 {
6569 struct fake_open {
6570 const char *filename;
6571 int (*fill)(void *cpu_env, int fd);
6572 int (*cmp)(const char *s1, const char *s2);
6573 };
6574 const struct fake_open *fake_open;
6575 static const struct fake_open fakes[] = {
6576 { "maps", open_self_maps, is_proc_myself },
6577 { "stat", open_self_stat, is_proc_myself },
6578 { "auxv", open_self_auxv, is_proc_myself },
6579 { "cmdline", open_self_cmdline, is_proc_myself },
6580 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6581 { "/proc/net/route", open_net_route, is_proc },
6582 #endif
6583 { NULL, NULL, NULL }
6584 };
6585
6586 if (is_proc_myself(pathname, "exe")) {
6587 int execfd = qemu_getauxval(AT_EXECFD);
6588 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6589 }
6590
6591 for (fake_open = fakes; fake_open->filename; fake_open++) {
6592 if (fake_open->cmp(pathname, fake_open->filename)) {
6593 break;
6594 }
6595 }
6596
6597 if (fake_open->filename) {
6598 const char *tmpdir;
6599 char filename[PATH_MAX];
6600 int fd, r;
6601
6602 /* create temporary file to map stat to */
6603 tmpdir = getenv("TMPDIR");
6604 if (!tmpdir)
6605 tmpdir = "/tmp";
6606 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6607 fd = mkstemp(filename);
6608 if (fd < 0) {
6609 return fd;
6610 }
6611 unlink(filename);
6612
6613 if ((r = fake_open->fill(cpu_env, fd))) {
6614 int e = errno;
6615 close(fd);
6616 errno = e;
6617 return r;
6618 }
6619 lseek(fd, 0, SEEK_SET);
6620
6621 return fd;
6622 }
6623
6624 return safe_openat(dirfd, path(pathname), flags, mode);
6625 }
6626
6627 #define TIMER_MAGIC 0x0caf0000
6628 #define TIMER_MAGIC_MASK 0xffff0000
6629
6630 /* Convert QEMU provided timer ID back to internal 16bit index format */
6631 static target_timer_t get_timer_id(abi_long arg)
6632 {
6633 target_timer_t timerid = arg;
6634
6635 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6636 return -TARGET_EINVAL;
6637 }
6638
6639 timerid &= 0xffff;
6640
6641 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6642 return -TARGET_EINVAL;
6643 }
6644
6645 return timerid;
6646 }
6647
6648 /* do_syscall() should always have a single exit point at the end so
6649 that actions, such as logging of syscall results, can be performed.
6650 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6651 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6652 abi_long arg2, abi_long arg3, abi_long arg4,
6653 abi_long arg5, abi_long arg6, abi_long arg7,
6654 abi_long arg8)
6655 {
6656 CPUState *cpu = ENV_GET_CPU(cpu_env);
6657 abi_long ret;
6658 struct stat st;
6659 struct statfs stfs;
6660 void *p;
6661
6662 #if defined(DEBUG_ERESTARTSYS)
6663 /* Debug-only code for exercising the syscall-restart code paths
6664 * in the per-architecture cpu main loops: restart every syscall
6665 * the guest makes once before letting it through.
6666 */
6667 {
6668 static int flag;
6669
6670 flag = !flag;
6671 if (flag) {
6672 return -TARGET_ERESTARTSYS;
6673 }
6674 }
6675 #endif
6676
6677 #ifdef DEBUG
6678 gemu_log("syscall %d", num);
6679 #endif
6680 if(do_strace)
6681 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6682
6683 switch(num) {
6684 case TARGET_NR_exit:
6685 /* In old applications this may be used to implement _exit(2).
6686 However in threaded applictions it is used for thread termination,
6687 and _exit_group is used for application termination.
6688 Do thread termination if we have more then one thread. */
6689
6690 if (block_signals()) {
6691 ret = -TARGET_ERESTARTSYS;
6692 break;
6693 }
6694
6695 if (CPU_NEXT(first_cpu)) {
6696 TaskState *ts;
6697
6698 cpu_list_lock();
6699 /* Remove the CPU from the list. */
6700 QTAILQ_REMOVE(&cpus, cpu, node);
6701 cpu_list_unlock();
6702 ts = cpu->opaque;
6703 if (ts->child_tidptr) {
6704 put_user_u32(0, ts->child_tidptr);
6705 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6706 NULL, NULL, 0);
6707 }
6708 thread_cpu = NULL;
6709 object_unref(OBJECT(cpu));
6710 g_free(ts);
6711 rcu_unregister_thread();
6712 pthread_exit(NULL);
6713 }
6714 #ifdef TARGET_GPROF
6715 _mcleanup();
6716 #endif
6717 gdb_exit(cpu_env, arg1);
6718 _exit(arg1);
6719 ret = 0; /* avoid warning */
6720 break;
6721 case TARGET_NR_read:
6722 if (arg3 == 0)
6723 ret = 0;
6724 else {
6725 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6726 goto efault;
6727 ret = get_errno(safe_read(arg1, p, arg3));
6728 if (ret >= 0 &&
6729 fd_trans_host_to_target_data(arg1)) {
6730 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6731 }
6732 unlock_user(p, arg2, ret);
6733 }
6734 break;
6735 case TARGET_NR_write:
6736 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6737 goto efault;
6738 ret = get_errno(safe_write(arg1, p, arg3));
6739 unlock_user(p, arg2, 0);
6740 break;
6741 #ifdef TARGET_NR_open
6742 case TARGET_NR_open:
6743 if (!(p = lock_user_string(arg1)))
6744 goto efault;
6745 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6746 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6747 arg3));
6748 fd_trans_unregister(ret);
6749 unlock_user(p, arg1, 0);
6750 break;
6751 #endif
6752 case TARGET_NR_openat:
6753 if (!(p = lock_user_string(arg2)))
6754 goto efault;
6755 ret = get_errno(do_openat(cpu_env, arg1, p,
6756 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6757 arg4));
6758 fd_trans_unregister(ret);
6759 unlock_user(p, arg2, 0);
6760 break;
6761 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6762 case TARGET_NR_name_to_handle_at:
6763 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6764 break;
6765 #endif
6766 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6767 case TARGET_NR_open_by_handle_at:
6768 ret = do_open_by_handle_at(arg1, arg2, arg3);
6769 fd_trans_unregister(ret);
6770 break;
6771 #endif
6772 case TARGET_NR_close:
6773 fd_trans_unregister(arg1);
6774 ret = get_errno(close(arg1));
6775 break;
6776 case TARGET_NR_brk:
6777 ret = do_brk(arg1);
6778 break;
6779 #ifdef TARGET_NR_fork
6780 case TARGET_NR_fork:
6781 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6782 break;
6783 #endif
6784 #ifdef TARGET_NR_waitpid
6785 case TARGET_NR_waitpid:
6786 {
6787 int status;
6788 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6789 if (!is_error(ret) && arg2 && ret
6790 && put_user_s32(host_to_target_waitstatus(status), arg2))
6791 goto efault;
6792 }
6793 break;
6794 #endif
6795 #ifdef TARGET_NR_waitid
6796 case TARGET_NR_waitid:
6797 {
6798 siginfo_t info;
6799 info.si_pid = 0;
6800 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6801 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6802 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6803 goto efault;
6804 host_to_target_siginfo(p, &info);
6805 unlock_user(p, arg3, sizeof(target_siginfo_t));
6806 }
6807 }
6808 break;
6809 #endif
6810 #ifdef TARGET_NR_creat /* not on alpha */
6811 case TARGET_NR_creat:
6812 if (!(p = lock_user_string(arg1)))
6813 goto efault;
6814 ret = get_errno(creat(p, arg2));
6815 fd_trans_unregister(ret);
6816 unlock_user(p, arg1, 0);
6817 break;
6818 #endif
6819 #ifdef TARGET_NR_link
6820 case TARGET_NR_link:
6821 {
6822 void * p2;
6823 p = lock_user_string(arg1);
6824 p2 = lock_user_string(arg2);
6825 if (!p || !p2)
6826 ret = -TARGET_EFAULT;
6827 else
6828 ret = get_errno(link(p, p2));
6829 unlock_user(p2, arg2, 0);
6830 unlock_user(p, arg1, 0);
6831 }
6832 break;
6833 #endif
6834 #if defined(TARGET_NR_linkat)
6835 case TARGET_NR_linkat:
6836 {
6837 void * p2 = NULL;
6838 if (!arg2 || !arg4)
6839 goto efault;
6840 p = lock_user_string(arg2);
6841 p2 = lock_user_string(arg4);
6842 if (!p || !p2)
6843 ret = -TARGET_EFAULT;
6844 else
6845 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6846 unlock_user(p, arg2, 0);
6847 unlock_user(p2, arg4, 0);
6848 }
6849 break;
6850 #endif
6851 #ifdef TARGET_NR_unlink
6852 case TARGET_NR_unlink:
6853 if (!(p = lock_user_string(arg1)))
6854 goto efault;
6855 ret = get_errno(unlink(p));
6856 unlock_user(p, arg1, 0);
6857 break;
6858 #endif
6859 #if defined(TARGET_NR_unlinkat)
6860 case TARGET_NR_unlinkat:
6861 if (!(p = lock_user_string(arg2)))
6862 goto efault;
6863 ret = get_errno(unlinkat(arg1, p, arg3));
6864 unlock_user(p, arg2, 0);
6865 break;
6866 #endif
6867 case TARGET_NR_execve:
6868 {
6869 char **argp, **envp;
6870 int argc, envc;
6871 abi_ulong gp;
6872 abi_ulong guest_argp;
6873 abi_ulong guest_envp;
6874 abi_ulong addr;
6875 char **q;
6876 int total_size = 0;
6877
6878 argc = 0;
6879 guest_argp = arg2;
6880 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6881 if (get_user_ual(addr, gp))
6882 goto efault;
6883 if (!addr)
6884 break;
6885 argc++;
6886 }
6887 envc = 0;
6888 guest_envp = arg3;
6889 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6890 if (get_user_ual(addr, gp))
6891 goto efault;
6892 if (!addr)
6893 break;
6894 envc++;
6895 }
6896
6897 argp = alloca((argc + 1) * sizeof(void *));
6898 envp = alloca((envc + 1) * sizeof(void *));
6899
6900 for (gp = guest_argp, q = argp; gp;
6901 gp += sizeof(abi_ulong), q++) {
6902 if (get_user_ual(addr, gp))
6903 goto execve_efault;
6904 if (!addr)
6905 break;
6906 if (!(*q = lock_user_string(addr)))
6907 goto execve_efault;
6908 total_size += strlen(*q) + 1;
6909 }
6910 *q = NULL;
6911
6912 for (gp = guest_envp, q = envp; gp;
6913 gp += sizeof(abi_ulong), q++) {
6914 if (get_user_ual(addr, gp))
6915 goto execve_efault;
6916 if (!addr)
6917 break;
6918 if (!(*q = lock_user_string(addr)))
6919 goto execve_efault;
6920 total_size += strlen(*q) + 1;
6921 }
6922 *q = NULL;
6923
6924 if (!(p = lock_user_string(arg1)))
6925 goto execve_efault;
6926 /* Although execve() is not an interruptible syscall it is
6927 * a special case where we must use the safe_syscall wrapper:
6928 * if we allow a signal to happen before we make the host
6929 * syscall then we will 'lose' it, because at the point of
6930 * execve the process leaves QEMU's control. So we use the
6931 * safe syscall wrapper to ensure that we either take the
6932 * signal as a guest signal, or else it does not happen
6933 * before the execve completes and makes it the other
6934 * program's problem.
6935 */
6936 ret = get_errno(safe_execve(p, argp, envp));
6937 unlock_user(p, arg1, 0);
6938
6939 goto execve_end;
6940
6941 execve_efault:
6942 ret = -TARGET_EFAULT;
6943
6944 execve_end:
6945 for (gp = guest_argp, q = argp; *q;
6946 gp += sizeof(abi_ulong), q++) {
6947 if (get_user_ual(addr, gp)
6948 || !addr)
6949 break;
6950 unlock_user(*q, addr, 0);
6951 }
6952 for (gp = guest_envp, q = envp; *q;
6953 gp += sizeof(abi_ulong), q++) {
6954 if (get_user_ual(addr, gp)
6955 || !addr)
6956 break;
6957 unlock_user(*q, addr, 0);
6958 }
6959 }
6960 break;
6961 case TARGET_NR_chdir:
6962 if (!(p = lock_user_string(arg1)))
6963 goto efault;
6964 ret = get_errno(chdir(p));
6965 unlock_user(p, arg1, 0);
6966 break;
6967 #ifdef TARGET_NR_time
6968 case TARGET_NR_time:
6969 {
6970 time_t host_time;
6971 ret = get_errno(time(&host_time));
6972 if (!is_error(ret)
6973 && arg1
6974 && put_user_sal(host_time, arg1))
6975 goto efault;
6976 }
6977 break;
6978 #endif
6979 #ifdef TARGET_NR_mknod
6980 case TARGET_NR_mknod:
6981 if (!(p = lock_user_string(arg1)))
6982 goto efault;
6983 ret = get_errno(mknod(p, arg2, arg3));
6984 unlock_user(p, arg1, 0);
6985 break;
6986 #endif
6987 #if defined(TARGET_NR_mknodat)
6988 case TARGET_NR_mknodat:
6989 if (!(p = lock_user_string(arg2)))
6990 goto efault;
6991 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6992 unlock_user(p, arg2, 0);
6993 break;
6994 #endif
6995 #ifdef TARGET_NR_chmod
6996 case TARGET_NR_chmod:
6997 if (!(p = lock_user_string(arg1)))
6998 goto efault;
6999 ret = get_errno(chmod(p, arg2));
7000 unlock_user(p, arg1, 0);
7001 break;
7002 #endif
7003 #ifdef TARGET_NR_break
7004 case TARGET_NR_break:
7005 goto unimplemented;
7006 #endif
7007 #ifdef TARGET_NR_oldstat
7008 case TARGET_NR_oldstat:
7009 goto unimplemented;
7010 #endif
7011 case TARGET_NR_lseek:
7012 ret = get_errno(lseek(arg1, arg2, arg3));
7013 break;
7014 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7015 /* Alpha specific */
7016 case TARGET_NR_getxpid:
7017 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7018 ret = get_errno(getpid());
7019 break;
7020 #endif
7021 #ifdef TARGET_NR_getpid
7022 case TARGET_NR_getpid:
7023 ret = get_errno(getpid());
7024 break;
7025 #endif
7026 case TARGET_NR_mount:
7027 {
7028 /* need to look at the data field */
7029 void *p2, *p3;
7030
7031 if (arg1) {
7032 p = lock_user_string(arg1);
7033 if (!p) {
7034 goto efault;
7035 }
7036 } else {
7037 p = NULL;
7038 }
7039
7040 p2 = lock_user_string(arg2);
7041 if (!p2) {
7042 if (arg1) {
7043 unlock_user(p, arg1, 0);
7044 }
7045 goto efault;
7046 }
7047
7048 if (arg3) {
7049 p3 = lock_user_string(arg3);
7050 if (!p3) {
7051 if (arg1) {
7052 unlock_user(p, arg1, 0);
7053 }
7054 unlock_user(p2, arg2, 0);
7055 goto efault;
7056 }
7057 } else {
7058 p3 = NULL;
7059 }
7060
7061 /* FIXME - arg5 should be locked, but it isn't clear how to
7062 * do that since it's not guaranteed to be a NULL-terminated
7063 * string.
7064 */
7065 if (!arg5) {
7066 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7067 } else {
7068 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7069 }
7070 ret = get_errno(ret);
7071
7072 if (arg1) {
7073 unlock_user(p, arg1, 0);
7074 }
7075 unlock_user(p2, arg2, 0);
7076 if (arg3) {
7077 unlock_user(p3, arg3, 0);
7078 }
7079 }
7080 break;
7081 #ifdef TARGET_NR_umount
7082 case TARGET_NR_umount:
7083 if (!(p = lock_user_string(arg1)))
7084 goto efault;
7085 ret = get_errno(umount(p));
7086 unlock_user(p, arg1, 0);
7087 break;
7088 #endif
7089 #ifdef TARGET_NR_stime /* not on alpha */
7090 case TARGET_NR_stime:
7091 {
7092 time_t host_time;
7093 if (get_user_sal(host_time, arg1))
7094 goto efault;
7095 ret = get_errno(stime(&host_time));
7096 }
7097 break;
7098 #endif
7099 case TARGET_NR_ptrace:
7100 goto unimplemented;
7101 #ifdef TARGET_NR_alarm /* not on alpha */
7102 case TARGET_NR_alarm:
7103 ret = alarm(arg1);
7104 break;
7105 #endif
7106 #ifdef TARGET_NR_oldfstat
7107 case TARGET_NR_oldfstat:
7108 goto unimplemented;
7109 #endif
7110 #ifdef TARGET_NR_pause /* not on alpha */
7111 case TARGET_NR_pause:
7112 if (!block_signals()) {
7113 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7114 }
7115 ret = -TARGET_EINTR;
7116 break;
7117 #endif
7118 #ifdef TARGET_NR_utime
7119 case TARGET_NR_utime:
7120 {
7121 struct utimbuf tbuf, *host_tbuf;
7122 struct target_utimbuf *target_tbuf;
7123 if (arg2) {
7124 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7125 goto efault;
7126 tbuf.actime = tswapal(target_tbuf->actime);
7127 tbuf.modtime = tswapal(target_tbuf->modtime);
7128 unlock_user_struct(target_tbuf, arg2, 0);
7129 host_tbuf = &tbuf;
7130 } else {
7131 host_tbuf = NULL;
7132 }
7133 if (!(p = lock_user_string(arg1)))
7134 goto efault;
7135 ret = get_errno(utime(p, host_tbuf));
7136 unlock_user(p, arg1, 0);
7137 }
7138 break;
7139 #endif
7140 #ifdef TARGET_NR_utimes
7141 case TARGET_NR_utimes:
7142 {
7143 struct timeval *tvp, tv[2];
7144 if (arg2) {
7145 if (copy_from_user_timeval(&tv[0], arg2)
7146 || copy_from_user_timeval(&tv[1],
7147 arg2 + sizeof(struct target_timeval)))
7148 goto efault;
7149 tvp = tv;
7150 } else {
7151 tvp = NULL;
7152 }
7153 if (!(p = lock_user_string(arg1)))
7154 goto efault;
7155 ret = get_errno(utimes(p, tvp));
7156 unlock_user(p, arg1, 0);
7157 }
7158 break;
7159 #endif
7160 #if defined(TARGET_NR_futimesat)
7161 case TARGET_NR_futimesat:
7162 {
7163 struct timeval *tvp, tv[2];
7164 if (arg3) {
7165 if (copy_from_user_timeval(&tv[0], arg3)
7166 || copy_from_user_timeval(&tv[1],
7167 arg3 + sizeof(struct target_timeval)))
7168 goto efault;
7169 tvp = tv;
7170 } else {
7171 tvp = NULL;
7172 }
7173 if (!(p = lock_user_string(arg2)))
7174 goto efault;
7175 ret = get_errno(futimesat(arg1, path(p), tvp));
7176 unlock_user(p, arg2, 0);
7177 }
7178 break;
7179 #endif
7180 #ifdef TARGET_NR_stty
7181 case TARGET_NR_stty:
7182 goto unimplemented;
7183 #endif
7184 #ifdef TARGET_NR_gtty
7185 case TARGET_NR_gtty:
7186 goto unimplemented;
7187 #endif
7188 #ifdef TARGET_NR_access
7189 case TARGET_NR_access:
7190 if (!(p = lock_user_string(arg1)))
7191 goto efault;
7192 ret = get_errno(access(path(p), arg2));
7193 unlock_user(p, arg1, 0);
7194 break;
7195 #endif
7196 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7197 case TARGET_NR_faccessat:
7198 if (!(p = lock_user_string(arg2)))
7199 goto efault;
7200 ret = get_errno(faccessat(arg1, p, arg3, 0));
7201 unlock_user(p, arg2, 0);
7202 break;
7203 #endif
7204 #ifdef TARGET_NR_nice /* not on alpha */
7205 case TARGET_NR_nice:
7206 ret = get_errno(nice(arg1));
7207 break;
7208 #endif
7209 #ifdef TARGET_NR_ftime
7210 case TARGET_NR_ftime:
7211 goto unimplemented;
7212 #endif
7213 case TARGET_NR_sync:
7214 sync();
7215 ret = 0;
7216 break;
7217 case TARGET_NR_kill:
7218 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7219 break;
7220 #ifdef TARGET_NR_rename
7221 case TARGET_NR_rename:
7222 {
7223 void *p2;
7224 p = lock_user_string(arg1);
7225 p2 = lock_user_string(arg2);
7226 if (!p || !p2)
7227 ret = -TARGET_EFAULT;
7228 else
7229 ret = get_errno(rename(p, p2));
7230 unlock_user(p2, arg2, 0);
7231 unlock_user(p, arg1, 0);
7232 }
7233 break;
7234 #endif
7235 #if defined(TARGET_NR_renameat)
7236 case TARGET_NR_renameat:
7237 {
7238 void *p2;
7239 p = lock_user_string(arg2);
7240 p2 = lock_user_string(arg4);
7241 if (!p || !p2)
7242 ret = -TARGET_EFAULT;
7243 else
7244 ret = get_errno(renameat(arg1, p, arg3, p2));
7245 unlock_user(p2, arg4, 0);
7246 unlock_user(p, arg2, 0);
7247 }
7248 break;
7249 #endif
7250 #ifdef TARGET_NR_mkdir
7251 case TARGET_NR_mkdir:
7252 if (!(p = lock_user_string(arg1)))
7253 goto efault;
7254 ret = get_errno(mkdir(p, arg2));
7255 unlock_user(p, arg1, 0);
7256 break;
7257 #endif
7258 #if defined(TARGET_NR_mkdirat)
7259 case TARGET_NR_mkdirat:
7260 if (!(p = lock_user_string(arg2)))
7261 goto efault;
7262 ret = get_errno(mkdirat(arg1, p, arg3));
7263 unlock_user(p, arg2, 0);
7264 break;
7265 #endif
7266 #ifdef TARGET_NR_rmdir
7267 case TARGET_NR_rmdir:
7268 if (!(p = lock_user_string(arg1)))
7269 goto efault;
7270 ret = get_errno(rmdir(p));
7271 unlock_user(p, arg1, 0);
7272 break;
7273 #endif
7274 case TARGET_NR_dup:
7275 ret = get_errno(dup(arg1));
7276 if (ret >= 0) {
7277 fd_trans_dup(arg1, ret);
7278 }
7279 break;
7280 #ifdef TARGET_NR_pipe
7281 case TARGET_NR_pipe:
7282 ret = do_pipe(cpu_env, arg1, 0, 0);
7283 break;
7284 #endif
7285 #ifdef TARGET_NR_pipe2
7286 case TARGET_NR_pipe2:
7287 ret = do_pipe(cpu_env, arg1,
7288 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7289 break;
7290 #endif
7291 case TARGET_NR_times:
7292 {
7293 struct target_tms *tmsp;
7294 struct tms tms;
7295 ret = get_errno(times(&tms));
7296 if (arg1) {
7297 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7298 if (!tmsp)
7299 goto efault;
7300 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7301 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7302 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7303 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7304 }
7305 if (!is_error(ret))
7306 ret = host_to_target_clock_t(ret);
7307 }
7308 break;
7309 #ifdef TARGET_NR_prof
7310 case TARGET_NR_prof:
7311 goto unimplemented;
7312 #endif
7313 #ifdef TARGET_NR_signal
7314 case TARGET_NR_signal:
7315 goto unimplemented;
7316 #endif
7317 case TARGET_NR_acct:
7318 if (arg1 == 0) {
7319 ret = get_errno(acct(NULL));
7320 } else {
7321 if (!(p = lock_user_string(arg1)))
7322 goto efault;
7323 ret = get_errno(acct(path(p)));
7324 unlock_user(p, arg1, 0);
7325 }
7326 break;
7327 #ifdef TARGET_NR_umount2
7328 case TARGET_NR_umount2:
7329 if (!(p = lock_user_string(arg1)))
7330 goto efault;
7331 ret = get_errno(umount2(p, arg2));
7332 unlock_user(p, arg1, 0);
7333 break;
7334 #endif
7335 #ifdef TARGET_NR_lock
7336 case TARGET_NR_lock:
7337 goto unimplemented;
7338 #endif
7339 case TARGET_NR_ioctl:
7340 ret = do_ioctl(arg1, arg2, arg3);
7341 break;
7342 case TARGET_NR_fcntl:
7343 ret = do_fcntl(arg1, arg2, arg3);
7344 break;
7345 #ifdef TARGET_NR_mpx
7346 case TARGET_NR_mpx:
7347 goto unimplemented;
7348 #endif
7349 case TARGET_NR_setpgid:
7350 ret = get_errno(setpgid(arg1, arg2));
7351 break;
7352 #ifdef TARGET_NR_ulimit
7353 case TARGET_NR_ulimit:
7354 goto unimplemented;
7355 #endif
7356 #ifdef TARGET_NR_oldolduname
7357 case TARGET_NR_oldolduname:
7358 goto unimplemented;
7359 #endif
7360 case TARGET_NR_umask:
7361 ret = get_errno(umask(arg1));
7362 break;
7363 case TARGET_NR_chroot:
7364 if (!(p = lock_user_string(arg1)))
7365 goto efault;
7366 ret = get_errno(chroot(p));
7367 unlock_user(p, arg1, 0);
7368 break;
7369 #ifdef TARGET_NR_ustat
7370 case TARGET_NR_ustat:
7371 goto unimplemented;
7372 #endif
7373 #ifdef TARGET_NR_dup2
7374 case TARGET_NR_dup2:
7375 ret = get_errno(dup2(arg1, arg2));
7376 if (ret >= 0) {
7377 fd_trans_dup(arg1, arg2);
7378 }
7379 break;
7380 #endif
7381 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7382 case TARGET_NR_dup3:
7383 ret = get_errno(dup3(arg1, arg2, arg3));
7384 if (ret >= 0) {
7385 fd_trans_dup(arg1, arg2);
7386 }
7387 break;
7388 #endif
7389 #ifdef TARGET_NR_getppid /* not on alpha */
7390 case TARGET_NR_getppid:
7391 ret = get_errno(getppid());
7392 break;
7393 #endif
7394 #ifdef TARGET_NR_getpgrp
7395 case TARGET_NR_getpgrp:
7396 ret = get_errno(getpgrp());
7397 break;
7398 #endif
7399 case TARGET_NR_setsid:
7400 ret = get_errno(setsid());
7401 break;
7402 #ifdef TARGET_NR_sigaction
7403 case TARGET_NR_sigaction:
7404 {
7405 #if defined(TARGET_ALPHA)
7406 struct target_sigaction act, oact, *pact = 0;
7407 struct target_old_sigaction *old_act;
7408 if (arg2) {
7409 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7410 goto efault;
7411 act._sa_handler = old_act->_sa_handler;
7412 target_siginitset(&act.sa_mask, old_act->sa_mask);
7413 act.sa_flags = old_act->sa_flags;
7414 act.sa_restorer = 0;
7415 unlock_user_struct(old_act, arg2, 0);
7416 pact = &act;
7417 }
7418 ret = get_errno(do_sigaction(arg1, pact, &oact));
7419 if (!is_error(ret) && arg3) {
7420 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7421 goto efault;
7422 old_act->_sa_handler = oact._sa_handler;
7423 old_act->sa_mask = oact.sa_mask.sig[0];
7424 old_act->sa_flags = oact.sa_flags;
7425 unlock_user_struct(old_act, arg3, 1);
7426 }
7427 #elif defined(TARGET_MIPS)
7428 struct target_sigaction act, oact, *pact, *old_act;
7429
7430 if (arg2) {
7431 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7432 goto efault;
7433 act._sa_handler = old_act->_sa_handler;
7434 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7435 act.sa_flags = old_act->sa_flags;
7436 unlock_user_struct(old_act, arg2, 0);
7437 pact = &act;
7438 } else {
7439 pact = NULL;
7440 }
7441
7442 ret = get_errno(do_sigaction(arg1, pact, &oact));
7443
7444 if (!is_error(ret) && arg3) {
7445 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7446 goto efault;
7447 old_act->_sa_handler = oact._sa_handler;
7448 old_act->sa_flags = oact.sa_flags;
7449 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7450 old_act->sa_mask.sig[1] = 0;
7451 old_act->sa_mask.sig[2] = 0;
7452 old_act->sa_mask.sig[3] = 0;
7453 unlock_user_struct(old_act, arg3, 1);
7454 }
7455 #else
7456 struct target_old_sigaction *old_act;
7457 struct target_sigaction act, oact, *pact;
7458 if (arg2) {
7459 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7460 goto efault;
7461 act._sa_handler = old_act->_sa_handler;
7462 target_siginitset(&act.sa_mask, old_act->sa_mask);
7463 act.sa_flags = old_act->sa_flags;
7464 act.sa_restorer = old_act->sa_restorer;
7465 unlock_user_struct(old_act, arg2, 0);
7466 pact = &act;
7467 } else {
7468 pact = NULL;
7469 }
7470 ret = get_errno(do_sigaction(arg1, pact, &oact));
7471 if (!is_error(ret) && arg3) {
7472 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7473 goto efault;
7474 old_act->_sa_handler = oact._sa_handler;
7475 old_act->sa_mask = oact.sa_mask.sig[0];
7476 old_act->sa_flags = oact.sa_flags;
7477 old_act->sa_restorer = oact.sa_restorer;
7478 unlock_user_struct(old_act, arg3, 1);
7479 }
7480 #endif
7481 }
7482 break;
7483 #endif
7484 case TARGET_NR_rt_sigaction:
7485 {
7486 #if defined(TARGET_ALPHA)
7487 struct target_sigaction act, oact, *pact = 0;
7488 struct target_rt_sigaction *rt_act;
7489 /* ??? arg4 == sizeof(sigset_t). */
7490 if (arg2) {
7491 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7492 goto efault;
7493 act._sa_handler = rt_act->_sa_handler;
7494 act.sa_mask = rt_act->sa_mask;
7495 act.sa_flags = rt_act->sa_flags;
7496 act.sa_restorer = arg5;
7497 unlock_user_struct(rt_act, arg2, 0);
7498 pact = &act;
7499 }
7500 ret = get_errno(do_sigaction(arg1, pact, &oact));
7501 if (!is_error(ret) && arg3) {
7502 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7503 goto efault;
7504 rt_act->_sa_handler = oact._sa_handler;
7505 rt_act->sa_mask = oact.sa_mask;
7506 rt_act->sa_flags = oact.sa_flags;
7507 unlock_user_struct(rt_act, arg3, 1);
7508 }
7509 #else
7510 struct target_sigaction *act;
7511 struct target_sigaction *oact;
7512
7513 if (arg2) {
7514 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7515 goto efault;
7516 } else
7517 act = NULL;
7518 if (arg3) {
7519 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7520 ret = -TARGET_EFAULT;
7521 goto rt_sigaction_fail;
7522 }
7523 } else
7524 oact = NULL;
7525 ret = get_errno(do_sigaction(arg1, act, oact));
7526 rt_sigaction_fail:
7527 if (act)
7528 unlock_user_struct(act, arg2, 0);
7529 if (oact)
7530 unlock_user_struct(oact, arg3, 1);
7531 #endif
7532 }
7533 break;
7534 #ifdef TARGET_NR_sgetmask /* not on alpha */
7535 case TARGET_NR_sgetmask:
7536 {
7537 sigset_t cur_set;
7538 abi_ulong target_set;
7539 ret = do_sigprocmask(0, NULL, &cur_set);
7540 if (!ret) {
7541 host_to_target_old_sigset(&target_set, &cur_set);
7542 ret = target_set;
7543 }
7544 }
7545 break;
7546 #endif
7547 #ifdef TARGET_NR_ssetmask /* not on alpha */
7548 case TARGET_NR_ssetmask:
7549 {
7550 sigset_t set, oset, cur_set;
7551 abi_ulong target_set = arg1;
7552 /* We only have one word of the new mask so we must read
7553 * the rest of it with do_sigprocmask() and OR in this word.
7554 * We are guaranteed that a do_sigprocmask() that only queries
7555 * the signal mask will not fail.
7556 */
7557 ret = do_sigprocmask(0, NULL, &cur_set);
7558 assert(!ret);
7559 target_to_host_old_sigset(&set, &target_set);
7560 sigorset(&set, &set, &cur_set);
7561 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7562 if (!ret) {
7563 host_to_target_old_sigset(&target_set, &oset);
7564 ret = target_set;
7565 }
7566 }
7567 break;
7568 #endif
7569 #ifdef TARGET_NR_sigprocmask
7570 case TARGET_NR_sigprocmask:
7571 {
7572 #if defined(TARGET_ALPHA)
7573 sigset_t set, oldset;
7574 abi_ulong mask;
7575 int how;
7576
7577 switch (arg1) {
7578 case TARGET_SIG_BLOCK:
7579 how = SIG_BLOCK;
7580 break;
7581 case TARGET_SIG_UNBLOCK:
7582 how = SIG_UNBLOCK;
7583 break;
7584 case TARGET_SIG_SETMASK:
7585 how = SIG_SETMASK;
7586 break;
7587 default:
7588 ret = -TARGET_EINVAL;
7589 goto fail;
7590 }
7591 mask = arg2;
7592 target_to_host_old_sigset(&set, &mask);
7593
7594 ret = do_sigprocmask(how, &set, &oldset);
7595 if (!is_error(ret)) {
7596 host_to_target_old_sigset(&mask, &oldset);
7597 ret = mask;
7598 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7599 }
7600 #else
7601 sigset_t set, oldset, *set_ptr;
7602 int how;
7603
7604 if (arg2) {
7605 switch (arg1) {
7606 case TARGET_SIG_BLOCK:
7607 how = SIG_BLOCK;
7608 break;
7609 case TARGET_SIG_UNBLOCK:
7610 how = SIG_UNBLOCK;
7611 break;
7612 case TARGET_SIG_SETMASK:
7613 how = SIG_SETMASK;
7614 break;
7615 default:
7616 ret = -TARGET_EINVAL;
7617 goto fail;
7618 }
7619 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7620 goto efault;
7621 target_to_host_old_sigset(&set, p);
7622 unlock_user(p, arg2, 0);
7623 set_ptr = &set;
7624 } else {
7625 how = 0;
7626 set_ptr = NULL;
7627 }
7628 ret = do_sigprocmask(how, set_ptr, &oldset);
7629 if (!is_error(ret) && arg3) {
7630 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7631 goto efault;
7632 host_to_target_old_sigset(p, &oldset);
7633 unlock_user(p, arg3, sizeof(target_sigset_t));
7634 }
7635 #endif
7636 }
7637 break;
7638 #endif
7639 case TARGET_NR_rt_sigprocmask:
7640 {
7641 int how = arg1;
7642 sigset_t set, oldset, *set_ptr;
7643
7644 if (arg2) {
7645 switch(how) {
7646 case TARGET_SIG_BLOCK:
7647 how = SIG_BLOCK;
7648 break;
7649 case TARGET_SIG_UNBLOCK:
7650 how = SIG_UNBLOCK;
7651 break;
7652 case TARGET_SIG_SETMASK:
7653 how = SIG_SETMASK;
7654 break;
7655 default:
7656 ret = -TARGET_EINVAL;
7657 goto fail;
7658 }
7659 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7660 goto efault;
7661 target_to_host_sigset(&set, p);
7662 unlock_user(p, arg2, 0);
7663 set_ptr = &set;
7664 } else {
7665 how = 0;
7666 set_ptr = NULL;
7667 }
7668 ret = do_sigprocmask(how, set_ptr, &oldset);
7669 if (!is_error(ret) && arg3) {
7670 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7671 goto efault;
7672 host_to_target_sigset(p, &oldset);
7673 unlock_user(p, arg3, sizeof(target_sigset_t));
7674 }
7675 }
7676 break;
7677 #ifdef TARGET_NR_sigpending
7678 case TARGET_NR_sigpending:
7679 {
7680 sigset_t set;
7681 ret = get_errno(sigpending(&set));
7682 if (!is_error(ret)) {
7683 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7684 goto efault;
7685 host_to_target_old_sigset(p, &set);
7686 unlock_user(p, arg1, sizeof(target_sigset_t));
7687 }
7688 }
7689 break;
7690 #endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;
            /* Query the set of signals pending for this thread. */
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                /* Write the result back as a full target_sigset_t at arg1.
                 * NOTE(review): arg2 (sigsetsize) is not validated here.
                 */
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
7703 #ifdef TARGET_NR_sigsuspend
7704 case TARGET_NR_sigsuspend:
7705 {
7706 TaskState *ts = cpu->opaque;
7707 #if defined(TARGET_ALPHA)
7708 abi_ulong mask = arg1;
7709 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7710 #else
7711 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7712 goto efault;
7713 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7714 unlock_user(p, arg1, 0);
7715 #endif
7716 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7717 SIGSET_T_SIZE));
7718 if (ret != -TARGET_ERESTARTSYS) {
7719 ts->in_sigsuspend = 1;
7720 }
7721 }
7722 break;
7723 #endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
            /* Read the temporary mask from guest memory into the
             * TaskState, then block via the safe_ syscall wrapper so a
             * pending guest signal can interrupt the suspend cleanly.
             */
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                /* Record that we are inside sigsuspend — presumably
                 * consulted by the signal-delivery path to restore the
                 * original mask; confirm against signal.c.
                 */
                ts->in_sigsuspend = 1;
            }
        }
        break;
7738 case TARGET_NR_rt_sigtimedwait:
7739 {
7740 sigset_t set;
7741 struct timespec uts, *puts;
7742 siginfo_t uinfo;
7743
7744 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7745 goto efault;
7746 target_to_host_sigset(&set, p);
7747 unlock_user(p, arg1, 0);
7748 if (arg3) {
7749 puts = &uts;
7750 target_to_host_timespec(puts, arg3);
7751 } else {
7752 puts = NULL;
7753 }
7754 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
7755 if (!is_error(ret)) {
7756 if (arg2) {
7757 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7758 0);
7759 if (!p) {
7760 goto efault;
7761 }
7762 host_to_target_siginfo(p, &uinfo);
7763 unlock_user(p, arg2, sizeof(target_siginfo_t));
7764 }
7765 ret = host_to_target_signal(ret);
7766 }
7767 }
7768 break;
7769 case TARGET_NR_rt_sigqueueinfo:
7770 {
7771 siginfo_t uinfo;
7772 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7773 goto efault;
7774 target_to_host_siginfo(&uinfo, p);
7775 unlock_user(p, arg1, 0);
7776 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7777 }
7778 break;
7779 #ifdef TARGET_NR_sigreturn
7780 case TARGET_NR_sigreturn:
7781 if (block_signals()) {
7782 ret = -TARGET_ERESTARTSYS;
7783 } else {
7784 ret = do_sigreturn(cpu_env);
7785 }
7786 break;
7787 #endif
7788 case TARGET_NR_rt_sigreturn:
7789 if (block_signals()) {
7790 ret = -TARGET_ERESTARTSYS;
7791 } else {
7792 ret = do_rt_sigreturn(cpu_env);
7793 }
7794 break;
7795 case TARGET_NR_sethostname:
7796 if (!(p = lock_user_string(arg1)))
7797 goto efault;
7798 ret = get_errno(sethostname(p, arg2));
7799 unlock_user(p, arg1, 0);
7800 break;
7801 case TARGET_NR_setrlimit:
7802 {
7803 int resource = target_to_host_resource(arg1);
7804 struct target_rlimit *target_rlim;
7805 struct rlimit rlim;
7806 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7807 goto efault;
7808 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7809 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7810 unlock_user_struct(target_rlim, arg2, 0);
7811 ret = get_errno(setrlimit(resource, &rlim));
7812 }
7813 break;
7814 case TARGET_NR_getrlimit:
7815 {
7816 int resource = target_to_host_resource(arg1);
7817 struct target_rlimit *target_rlim;
7818 struct rlimit rlim;
7819
7820 ret = get_errno(getrlimit(resource, &rlim));
7821 if (!is_error(ret)) {
7822 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7823 goto efault;
7824 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7825 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7826 unlock_user_struct(target_rlim, arg2, 1);
7827 }
7828 }
7829 break;
7830 case TARGET_NR_getrusage:
7831 {
7832 struct rusage rusage;
7833 ret = get_errno(getrusage(arg1, &rusage));
7834 if (!is_error(ret)) {
7835 ret = host_to_target_rusage(arg2, &rusage);
7836 }
7837 }
7838 break;
7839 case TARGET_NR_gettimeofday:
7840 {
7841 struct timeval tv;
7842 ret = get_errno(gettimeofday(&tv, NULL));
7843 if (!is_error(ret)) {
7844 if (copy_to_user_timeval(arg1, &tv))
7845 goto efault;
7846 }
7847 }
7848 break;
7849 case TARGET_NR_settimeofday:
7850 {
7851 struct timeval tv, *ptv = NULL;
7852 struct timezone tz, *ptz = NULL;
7853
7854 if (arg1) {
7855 if (copy_from_user_timeval(&tv, arg1)) {
7856 goto efault;
7857 }
7858 ptv = &tv;
7859 }
7860
7861 if (arg2) {
7862 if (copy_from_user_timezone(&tz, arg2)) {
7863 goto efault;
7864 }
7865 ptz = &tz;
7866 }
7867
7868 ret = get_errno(settimeofday(ptv, ptz));
7869 }
7870 break;
7871 #if defined(TARGET_NR_select)
7872 case TARGET_NR_select:
7873 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7874 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7875 #else
7876 {
7877 struct target_sel_arg_struct *sel;
7878 abi_ulong inp, outp, exp, tvp;
7879 long nsel;
7880
7881 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7882 goto efault;
7883 nsel = tswapal(sel->n);
7884 inp = tswapal(sel->inp);
7885 outp = tswapal(sel->outp);
7886 exp = tswapal(sel->exp);
7887 tvp = tswapal(sel->tvp);
7888 unlock_user_struct(sel, arg1, 0);
7889 ret = do_select(nsel, inp, outp, exp, tvp);
7890 }
7891 #endif
7892 break;
7893 #endif
7894 #ifdef TARGET_NR_pselect6
7895 case TARGET_NR_pselect6:
7896 {
7897 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7898 fd_set rfds, wfds, efds;
7899 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7900 struct timespec ts, *ts_ptr;
7901
7902 /*
7903 * The 6th arg is actually two args smashed together,
7904 * so we cannot use the C library.
7905 */
7906 sigset_t set;
7907 struct {
7908 sigset_t *set;
7909 size_t size;
7910 } sig, *sig_ptr;
7911
7912 abi_ulong arg_sigset, arg_sigsize, *arg7;
7913 target_sigset_t *target_sigset;
7914
7915 n = arg1;
7916 rfd_addr = arg2;
7917 wfd_addr = arg3;
7918 efd_addr = arg4;
7919 ts_addr = arg5;
7920
7921 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7922 if (ret) {
7923 goto fail;
7924 }
7925 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7926 if (ret) {
7927 goto fail;
7928 }
7929 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7930 if (ret) {
7931 goto fail;
7932 }
7933
7934 /*
7935 * This takes a timespec, and not a timeval, so we cannot
7936 * use the do_select() helper ...
7937 */
7938 if (ts_addr) {
7939 if (target_to_host_timespec(&ts, ts_addr)) {
7940 goto efault;
7941 }
7942 ts_ptr = &ts;
7943 } else {
7944 ts_ptr = NULL;
7945 }
7946
7947 /* Extract the two packed args for the sigset */
7948 if (arg6) {
7949 sig_ptr = &sig;
7950 sig.size = SIGSET_T_SIZE;
7951
7952 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7953 if (!arg7) {
7954 goto efault;
7955 }
7956 arg_sigset = tswapal(arg7[0]);
7957 arg_sigsize = tswapal(arg7[1]);
7958 unlock_user(arg7, arg6, 0);
7959
7960 if (arg_sigset) {
7961 sig.set = &set;
7962 if (arg_sigsize != sizeof(*target_sigset)) {
7963 /* Like the kernel, we enforce correct size sigsets */
7964 ret = -TARGET_EINVAL;
7965 goto fail;
7966 }
7967 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7968 sizeof(*target_sigset), 1);
7969 if (!target_sigset) {
7970 goto efault;
7971 }
7972 target_to_host_sigset(&set, target_sigset);
7973 unlock_user(target_sigset, arg_sigset, 0);
7974 } else {
7975 sig.set = NULL;
7976 }
7977 } else {
7978 sig_ptr = NULL;
7979 }
7980
7981 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7982 ts_ptr, sig_ptr));
7983
7984 if (!is_error(ret)) {
7985 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7986 goto efault;
7987 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7988 goto efault;
7989 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7990 goto efault;
7991
7992 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7993 goto efault;
7994 }
7995 }
7996 break;
7997 #endif
7998 #ifdef TARGET_NR_symlink
7999 case TARGET_NR_symlink:
8000 {
8001 void *p2;
8002 p = lock_user_string(arg1);
8003 p2 = lock_user_string(arg2);
8004 if (!p || !p2)
8005 ret = -TARGET_EFAULT;
8006 else
8007 ret = get_errno(symlink(p, p2));
8008 unlock_user(p2, arg2, 0);
8009 unlock_user(p, arg1, 0);
8010 }
8011 break;
8012 #endif
8013 #if defined(TARGET_NR_symlinkat)
8014 case TARGET_NR_symlinkat:
8015 {
8016 void *p2;
8017 p = lock_user_string(arg1);
8018 p2 = lock_user_string(arg3);
8019 if (!p || !p2)
8020 ret = -TARGET_EFAULT;
8021 else
8022 ret = get_errno(symlinkat(p, arg2, p2));
8023 unlock_user(p2, arg3, 0);
8024 unlock_user(p, arg1, 0);
8025 }
8026 break;
8027 #endif
8028 #ifdef TARGET_NR_oldlstat
8029 case TARGET_NR_oldlstat:
8030 goto unimplemented;
8031 #endif
8032 #ifdef TARGET_NR_readlink
8033 case TARGET_NR_readlink:
8034 {
8035 void *p2;
8036 p = lock_user_string(arg1);
8037 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8038 if (!p || !p2) {
8039 ret = -TARGET_EFAULT;
8040 } else if (!arg3) {
8041 /* Short circuit this for the magic exe check. */
8042 ret = -TARGET_EINVAL;
8043 } else if (is_proc_myself((const char *)p, "exe")) {
8044 char real[PATH_MAX], *temp;
8045 temp = realpath(exec_path, real);
8046 /* Return value is # of bytes that we wrote to the buffer. */
8047 if (temp == NULL) {
8048 ret = get_errno(-1);
8049 } else {
8050 /* Don't worry about sign mismatch as earlier mapping
8051 * logic would have thrown a bad address error. */
8052 ret = MIN(strlen(real), arg3);
8053 /* We cannot NUL terminate the string. */
8054 memcpy(p2, real, ret);
8055 }
8056 } else {
8057 ret = get_errno(readlink(path(p), p2, arg3));
8058 }
8059 unlock_user(p2, arg2, ret);
8060 unlock_user(p, arg1, 0);
8061 }
8062 break;
8063 #endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check
                 * (matches the TARGET_NR_readlink arm). */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    /* Fix: the old code fed 'real' to snprintf even on
                     * realpath() failure, when it is uninitialized. */
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg4);
                    /* Fix: readlink(at) must not NUL-terminate; the old
                     * snprintf() did, and also mis-reported the length
                     * on truncation. Mirror the TARGET_NR_readlink arm. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
8085 #ifdef TARGET_NR_uselib
8086 case TARGET_NR_uselib:
8087 goto unimplemented;
8088 #endif
8089 #ifdef TARGET_NR_swapon
8090 case TARGET_NR_swapon:
8091 if (!(p = lock_user_string(arg1)))
8092 goto efault;
8093 ret = get_errno(swapon(p, arg2));
8094 unlock_user(p, arg1, 0);
8095 break;
8096 #endif
8097 case TARGET_NR_reboot:
8098 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8099 /* arg4 must be ignored in all other cases */
8100 p = lock_user_string(arg4);
8101 if (!p) {
8102 goto efault;
8103 }
8104 ret = get_errno(reboot(arg1, arg2, arg3, p));
8105 unlock_user(p, arg4, 0);
8106 } else {
8107 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8108 }
8109 break;
8110 #ifdef TARGET_NR_readdir
8111 case TARGET_NR_readdir:
8112 goto unimplemented;
8113 #endif
8114 #ifdef TARGET_NR_mmap
8115 case TARGET_NR_mmap:
8116 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8117 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8118 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8119 || defined(TARGET_S390X)
8120 {
8121 abi_ulong *v;
8122 abi_ulong v1, v2, v3, v4, v5, v6;
8123 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8124 goto efault;
8125 v1 = tswapal(v[0]);
8126 v2 = tswapal(v[1]);
8127 v3 = tswapal(v[2]);
8128 v4 = tswapal(v[3]);
8129 v5 = tswapal(v[4]);
8130 v6 = tswapal(v[5]);
8131 unlock_user(v, arg1, 0);
8132 ret = get_errno(target_mmap(v1, v2, v3,
8133 target_to_host_bitmask(v4, mmap_flags_tbl),
8134 v5, v6));
8135 }
8136 #else
8137 ret = get_errno(target_mmap(arg1, arg2, arg3,
8138 target_to_host_bitmask(arg4, mmap_flags_tbl),
8139 arg5,
8140 arg6));
8141 #endif
8142 break;
8143 #endif
8144 #ifdef TARGET_NR_mmap2
8145 case TARGET_NR_mmap2:
8146 #ifndef MMAP_SHIFT
8147 #define MMAP_SHIFT 12
8148 #endif
8149 ret = get_errno(target_mmap(arg1, arg2, arg3,
8150 target_to_host_bitmask(arg4, mmap_flags_tbl),
8151 arg5,
8152 arg6 << MMAP_SHIFT));
8153 break;
8154 #endif
8155 case TARGET_NR_munmap:
8156 ret = get_errno(target_munmap(arg1, arg2));
8157 break;
8158 case TARGET_NR_mprotect:
8159 {
8160 TaskState *ts = cpu->opaque;
8161 /* Special hack to detect libc making the stack executable. */
8162 if ((arg3 & PROT_GROWSDOWN)
8163 && arg1 >= ts->info->stack_limit
8164 && arg1 <= ts->info->start_stack) {
8165 arg3 &= ~PROT_GROWSDOWN;
8166 arg2 = arg2 + arg1 - ts->info->stack_limit;
8167 arg1 = ts->info->stack_limit;
8168 }
8169 }
8170 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8171 break;
8172 #ifdef TARGET_NR_mremap
8173 case TARGET_NR_mremap:
8174 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8175 break;
8176 #endif
8177 /* ??? msync/mlock/munlock are broken for softmmu. */
8178 #ifdef TARGET_NR_msync
8179 case TARGET_NR_msync:
8180 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8181 break;
8182 #endif
8183 #ifdef TARGET_NR_mlock
8184 case TARGET_NR_mlock:
8185 ret = get_errno(mlock(g2h(arg1), arg2));
8186 break;
8187 #endif
8188 #ifdef TARGET_NR_munlock
8189 case TARGET_NR_munlock:
8190 ret = get_errno(munlock(g2h(arg1), arg2));
8191 break;
8192 #endif
8193 #ifdef TARGET_NR_mlockall
8194 case TARGET_NR_mlockall:
8195 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8196 break;
8197 #endif
8198 #ifdef TARGET_NR_munlockall
8199 case TARGET_NR_munlockall:
8200 ret = get_errno(munlockall());
8201 break;
8202 #endif
8203 case TARGET_NR_truncate:
8204 if (!(p = lock_user_string(arg1)))
8205 goto efault;
8206 ret = get_errno(truncate(p, arg2));
8207 unlock_user(p, arg1, 0);
8208 break;
8209 case TARGET_NR_ftruncate:
8210 ret = get_errno(ftruncate(arg1, arg2));
8211 break;
8212 case TARGET_NR_fchmod:
8213 ret = get_errno(fchmod(arg1, arg2));
8214 break;
8215 #if defined(TARGET_NR_fchmodat)
8216 case TARGET_NR_fchmodat:
8217 if (!(p = lock_user_string(arg2)))
8218 goto efault;
8219 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8220 unlock_user(p, arg2, 0);
8221 break;
8222 #endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
            break;
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority. Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers:
         * 20 - prio maps [-20, 19] onto [1, 40], keeping the syscall
         * return non-negative (the libc wrapper undoes this bias). */
        ret = 20 - ret;
#endif
        break;
8240 case TARGET_NR_setpriority:
8241 ret = get_errno(setpriority(arg1, arg2, arg3));
8242 break;
8243 #ifdef TARGET_NR_profil
8244 case TARGET_NR_profil:
8245 goto unimplemented;
8246 #endif
8247 case TARGET_NR_statfs:
8248 if (!(p = lock_user_string(arg1)))
8249 goto efault;
8250 ret = get_errno(statfs(path(p), &stfs));
8251 unlock_user(p, arg1, 0);
8252 convert_statfs:
8253 if (!is_error(ret)) {
8254 struct target_statfs *target_stfs;
8255
8256 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8257 goto efault;
8258 __put_user(stfs.f_type, &target_stfs->f_type);
8259 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8260 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8261 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8262 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8263 __put_user(stfs.f_files, &target_stfs->f_files);
8264 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8265 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8266 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8267 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8268 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8269 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8270 unlock_user_struct(target_stfs, arg2, 1);
8271 }
8272 break;
8273 case TARGET_NR_fstatfs:
8274 ret = get_errno(fstatfs(arg1, &stfs));
8275 goto convert_statfs;
8276 #ifdef TARGET_NR_statfs64
8277 case TARGET_NR_statfs64:
8278 if (!(p = lock_user_string(arg1)))
8279 goto efault;
8280 ret = get_errno(statfs(path(p), &stfs));
8281 unlock_user(p, arg1, 0);
8282 convert_statfs64:
8283 if (!is_error(ret)) {
8284 struct target_statfs64 *target_stfs;
8285
8286 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8287 goto efault;
8288 __put_user(stfs.f_type, &target_stfs->f_type);
8289 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8290 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8291 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8292 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8293 __put_user(stfs.f_files, &target_stfs->f_files);
8294 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8295 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8296 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8297 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8298 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8299 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8300 unlock_user_struct(target_stfs, arg3, 1);
8301 }
8302 break;
8303 case TARGET_NR_fstatfs64:
8304 ret = get_errno(fstatfs(arg1, &stfs));
8305 goto convert_statfs64;
8306 #endif
8307 #ifdef TARGET_NR_ioperm
8308 case TARGET_NR_ioperm:
8309 goto unimplemented;
8310 #endif
8311 #ifdef TARGET_NR_socketcall
8312 case TARGET_NR_socketcall:
8313 ret = do_socketcall(arg1, arg2);
8314 break;
8315 #endif
8316 #ifdef TARGET_NR_accept
8317 case TARGET_NR_accept:
8318 ret = do_accept4(arg1, arg2, arg3, 0);
8319 break;
8320 #endif
8321 #ifdef TARGET_NR_accept4
8322 case TARGET_NR_accept4:
8323 #ifdef CONFIG_ACCEPT4
8324 ret = do_accept4(arg1, arg2, arg3, arg4);
8325 #else
8326 goto unimplemented;
8327 #endif
8328 break;
8329 #endif
8330 #ifdef TARGET_NR_bind
8331 case TARGET_NR_bind:
8332 ret = do_bind(arg1, arg2, arg3);
8333 break;
8334 #endif
8335 #ifdef TARGET_NR_connect
8336 case TARGET_NR_connect:
8337 ret = do_connect(arg1, arg2, arg3);
8338 break;
8339 #endif
8340 #ifdef TARGET_NR_getpeername
8341 case TARGET_NR_getpeername:
8342 ret = do_getpeername(arg1, arg2, arg3);
8343 break;
8344 #endif
8345 #ifdef TARGET_NR_getsockname
8346 case TARGET_NR_getsockname:
8347 ret = do_getsockname(arg1, arg2, arg3);
8348 break;
8349 #endif
8350 #ifdef TARGET_NR_getsockopt
8351 case TARGET_NR_getsockopt:
8352 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8353 break;
8354 #endif
8355 #ifdef TARGET_NR_listen
8356 case TARGET_NR_listen:
8357 ret = get_errno(listen(arg1, arg2));
8358 break;
8359 #endif
8360 #ifdef TARGET_NR_recv
8361 case TARGET_NR_recv:
8362 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8363 break;
8364 #endif
8365 #ifdef TARGET_NR_recvfrom
8366 case TARGET_NR_recvfrom:
8367 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8368 break;
8369 #endif
8370 #ifdef TARGET_NR_recvmsg
8371 case TARGET_NR_recvmsg:
8372 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8373 break;
8374 #endif
8375 #ifdef TARGET_NR_send
8376 case TARGET_NR_send:
8377 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8378 break;
8379 #endif
8380 #ifdef TARGET_NR_sendmsg
8381 case TARGET_NR_sendmsg:
8382 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8383 break;
8384 #endif
8385 #ifdef TARGET_NR_sendmmsg
8386 case TARGET_NR_sendmmsg:
8387 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8388 break;
8389 case TARGET_NR_recvmmsg:
8390 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8391 break;
8392 #endif
8393 #ifdef TARGET_NR_sendto
8394 case TARGET_NR_sendto:
8395 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8396 break;
8397 #endif
8398 #ifdef TARGET_NR_shutdown
8399 case TARGET_NR_shutdown:
8400 ret = get_errno(shutdown(arg1, arg2));
8401 break;
8402 #endif
8403 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8404 case TARGET_NR_getrandom:
8405 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8406 if (!p) {
8407 goto efault;
8408 }
8409 ret = get_errno(getrandom(p, arg2, arg3));
8410 unlock_user(p, arg1, ret);
8411 break;
8412 #endif
8413 #ifdef TARGET_NR_socket
8414 case TARGET_NR_socket:
8415 ret = do_socket(arg1, arg2, arg3);
8416 fd_trans_unregister(ret);
8417 break;
8418 #endif
8419 #ifdef TARGET_NR_socketpair
8420 case TARGET_NR_socketpair:
8421 ret = do_socketpair(arg1, arg2, arg3, arg4);
8422 break;
8423 #endif
8424 #ifdef TARGET_NR_setsockopt
8425 case TARGET_NR_setsockopt:
8426 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8427 break;
8428 #endif
8429
8430 case TARGET_NR_syslog:
8431 if (!(p = lock_user_string(arg2)))
8432 goto efault;
8433 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8434 unlock_user(p, arg2, 0);
8435 break;
8436
8437 case TARGET_NR_setitimer:
8438 {
8439 struct itimerval value, ovalue, *pvalue;
8440
8441 if (arg2) {
8442 pvalue = &value;
8443 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8444 || copy_from_user_timeval(&pvalue->it_value,
8445 arg2 + sizeof(struct target_timeval)))
8446 goto efault;
8447 } else {
8448 pvalue = NULL;
8449 }
8450 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8451 if (!is_error(ret) && arg3) {
8452 if (copy_to_user_timeval(arg3,
8453 &ovalue.it_interval)
8454 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8455 &ovalue.it_value))
8456 goto efault;
8457 }
8458 }
8459 break;
8460 case TARGET_NR_getitimer:
8461 {
8462 struct itimerval value;
8463
8464 ret = get_errno(getitimer(arg1, &value));
8465 if (!is_error(ret) && arg2) {
8466 if (copy_to_user_timeval(arg2,
8467 &value.it_interval)
8468 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8469 &value.it_value))
8470 goto efault;
8471 }
8472 }
8473 break;
8474 #ifdef TARGET_NR_stat
8475 case TARGET_NR_stat:
8476 if (!(p = lock_user_string(arg1)))
8477 goto efault;
8478 ret = get_errno(stat(path(p), &st));
8479 unlock_user(p, arg1, 0);
8480 goto do_stat;
8481 #endif
8482 #ifdef TARGET_NR_lstat
8483 case TARGET_NR_lstat:
8484 if (!(p = lock_user_string(arg1)))
8485 goto efault;
8486 ret = get_errno(lstat(path(p), &st));
8487 unlock_user(p, arg1, 0);
8488 goto do_stat;
8489 #endif
8490 case TARGET_NR_fstat:
8491 {
8492 ret = get_errno(fstat(arg1, &st));
8493 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8494 do_stat:
8495 #endif
8496 if (!is_error(ret)) {
8497 struct target_stat *target_st;
8498
8499 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8500 goto efault;
8501 memset(target_st, 0, sizeof(*target_st));
8502 __put_user(st.st_dev, &target_st->st_dev);
8503 __put_user(st.st_ino, &target_st->st_ino);
8504 __put_user(st.st_mode, &target_st->st_mode);
8505 __put_user(st.st_uid, &target_st->st_uid);
8506 __put_user(st.st_gid, &target_st->st_gid);
8507 __put_user(st.st_nlink, &target_st->st_nlink);
8508 __put_user(st.st_rdev, &target_st->st_rdev);
8509 __put_user(st.st_size, &target_st->st_size);
8510 __put_user(st.st_blksize, &target_st->st_blksize);
8511 __put_user(st.st_blocks, &target_st->st_blocks);
8512 __put_user(st.st_atime, &target_st->target_st_atime);
8513 __put_user(st.st_mtime, &target_st->target_st_mtime);
8514 __put_user(st.st_ctime, &target_st->target_st_ctime);
8515 unlock_user_struct(target_st, arg2, 1);
8516 }
8517 }
8518 break;
8519 #ifdef TARGET_NR_olduname
8520 case TARGET_NR_olduname:
8521 goto unimplemented;
8522 #endif
8523 #ifdef TARGET_NR_iopl
8524 case TARGET_NR_iopl:
8525 goto unimplemented;
8526 #endif
8527 case TARGET_NR_vhangup:
8528 ret = get_errno(vhangup());
8529 break;
8530 #ifdef TARGET_NR_idle
8531 case TARGET_NR_idle:
8532 goto unimplemented;
8533 #endif
8534 #ifdef TARGET_NR_syscall
8535 case TARGET_NR_syscall:
8536 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8537 arg6, arg7, arg8, 0);
8538 break;
8539 #endif
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* Only ask the host for rusage when the guest wants it. */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            /* safe_wait4 so a pending guest signal can interrupt the wait. */
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status back when a child actually changed
                 * state (ret != 0) and the guest supplied a pointer. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        goto efault;
                }
                if (target_rusage) {
                    /* A failed rusage copy-out overrides the pid result
                     * with the error code. */
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        break;
8567 #ifdef TARGET_NR_swapoff
8568 case TARGET_NR_swapoff:
8569 if (!(p = lock_user_string(arg1)))
8570 goto efault;
8571 ret = get_errno(swapoff(p));
8572 unlock_user(p, arg1, 0);
8573 break;
8574 #endif
8575 case TARGET_NR_sysinfo:
8576 {
8577 struct target_sysinfo *target_value;
8578 struct sysinfo value;
8579 ret = get_errno(sysinfo(&value));
8580 if (!is_error(ret) && arg1)
8581 {
8582 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8583 goto efault;
8584 __put_user(value.uptime, &target_value->uptime);
8585 __put_user(value.loads[0], &target_value->loads[0]);
8586 __put_user(value.loads[1], &target_value->loads[1]);
8587 __put_user(value.loads[2], &target_value->loads[2]);
8588 __put_user(value.totalram, &target_value->totalram);
8589 __put_user(value.freeram, &target_value->freeram);
8590 __put_user(value.sharedram, &target_value->sharedram);
8591 __put_user(value.bufferram, &target_value->bufferram);
8592 __put_user(value.totalswap, &target_value->totalswap);
8593 __put_user(value.freeswap, &target_value->freeswap);
8594 __put_user(value.procs, &target_value->procs);
8595 __put_user(value.totalhigh, &target_value->totalhigh);
8596 __put_user(value.freehigh, &target_value->freehigh);
8597 __put_user(value.mem_unit, &target_value->mem_unit);
8598 unlock_user_struct(target_value, arg1, 1);
8599 }
8600 }
8601 break;
8602 #ifdef TARGET_NR_ipc
8603 case TARGET_NR_ipc:
8604 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8605 break;
8606 #endif
8607 #ifdef TARGET_NR_semget
8608 case TARGET_NR_semget:
8609 ret = get_errno(semget(arg1, arg2, arg3));
8610 break;
8611 #endif
8612 #ifdef TARGET_NR_semop
8613 case TARGET_NR_semop:
8614 ret = do_semop(arg1, arg2, arg3);
8615 break;
8616 #endif
8617 #ifdef TARGET_NR_semctl
8618 case TARGET_NR_semctl:
8619 ret = do_semctl(arg1, arg2, arg3, arg4);
8620 break;
8621 #endif
8622 #ifdef TARGET_NR_msgctl
8623 case TARGET_NR_msgctl:
8624 ret = do_msgctl(arg1, arg2, arg3);
8625 break;
8626 #endif
8627 #ifdef TARGET_NR_msgget
8628 case TARGET_NR_msgget:
8629 ret = get_errno(msgget(arg1, arg2));
8630 break;
8631 #endif
8632 #ifdef TARGET_NR_msgrcv
8633 case TARGET_NR_msgrcv:
8634 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8635 break;
8636 #endif
8637 #ifdef TARGET_NR_msgsnd
8638 case TARGET_NR_msgsnd:
8639 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8640 break;
8641 #endif
8642 #ifdef TARGET_NR_shmget
8643 case TARGET_NR_shmget:
8644 ret = get_errno(shmget(arg1, arg2, arg3));
8645 break;
8646 #endif
8647 #ifdef TARGET_NR_shmctl
8648 case TARGET_NR_shmctl:
8649 ret = do_shmctl(arg1, arg2, arg3);
8650 break;
8651 #endif
8652 #ifdef TARGET_NR_shmat
8653 case TARGET_NR_shmat:
8654 ret = do_shmat(arg1, arg2, arg3);
8655 break;
8656 #endif
8657 #ifdef TARGET_NR_shmdt
8658 case TARGET_NR_shmdt:
8659 ret = do_shmdt(arg1);
8660 break;
8661 #endif
8662 case TARGET_NR_fsync:
8663 ret = get_errno(fsync(arg1));
8664 break;
8665 case TARGET_NR_clone:
8666 /* Linux manages to have three different orderings for its
8667 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8668 * match the kernel's CONFIG_CLONE_* settings.
8669 * Microblaze is further special in that it uses a sixth
8670 * implicit argument to clone for the TLS pointer.
8671 */
8672 #if defined(TARGET_MICROBLAZE)
8673 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8674 #elif defined(TARGET_CLONE_BACKWARDS)
8675 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8676 #elif defined(TARGET_CLONE_BACKWARDS2)
8677 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8678 #else
8679 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8680 #endif
8681 break;
8682 #ifdef __NR_exit_group
8683 /* new thread calls */
8684 case TARGET_NR_exit_group:
8685 #ifdef TARGET_GPROF
8686 _mcleanup();
8687 #endif
8688 gdb_exit(cpu_env, arg1);
8689 ret = get_errno(exit_group(arg1));
8690 break;
8691 #endif
8692 case TARGET_NR_setdomainname:
8693 if (!(p = lock_user_string(arg1)))
8694 goto efault;
8695 ret = get_errno(setdomainname(p, arg2));
8696 unlock_user(p, arg1, 0);
8697 break;
8698 case TARGET_NR_uname:
8699 /* no need to transcode because we use the linux syscall */
8700 {
8701 struct new_utsname * buf;
8702
8703 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8704 goto efault;
8705 ret = get_errno(sys_uname(buf));
8706 if (!is_error(ret)) {
8707 /* Overwrite the native machine name with whatever is being
8708 emulated. */
8709 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8710 /* Allow the user to override the reported release. */
8711 if (qemu_uname_release && *qemu_uname_release)
8712 strcpy (buf->release, qemu_uname_release);
8713 }
8714 unlock_user_struct(buf, arg1, 1);
8715 }
8716 break;
8717 #ifdef TARGET_I386
8718 case TARGET_NR_modify_ldt:
8719 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8720 break;
8721 #if !defined(TARGET_X86_64)
8722 case TARGET_NR_vm86old:
8723 goto unimplemented;
8724 case TARGET_NR_vm86:
8725 ret = do_vm86(cpu_env, arg1, arg2);
8726 break;
8727 #endif
8728 #endif
8729 case TARGET_NR_adjtimex:
8730 goto unimplemented;
8731 #ifdef TARGET_NR_create_module
8732 case TARGET_NR_create_module:
8733 #endif
8734 case TARGET_NR_init_module:
8735 case TARGET_NR_delete_module:
8736 #ifdef TARGET_NR_get_kernel_syms
8737 case TARGET_NR_get_kernel_syms:
8738 #endif
8739 goto unimplemented;
8740 case TARGET_NR_quotactl:
8741 goto unimplemented;
8742 case TARGET_NR_getpgid:
8743 ret = get_errno(getpgid(arg1));
8744 break;
8745 case TARGET_NR_fchdir:
8746 ret = get_errno(fchdir(arg1));
8747 break;
8748 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8749 case TARGET_NR_bdflush:
8750 goto unimplemented;
8751 #endif
8752 #ifdef TARGET_NR_sysfs
8753 case TARGET_NR_sysfs:
8754 goto unimplemented;
8755 #endif
8756 case TARGET_NR_personality:
8757 ret = get_errno(personality(arg1));
8758 break;
8759 #ifdef TARGET_NR_afs_syscall
8760 case TARGET_NR_afs_syscall:
8761 goto unimplemented;
8762 #endif
8763 #ifdef TARGET_NR__llseek /* Not on alpha */
8764 case TARGET_NR__llseek:
8765 {
8766 int64_t res;
8767 #if !defined(__NR_llseek)
8768 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8769 if (res == -1) {
8770 ret = get_errno(res);
8771 } else {
8772 ret = 0;
8773 }
8774 #else
8775 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8776 #endif
8777 if ((ret == 0) && put_user_s64(res, arg4)) {
8778 goto efault;
8779 }
8780 }
8781 break;
8782 #endif
8783 #ifdef TARGET_NR_getdents
8784 case TARGET_NR_getdents:
8785 #ifdef __NR_getdents
8786 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8787 {
8788 struct target_dirent *target_dirp;
8789 struct linux_dirent *dirp;
8790 abi_long count = arg3;
8791
8792 dirp = g_try_malloc(count);
8793 if (!dirp) {
8794 ret = -TARGET_ENOMEM;
8795 goto fail;
8796 }
8797
8798 ret = get_errno(sys_getdents(arg1, dirp, count));
8799 if (!is_error(ret)) {
8800 struct linux_dirent *de;
8801 struct target_dirent *tde;
8802 int len = ret;
8803 int reclen, treclen;
8804 int count1, tnamelen;
8805
8806 count1 = 0;
8807 de = dirp;
8808 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8809 goto efault;
8810 tde = target_dirp;
8811 while (len > 0) {
8812 reclen = de->d_reclen;
8813 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8814 assert(tnamelen >= 0);
8815 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8816 assert(count1 + treclen <= count);
8817 tde->d_reclen = tswap16(treclen);
8818 tde->d_ino = tswapal(de->d_ino);
8819 tde->d_off = tswapal(de->d_off);
8820 memcpy(tde->d_name, de->d_name, tnamelen);
8821 de = (struct linux_dirent *)((char *)de + reclen);
8822 len -= reclen;
8823 tde = (struct target_dirent *)((char *)tde + treclen);
8824 count1 += treclen;
8825 }
8826 ret = count1;
8827 unlock_user(target_dirp, arg2, ret);
8828 }
8829 g_free(dirp);
8830 }
8831 #else
8832 {
8833 struct linux_dirent *dirp;
8834 abi_long count = arg3;
8835
8836 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8837 goto efault;
8838 ret = get_errno(sys_getdents(arg1, dirp, count));
8839 if (!is_error(ret)) {
8840 struct linux_dirent *de;
8841 int len = ret;
8842 int reclen;
8843 de = dirp;
8844 while (len > 0) {
8845 reclen = de->d_reclen;
8846 if (reclen > len)
8847 break;
8848 de->d_reclen = tswap16(reclen);
8849 tswapls(&de->d_ino);
8850 tswapls(&de->d_off);
8851 de = (struct linux_dirent *)((char *)de + reclen);
8852 len -= reclen;
8853 }
8854 }
8855 unlock_user(dirp, arg2, ret);
8856 }
8857 #endif
8858 #else
8859 /* Implement getdents in terms of getdents64 */
8860 {
8861 struct linux_dirent64 *dirp;
8862 abi_long count = arg3;
8863
8864 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8865 if (!dirp) {
8866 goto efault;
8867 }
8868 ret = get_errno(sys_getdents64(arg1, dirp, count));
8869 if (!is_error(ret)) {
8870 /* Convert the dirent64 structs to target dirent. We do this
8871 * in-place, since we can guarantee that a target_dirent is no
8872 * larger than a dirent64; however this means we have to be
8873 * careful to read everything before writing in the new format.
8874 */
8875 struct linux_dirent64 *de;
8876 struct target_dirent *tde;
8877 int len = ret;
8878 int tlen = 0;
8879
8880 de = dirp;
8881 tde = (struct target_dirent *)dirp;
8882 while (len > 0) {
8883 int namelen, treclen;
8884 int reclen = de->d_reclen;
8885 uint64_t ino = de->d_ino;
8886 int64_t off = de->d_off;
8887 uint8_t type = de->d_type;
8888
8889 namelen = strlen(de->d_name);
8890 treclen = offsetof(struct target_dirent, d_name)
8891 + namelen + 2;
8892 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8893
8894 memmove(tde->d_name, de->d_name, namelen + 1);
8895 tde->d_ino = tswapal(ino);
8896 tde->d_off = tswapal(off);
8897 tde->d_reclen = tswap16(treclen);
8898 /* The target_dirent type is in what was formerly a padding
8899 * byte at the end of the structure:
8900 */
8901 *(((char *)tde) + treclen - 1) = type;
8902
8903 de = (struct linux_dirent64 *)((char *)de + reclen);
8904 tde = (struct target_dirent *)((char *)tde + treclen);
8905 len -= reclen;
8906 tlen += treclen;
8907 }
8908 ret = tlen;
8909 }
8910 unlock_user(dirp, arg2, ret);
8911 }
8912 #endif
8913 break;
8914 #endif /* TARGET_NR_getdents */
8915 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8916 case TARGET_NR_getdents64:
8917 {
8918 struct linux_dirent64 *dirp;
8919 abi_long count = arg3;
8920 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8921 goto efault;
8922 ret = get_errno(sys_getdents64(arg1, dirp, count));
8923 if (!is_error(ret)) {
8924 struct linux_dirent64 *de;
8925 int len = ret;
8926 int reclen;
8927 de = dirp;
8928 while (len > 0) {
8929 reclen = de->d_reclen;
8930 if (reclen > len)
8931 break;
8932 de->d_reclen = tswap16(reclen);
8933 tswap64s((uint64_t *)&de->d_ino);
8934 tswap64s((uint64_t *)&de->d_off);
8935 de = (struct linux_dirent64 *)((char *)de + reclen);
8936 len -= reclen;
8937 }
8938 }
8939 unlock_user(dirp, arg2, ret);
8940 }
8941 break;
8942 #endif /* TARGET_NR_getdents64 */
8943 #if defined(TARGET_NR__newselect)
8944 case TARGET_NR__newselect:
8945 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8946 break;
8947 #endif
8948 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8949 # ifdef TARGET_NR_poll
8950 case TARGET_NR_poll:
8951 # endif
8952 # ifdef TARGET_NR_ppoll
8953 case TARGET_NR_ppoll:
8954 # endif
8955 {
8956 struct target_pollfd *target_pfd;
8957 unsigned int nfds = arg2;
8958 int timeout = arg3;
8959 struct pollfd *pfd;
8960 unsigned int i;
8961
8962 pfd = NULL;
8963 target_pfd = NULL;
8964 if (nfds) {
8965 target_pfd = lock_user(VERIFY_WRITE, arg1,
8966 sizeof(struct target_pollfd) * nfds, 1);
8967 if (!target_pfd) {
8968 goto efault;
8969 }
8970
8971 pfd = alloca(sizeof(struct pollfd) * nfds);
8972 for (i = 0; i < nfds; i++) {
8973 pfd[i].fd = tswap32(target_pfd[i].fd);
8974 pfd[i].events = tswap16(target_pfd[i].events);
8975 }
8976 }
8977
8978 # ifdef TARGET_NR_ppoll
8979 if (num == TARGET_NR_ppoll) {
8980 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8981 target_sigset_t *target_set;
8982 sigset_t _set, *set = &_set;
8983
8984 if (arg3) {
8985 if (target_to_host_timespec(timeout_ts, arg3)) {
8986 unlock_user(target_pfd, arg1, 0);
8987 goto efault;
8988 }
8989 } else {
8990 timeout_ts = NULL;
8991 }
8992
8993 if (arg4) {
8994 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8995 if (!target_set) {
8996 unlock_user(target_pfd, arg1, 0);
8997 goto efault;
8998 }
8999 target_to_host_sigset(set, target_set);
9000 } else {
9001 set = NULL;
9002 }
9003
9004 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts,
9005 set, SIGSET_T_SIZE));
9006
9007 if (!is_error(ret) && arg3) {
9008 host_to_target_timespec(arg3, timeout_ts);
9009 }
9010 if (arg4) {
9011 unlock_user(target_set, arg4, 0);
9012 }
9013 } else
9014 # endif
9015 ret = get_errno(poll(pfd, nfds, timeout));
9016
9017 if (!is_error(ret)) {
9018 for(i = 0; i < nfds; i++) {
9019 target_pfd[i].revents = tswap16(pfd[i].revents);
9020 }
9021 }
9022 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9023 }
9024 break;
9025 #endif
9026 case TARGET_NR_flock:
9027 /* NOTE: the flock constant seems to be the same for every
9028 Linux platform */
9029 ret = get_errno(safe_flock(arg1, arg2));
9030 break;
9031 case TARGET_NR_readv:
9032 {
9033 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9034 if (vec != NULL) {
9035 ret = get_errno(safe_readv(arg1, vec, arg3));
9036 unlock_iovec(vec, arg2, arg3, 1);
9037 } else {
9038 ret = -host_to_target_errno(errno);
9039 }
9040 }
9041 break;
9042 case TARGET_NR_writev:
9043 {
9044 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9045 if (vec != NULL) {
9046 ret = get_errno(safe_writev(arg1, vec, arg3));
9047 unlock_iovec(vec, arg2, arg3, 0);
9048 } else {
9049 ret = -host_to_target_errno(errno);
9050 }
9051 }
9052 break;
9053 case TARGET_NR_getsid:
9054 ret = get_errno(getsid(arg1));
9055 break;
9056 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9057 case TARGET_NR_fdatasync:
9058 ret = get_errno(fdatasync(arg1));
9059 break;
9060 #endif
9061 #ifdef TARGET_NR__sysctl
9062 case TARGET_NR__sysctl:
9063 /* We don't implement this, but ENOTDIR is always a safe
9064 return value. */
9065 ret = -TARGET_ENOTDIR;
9066 break;
9067 #endif
9068 case TARGET_NR_sched_getaffinity:
9069 {
9070 unsigned int mask_size;
9071 unsigned long *mask;
9072
9073 /*
9074 * sched_getaffinity needs multiples of ulong, so need to take
9075 * care of mismatches between target ulong and host ulong sizes.
9076 */
9077 if (arg2 & (sizeof(abi_ulong) - 1)) {
9078 ret = -TARGET_EINVAL;
9079 break;
9080 }
9081 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9082
9083 mask = alloca(mask_size);
9084 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9085
9086 if (!is_error(ret)) {
9087 if (ret > arg2) {
9088 /* More data returned than the caller's buffer will fit.
9089 * This only happens if sizeof(abi_long) < sizeof(long)
9090 * and the caller passed us a buffer holding an odd number
9091 * of abi_longs. If the host kernel is actually using the
9092 * extra 4 bytes then fail EINVAL; otherwise we can just
9093 * ignore them and only copy the interesting part.
9094 */
9095 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9096 if (numcpus > arg2 * 8) {
9097 ret = -TARGET_EINVAL;
9098 break;
9099 }
9100 ret = arg2;
9101 }
9102
9103 if (copy_to_user(arg3, mask, ret)) {
9104 goto efault;
9105 }
9106 }
9107 }
9108 break;
9109 case TARGET_NR_sched_setaffinity:
9110 {
9111 unsigned int mask_size;
9112 unsigned long *mask;
9113
9114 /*
9115 * sched_setaffinity needs multiples of ulong, so need to take
9116 * care of mismatches between target ulong and host ulong sizes.
9117 */
9118 if (arg2 & (sizeof(abi_ulong) - 1)) {
9119 ret = -TARGET_EINVAL;
9120 break;
9121 }
9122 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9123
9124 mask = alloca(mask_size);
9125 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9126 goto efault;
9127 }
9128 memcpy(mask, p, arg2);
9129 unlock_user_struct(p, arg2, 0);
9130
9131 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9132 }
9133 break;
9134 case TARGET_NR_sched_setparam:
9135 {
9136 struct sched_param *target_schp;
9137 struct sched_param schp;
9138
9139 if (arg2 == 0) {
9140 return -TARGET_EINVAL;
9141 }
9142 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9143 goto efault;
9144 schp.sched_priority = tswap32(target_schp->sched_priority);
9145 unlock_user_struct(target_schp, arg2, 0);
9146 ret = get_errno(sched_setparam(arg1, &schp));
9147 }
9148 break;
9149 case TARGET_NR_sched_getparam:
9150 {
9151 struct sched_param *target_schp;
9152 struct sched_param schp;
9153
9154 if (arg2 == 0) {
9155 return -TARGET_EINVAL;
9156 }
9157 ret = get_errno(sched_getparam(arg1, &schp));
9158 if (!is_error(ret)) {
9159 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9160 goto efault;
9161 target_schp->sched_priority = tswap32(schp.sched_priority);
9162 unlock_user_struct(target_schp, arg2, 1);
9163 }
9164 }
9165 break;
9166 case TARGET_NR_sched_setscheduler:
9167 {
9168 struct sched_param *target_schp;
9169 struct sched_param schp;
9170 if (arg3 == 0) {
9171 return -TARGET_EINVAL;
9172 }
9173 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9174 goto efault;
9175 schp.sched_priority = tswap32(target_schp->sched_priority);
9176 unlock_user_struct(target_schp, arg3, 0);
9177 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9178 }
9179 break;
9180 case TARGET_NR_sched_getscheduler:
9181 ret = get_errno(sched_getscheduler(arg1));
9182 break;
9183 case TARGET_NR_sched_yield:
9184 ret = get_errno(sched_yield());
9185 break;
9186 case TARGET_NR_sched_get_priority_max:
9187 ret = get_errno(sched_get_priority_max(arg1));
9188 break;
9189 case TARGET_NR_sched_get_priority_min:
9190 ret = get_errno(sched_get_priority_min(arg1));
9191 break;
9192 case TARGET_NR_sched_rr_get_interval:
9193 {
9194 struct timespec ts;
9195 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9196 if (!is_error(ret)) {
9197 ret = host_to_target_timespec(arg2, &ts);
9198 }
9199 }
9200 break;
9201 case TARGET_NR_nanosleep:
9202 {
9203 struct timespec req, rem;
9204 target_to_host_timespec(&req, arg1);
9205 ret = get_errno(nanosleep(&req, &rem));
9206 if (is_error(ret) && arg2) {
9207 host_to_target_timespec(arg2, &rem);
9208 }
9209 }
9210 break;
9211 #ifdef TARGET_NR_query_module
9212 case TARGET_NR_query_module:
9213 goto unimplemented;
9214 #endif
9215 #ifdef TARGET_NR_nfsservctl
9216 case TARGET_NR_nfsservctl:
9217 goto unimplemented;
9218 #endif
9219 case TARGET_NR_prctl:
9220 switch (arg1) {
9221 case PR_GET_PDEATHSIG:
9222 {
9223 int deathsig;
9224 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9225 if (!is_error(ret) && arg2
9226 && put_user_ual(deathsig, arg2)) {
9227 goto efault;
9228 }
9229 break;
9230 }
9231 #ifdef PR_GET_NAME
9232 case PR_GET_NAME:
9233 {
9234 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9235 if (!name) {
9236 goto efault;
9237 }
9238 ret = get_errno(prctl(arg1, (unsigned long)name,
9239 arg3, arg4, arg5));
9240 unlock_user(name, arg2, 16);
9241 break;
9242 }
9243 case PR_SET_NAME:
9244 {
9245 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9246 if (!name) {
9247 goto efault;
9248 }
9249 ret = get_errno(prctl(arg1, (unsigned long)name,
9250 arg3, arg4, arg5));
9251 unlock_user(name, arg2, 0);
9252 break;
9253 }
9254 #endif
9255 default:
9256 /* Most prctl options have no pointer arguments */
9257 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9258 break;
9259 }
9260 break;
9261 #ifdef TARGET_NR_arch_prctl
9262 case TARGET_NR_arch_prctl:
9263 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9264 ret = do_arch_prctl(cpu_env, arg1, arg2);
9265 break;
9266 #else
9267 goto unimplemented;
9268 #endif
9269 #endif
9270 #ifdef TARGET_NR_pread64
9271 case TARGET_NR_pread64:
9272 if (regpairs_aligned(cpu_env)) {
9273 arg4 = arg5;
9274 arg5 = arg6;
9275 }
9276 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9277 goto efault;
9278 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9279 unlock_user(p, arg2, ret);
9280 break;
9281 case TARGET_NR_pwrite64:
9282 if (regpairs_aligned(cpu_env)) {
9283 arg4 = arg5;
9284 arg5 = arg6;
9285 }
9286 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9287 goto efault;
9288 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9289 unlock_user(p, arg2, 0);
9290 break;
9291 #endif
9292 case TARGET_NR_getcwd:
9293 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9294 goto efault;
9295 ret = get_errno(sys_getcwd1(p, arg2));
9296 unlock_user(p, arg1, ret);
9297 break;
9298 case TARGET_NR_capget:
9299 case TARGET_NR_capset:
9300 {
9301 struct target_user_cap_header *target_header;
9302 struct target_user_cap_data *target_data = NULL;
9303 struct __user_cap_header_struct header;
9304 struct __user_cap_data_struct data[2];
9305 struct __user_cap_data_struct *dataptr = NULL;
9306 int i, target_datalen;
9307 int data_items = 1;
9308
9309 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9310 goto efault;
9311 }
9312 header.version = tswap32(target_header->version);
9313 header.pid = tswap32(target_header->pid);
9314
9315 if (header.version != _LINUX_CAPABILITY_VERSION) {
9316 /* Version 2 and up takes pointer to two user_data structs */
9317 data_items = 2;
9318 }
9319
9320 target_datalen = sizeof(*target_data) * data_items;
9321
9322 if (arg2) {
9323 if (num == TARGET_NR_capget) {
9324 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9325 } else {
9326 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9327 }
9328 if (!target_data) {
9329 unlock_user_struct(target_header, arg1, 0);
9330 goto efault;
9331 }
9332
9333 if (num == TARGET_NR_capset) {
9334 for (i = 0; i < data_items; i++) {
9335 data[i].effective = tswap32(target_data[i].effective);
9336 data[i].permitted = tswap32(target_data[i].permitted);
9337 data[i].inheritable = tswap32(target_data[i].inheritable);
9338 }
9339 }
9340
9341 dataptr = data;
9342 }
9343
9344 if (num == TARGET_NR_capget) {
9345 ret = get_errno(capget(&header, dataptr));
9346 } else {
9347 ret = get_errno(capset(&header, dataptr));
9348 }
9349
9350 /* The kernel always updates version for both capget and capset */
9351 target_header->version = tswap32(header.version);
9352 unlock_user_struct(target_header, arg1, 1);
9353
9354 if (arg2) {
9355 if (num == TARGET_NR_capget) {
9356 for (i = 0; i < data_items; i++) {
9357 target_data[i].effective = tswap32(data[i].effective);
9358 target_data[i].permitted = tswap32(data[i].permitted);
9359 target_data[i].inheritable = tswap32(data[i].inheritable);
9360 }
9361 unlock_user(target_data, arg2, target_datalen);
9362 } else {
9363 unlock_user(target_data, arg2, 0);
9364 }
9365 }
9366 break;
9367 }
9368 case TARGET_NR_sigaltstack:
9369 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9370 break;
9371
9372 #ifdef CONFIG_SENDFILE
9373 case TARGET_NR_sendfile:
9374 {
9375 off_t *offp = NULL;
9376 off_t off;
9377 if (arg3) {
9378 ret = get_user_sal(off, arg3);
9379 if (is_error(ret)) {
9380 break;
9381 }
9382 offp = &off;
9383 }
9384 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9385 if (!is_error(ret) && arg3) {
9386 abi_long ret2 = put_user_sal(off, arg3);
9387 if (is_error(ret2)) {
9388 ret = ret2;
9389 }
9390 }
9391 break;
9392 }
9393 #ifdef TARGET_NR_sendfile64
9394 case TARGET_NR_sendfile64:
9395 {
9396 off_t *offp = NULL;
9397 off_t off;
9398 if (arg3) {
9399 ret = get_user_s64(off, arg3);
9400 if (is_error(ret)) {
9401 break;
9402 }
9403 offp = &off;
9404 }
9405 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9406 if (!is_error(ret) && arg3) {
9407 abi_long ret2 = put_user_s64(off, arg3);
9408 if (is_error(ret2)) {
9409 ret = ret2;
9410 }
9411 }
9412 break;
9413 }
9414 #endif
9415 #else
9416 case TARGET_NR_sendfile:
9417 #ifdef TARGET_NR_sendfile64
9418 case TARGET_NR_sendfile64:
9419 #endif
9420 goto unimplemented;
9421 #endif
9422
9423 #ifdef TARGET_NR_getpmsg
9424 case TARGET_NR_getpmsg:
9425 goto unimplemented;
9426 #endif
9427 #ifdef TARGET_NR_putpmsg
9428 case TARGET_NR_putpmsg:
9429 goto unimplemented;
9430 #endif
9431 #ifdef TARGET_NR_vfork
9432 case TARGET_NR_vfork:
9433 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9434 0, 0, 0, 0));
9435 break;
9436 #endif
9437 #ifdef TARGET_NR_ugetrlimit
9438 case TARGET_NR_ugetrlimit:
9439 {
9440 struct rlimit rlim;
9441 int resource = target_to_host_resource(arg1);
9442 ret = get_errno(getrlimit(resource, &rlim));
9443 if (!is_error(ret)) {
9444 struct target_rlimit *target_rlim;
9445 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9446 goto efault;
9447 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9448 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9449 unlock_user_struct(target_rlim, arg2, 1);
9450 }
9451 break;
9452 }
9453 #endif
9454 #ifdef TARGET_NR_truncate64
9455 case TARGET_NR_truncate64:
9456 if (!(p = lock_user_string(arg1)))
9457 goto efault;
9458 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9459 unlock_user(p, arg1, 0);
9460 break;
9461 #endif
9462 #ifdef TARGET_NR_ftruncate64
9463 case TARGET_NR_ftruncate64:
9464 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9465 break;
9466 #endif
9467 #ifdef TARGET_NR_stat64
9468 case TARGET_NR_stat64:
9469 if (!(p = lock_user_string(arg1)))
9470 goto efault;
9471 ret = get_errno(stat(path(p), &st));
9472 unlock_user(p, arg1, 0);
9473 if (!is_error(ret))
9474 ret = host_to_target_stat64(cpu_env, arg2, &st);
9475 break;
9476 #endif
9477 #ifdef TARGET_NR_lstat64
9478 case TARGET_NR_lstat64:
9479 if (!(p = lock_user_string(arg1)))
9480 goto efault;
9481 ret = get_errno(lstat(path(p), &st));
9482 unlock_user(p, arg1, 0);
9483 if (!is_error(ret))
9484 ret = host_to_target_stat64(cpu_env, arg2, &st);
9485 break;
9486 #endif
9487 #ifdef TARGET_NR_fstat64
9488 case TARGET_NR_fstat64:
9489 ret = get_errno(fstat(arg1, &st));
9490 if (!is_error(ret))
9491 ret = host_to_target_stat64(cpu_env, arg2, &st);
9492 break;
9493 #endif
9494 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9495 #ifdef TARGET_NR_fstatat64
9496 case TARGET_NR_fstatat64:
9497 #endif
9498 #ifdef TARGET_NR_newfstatat
9499 case TARGET_NR_newfstatat:
9500 #endif
9501 if (!(p = lock_user_string(arg2)))
9502 goto efault;
9503 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9504 if (!is_error(ret))
9505 ret = host_to_target_stat64(cpu_env, arg3, &st);
9506 break;
9507 #endif
9508 #ifdef TARGET_NR_lchown
9509 case TARGET_NR_lchown:
9510 if (!(p = lock_user_string(arg1)))
9511 goto efault;
9512 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9513 unlock_user(p, arg1, 0);
9514 break;
9515 #endif
9516 #ifdef TARGET_NR_getuid
9517 case TARGET_NR_getuid:
9518 ret = get_errno(high2lowuid(getuid()));
9519 break;
9520 #endif
9521 #ifdef TARGET_NR_getgid
9522 case TARGET_NR_getgid:
9523 ret = get_errno(high2lowgid(getgid()));
9524 break;
9525 #endif
9526 #ifdef TARGET_NR_geteuid
9527 case TARGET_NR_geteuid:
9528 ret = get_errno(high2lowuid(geteuid()));
9529 break;
9530 #endif
9531 #ifdef TARGET_NR_getegid
9532 case TARGET_NR_getegid:
9533 ret = get_errno(high2lowgid(getegid()));
9534 break;
9535 #endif
9536 case TARGET_NR_setreuid:
9537 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9538 break;
9539 case TARGET_NR_setregid:
9540 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9541 break;
9542 case TARGET_NR_getgroups:
9543 {
9544 int gidsetsize = arg1;
9545 target_id *target_grouplist;
9546 gid_t *grouplist;
9547 int i;
9548
9549 grouplist = alloca(gidsetsize * sizeof(gid_t));
9550 ret = get_errno(getgroups(gidsetsize, grouplist));
9551 if (gidsetsize == 0)
9552 break;
9553 if (!is_error(ret)) {
9554 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9555 if (!target_grouplist)
9556 goto efault;
9557 for(i = 0;i < ret; i++)
9558 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9559 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9560 }
9561 }
9562 break;
9563 case TARGET_NR_setgroups:
9564 {
9565 int gidsetsize = arg1;
9566 target_id *target_grouplist;
9567 gid_t *grouplist = NULL;
9568 int i;
9569 if (gidsetsize) {
9570 grouplist = alloca(gidsetsize * sizeof(gid_t));
9571 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9572 if (!target_grouplist) {
9573 ret = -TARGET_EFAULT;
9574 goto fail;
9575 }
9576 for (i = 0; i < gidsetsize; i++) {
9577 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9578 }
9579 unlock_user(target_grouplist, arg2, 0);
9580 }
9581 ret = get_errno(setgroups(gidsetsize, grouplist));
9582 }
9583 break;
9584 case TARGET_NR_fchown:
9585 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9586 break;
9587 #if defined(TARGET_NR_fchownat)
9588 case TARGET_NR_fchownat:
9589 if (!(p = lock_user_string(arg2)))
9590 goto efault;
9591 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9592 low2highgid(arg4), arg5));
9593 unlock_user(p, arg2, 0);
9594 break;
9595 #endif
9596 #ifdef TARGET_NR_setresuid
9597 case TARGET_NR_setresuid:
9598 ret = get_errno(sys_setresuid(low2highuid(arg1),
9599 low2highuid(arg2),
9600 low2highuid(arg3)));
9601 break;
9602 #endif
9603 #ifdef TARGET_NR_getresuid
9604 case TARGET_NR_getresuid:
9605 {
9606 uid_t ruid, euid, suid;
9607 ret = get_errno(getresuid(&ruid, &euid, &suid));
9608 if (!is_error(ret)) {
9609 if (put_user_id(high2lowuid(ruid), arg1)
9610 || put_user_id(high2lowuid(euid), arg2)
9611 || put_user_id(high2lowuid(suid), arg3))
9612 goto efault;
9613 }
9614 }
9615 break;
9616 #endif
9617 #ifdef TARGET_NR_getresgid
9618 case TARGET_NR_setresgid:
9619 ret = get_errno(sys_setresgid(low2highgid(arg1),
9620 low2highgid(arg2),
9621 low2highgid(arg3)));
9622 break;
9623 #endif
9624 #ifdef TARGET_NR_getresgid
9625 case TARGET_NR_getresgid:
9626 {
9627 gid_t rgid, egid, sgid;
9628 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9629 if (!is_error(ret)) {
9630 if (put_user_id(high2lowgid(rgid), arg1)
9631 || put_user_id(high2lowgid(egid), arg2)
9632 || put_user_id(high2lowgid(sgid), arg3))
9633 goto efault;
9634 }
9635 }
9636 break;
9637 #endif
9638 #ifdef TARGET_NR_chown
9639 case TARGET_NR_chown:
9640 if (!(p = lock_user_string(arg1)))
9641 goto efault;
9642 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9643 unlock_user(p, arg1, 0);
9644 break;
9645 #endif
9646 case TARGET_NR_setuid:
9647 ret = get_errno(sys_setuid(low2highuid(arg1)));
9648 break;
9649 case TARGET_NR_setgid:
9650 ret = get_errno(sys_setgid(low2highgid(arg1)));
9651 break;
9652 case TARGET_NR_setfsuid:
9653 ret = get_errno(setfsuid(arg1));
9654 break;
9655 case TARGET_NR_setfsgid:
9656 ret = get_errno(setfsgid(arg1));
9657 break;
9658
9659 #ifdef TARGET_NR_lchown32
9660 case TARGET_NR_lchown32:
9661 if (!(p = lock_user_string(arg1)))
9662 goto efault;
9663 ret = get_errno(lchown(p, arg2, arg3));
9664 unlock_user(p, arg1, 0);
9665 break;
9666 #endif
9667 #ifdef TARGET_NR_getuid32
9668 case TARGET_NR_getuid32:
9669 ret = get_errno(getuid());
9670 break;
9671 #endif
9672
9673 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9674 /* Alpha specific */
9675 case TARGET_NR_getxuid:
9676 {
9677 uid_t euid;
9678 euid=geteuid();
9679 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9680 }
9681 ret = get_errno(getuid());
9682 break;
9683 #endif
9684 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9685 /* Alpha specific */
9686 case TARGET_NR_getxgid:
9687 {
9688 uid_t egid;
9689 egid=getegid();
9690 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
9691 }
9692 ret = get_errno(getgid());
9693 break;
9694 #endif
9695 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9696 /* Alpha specific */
9697 case TARGET_NR_osf_getsysinfo:
9698 ret = -TARGET_EOPNOTSUPP;
9699 switch (arg1) {
9700 case TARGET_GSI_IEEE_FP_CONTROL:
9701 {
9702 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9703
9704 /* Copied from linux ieee_fpcr_to_swcr. */
9705 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9706 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9707 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9708 | SWCR_TRAP_ENABLE_DZE
9709 | SWCR_TRAP_ENABLE_OVF);
9710 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9711 | SWCR_TRAP_ENABLE_INE);
9712 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9713 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9714
9715 if (put_user_u64 (swcr, arg2))
9716 goto efault;
9717 ret = 0;
9718 }
9719 break;
9720
9721 /* case GSI_IEEE_STATE_AT_SIGNAL:
9722 -- Not implemented in linux kernel.
9723 case GSI_UACPROC:
9724 -- Retrieves current unaligned access state; not much used.
9725 case GSI_PROC_TYPE:
9726 -- Retrieves implver information; surely not used.
9727 case GSI_GET_HWRPB:
9728 -- Grabs a copy of the HWRPB; surely not used.
9729 */
9730 }
9731 break;
9732 #endif
9733 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9734 /* Alpha specific */
9735 case TARGET_NR_osf_setsysinfo:
9736 ret = -TARGET_EOPNOTSUPP;
9737 switch (arg1) {
9738 case TARGET_SSI_IEEE_FP_CONTROL:
9739 {
9740 uint64_t swcr, fpcr, orig_fpcr;
9741
9742 if (get_user_u64 (swcr, arg2)) {
9743 goto efault;
9744 }
9745 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9746 fpcr = orig_fpcr & FPCR_DYN_MASK;
9747
9748 /* Copied from linux ieee_swcr_to_fpcr. */
9749 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9750 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9751 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9752 | SWCR_TRAP_ENABLE_DZE
9753 | SWCR_TRAP_ENABLE_OVF)) << 48;
9754 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9755 | SWCR_TRAP_ENABLE_INE)) << 57;
9756 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9757 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9758
9759 cpu_alpha_store_fpcr(cpu_env, fpcr);
9760 ret = 0;
9761 }
9762 break;
9763
9764 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9765 {
9766 uint64_t exc, fpcr, orig_fpcr;
9767 int si_code;
9768
9769 if (get_user_u64(exc, arg2)) {
9770 goto efault;
9771 }
9772
9773 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9774
9775 /* We only add to the exception status here. */
9776 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9777
9778 cpu_alpha_store_fpcr(cpu_env, fpcr);
9779 ret = 0;
9780
9781 /* Old exceptions are not signaled. */
9782 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9783
9784 /* If any exceptions set by this call,
9785 and are unmasked, send a signal. */
9786 si_code = 0;
9787 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9788 si_code = TARGET_FPE_FLTRES;
9789 }
9790 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9791 si_code = TARGET_FPE_FLTUND;
9792 }
9793 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9794 si_code = TARGET_FPE_FLTOVF;
9795 }
9796 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9797 si_code = TARGET_FPE_FLTDIV;
9798 }
9799 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9800 si_code = TARGET_FPE_FLTINV;
9801 }
9802 if (si_code != 0) {
9803 target_siginfo_t info;
9804 info.si_signo = SIGFPE;
9805 info.si_errno = 0;
9806 info.si_code = si_code;
9807 info._sifields._sigfault._addr
9808 = ((CPUArchState *)cpu_env)->pc;
9809 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9810 }
9811 }
9812 break;
9813
9814 /* case SSI_NVPAIRS:
9815 -- Used with SSIN_UACPROC to enable unaligned accesses.
9816 case SSI_IEEE_STATE_AT_SIGNAL:
9817 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9818 -- Not implemented in linux kernel
9819 */
9820 }
9821 break;
9822 #endif
9823 #ifdef TARGET_NR_osf_sigprocmask
9824 /* Alpha specific. */
9825 case TARGET_NR_osf_sigprocmask:
9826 {
9827 abi_ulong mask;
9828 int how;
9829 sigset_t set, oldset;
9830
9831 switch(arg1) {
9832 case TARGET_SIG_BLOCK:
9833 how = SIG_BLOCK;
9834 break;
9835 case TARGET_SIG_UNBLOCK:
9836 how = SIG_UNBLOCK;
9837 break;
9838 case TARGET_SIG_SETMASK:
9839 how = SIG_SETMASK;
9840 break;
9841 default:
9842 ret = -TARGET_EINVAL;
9843 goto fail;
9844 }
9845 mask = arg2;
9846 target_to_host_old_sigset(&set, &mask);
9847 ret = do_sigprocmask(how, &set, &oldset);
9848 if (!ret) {
9849 host_to_target_old_sigset(&mask, &oldset);
9850 ret = mask;
9851 }
9852 }
9853 break;
9854 #endif
9855
9856 #ifdef TARGET_NR_getgid32
9857 case TARGET_NR_getgid32:
9858 ret = get_errno(getgid());
9859 break;
9860 #endif
9861 #ifdef TARGET_NR_geteuid32
9862 case TARGET_NR_geteuid32:
9863 ret = get_errno(geteuid());
9864 break;
9865 #endif
9866 #ifdef TARGET_NR_getegid32
9867 case TARGET_NR_getegid32:
9868 ret = get_errno(getegid());
9869 break;
9870 #endif
9871 #ifdef TARGET_NR_setreuid32
9872 case TARGET_NR_setreuid32:
9873 ret = get_errno(setreuid(arg1, arg2));
9874 break;
9875 #endif
9876 #ifdef TARGET_NR_setregid32
9877 case TARGET_NR_setregid32:
9878 ret = get_errno(setregid(arg1, arg2));
9879 break;
9880 #endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* Return the supplementary group list, 32-bit-uid variant:
             * host gid_t values are copied out to the guest as 32-bit words.
             */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* NOTE(review): gidsetsize comes straight from the guest and
             * is not range-checked before the alloca(); a huge (or, after
             * the int truncation, negative) value could overflow the host
             * stack -- confirm whether an explicit bound such as
             * NGROUPS_MAX should be enforced here. */
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            /* gidsetsize == 0 is a size query: return the count only,
             * copy nothing back to guest memory. */
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                /* ret holds the number of groups actually returned */
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* Set the supplementary group list from a guest array of
             * 32-bit gids, byte-swapping each entry into a host gid_t.
             */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* NOTE(review): as in getgroups32 above, gidsetsize is
             * guest-controlled and unchecked before the alloca() --
             * verify that a bound is not required. */
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
9927 #ifdef TARGET_NR_fchown32
9928 case TARGET_NR_fchown32:
9929 ret = get_errno(fchown(arg1, arg2, arg3));
9930 break;
9931 #endif
9932 #ifdef TARGET_NR_setresuid32
9933 case TARGET_NR_setresuid32:
9934 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
9935 break;
9936 #endif
9937 #ifdef TARGET_NR_getresuid32
9938 case TARGET_NR_getresuid32:
9939 {
9940 uid_t ruid, euid, suid;
9941 ret = get_errno(getresuid(&ruid, &euid, &suid));
9942 if (!is_error(ret)) {
9943 if (put_user_u32(ruid, arg1)
9944 || put_user_u32(euid, arg2)
9945 || put_user_u32(suid, arg3))
9946 goto efault;
9947 }
9948 }
9949 break;
9950 #endif
9951 #ifdef TARGET_NR_setresgid32
9952 case TARGET_NR_setresgid32:
9953 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
9954 break;
9955 #endif
9956 #ifdef TARGET_NR_getresgid32
9957 case TARGET_NR_getresgid32:
9958 {
9959 gid_t rgid, egid, sgid;
9960 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9961 if (!is_error(ret)) {
9962 if (put_user_u32(rgid, arg1)
9963 || put_user_u32(egid, arg2)
9964 || put_user_u32(sgid, arg3))
9965 goto efault;
9966 }
9967 }
9968 break;
9969 #endif
9970 #ifdef TARGET_NR_chown32
9971 case TARGET_NR_chown32:
9972 if (!(p = lock_user_string(arg1)))
9973 goto efault;
9974 ret = get_errno(chown(p, arg2, arg3));
9975 unlock_user(p, arg1, 0);
9976 break;
9977 #endif
9978 #ifdef TARGET_NR_setuid32
9979 case TARGET_NR_setuid32:
9980 ret = get_errno(sys_setuid(arg1));
9981 break;
9982 #endif
9983 #ifdef TARGET_NR_setgid32
9984 case TARGET_NR_setgid32:
9985 ret = get_errno(sys_setgid(arg1));
9986 break;
9987 #endif
9988 #ifdef TARGET_NR_setfsuid32
9989 case TARGET_NR_setfsuid32:
9990 ret = get_errno(setfsuid(arg1));
9991 break;
9992 #endif
9993 #ifdef TARGET_NR_setfsgid32
9994 case TARGET_NR_setfsgid32:
9995 ret = get_errno(setfsgid(arg1));
9996 break;
9997 #endif
9998
9999 case TARGET_NR_pivot_root:
10000 goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* mincore(addr, length, vec): report page residency of the
             * guest range [arg1, arg1+arg2) into the vector at arg3. */
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
                goto efault;
            /* NOTE(review): the result vector (arg3) is locked with
             * lock_user_string(), i.e. as a NUL-terminated string for
             * reading, but mincore() *writes* one byte per page into it.
             * Locking VERIFY_WRITE for (arg2 + pagesize - 1) / pagesize
             * bytes looks like the intended operation -- confirm. */
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
10017 #ifdef TARGET_NR_arm_fadvise64_64
10018 case TARGET_NR_arm_fadvise64_64:
10019 /* arm_fadvise64_64 looks like fadvise64_64 but
10020 * with different argument order: fd, advice, offset, len
10021 * rather than the usual fd, offset, len, advice.
10022 * Note that offset and len are both 64-bit so appear as
10023 * pairs of 32-bit registers.
10024 */
10025 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10026 target_offset64(arg5, arg6), arg2);
10027 ret = -host_to_target_errno(ret);
10028 break;
10029 #endif
10030
10031 #if TARGET_ABI_BITS == 32
10032
10033 #ifdef TARGET_NR_fadvise64_64
10034 case TARGET_NR_fadvise64_64:
10035 /* 6 args: fd, offset (high, low), len (high, low), advice */
10036 if (regpairs_aligned(cpu_env)) {
10037 /* offset is in (3,4), len in (5,6) and advice in 7 */
10038 arg2 = arg3;
10039 arg3 = arg4;
10040 arg4 = arg5;
10041 arg5 = arg6;
10042 arg6 = arg7;
10043 }
10044 ret = -host_to_target_errno(posix_fadvise(arg1,
10045 target_offset64(arg2, arg3),
10046 target_offset64(arg4, arg5),
10047 arg6));
10048 break;
10049 #endif
10050
10051 #ifdef TARGET_NR_fadvise64
10052 case TARGET_NR_fadvise64:
10053 /* 5 args: fd, offset (high, low), len, advice */
10054 if (regpairs_aligned(cpu_env)) {
10055 /* offset is in (3,4), len in 5 and advice in 6 */
10056 arg2 = arg3;
10057 arg3 = arg4;
10058 arg4 = arg5;
10059 arg5 = arg6;
10060 }
10061 ret = -host_to_target_errno(posix_fadvise(arg1,
10062 target_offset64(arg2, arg3),
10063 arg4, arg5));
10064 break;
10065 #endif
10066
10067 #else /* not a 32-bit ABI */
10068 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10069 #ifdef TARGET_NR_fadvise64_64
10070 case TARGET_NR_fadvise64_64:
10071 #endif
10072 #ifdef TARGET_NR_fadvise64
10073 case TARGET_NR_fadvise64:
10074 #endif
10075 #ifdef TARGET_S390X
10076 switch (arg4) {
10077 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10078 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10079 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10080 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10081 default: break;
10082 }
10083 #endif
10084 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10085 break;
10086 #endif
10087 #endif /* end of 64-bit ABI fadvise handling */
10088
10089 #ifdef TARGET_NR_madvise
10090 case TARGET_NR_madvise:
10091 /* A straight passthrough may not be safe because qemu sometimes
10092 turns private file-backed mappings into anonymous mappings.
10093 This will break MADV_DONTNEED.
10094 This is a hint, so ignoring and returning success is ok. */
10095 ret = get_errno(0);
10096 break;
10097 #endif
10098 #if TARGET_ABI_BITS == 32
10099 case TARGET_NR_fcntl64:
10100 {
10101 int cmd;
10102 struct flock64 fl;
10103 struct target_flock64 *target_fl;
10104 #ifdef TARGET_ARM
10105 struct target_eabi_flock64 *target_efl;
10106 #endif
10107
10108 cmd = target_to_host_fcntl_cmd(arg2);
10109 if (cmd == -TARGET_EINVAL) {
10110 ret = cmd;
10111 break;
10112 }
10113
10114 switch(arg2) {
10115 case TARGET_F_GETLK64:
10116 #ifdef TARGET_ARM
10117 if (((CPUARMState *)cpu_env)->eabi) {
10118 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10119 goto efault;
10120 fl.l_type = tswap16(target_efl->l_type);
10121 fl.l_whence = tswap16(target_efl->l_whence);
10122 fl.l_start = tswap64(target_efl->l_start);
10123 fl.l_len = tswap64(target_efl->l_len);
10124 fl.l_pid = tswap32(target_efl->l_pid);
10125 unlock_user_struct(target_efl, arg3, 0);
10126 } else
10127 #endif
10128 {
10129 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10130 goto efault;
10131 fl.l_type = tswap16(target_fl->l_type);
10132 fl.l_whence = tswap16(target_fl->l_whence);
10133 fl.l_start = tswap64(target_fl->l_start);
10134 fl.l_len = tswap64(target_fl->l_len);
10135 fl.l_pid = tswap32(target_fl->l_pid);
10136 unlock_user_struct(target_fl, arg3, 0);
10137 }
10138 ret = get_errno(fcntl(arg1, cmd, &fl));
10139 if (ret == 0) {
10140 #ifdef TARGET_ARM
10141 if (((CPUARMState *)cpu_env)->eabi) {
10142 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
10143 goto efault;
10144 target_efl->l_type = tswap16(fl.l_type);
10145 target_efl->l_whence = tswap16(fl.l_whence);
10146 target_efl->l_start = tswap64(fl.l_start);
10147 target_efl->l_len = tswap64(fl.l_len);
10148 target_efl->l_pid = tswap32(fl.l_pid);
10149 unlock_user_struct(target_efl, arg3, 1);
10150 } else
10151 #endif
10152 {
10153 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
10154 goto efault;
10155 target_fl->l_type = tswap16(fl.l_type);
10156 target_fl->l_whence = tswap16(fl.l_whence);
10157 target_fl->l_start = tswap64(fl.l_start);
10158 target_fl->l_len = tswap64(fl.l_len);
10159 target_fl->l_pid = tswap32(fl.l_pid);
10160 unlock_user_struct(target_fl, arg3, 1);
10161 }
10162 }
10163 break;
10164
10165 case TARGET_F_SETLK64:
10166 case TARGET_F_SETLKW64:
10167 #ifdef TARGET_ARM
10168 if (((CPUARMState *)cpu_env)->eabi) {
10169 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10170 goto efault;
10171 fl.l_type = tswap16(target_efl->l_type);
10172 fl.l_whence = tswap16(target_efl->l_whence);
10173 fl.l_start = tswap64(target_efl->l_start);
10174 fl.l_len = tswap64(target_efl->l_len);
10175 fl.l_pid = tswap32(target_efl->l_pid);
10176 unlock_user_struct(target_efl, arg3, 0);
10177 } else
10178 #endif
10179 {
10180 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10181 goto efault;
10182 fl.l_type = tswap16(target_fl->l_type);
10183 fl.l_whence = tswap16(target_fl->l_whence);
10184 fl.l_start = tswap64(target_fl->l_start);
10185 fl.l_len = tswap64(target_fl->l_len);
10186 fl.l_pid = tswap32(target_fl->l_pid);
10187 unlock_user_struct(target_fl, arg3, 0);
10188 }
10189 ret = get_errno(fcntl(arg1, cmd, &fl));
10190 break;
10191 default:
10192 ret = do_fcntl(arg1, arg2, arg3);
10193 break;
10194 }
10195 break;
10196 }
10197 #endif
10198 #ifdef TARGET_NR_cacheflush
10199 case TARGET_NR_cacheflush:
10200 /* self-modifying code is handled automatically, so nothing needed */
10201 ret = 0;
10202 break;
10203 #endif
10204 #ifdef TARGET_NR_security
10205 case TARGET_NR_security:
10206 goto unimplemented;
10207 #endif
10208 #ifdef TARGET_NR_getpagesize
10209 case TARGET_NR_getpagesize:
10210 ret = TARGET_PAGE_SIZE;
10211 break;
10212 #endif
10213 case TARGET_NR_gettid:
10214 ret = get_errno(gettid());
10215 break;
10216 #ifdef TARGET_NR_readahead
10217 case TARGET_NR_readahead:
10218 #if TARGET_ABI_BITS == 32
10219 if (regpairs_aligned(cpu_env)) {
10220 arg2 = arg3;
10221 arg3 = arg4;
10222 arg4 = arg5;
10223 }
10224 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10225 #else
10226 ret = get_errno(readahead(arg1, arg2, arg3));
10227 #endif
10228 break;
10229 #endif
10230 #ifdef CONFIG_ATTR
10231 #ifdef TARGET_NR_setxattr
10232 case TARGET_NR_listxattr:
10233 case TARGET_NR_llistxattr:
10234 {
10235 void *p, *b = 0;
10236 if (arg2) {
10237 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10238 if (!b) {
10239 ret = -TARGET_EFAULT;
10240 break;
10241 }
10242 }
10243 p = lock_user_string(arg1);
10244 if (p) {
10245 if (num == TARGET_NR_listxattr) {
10246 ret = get_errno(listxattr(p, b, arg3));
10247 } else {
10248 ret = get_errno(llistxattr(p, b, arg3));
10249 }
10250 } else {
10251 ret = -TARGET_EFAULT;
10252 }
10253 unlock_user(p, arg1, 0);
10254 unlock_user(b, arg2, arg3);
10255 break;
10256 }
10257 case TARGET_NR_flistxattr:
10258 {
10259 void *b = 0;
10260 if (arg2) {
10261 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10262 if (!b) {
10263 ret = -TARGET_EFAULT;
10264 break;
10265 }
10266 }
10267 ret = get_errno(flistxattr(arg1, b, arg3));
10268 unlock_user(b, arg2, arg3);
10269 break;
10270 }
10271 case TARGET_NR_setxattr:
10272 case TARGET_NR_lsetxattr:
10273 {
10274 void *p, *n, *v = 0;
10275 if (arg3) {
10276 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10277 if (!v) {
10278 ret = -TARGET_EFAULT;
10279 break;
10280 }
10281 }
10282 p = lock_user_string(arg1);
10283 n = lock_user_string(arg2);
10284 if (p && n) {
10285 if (num == TARGET_NR_setxattr) {
10286 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10287 } else {
10288 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10289 }
10290 } else {
10291 ret = -TARGET_EFAULT;
10292 }
10293 unlock_user(p, arg1, 0);
10294 unlock_user(n, arg2, 0);
10295 unlock_user(v, arg3, 0);
10296 }
10297 break;
10298 case TARGET_NR_fsetxattr:
10299 {
10300 void *n, *v = 0;
10301 if (arg3) {
10302 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10303 if (!v) {
10304 ret = -TARGET_EFAULT;
10305 break;
10306 }
10307 }
10308 n = lock_user_string(arg2);
10309 if (n) {
10310 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10311 } else {
10312 ret = -TARGET_EFAULT;
10313 }
10314 unlock_user(n, arg2, 0);
10315 unlock_user(v, arg3, 0);
10316 }
10317 break;
10318 case TARGET_NR_getxattr:
10319 case TARGET_NR_lgetxattr:
10320 {
10321 void *p, *n, *v = 0;
10322 if (arg3) {
10323 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10324 if (!v) {
10325 ret = -TARGET_EFAULT;
10326 break;
10327 }
10328 }
10329 p = lock_user_string(arg1);
10330 n = lock_user_string(arg2);
10331 if (p && n) {
10332 if (num == TARGET_NR_getxattr) {
10333 ret = get_errno(getxattr(p, n, v, arg4));
10334 } else {
10335 ret = get_errno(lgetxattr(p, n, v, arg4));
10336 }
10337 } else {
10338 ret = -TARGET_EFAULT;
10339 }
10340 unlock_user(p, arg1, 0);
10341 unlock_user(n, arg2, 0);
10342 unlock_user(v, arg3, arg4);
10343 }
10344 break;
10345 case TARGET_NR_fgetxattr:
10346 {
10347 void *n, *v = 0;
10348 if (arg3) {
10349 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10350 if (!v) {
10351 ret = -TARGET_EFAULT;
10352 break;
10353 }
10354 }
10355 n = lock_user_string(arg2);
10356 if (n) {
10357 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10358 } else {
10359 ret = -TARGET_EFAULT;
10360 }
10361 unlock_user(n, arg2, 0);
10362 unlock_user(v, arg3, arg4);
10363 }
10364 break;
10365 case TARGET_NR_removexattr:
10366 case TARGET_NR_lremovexattr:
10367 {
10368 void *p, *n;
10369 p = lock_user_string(arg1);
10370 n = lock_user_string(arg2);
10371 if (p && n) {
10372 if (num == TARGET_NR_removexattr) {
10373 ret = get_errno(removexattr(p, n));
10374 } else {
10375 ret = get_errno(lremovexattr(p, n));
10376 }
10377 } else {
10378 ret = -TARGET_EFAULT;
10379 }
10380 unlock_user(p, arg1, 0);
10381 unlock_user(n, arg2, 0);
10382 }
10383 break;
10384 case TARGET_NR_fremovexattr:
10385 {
10386 void *n;
10387 n = lock_user_string(arg2);
10388 if (n) {
10389 ret = get_errno(fremovexattr(arg1, n));
10390 } else {
10391 ret = -TARGET_EFAULT;
10392 }
10393 unlock_user(n, arg2, 0);
10394 }
10395 break;
10396 #endif
10397 #endif /* CONFIG_ATTR */
10398 #ifdef TARGET_NR_set_thread_area
10399 case TARGET_NR_set_thread_area:
10400 #if defined(TARGET_MIPS)
10401 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10402 ret = 0;
10403 break;
10404 #elif defined(TARGET_CRIS)
10405 if (arg1 & 0xff)
10406 ret = -TARGET_EINVAL;
10407 else {
10408 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10409 ret = 0;
10410 }
10411 break;
10412 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10413 ret = do_set_thread_area(cpu_env, arg1);
10414 break;
10415 #elif defined(TARGET_M68K)
10416 {
10417 TaskState *ts = cpu->opaque;
10418 ts->tp_value = arg1;
10419 ret = 0;
10420 break;
10421 }
10422 #else
10423 goto unimplemented_nowarn;
10424 #endif
10425 #endif
10426 #ifdef TARGET_NR_get_thread_area
10427 case TARGET_NR_get_thread_area:
10428 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10429 ret = do_get_thread_area(cpu_env, arg1);
10430 break;
10431 #elif defined(TARGET_M68K)
10432 {
10433 TaskState *ts = cpu->opaque;
10434 ret = ts->tp_value;
10435 break;
10436 }
10437 #else
10438 goto unimplemented_nowarn;
10439 #endif
10440 #endif
10441 #ifdef TARGET_NR_getdomainname
10442 case TARGET_NR_getdomainname:
10443 goto unimplemented_nowarn;
10444 #endif
10445
10446 #ifdef TARGET_NR_clock_gettime
10447 case TARGET_NR_clock_gettime:
10448 {
10449 struct timespec ts;
10450 ret = get_errno(clock_gettime(arg1, &ts));
10451 if (!is_error(ret)) {
10452 host_to_target_timespec(arg2, &ts);
10453 }
10454 break;
10455 }
10456 #endif
10457 #ifdef TARGET_NR_clock_getres
10458 case TARGET_NR_clock_getres:
10459 {
10460 struct timespec ts;
10461 ret = get_errno(clock_getres(arg1, &ts));
10462 if (!is_error(ret)) {
10463 host_to_target_timespec(arg2, &ts);
10464 }
10465 break;
10466 }
10467 #endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
        {
            struct timespec ts;
            /* NOTE(review): the return value of target_to_host_timespec()
             * is ignored, so an unreadable arg3 sleeps on an
             * uninitialized timespec instead of failing with
             * -TARGET_EFAULT -- confirm against kernel behaviour. */
            target_to_host_timespec(&ts, arg3);
            ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
            /* If a remaining-time pointer was supplied, copy the
             * (possibly updated) timespec back to the guest. */
            if (arg4)
                host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
            /* clock_nanosleep is odd in that it returns positive errno values.
             * On PPC, CR0 bit 3 should be set in such a situation. */
            if (ret) {
                ((CPUPPCState *)cpu_env)->crf[0] |= 1;
            }
#endif
            break;
        }
#endif
10487
10488 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10489 case TARGET_NR_set_tid_address:
10490 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10491 break;
10492 #endif
10493
10494 case TARGET_NR_tkill:
10495 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10496 break;
10497
10498 case TARGET_NR_tgkill:
10499 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
10500 target_to_host_signal(arg3)));
10501 break;
10502
10503 #ifdef TARGET_NR_set_robust_list
10504 case TARGET_NR_set_robust_list:
10505 case TARGET_NR_get_robust_list:
10506 /* The ABI for supporting robust futexes has userspace pass
10507 * the kernel a pointer to a linked list which is updated by
10508 * userspace after the syscall; the list is walked by the kernel
10509 * when the thread exits. Since the linked list in QEMU guest
10510 * memory isn't a valid linked list for the host and we have
10511 * no way to reliably intercept the thread-death event, we can't
10512 * support these. Silently return ENOSYS so that guest userspace
10513 * falls back to a non-robust futex implementation (which should
10514 * be OK except in the corner case of the guest crashing while
10515 * holding a mutex that is shared with another process via
10516 * shared memory).
10517 */
10518 goto unimplemented_nowarn;
10519 #endif
10520
10521 #if defined(TARGET_NR_utimensat)
10522 case TARGET_NR_utimensat:
10523 {
10524 struct timespec *tsp, ts[2];
10525 if (!arg3) {
10526 tsp = NULL;
10527 } else {
10528 target_to_host_timespec(ts, arg3);
10529 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10530 tsp = ts;
10531 }
10532 if (!arg2)
10533 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10534 else {
10535 if (!(p = lock_user_string(arg2))) {
10536 ret = -TARGET_EFAULT;
10537 goto fail;
10538 }
10539 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10540 unlock_user(p, arg2, 0);
10541 }
10542 }
10543 break;
10544 #endif
10545 case TARGET_NR_futex:
10546 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10547 break;
10548 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10549 case TARGET_NR_inotify_init:
10550 ret = get_errno(sys_inotify_init());
10551 break;
10552 #endif
10553 #ifdef CONFIG_INOTIFY1
10554 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10555 case TARGET_NR_inotify_init1:
10556 ret = get_errno(sys_inotify_init1(arg1));
10557 break;
10558 #endif
10559 #endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* NOTE(review): lock_user_string() returns NULL for a bad guest
         * pointer, but p is passed to path() unchecked here -- verify
         * whether an efault path is needed as in other pathname
         * syscalls in this file. */
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
10567 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10568 case TARGET_NR_inotify_rm_watch:
10569 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10570 break;
10571 #endif
10572
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr, *attrp;

            /* NOTE(review): the queue name is locked from arg1 - 1, one
             * byte *before* the guest pointer, and the NULL return of
             * lock_user_string() is not checked before use -- both look
             * suspicious; confirm against how other pathname arguments
             * are handled in this file. */
            p = lock_user_string(arg1 - 1);
            if (arg4 != 0) {
                /* Optional struct mq_attr supplied by the guest. */
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
                attrp = &posix_mq_attr;
            } else {
                attrp = 0;
            }
            ret = get_errno(mq_open(p, arg2, arg3, attrp));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        /* NOTE(review): same arg1 - 1 / unchecked-NULL pattern as
         * mq_open above -- verify. */
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;
10595
10596 case TARGET_NR_mq_timedsend:
10597 {
10598 struct timespec ts;
10599
10600 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10601 if (arg5 != 0) {
10602 target_to_host_timespec(&ts, arg5);
10603 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10604 host_to_target_timespec(arg5, &ts);
10605 } else {
10606 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10607 }
10608 unlock_user (p, arg2, arg3);
10609 }
10610 break;
10611
10612 case TARGET_NR_mq_timedreceive:
10613 {
10614 struct timespec ts;
10615 unsigned int prio;
10616
10617 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10618 if (arg5 != 0) {
10619 target_to_host_timespec(&ts, arg5);
10620 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10621 &prio, &ts));
10622 host_to_target_timespec(arg5, &ts);
10623 } else {
10624 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10625 &prio, NULL));
10626 }
10627 unlock_user (p, arg2, arg3);
10628 if (arg4 != 0)
10629 put_user_u32(prio, arg4);
10630 }
10631 break;
10632
10633 /* Not implemented for now... */
10634 /* case TARGET_NR_mq_notify: */
10635 /* break; */
10636
10637 case TARGET_NR_mq_getsetattr:
10638 {
10639 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10640 ret = 0;
10641 if (arg3 != 0) {
10642 ret = mq_getattr(arg1, &posix_mq_attr_out);
10643 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10644 }
10645 if (arg2 != 0) {
10646 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10647 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
10648 }
10649
10650 }
10651 break;
10652 #endif
10653
10654 #ifdef CONFIG_SPLICE
10655 #ifdef TARGET_NR_tee
10656 case TARGET_NR_tee:
10657 {
10658 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10659 }
10660 break;
10661 #endif
10662 #ifdef TARGET_NR_splice
10663 case TARGET_NR_splice:
10664 {
10665 loff_t loff_in, loff_out;
10666 loff_t *ploff_in = NULL, *ploff_out = NULL;
10667 if (arg2) {
10668 if (get_user_u64(loff_in, arg2)) {
10669 goto efault;
10670 }
10671 ploff_in = &loff_in;
10672 }
10673 if (arg4) {
10674 if (get_user_u64(loff_out, arg4)) {
10675 goto efault;
10676 }
10677 ploff_out = &loff_out;
10678 }
10679 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10680 if (arg2) {
10681 if (put_user_u64(loff_in, arg2)) {
10682 goto efault;
10683 }
10684 }
10685 if (arg4) {
10686 if (put_user_u64(loff_out, arg4)) {
10687 goto efault;
10688 }
10689 }
10690 }
10691 break;
10692 #endif
10693 #ifdef TARGET_NR_vmsplice
10694 case TARGET_NR_vmsplice:
10695 {
10696 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10697 if (vec != NULL) {
10698 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10699 unlock_iovec(vec, arg2, arg3, 0);
10700 } else {
10701 ret = -host_to_target_errno(errno);
10702 }
10703 }
10704 break;
10705 #endif
10706 #endif /* CONFIG_SPLICE */
10707 #ifdef CONFIG_EVENTFD
10708 #if defined(TARGET_NR_eventfd)
10709 case TARGET_NR_eventfd:
10710 ret = get_errno(eventfd(arg1, 0));
10711 fd_trans_unregister(ret);
10712 break;
10713 #endif
10714 #if defined(TARGET_NR_eventfd2)
10715 case TARGET_NR_eventfd2:
10716 {
10717 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10718 if (arg2 & TARGET_O_NONBLOCK) {
10719 host_flags |= O_NONBLOCK;
10720 }
10721 if (arg2 & TARGET_O_CLOEXEC) {
10722 host_flags |= O_CLOEXEC;
10723 }
10724 ret = get_errno(eventfd(arg1, host_flags));
10725 fd_trans_unregister(ret);
10726 break;
10727 }
10728 #endif
10729 #endif /* CONFIG_EVENTFD */
10730 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10731 case TARGET_NR_fallocate:
10732 #if TARGET_ABI_BITS == 32
10733 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10734 target_offset64(arg5, arg6)));
10735 #else
10736 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10737 #endif
10738 break;
10739 #endif
10740 #if defined(CONFIG_SYNC_FILE_RANGE)
10741 #if defined(TARGET_NR_sync_file_range)
10742 case TARGET_NR_sync_file_range:
10743 #if TARGET_ABI_BITS == 32
10744 #if defined(TARGET_MIPS)
10745 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10746 target_offset64(arg5, arg6), arg7));
10747 #else
10748 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10749 target_offset64(arg4, arg5), arg6));
10750 #endif /* !TARGET_MIPS */
10751 #else
10752 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10753 #endif
10754 break;
10755 #endif
10756 #if defined(TARGET_NR_sync_file_range2)
10757 case TARGET_NR_sync_file_range2:
10758 /* This is like sync_file_range but the arguments are reordered */
10759 #if TARGET_ABI_BITS == 32
10760 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10761 target_offset64(arg5, arg6), arg2));
10762 #else
10763 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10764 #endif
10765 break;
10766 #endif
10767 #endif
10768 #if defined(TARGET_NR_signalfd4)
10769 case TARGET_NR_signalfd4:
10770 ret = do_signalfd4(arg1, arg2, arg4);
10771 break;
10772 #endif
10773 #if defined(TARGET_NR_signalfd)
10774 case TARGET_NR_signalfd:
10775 ret = do_signalfd4(arg1, arg2, 0);
10776 break;
10777 #endif
10778 #if defined(CONFIG_EPOLL)
10779 #if defined(TARGET_NR_epoll_create)
10780 case TARGET_NR_epoll_create:
10781 ret = get_errno(epoll_create(arg1));
10782 break;
10783 #endif
10784 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10785 case TARGET_NR_epoll_create1:
10786 ret = get_errno(epoll_create1(arg1));
10787 break;
10788 #endif
10789 #if defined(TARGET_NR_epoll_ctl)
10790 case TARGET_NR_epoll_ctl:
10791 {
10792 struct epoll_event ep;
10793 struct epoll_event *epp = 0;
10794 if (arg4) {
10795 struct target_epoll_event *target_ep;
10796 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10797 goto efault;
10798 }
10799 ep.events = tswap32(target_ep->events);
10800 /* The epoll_data_t union is just opaque data to the kernel,
10801 * so we transfer all 64 bits across and need not worry what
10802 * actual data type it is.
10803 */
10804 ep.data.u64 = tswap64(target_ep->data.u64);
10805 unlock_user_struct(target_ep, arg4, 0);
10806 epp = &ep;
10807 }
10808 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10809 break;
10810 }
10811 #endif
10812
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* args: int epfd, struct epoll_event *events, int maxevents,
         * int timeout (+ sigset for epoll_pwait). Shared implementation;
         * the inner switch on num picks the host syscall. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents before it is used in size computations: the
         * kernel returns EINVAL for maxevents <= 0, and an unchecked
         * large value would overflow the multiplications below and let
         * the guest request an enormous alloca. */
        if (maxevents <= 0 ||
            maxevents > (int)(INT_MAX / sizeof(struct target_epoll_event))) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            /* arg5 is the optional guest sigset; NULL means "don't
             * change the signal mask". */
            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        /* On success ret is the number of ready events; byte-swap each
         * one back into the guest's buffer. */
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
10881 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        /* Copy in the (optional) new limit from guest memory. */
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        /* Only ask the kernel for the old limits if the guest supplied
         * an old-rlimit pointer (arg4). */
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* args: char *name, size_t len — write the host name straight
         * into guest memory; no byte-swapping needed for a string. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* args: new value (arg1), expected value (arg2), guest address of
         * the word (arg6). Returns the value found at the address; the
         * store happens only when it matched the expected value. */
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* The guest address is unmapped: raise SIGSEGV. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
            /* Stop here: mem_value was never loaded, so falling through
             * would compare and return an uninitialized value (and
             * clobber the 0xdeadbeef marker just set). */
            break;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        /* Guest code only observes the syscall succeeding (ret 0). */
        ret = 0;
        break;
    }
#endif
10953
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Claim a free slot in the g_posix_timers table; the guest-visible
         * timer id encodes this index (tagged with TIMER_MAGIC below). */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            /* Table full: no host timer slot available. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            /* arg2 (sigevent) is optional; NULL means the default
             * notification (SIGEV_SIGNAL with SIGALRM per POSIX). */
            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the tagged slot index back to the guest as its
                 * timer id (written to *arg3). */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
10989
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            /* Fault if the new-value struct cannot be read from guest
             * memory (previously this failure was silently ignored). */
            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Copy the previous setting back to arg4 (old_value) — not
             * arg2, which is the flags argument — and only when the
             * guest actually supplied an old_value pointer. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
11013
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else if (!arg2) {
            /* Nowhere to store the result. */
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* Copy the current setting out to guest memory. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
11036
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* NOTE(review): fd_trans_unregister() is being passed the overrun
         * count, not a file descriptor — this looks misplaced here; verify
         * whether it was intended for an fd-returning syscall instead. */
        fd_trans_unregister(ret);
        break;
    }
#endif
11053
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Release the slot so next_free_host_timer() can reuse it. */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
11070
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* args: clockid, flags — the TFD_* flags are converted with the
         * fcntl flag table (they share the O_NONBLOCK/O_CLOEXEC encoding). */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif
11077
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            /* args: fd, struct itimerspec *curr_value */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* Copy the current setting out if the guest gave a pointer. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif
11091
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            /* args: fd, flags, const struct itimerspec *new_value,
             * struct itimerspec *old_value */
            struct itimerspec its_new, its_old, *p_new;

            /* new_value (arg3) is optional per the host API; pass NULL
             * through unchanged. */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* Copy the previous setting back to arg4 if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif
11114
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* args: which, who — integer arguments, no translation needed. */
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* args: which, who, ioprio — integer arguments, no translation. */
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif
11126
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* args: fd referring to a namespace, nstype restriction —
         * both plain integers, passed straight through. */
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* args: CLONE_* flags selecting which context parts to unshare. */
        ret = get_errno(unshare(arg1));
        break;
#endif
11137
    default:
    unimplemented:
        /* Unknown or unimplemented syscall: log it so the failure is
         * visible, then report ENOSYS to the guest. */
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn: /* entry for syscalls deliberately left unimplemented */
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
 fail:
    /* Common exit: optionally trace the result, then return it. */
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    return ret;
 efault:
    /* Shared guest-memory-fault exit used by the cases above. */
    ret = -TARGET_EFAULT;
    goto fail;
}