/*
 * QEMU linux-user syscall emulation layer (linux-user/syscall.c).
 * Mirrored from git.proxmox.com (mirror_qemu.git); this revision includes
 * the commit "linux-user: Fix wrong type used for argument to
 * rt_sigqueueinfo".
 */
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <linux/netlink.h>
104 #ifdef CONFIG_RTNETLINK
105 #include <linux/rtnetlink.h>
106 #endif
107 #include <linux/audit.h>
108 #include "linux_loop.h"
109 #include "uname.h"
110
111 #include "qemu.h"
112
113 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
114 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
115
116 //#define DEBUG
117 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
118 * once. This exercises the codepaths for restart.
119 */
120 //#define DEBUG_ERESTARTSYS
121
122 //#include <linux/msdos_fs.h>
123 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
124 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
125
126 #undef _syscall0
127 #undef _syscall1
128 #undef _syscall2
129 #undef _syscall3
130 #undef _syscall4
131 #undef _syscall5
132 #undef _syscall6
133
/*
 * Local replacements for the _syscallN() helper macros (the libc versions
 * were #undef'd above).  Each expands to a static function "name" taking
 * N typed arguments and invoking the raw host system call __NR_<name>
 * via syscall(2), so the return value / errno convention is the host's.
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
179
180
181 #define __NR_sys_uname __NR_uname
182 #define __NR_sys_getcwd1 __NR_getcwd
183 #define __NR_sys_getdents __NR_getdents
184 #define __NR_sys_getdents64 __NR_getdents64
185 #define __NR_sys_getpriority __NR_getpriority
186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
187 #define __NR_sys_syslog __NR_syslog
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
192
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 defined(__s390x__)
195 #define __NR__llseek __NR_lseek
196 #endif
197
198 /* Newer kernel ports have llseek() instead of _llseek() */
199 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
200 #define TARGET_NR__llseek TARGET_NR_llseek
201 #endif
202
#ifdef __NR_gettid
/* Host kernel provides gettid: call the raw syscall directly. */
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno.  (Callers treat the negative value as a host errno code.) */
static int gettid(void) {
    return -ENOSYS;
}
#endif
212 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
213 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
214 #endif
215 #if !defined(__NR_getdents) || \
216 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
217 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
218 #endif
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #ifdef __NR_exit_group
226 _syscall1(int,exit_group,int,error_code)
227 #endif
228 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
229 _syscall1(int,set_tid_address,int *,tidptr)
230 #endif
231 #if defined(TARGET_NR_futex) && defined(__NR_futex)
232 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
233 const struct timespec *,timeout,int *,uaddr2,int,val3)
234 #endif
235 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
236 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
237 unsigned long *, user_mask_ptr);
238 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
239 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
240 unsigned long *, user_mask_ptr);
241 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
242 void *, arg);
243 _syscall2(int, capget, struct __user_cap_header_struct *, header,
244 struct __user_cap_data_struct *, data);
245 _syscall2(int, capset, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
248 _syscall2(int, ioprio_get, int, which, int, who)
249 #endif
250 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
251 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
252 #endif
253 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
254 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
255 #endif
256
/* Translation table mapping target open(2)/fcntl(2) flag bits to the
 * host's encoding (and back); terminated by an all-zero sentinel entry. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
289
/* Per-fd translation hooks: optional callbacks used to convert data and
 * socket addresses flowing through a given fd between host and target
 * representations.  Any hook pointer may be NULL (no conversion). */
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;
    TargetFdDataFunc target_to_host_data;
    TargetFdAddrFunc target_to_host_addr;
} TargetFdTrans;

/* Table of registered translators indexed by fd; grown on demand by
 * fd_trans_register() in 64-entry slices. */
static TargetFdTrans **target_fd_trans;

/* Current number of slots allocated in target_fd_trans[]. */
static unsigned int target_fd_max;
301
302 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
303 {
304 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
305 return target_fd_trans[fd]->target_to_host_data;
306 }
307 return NULL;
308 }
309
310 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
311 {
312 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
313 return target_fd_trans[fd]->host_to_target_data;
314 }
315 return NULL;
316 }
317
318 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
319 {
320 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
321 return target_fd_trans[fd]->target_to_host_addr;
322 }
323 return NULL;
324 }
325
326 static void fd_trans_register(int fd, TargetFdTrans *trans)
327 {
328 unsigned int oldmax;
329
330 if (fd >= target_fd_max) {
331 oldmax = target_fd_max;
332 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
333 target_fd_trans = g_renew(TargetFdTrans *,
334 target_fd_trans, target_fd_max);
335 memset((void *)(target_fd_trans + oldmax), 0,
336 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
337 }
338 target_fd_trans[fd] = trans;
339 }
340
341 static void fd_trans_unregister(int fd)
342 {
343 if (fd >= 0 && fd < target_fd_max) {
344 target_fd_trans[fd] = NULL;
345 }
346 }
347
348 static void fd_trans_dup(int oldfd, int newfd)
349 {
350 fd_trans_unregister(newfd);
351 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
352 fd_trans_register(newfd, target_fd_trans[oldfd]);
353 }
354 }
355
/*
 * getcwd() helper: on success return the length of the path including
 * the trailing NUL; on failure return -1 with errno already set by
 * getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(cwd) + 1;
}
364
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc provides utimensat(): dispatch a NULL pathname to
 * futimens(), matching the kernel's NULL-pathname behaviour. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper but the host kernel has the syscall: call it raw. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor host kernel support: always fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
388
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers around the host inotify libc calls so the syscall
 * dispatcher can use uniform sys_* names. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify, so when the
 * host has no support simply hide the target syscall numbers and let
 * the guest see ENOSYS. */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
425
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
/* Host kernel headers lack prlimit64: syscall(-1) will simply fail
 * (presumably with ENOSYS — behaviour depends on the host kernel). */
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
440
441
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Find a free slot in g_posix_timers[], mark it in use with the dummy
 * value 1, and return its index; -1 when all 32 slots are taken. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
459
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers;
 * regpairs_aligned() reports whether the current guest ABI needs that. */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
475
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init().  A zero entry means
 * "no override": the lookup helpers below then pass the value through
 * unchanged. */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
482
483 /*
484 * This list is the union of errno values overridden in asm-<arch>/errno.h
485 * minus the errnos that are not actually generic to all archs.
486 */
487 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
488 [EAGAIN] = TARGET_EAGAIN,
489 [EIDRM] = TARGET_EIDRM,
490 [ECHRNG] = TARGET_ECHRNG,
491 [EL2NSYNC] = TARGET_EL2NSYNC,
492 [EL3HLT] = TARGET_EL3HLT,
493 [EL3RST] = TARGET_EL3RST,
494 [ELNRNG] = TARGET_ELNRNG,
495 [EUNATCH] = TARGET_EUNATCH,
496 [ENOCSI] = TARGET_ENOCSI,
497 [EL2HLT] = TARGET_EL2HLT,
498 [EDEADLK] = TARGET_EDEADLK,
499 [ENOLCK] = TARGET_ENOLCK,
500 [EBADE] = TARGET_EBADE,
501 [EBADR] = TARGET_EBADR,
502 [EXFULL] = TARGET_EXFULL,
503 [ENOANO] = TARGET_ENOANO,
504 [EBADRQC] = TARGET_EBADRQC,
505 [EBADSLT] = TARGET_EBADSLT,
506 [EBFONT] = TARGET_EBFONT,
507 [ENOSTR] = TARGET_ENOSTR,
508 [ENODATA] = TARGET_ENODATA,
509 [ETIME] = TARGET_ETIME,
510 [ENOSR] = TARGET_ENOSR,
511 [ENONET] = TARGET_ENONET,
512 [ENOPKG] = TARGET_ENOPKG,
513 [EREMOTE] = TARGET_EREMOTE,
514 [ENOLINK] = TARGET_ENOLINK,
515 [EADV] = TARGET_EADV,
516 [ESRMNT] = TARGET_ESRMNT,
517 [ECOMM] = TARGET_ECOMM,
518 [EPROTO] = TARGET_EPROTO,
519 [EDOTDOT] = TARGET_EDOTDOT,
520 [EMULTIHOP] = TARGET_EMULTIHOP,
521 [EBADMSG] = TARGET_EBADMSG,
522 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
523 [EOVERFLOW] = TARGET_EOVERFLOW,
524 [ENOTUNIQ] = TARGET_ENOTUNIQ,
525 [EBADFD] = TARGET_EBADFD,
526 [EREMCHG] = TARGET_EREMCHG,
527 [ELIBACC] = TARGET_ELIBACC,
528 [ELIBBAD] = TARGET_ELIBBAD,
529 [ELIBSCN] = TARGET_ELIBSCN,
530 [ELIBMAX] = TARGET_ELIBMAX,
531 [ELIBEXEC] = TARGET_ELIBEXEC,
532 [EILSEQ] = TARGET_EILSEQ,
533 [ENOSYS] = TARGET_ENOSYS,
534 [ELOOP] = TARGET_ELOOP,
535 [ERESTART] = TARGET_ERESTART,
536 [ESTRPIPE] = TARGET_ESTRPIPE,
537 [ENOTEMPTY] = TARGET_ENOTEMPTY,
538 [EUSERS] = TARGET_EUSERS,
539 [ENOTSOCK] = TARGET_ENOTSOCK,
540 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
541 [EMSGSIZE] = TARGET_EMSGSIZE,
542 [EPROTOTYPE] = TARGET_EPROTOTYPE,
543 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
544 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
545 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
546 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
547 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
548 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
549 [EADDRINUSE] = TARGET_EADDRINUSE,
550 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
551 [ENETDOWN] = TARGET_ENETDOWN,
552 [ENETUNREACH] = TARGET_ENETUNREACH,
553 [ENETRESET] = TARGET_ENETRESET,
554 [ECONNABORTED] = TARGET_ECONNABORTED,
555 [ECONNRESET] = TARGET_ECONNRESET,
556 [ENOBUFS] = TARGET_ENOBUFS,
557 [EISCONN] = TARGET_EISCONN,
558 [ENOTCONN] = TARGET_ENOTCONN,
559 [EUCLEAN] = TARGET_EUCLEAN,
560 [ENOTNAM] = TARGET_ENOTNAM,
561 [ENAVAIL] = TARGET_ENAVAIL,
562 [EISNAM] = TARGET_EISNAM,
563 [EREMOTEIO] = TARGET_EREMOTEIO,
564 [ESHUTDOWN] = TARGET_ESHUTDOWN,
565 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
566 [ETIMEDOUT] = TARGET_ETIMEDOUT,
567 [ECONNREFUSED] = TARGET_ECONNREFUSED,
568 [EHOSTDOWN] = TARGET_EHOSTDOWN,
569 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
570 [EALREADY] = TARGET_EALREADY,
571 [EINPROGRESS] = TARGET_EINPROGRESS,
572 [ESTALE] = TARGET_ESTALE,
573 [ECANCELED] = TARGET_ECANCELED,
574 [ENOMEDIUM] = TARGET_ENOMEDIUM,
575 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
576 #ifdef ENOKEY
577 [ENOKEY] = TARGET_ENOKEY,
578 #endif
579 #ifdef EKEYEXPIRED
580 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
581 #endif
582 #ifdef EKEYREVOKED
583 [EKEYREVOKED] = TARGET_EKEYREVOKED,
584 #endif
585 #ifdef EKEYREJECTED
586 [EKEYREJECTED] = TARGET_EKEYREJECTED,
587 #endif
588 #ifdef EOWNERDEAD
589 [EOWNERDEAD] = TARGET_EOWNERDEAD,
590 #endif
591 #ifdef ENOTRECOVERABLE
592 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
593 #endif
594 };
595
596 static inline int host_to_target_errno(int err)
597 {
598 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
599 host_to_target_errno_table[err]) {
600 return host_to_target_errno_table[err];
601 }
602 return err;
603 }
604
605 static inline int target_to_host_errno(int err)
606 {
607 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
608 target_to_host_errno_table[err]) {
609 return target_to_host_errno_table[err];
610 }
611 return err;
612 }
613
614 static inline abi_long get_errno(abi_long ret)
615 {
616 if (ret == -1)
617 return -host_to_target_errno(errno);
618 else
619 return ret;
620 }
621
622 static inline int is_error(abi_long ret)
623 {
624 return (abi_ulong)ret >= (abi_ulong)(-4096);
625 }
626
627 const char *target_strerror(int err)
628 {
629 if (err == TARGET_ERESTARTSYS) {
630 return "To be restarted";
631 }
632 if (err == TARGET_QEMU_ESIGRETURN) {
633 return "Successful exit from sigreturn";
634 }
635
636 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
637 return NULL;
638 }
639 return strerror(target_to_host_errno(err));
640 }
641
/*
 * safe_syscallN() macro family: like _syscallN() above, but each expands
 * to a static safe_<name>() function that routes through safe_syscall()
 * (defined elsewhere).  NOTE(review): safe_syscall() appears to be the
 * variant that cooperates with the signal/restart machinery mentioned in
 * the DEBUG_ERESTARTSYS note near the top of this file — confirm against
 * its definition.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
688
689 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
690 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
691 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
692 int, flags, mode_t, mode)
693 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
694 struct rusage *, rusage)
695 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
696 int, options, struct rusage *, rusage)
697 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
698 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
699 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
700 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
701 struct timespec *, tsp, const sigset_t *, sigmask,
702 size_t, sigsetsize)
703 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
704 int, maxevents, int, timeout, const sigset_t *, sigmask,
705 size_t, sigsetsize)
706 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
707 const struct timespec *,timeout,int *,uaddr2,int,val3)
708 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
709 safe_syscall2(int, kill, pid_t, pid, int, sig)
710 safe_syscall2(int, tkill, int, tid, int, sig)
711 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
712 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
713 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
714 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
715 socklen_t, addrlen)
716 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
717 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
718 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
719 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
720 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
721 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
722 safe_syscall2(int, flock, int, fd, int, operation)
723 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
724 const struct timespec *, uts, size_t, sigsetsize)
725 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
726 int, flags)
727 safe_syscall2(int, nanosleep, const struct timespec *, req,
728 struct timespec *, rem)
729 #ifdef TARGET_NR_clock_nanosleep
730 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
731 const struct timespec *, req, struct timespec *, rem)
732 #endif
#ifdef __NR_msgsnd
/* Host exposes the individual SysV IPC syscalls: wrap them directly. */
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
/* msgsnd via ipc(2) multiplexer. */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv via ipc(2) multiplexer; note version 1 of the call encoding. */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
/* semtimedop via ipc(2) multiplexer; timeout travels in the 5th slot. */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
770 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
771 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
772 size_t, len, unsigned, prio, const struct timespec *, timeout)
773 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
774 size_t, len, unsigned *, prio, const struct timespec *, timeout)
775 #endif
776 /* We do ioctl like this rather than via safe_syscall3 to preserve the
777 * "third argument might be integer or pointer or not present" behaviour of
778 * the libc function.
779 */
780 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
781 /* Similarly for fcntl. Note that callers must always:
782 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
783 * use the flock64 struct rather than unsuffixed flock
784 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
785 */
786 #ifdef __NR_fcntl64
787 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
788 #else
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
790 #endif
791
792 static inline int host_to_target_sock_type(int host_type)
793 {
794 int target_type;
795
796 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
797 case SOCK_DGRAM:
798 target_type = TARGET_SOCK_DGRAM;
799 break;
800 case SOCK_STREAM:
801 target_type = TARGET_SOCK_STREAM;
802 break;
803 default:
804 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
805 break;
806 }
807
808 #if defined(SOCK_CLOEXEC)
809 if (host_type & SOCK_CLOEXEC) {
810 target_type |= TARGET_SOCK_CLOEXEC;
811 }
812 #endif
813
814 #if defined(SOCK_NONBLOCK)
815 if (host_type & SOCK_NONBLOCK) {
816 target_type |= TARGET_SOCK_NONBLOCK;
817 }
818 #endif
819
820 return target_type;
821 }
822
/* Guest heap state for do_brk(): the current break, the break as set at
 * program start, and the end of the highest host page reserved for the
 * guest heap so far. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the initial guest program break (rounded up to a host page). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
832
833 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
834 #define DEBUGF_BRK(message, args...)
835
/* do_brk() must return target values and target errnos.
 *
 * Implements the guest brk(2): returns the (possibly unchanged) break
 * address, growing the heap mapping on demand via target_mmap().  A
 * new_brk of 0 or one below the original break just reports the current
 * break, matching Linux brk() behaviour. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}
913
/* Convert a guest fd_set at target_fds_addr (an array of abi_ulong bit
 * words covering n fds, possibly byte-swapped) into the host fd_set
 * *fds.  Returns 0 on success or -TARGET_EFAULT when the guest memory
 * cannot be locked for reading. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
945
946 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
947 abi_ulong target_fds_addr,
948 int n)
949 {
950 if (target_fds_addr) {
951 if (copy_from_user_fdset(fds, target_fds_addr, n))
952 return -TARGET_EFAULT;
953 *fds_ptr = fds;
954 } else {
955 *fds_ptr = NULL;
956 }
957 return 0;
958 }
959
960 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
961 const fd_set *fds,
962 int n)
963 {
964 int i, nw, j, k;
965 abi_long v;
966 abi_ulong *target_fds;
967
968 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
969 if (!(target_fds = lock_user(VERIFY_WRITE,
970 target_fds_addr,
971 sizeof(abi_ulong) * nw,
972 0)))
973 return -TARGET_EFAULT;
974
975 k = 0;
976 for (i = 0; i < nw; i++) {
977 v = 0;
978 for (j = 0; j < TARGET_ABI_BITS; j++) {
979 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
980 k++;
981 }
982 __put_user(v, &target_fds[i]);
983 }
984
985 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
986
987 return 0;
988 }
989
990 #if defined(__alpha__)
991 #define HOST_HZ 1024
992 #else
993 #define HOST_HZ 100
994 #endif
995
/* Rescale a host clock tick count (HOST_HZ ticks per second) to the
 * guest's tick rate (TARGET_HZ).  The multiplication is done in
 * 64 bits to avoid overflow of the intermediate product. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1004
/* Copy a host struct rusage out to the guest structure at target_addr,
 * byte-swapping every field to target order.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is not
 * writable. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1034
/* Convert a guest rlimit value (in target byte order) to the host
 * rlim_t.  The guest's infinity encoding maps to RLIM_INFINITY, and
 * any value that does not survive the conversion to rlim_t is
 * saturated to RLIM_INFINITY rather than silently truncated. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Round-trip check: if the value was changed by conversion to
     * rlim_t, it does not fit in the host type. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
1050
/* Convert a host rlim_t to the guest's encoding (in target byte
 * order).  RLIM_INFINITY, and any value too large to represent in
 * the guest's abi_long, becomes TARGET_RLIM_INFINITY. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
1064
/* Map a guest RLIMIT_* resource code to the host's numbering.
 * Unknown codes are passed through unchanged and left for the host
 * syscall to reject. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1102
1103 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1104 abi_ulong target_tv_addr)
1105 {
1106 struct target_timeval *target_tv;
1107
1108 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1109 return -TARGET_EFAULT;
1110
1111 __get_user(tv->tv_sec, &target_tv->tv_sec);
1112 __get_user(tv->tv_usec, &target_tv->tv_usec);
1113
1114 unlock_user_struct(target_tv, target_tv_addr, 0);
1115
1116 return 0;
1117 }
1118
1119 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1120 const struct timeval *tv)
1121 {
1122 struct target_timeval *target_tv;
1123
1124 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1125 return -TARGET_EFAULT;
1126
1127 __put_user(tv->tv_sec, &target_tv->tv_sec);
1128 __put_user(tv->tv_usec, &target_tv->tv_usec);
1129
1130 unlock_user_struct(target_tv, target_tv_addr, 1);
1131
1132 return 0;
1133 }
1134
1135 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1136 abi_ulong target_tz_addr)
1137 {
1138 struct target_timezone *target_tz;
1139
1140 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1141 return -TARGET_EFAULT;
1142 }
1143
1144 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1145 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1146
1147 unlock_user_struct(target_tz, target_tz_addr, 0);
1148
1149 return 0;
1150 }
1151
1152 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1153 #include <mqueue.h>
1154
1155 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1156 abi_ulong target_mq_attr_addr)
1157 {
1158 struct target_mq_attr *target_mq_attr;
1159
1160 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1161 target_mq_attr_addr, 1))
1162 return -TARGET_EFAULT;
1163
1164 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1165 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1166 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1167 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1168
1169 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1170
1171 return 0;
1172 }
1173
1174 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1175 const struct mq_attr *attr)
1176 {
1177 struct target_mq_attr *target_mq_attr;
1178
1179 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1180 target_mq_attr_addr, 0))
1181 return -TARGET_EFAULT;
1182
1183 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1184 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1185 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1186 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1187
1188 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1189
1190 return 0;
1191 }
1192 #endif
1193
1194 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1195 /* do_select() must return target values and target errnos. */
/* Implement the select()/newselect() syscall for the guest.
 *
 * n: highest-numbered descriptor plus one; rfd/wfd/efd_addr: guest
 * addresses of the three fd sets (0 means "not supplied");
 * target_tv_addr: guest address of the timeout (0 means block
 * indefinitely).
 *
 * The sets and timeout are converted to host format, the host
 * safe_pselect6() is invoked, and on success the (possibly modified)
 * sets and remaining timeout are copied back to the guest.
 * Returns the host result converted via get_errno(), i.e. target
 * values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* pselect6 takes a timespec, so convert from the guest's
         * timeval (microseconds) to nanoseconds. */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux's select() updates the timeout with the time not
         * slept; mirror that back into the guest's timeval. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1251 #endif
1252
/* Thin wrapper around the host pipe2(); returns -ENOSYS when the
 * host libc does not provide pipe2() (CONFIG_PIPE2 unset). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1261
/* Common implementation for the guest pipe() and pipe2() syscalls.
 *
 * flags: pipe2 flags (0 selects plain pipe()); is_pipe2: nonzero when
 * the guest invoked pipe2, which always uses the generic "write two
 * fds to pipedes" convention even on targets whose pipe() ABI
 * returns the descriptors in registers. */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* These targets return the second fd in a CPU register and the
         * first as the syscall result. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic convention: store both descriptors into the guest's
     * int[2] array at pipedes. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1295
1296 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1297 abi_ulong target_addr,
1298 socklen_t len)
1299 {
1300 struct target_ip_mreqn *target_smreqn;
1301
1302 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1303 if (!target_smreqn)
1304 return -TARGET_EFAULT;
1305 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1306 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1307 if (len == sizeof(struct target_ip_mreqn))
1308 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1309 unlock_user(target_smreqn, target_addr, 0);
1310
1311 return 0;
1312 }
1313
/* Convert a guest sockaddr at target_addr (len bytes) into the host
 * struct *addr, applying per-family fixups.  If the fd has a
 * registered address translator (e.g. netlink fds), that takes over
 * entirely.  Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte is non-NUL but the byte just past the
             * supplied length is: extend len to include the
             * terminator the caller forgot to count. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* already host-order from tswap16 */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1370
/* Copy a host sockaddr out to the guest buffer at target_addr,
 * byte-swapping sa_family and, for AF_NETLINK, the pid/groups
 * fields.  Other families are copied verbatim.
 * Returns 0 on success or -TARGET_EFAULT.
 * NOTE(review): unlike target_to_host_sockaddr there is no AF_PACKET
 * fixup here — confirm whether callers ever return packet addresses. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    if (addr->sa_family == AF_NETLINK) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1391
/* Convert the ancillary-data (control message) chain of a guest
 * msghdr into the host msghdr's pre-allocated control buffer.
 * Walks both cmsg chains in lockstep, translating the headers and
 * the payload of the types we understand (SCM_RIGHTS,
 * SCM_CREDENTIALS); unknown payloads are copied verbatim with a
 * warning.  On return msgh->msg_controllen holds the space actually
 * used.  Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length = total cmsg length minus the aligned header */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* file descriptors: each int is swapped individually */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* unknown payload: pass through unmodified */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1473
/* Convert the ancillary-data (control message) chain received in a
 * host msghdr back into the guest msghdr's control buffer, walking
 * both cmsg chains in lockstep.  Payloads of known types
 * (SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS) are converted field by
 * field; unknown types are copied raw with a warning.  Truncation
 * due to a too-small guest buffer is reported via MSG_CTRUNC, as
 * Linux does.  Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length = total cmsg length minus the aligned header */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fallthrough (harmless: default only breaks) */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* raw copy, zero-padding if the target slot is larger */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1617
1618 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1619 {
1620 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1621 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1622 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1623 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1624 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
1625 }
1626
/* Walk a buffer of host-order netlink messages of total size len,
 * converting each payload via the supplied callback and then
 * byte-swapping the header to target order.  NLMSG_DONE and
 * NLMSG_ERROR terminate the walk; a malformed length field stops it
 * silently.  Returns 0, or the first negative error from the
 * callback. */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* save the host-order length before the header gets swapped */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
1671
1672 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1673 size_t len,
1674 abi_long (*target_to_host_nlmsg)
1675 (struct nlmsghdr *))
1676 {
1677 int ret;
1678
1679 while (len > sizeof(struct nlmsghdr)) {
1680 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1681 tswap32(nlh->nlmsg_len) > len) {
1682 break;
1683 }
1684 tswap_nlmsghdr(nlh);
1685 switch (nlh->nlmsg_type) {
1686 case NLMSG_DONE:
1687 return 0;
1688 case NLMSG_NOOP:
1689 break;
1690 case NLMSG_ERROR:
1691 {
1692 struct nlmsgerr *e = NLMSG_DATA(nlh);
1693 e->error = tswap32(e->error);
1694 tswap_nlmsghdr(&e->msg);
1695 }
1696 default:
1697 ret = target_to_host_nlmsg(nlh);
1698 if (ret < 0) {
1699 return ret;
1700 }
1701 }
1702 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1703 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1704 }
1705 return 0;
1706 }
1707
1708 #ifdef CONFIG_RTNETLINK
/* Walk a chain of host-order rtnetlink attributes of total size len,
 * converting each payload via the supplied callback and then
 * byte-swapping the attribute header to target order.  A malformed
 * length field stops the walk silently.  Returns 0, or the first
 * negative error from the callback. */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* save the host-order length before the header gets swapped */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
1734
/* Byte-swap the payload of a single IFLA_* link attribute from host
 * to target order, based on the attribute's known payload type.
 * Byte strings and uint8_t payloads are left untouched; unknown and
 * nested attributes are logged and passed through unconverted.
 * Always returns 0. */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;

    switch (rtattr->rta_type) {
    /* binary stream */
    case IFLA_ADDRESS:
    case IFLA_BROADCAST:
    /* string */
    case IFLA_IFNAME:
    case IFLA_QDISC:
        break;
    /* uint8_t */
    case IFLA_OPERSTATE:
    case IFLA_LINKMODE:
    case IFLA_CARRIER:
    case IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case IFLA_MTU:
    case IFLA_LINK:
    case IFLA_WEIGHT:
    case IFLA_TXQLEN:
    case IFLA_CARRIER_CHANGES:
    case IFLA_NUM_RX_QUEUES:
    case IFLA_NUM_TX_QUEUES:
    case IFLA_PROMISCUITY:
    case IFLA_EXT_MASK:
    case IFLA_LINK_NETNSID:
    case IFLA_GROUP:
    case IFLA_MASTER:
    case IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case IFLA_AF_SPEC:
    case IFLA_LINKINFO:
        /* FIXME: implement nested type */
        gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
        break;
    default:
        gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1859
/* Byte-swap the payload of a single IFA_* address attribute from
 * host to target order based on its known payload type.  Binary
 * addresses and strings are left untouched; unknown attributes are
 * logged and passed through.  Always returns 0. */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    /* NOTE(review): for AF_INET, IFA_BROADCAST carries an address in
     * network byte order, like IFA_ADDRESS/IFA_LOCAL above; swapping
     * it as a host-order u32 looks suspicious — confirm. */
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1893
1894 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
1895 {
1896 uint32_t *u32;
1897 switch (rtattr->rta_type) {
1898 /* binary: depends on family type */
1899 case RTA_GATEWAY:
1900 case RTA_DST:
1901 case RTA_PREFSRC:
1902 break;
1903 /* u32 */
1904 case RTA_PRIORITY:
1905 case RTA_TABLE:
1906 case RTA_OIF:
1907 u32 = RTA_DATA(rtattr);
1908 *u32 = tswap32(*u32);
1909 break;
1910 default:
1911 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
1912 break;
1913 }
1914 return 0;
1915 }
1916
/* Convert a chain of link (IFLA_*) attributes to target order. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
1923
/* Convert a chain of address (IFA_*) attributes to target order. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
1930
/* Convert a chain of route (RTA_*) attributes to target order. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
1937
/* Convert the payload of one NETLINK_ROUTE message from host to
 * target order: the fixed header (ifinfomsg/ifaddrmsg/rtmsg,
 * depending on message type) plus its trailing attribute chain.
 * Returns 0 on success or -TARGET_EINVAL for unhandled message
 * types. */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        host_to_target_link_rtattr(IFLA_RTA(ifi),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        host_to_target_addr_rtattr(IFA_RTA(ifa),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        host_to_target_route_rtattr(RTM_RTA(rtm),
                                    nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
1979
/* Convert a buffer of NETLINK_ROUTE messages to target order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
1985
/* Walk a chain of guest-order rtnetlink attributes of total size
 * len, byte-swapping each header to host order and converting each
 * payload via the supplied callback.  A malformed length field stops
 * the walk silently.  Returns 0, or the first negative error from
 * the callback. */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* validate the (still target-order) length field before use */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2010
2011 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2012 {
2013 switch (rtattr->rta_type) {
2014 default:
2015 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
2016 break;
2017 }
2018 return 0;
2019 }
2020
2021 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2022 {
2023 switch (rtattr->rta_type) {
2024 /* binary: depends on family type */
2025 case IFA_LOCAL:
2026 case IFA_ADDRESS:
2027 break;
2028 default:
2029 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2030 break;
2031 }
2032 return 0;
2033 }
2034
2035 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2036 {
2037 uint32_t *u32;
2038 switch (rtattr->rta_type) {
2039 /* binary: depends on family type */
2040 case RTA_DST:
2041 case RTA_SRC:
2042 case RTA_GATEWAY:
2043 break;
2044 /* u32 */
2045 case RTA_OIF:
2046 u32 = RTA_DATA(rtattr);
2047 *u32 = tswap32(*u32);
2048 break;
2049 default:
2050 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2051 break;
2052 }
2053 return 0;
2054 }
2055
/* Convert all IFLA_* attributes in a target RTM_*LINK message payload.
 * NOTE(review): the callback's error return is silently dropped; today the
 * link callback can only return 0, but this would hide future failures. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
2062
/* Convert all IFA_* attributes in a target RTM_*ADDR message payload.
 * NOTE(review): any error from the walk is discarded (void return). */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
2069
/* Convert all RTA_* attributes in a target RTM_*ROUTE message payload.
 * NOTE(review): any error from the walk is discarded (void return). */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
2076
2077 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2078 {
2079 struct ifinfomsg *ifi;
2080 struct ifaddrmsg *ifa;
2081 struct rtmsg *rtm;
2082
2083 switch (nlh->nlmsg_type) {
2084 case RTM_GETLINK:
2085 break;
2086 case RTM_NEWLINK:
2087 case RTM_DELLINK:
2088 ifi = NLMSG_DATA(nlh);
2089 ifi->ifi_type = tswap16(ifi->ifi_type);
2090 ifi->ifi_index = tswap32(ifi->ifi_index);
2091 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2092 ifi->ifi_change = tswap32(ifi->ifi_change);
2093 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2094 NLMSG_LENGTH(sizeof(*ifi)));
2095 break;
2096 case RTM_GETADDR:
2097 case RTM_NEWADDR:
2098 case RTM_DELADDR:
2099 ifa = NLMSG_DATA(nlh);
2100 ifa->ifa_index = tswap32(ifa->ifa_index);
2101 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2102 NLMSG_LENGTH(sizeof(*ifa)));
2103 break;
2104 case RTM_GETROUTE:
2105 break;
2106 case RTM_NEWROUTE:
2107 case RTM_DELROUTE:
2108 rtm = NLMSG_DATA(nlh);
2109 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2110 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2111 NLMSG_LENGTH(sizeof(*rtm)));
2112 break;
2113 default:
2114 return -TARGET_EOPNOTSUPP;
2115 }
2116 return 0;
2117 }
2118
/* Walk a buffer of guest rtnetlink messages of total size @len and convert
 * each one to host byte order via target_to_host_data_route(). */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2123 #endif /* CONFIG_RTNETLINK */
2124
2125 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2126 {
2127 switch (nlh->nlmsg_type) {
2128 default:
2129 gemu_log("Unknown host audit message type %d\n",
2130 nlh->nlmsg_type);
2131 return -TARGET_EINVAL;
2132 }
2133 return 0;
2134 }
2135
/* Walk a buffer of host audit netlink messages of total size @len and
 * convert each one to target byte order. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2141
2142 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2143 {
2144 switch (nlh->nlmsg_type) {
2145 case AUDIT_USER:
2146 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2147 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2148 break;
2149 default:
2150 gemu_log("Unknown target audit message type %d\n",
2151 nlh->nlmsg_type);
2152 return -TARGET_EINVAL;
2153 }
2154
2155 return 0;
2156 }
2157
/* Walk a buffer of guest audit netlink messages of total size @len and
 * convert each one to host byte order. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2162
2163 /* do_setsockopt() Must return target values and target errnos. */
2164 static abi_long do_setsockopt(int sockfd, int level, int optname,
2165 abi_ulong optval_addr, socklen_t optlen)
2166 {
2167 abi_long ret;
2168 int val;
2169 struct ip_mreqn *ip_mreq;
2170 struct ip_mreq_source *ip_mreq_source;
2171
2172 switch(level) {
2173 case SOL_TCP:
2174 /* TCP options all take an 'int' value. */
2175 if (optlen < sizeof(uint32_t))
2176 return -TARGET_EINVAL;
2177
2178 if (get_user_u32(val, optval_addr))
2179 return -TARGET_EFAULT;
2180 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2181 break;
2182 case SOL_IP:
2183 switch(optname) {
2184 case IP_TOS:
2185 case IP_TTL:
2186 case IP_HDRINCL:
2187 case IP_ROUTER_ALERT:
2188 case IP_RECVOPTS:
2189 case IP_RETOPTS:
2190 case IP_PKTINFO:
2191 case IP_MTU_DISCOVER:
2192 case IP_RECVERR:
2193 case IP_RECVTOS:
2194 #ifdef IP_FREEBIND
2195 case IP_FREEBIND:
2196 #endif
2197 case IP_MULTICAST_TTL:
2198 case IP_MULTICAST_LOOP:
2199 val = 0;
2200 if (optlen >= sizeof(uint32_t)) {
2201 if (get_user_u32(val, optval_addr))
2202 return -TARGET_EFAULT;
2203 } else if (optlen >= 1) {
2204 if (get_user_u8(val, optval_addr))
2205 return -TARGET_EFAULT;
2206 }
2207 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2208 break;
2209 case IP_ADD_MEMBERSHIP:
2210 case IP_DROP_MEMBERSHIP:
2211 if (optlen < sizeof (struct target_ip_mreq) ||
2212 optlen > sizeof (struct target_ip_mreqn))
2213 return -TARGET_EINVAL;
2214
2215 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2216 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2217 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2218 break;
2219
2220 case IP_BLOCK_SOURCE:
2221 case IP_UNBLOCK_SOURCE:
2222 case IP_ADD_SOURCE_MEMBERSHIP:
2223 case IP_DROP_SOURCE_MEMBERSHIP:
2224 if (optlen != sizeof (struct target_ip_mreq_source))
2225 return -TARGET_EINVAL;
2226
2227 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2228 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2229 unlock_user (ip_mreq_source, optval_addr, 0);
2230 break;
2231
2232 default:
2233 goto unimplemented;
2234 }
2235 break;
2236 case SOL_IPV6:
2237 switch (optname) {
2238 case IPV6_MTU_DISCOVER:
2239 case IPV6_MTU:
2240 case IPV6_V6ONLY:
2241 case IPV6_RECVPKTINFO:
2242 val = 0;
2243 if (optlen < sizeof(uint32_t)) {
2244 return -TARGET_EINVAL;
2245 }
2246 if (get_user_u32(val, optval_addr)) {
2247 return -TARGET_EFAULT;
2248 }
2249 ret = get_errno(setsockopt(sockfd, level, optname,
2250 &val, sizeof(val)));
2251 break;
2252 default:
2253 goto unimplemented;
2254 }
2255 break;
2256 case SOL_RAW:
2257 switch (optname) {
2258 case ICMP_FILTER:
2259 /* struct icmp_filter takes an u32 value */
2260 if (optlen < sizeof(uint32_t)) {
2261 return -TARGET_EINVAL;
2262 }
2263
2264 if (get_user_u32(val, optval_addr)) {
2265 return -TARGET_EFAULT;
2266 }
2267 ret = get_errno(setsockopt(sockfd, level, optname,
2268 &val, sizeof(val)));
2269 break;
2270
2271 default:
2272 goto unimplemented;
2273 }
2274 break;
2275 case TARGET_SOL_SOCKET:
2276 switch (optname) {
2277 case TARGET_SO_RCVTIMEO:
2278 {
2279 struct timeval tv;
2280
2281 optname = SO_RCVTIMEO;
2282
2283 set_timeout:
2284 if (optlen != sizeof(struct target_timeval)) {
2285 return -TARGET_EINVAL;
2286 }
2287
2288 if (copy_from_user_timeval(&tv, optval_addr)) {
2289 return -TARGET_EFAULT;
2290 }
2291
2292 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2293 &tv, sizeof(tv)));
2294 return ret;
2295 }
2296 case TARGET_SO_SNDTIMEO:
2297 optname = SO_SNDTIMEO;
2298 goto set_timeout;
2299 case TARGET_SO_ATTACH_FILTER:
2300 {
2301 struct target_sock_fprog *tfprog;
2302 struct target_sock_filter *tfilter;
2303 struct sock_fprog fprog;
2304 struct sock_filter *filter;
2305 int i;
2306
2307 if (optlen != sizeof(*tfprog)) {
2308 return -TARGET_EINVAL;
2309 }
2310 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2311 return -TARGET_EFAULT;
2312 }
2313 if (!lock_user_struct(VERIFY_READ, tfilter,
2314 tswapal(tfprog->filter), 0)) {
2315 unlock_user_struct(tfprog, optval_addr, 1);
2316 return -TARGET_EFAULT;
2317 }
2318
2319 fprog.len = tswap16(tfprog->len);
2320 filter = g_try_new(struct sock_filter, fprog.len);
2321 if (filter == NULL) {
2322 unlock_user_struct(tfilter, tfprog->filter, 1);
2323 unlock_user_struct(tfprog, optval_addr, 1);
2324 return -TARGET_ENOMEM;
2325 }
2326 for (i = 0; i < fprog.len; i++) {
2327 filter[i].code = tswap16(tfilter[i].code);
2328 filter[i].jt = tfilter[i].jt;
2329 filter[i].jf = tfilter[i].jf;
2330 filter[i].k = tswap32(tfilter[i].k);
2331 }
2332 fprog.filter = filter;
2333
2334 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2335 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2336 g_free(filter);
2337
2338 unlock_user_struct(tfilter, tfprog->filter, 1);
2339 unlock_user_struct(tfprog, optval_addr, 1);
2340 return ret;
2341 }
2342 case TARGET_SO_BINDTODEVICE:
2343 {
2344 char *dev_ifname, *addr_ifname;
2345
2346 if (optlen > IFNAMSIZ - 1) {
2347 optlen = IFNAMSIZ - 1;
2348 }
2349 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2350 if (!dev_ifname) {
2351 return -TARGET_EFAULT;
2352 }
2353 optname = SO_BINDTODEVICE;
2354 addr_ifname = alloca(IFNAMSIZ);
2355 memcpy(addr_ifname, dev_ifname, optlen);
2356 addr_ifname[optlen] = 0;
2357 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2358 addr_ifname, optlen));
2359 unlock_user (dev_ifname, optval_addr, 0);
2360 return ret;
2361 }
2362 /* Options with 'int' argument. */
2363 case TARGET_SO_DEBUG:
2364 optname = SO_DEBUG;
2365 break;
2366 case TARGET_SO_REUSEADDR:
2367 optname = SO_REUSEADDR;
2368 break;
2369 case TARGET_SO_TYPE:
2370 optname = SO_TYPE;
2371 break;
2372 case TARGET_SO_ERROR:
2373 optname = SO_ERROR;
2374 break;
2375 case TARGET_SO_DONTROUTE:
2376 optname = SO_DONTROUTE;
2377 break;
2378 case TARGET_SO_BROADCAST:
2379 optname = SO_BROADCAST;
2380 break;
2381 case TARGET_SO_SNDBUF:
2382 optname = SO_SNDBUF;
2383 break;
2384 case TARGET_SO_SNDBUFFORCE:
2385 optname = SO_SNDBUFFORCE;
2386 break;
2387 case TARGET_SO_RCVBUF:
2388 optname = SO_RCVBUF;
2389 break;
2390 case TARGET_SO_RCVBUFFORCE:
2391 optname = SO_RCVBUFFORCE;
2392 break;
2393 case TARGET_SO_KEEPALIVE:
2394 optname = SO_KEEPALIVE;
2395 break;
2396 case TARGET_SO_OOBINLINE:
2397 optname = SO_OOBINLINE;
2398 break;
2399 case TARGET_SO_NO_CHECK:
2400 optname = SO_NO_CHECK;
2401 break;
2402 case TARGET_SO_PRIORITY:
2403 optname = SO_PRIORITY;
2404 break;
2405 #ifdef SO_BSDCOMPAT
2406 case TARGET_SO_BSDCOMPAT:
2407 optname = SO_BSDCOMPAT;
2408 break;
2409 #endif
2410 case TARGET_SO_PASSCRED:
2411 optname = SO_PASSCRED;
2412 break;
2413 case TARGET_SO_PASSSEC:
2414 optname = SO_PASSSEC;
2415 break;
2416 case TARGET_SO_TIMESTAMP:
2417 optname = SO_TIMESTAMP;
2418 break;
2419 case TARGET_SO_RCVLOWAT:
2420 optname = SO_RCVLOWAT;
2421 break;
2422 break;
2423 default:
2424 goto unimplemented;
2425 }
2426 if (optlen < sizeof(uint32_t))
2427 return -TARGET_EINVAL;
2428
2429 if (get_user_u32(val, optval_addr))
2430 return -TARGET_EFAULT;
2431 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2432 break;
2433 default:
2434 unimplemented:
2435 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2436 ret = -TARGET_ENOPROTOOPT;
2437 }
2438 return ret;
2439 }
2440
2441 /* do_getsockopt() Must return target values and target errnos. */
/* Emulate getsockopt(2): translate (level, optname) from target to host,
 * fetch the value from the host socket, and write it back to guest memory
 * with the appropriate width.  Cross-case 'goto' labels (int_case,
 * unimplemented) are shared tails reached from several switch arms. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* Returns a struct ucred; converted field by field. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy back at most what the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* NOTE(review): unknown target names fall through with the
             * untranslated number — works only where target and host
             * values coincide; verify for each new target. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* lv is the in/out size for the host call; sizeof(lv) equals
         * sizeof(val) here (both 4 bytes on Linux). */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back an int or a single byte depending on guest len. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Short read requested and the value fits in a byte. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2628
/* Translate a guest iovec array at @target_addr into a freshly allocated
 * host struct iovec array, locking each buffer into host memory.
 *
 * @type:  VERIFY_READ or VERIFY_WRITE (direction of the eventual I/O)
 * @count: number of entries
 * @copy:  passed to lock_user(): copy guest data in for sends
 *
 * Returns the host vector (freed by unlock_iovec()), or NULL with errno
 * set (0 for the legitimate count==0 case, EINVAL/ENOMEM/EFAULT otherwise).
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked before index i.  NOTE(review):
     * entries nulled by the bad_address path are passed to unlock_user()
     * with a NULL host pointer — presumed to be a harmless no-op; verify
     * against lock_user()'s contract. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2716
2717 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2718 int count, int copy)
2719 {
2720 struct target_iovec *target_vec;
2721 int i;
2722
2723 target_vec = lock_user(VERIFY_READ, target_addr,
2724 count * sizeof(struct target_iovec), 1);
2725 if (target_vec) {
2726 for (i = 0; i < count; i++) {
2727 abi_ulong base = tswapal(target_vec[i].iov_base);
2728 abi_long len = tswapal(target_vec[i].iov_len);
2729 if (len < 0) {
2730 break;
2731 }
2732 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2733 }
2734 unlock_user(target_vec, target_addr, 0);
2735 }
2736
2737 g_free(vec);
2738 }
2739
2740 static inline int target_to_host_sock_type(int *type)
2741 {
2742 int host_type = 0;
2743 int target_type = *type;
2744
2745 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2746 case TARGET_SOCK_DGRAM:
2747 host_type = SOCK_DGRAM;
2748 break;
2749 case TARGET_SOCK_STREAM:
2750 host_type = SOCK_STREAM;
2751 break;
2752 default:
2753 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2754 break;
2755 }
2756 if (target_type & TARGET_SOCK_CLOEXEC) {
2757 #if defined(SOCK_CLOEXEC)
2758 host_type |= SOCK_CLOEXEC;
2759 #else
2760 return -TARGET_EINVAL;
2761 #endif
2762 }
2763 if (target_type & TARGET_SOCK_NONBLOCK) {
2764 #if defined(SOCK_NONBLOCK)
2765 host_type |= SOCK_NONBLOCK;
2766 #elif !defined(O_NONBLOCK)
2767 return -TARGET_EINVAL;
2768 #endif
2769 }
2770 *type = host_type;
2771 return 0;
2772 }
2773
2774 /* Try to emulate socket type flags after socket creation. */
static int sock_flags_fixup(int fd, int target_type)
{
    /* Only needed on hosts without SOCK_NONBLOCK: set O_NONBLOCK after
     * the fact via fcntl().  Closes @fd and fails if that is impossible. */
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    /* Returns the (possibly adjusted) fd, or a negative target errno. */
    return fd;
}
2788
2789 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2790 abi_ulong target_addr,
2791 socklen_t len)
2792 {
2793 struct sockaddr *addr = host_addr;
2794 struct target_sockaddr *target_saddr;
2795
2796 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2797 if (!target_saddr) {
2798 return -TARGET_EFAULT;
2799 }
2800
2801 memcpy(addr, target_saddr, len);
2802 addr->sa_family = tswap16(target_saddr->sa_family);
2803 /* spkt_protocol is big-endian */
2804
2805 unlock_user(target_saddr, target_addr, 0);
2806 return 0;
2807 }
2808
/* fd translator for SOCK_PACKET sockets: only sockaddr conversion needed. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
2812
#ifdef CONFIG_RTNETLINK
/* Thin adapters matching the TargetFdTrans callback signatures: convert
 * whole NETLINK_ROUTE datagrams between target and host byte order. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}

/* fd translator installed on NETLINK_ROUTE sockets by do_socket(). */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
2829
/* Thin adapters matching the TargetFdTrans callback signatures: convert
 * whole NETLINK_AUDIT datagrams between target and host byte order. */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_audit(buf, len);
}

static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_audit(buf, len);
}

/* fd translator installed on NETLINK_AUDIT sockets by do_socket(). */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
2844
2845 /* do_socket() Must return target values and target errnos. */
2846 static abi_long do_socket(int domain, int type, int protocol)
2847 {
2848 int target_type = type;
2849 int ret;
2850
2851 ret = target_to_host_sock_type(&type);
2852 if (ret) {
2853 return ret;
2854 }
2855
2856 if (domain == PF_NETLINK && !(
2857 #ifdef CONFIG_RTNETLINK
2858 protocol == NETLINK_ROUTE ||
2859 #endif
2860 protocol == NETLINK_KOBJECT_UEVENT ||
2861 protocol == NETLINK_AUDIT)) {
2862 return -EPFNOSUPPORT;
2863 }
2864
2865 if (domain == AF_PACKET ||
2866 (domain == AF_INET && type == SOCK_PACKET)) {
2867 protocol = tswap16(protocol);
2868 }
2869
2870 ret = get_errno(socket(domain, type, protocol));
2871 if (ret >= 0) {
2872 ret = sock_flags_fixup(ret, target_type);
2873 if (type == SOCK_PACKET) {
2874 /* Manage an obsolete case :
2875 * if socket type is SOCK_PACKET, bind by name
2876 */
2877 fd_trans_register(ret, &target_packet_trans);
2878 } else if (domain == PF_NETLINK) {
2879 switch (protocol) {
2880 #ifdef CONFIG_RTNETLINK
2881 case NETLINK_ROUTE:
2882 fd_trans_register(ret, &target_netlink_route_trans);
2883 break;
2884 #endif
2885 case NETLINK_KOBJECT_UEVENT:
2886 /* nothing to do: messages are strings */
2887 break;
2888 case NETLINK_AUDIT:
2889 fd_trans_register(ret, &target_netlink_audit_trans);
2890 break;
2891 default:
2892 g_assert_not_reached();
2893 }
2894 }
2895 }
2896 return ret;
2897 }
2898
2899 /* do_bind() Must return target values and target errnos. */
2900 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2901 socklen_t addrlen)
2902 {
2903 void *addr;
2904 abi_long ret;
2905
2906 if ((int)addrlen < 0) {
2907 return -TARGET_EINVAL;
2908 }
2909
2910 addr = alloca(addrlen+1);
2911
2912 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2913 if (ret)
2914 return ret;
2915
2916 return get_errno(bind(sockfd, addr, addrlen));
2917 }
2918
2919 /* do_connect() Must return target values and target errnos. */
2920 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2921 socklen_t addrlen)
2922 {
2923 void *addr;
2924 abi_long ret;
2925
2926 if ((int)addrlen < 0) {
2927 return -TARGET_EINVAL;
2928 }
2929
2930 addr = alloca(addrlen+1);
2931
2932 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2933 if (ret)
2934 return ret;
2935
2936 return get_errno(safe_connect(sockfd, addr, addrlen));
2937 }
2938
2939 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* Convert the optional peer address. */
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the control buffer: host cmsg layouts can be larger than
     * the target's (e.g. 64-bit alignment) — presumably why the factor
     * of 2 was chosen; confirm against target_to_host_cmsg(). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    /* Lock the guest iovec into host memory for the duration of the call. */
    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        /* An fd-specific data translator (e.g. netlink) takes precedence
         * over generic control-message conversion.  NOTE(review): the
         * translator is only applied to the first iovec entry. */
        if (fd_trans_target_to_host_data(fd)) {
            ret = fd_trans_target_to_host_data(fd)(msg.msg_iov->iov_base,
                                                   msg.msg_iov->iov_len);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
        }
        if (ret == 0) {
            ret = get_errno(safe_sendmsg(fd, &msg, flags));
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;   /* preserve the byte count across conversions */
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       msg.msg_iov->iov_len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                /* Copy back the (possibly updated) peer address. */
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3017
/* sendmsg/recvmsg entry point: lock the guest msghdr (read-only for
 * sends, writable for receives so results can be copied back) and defer
 * to do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
3034
3035 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3036 * so it might not have this *mmsg-specific flag either.
3037 */
3038 #ifndef MSG_WAITFORONE
3039 #define MSG_WAITFORONE 0x10000
3040 #endif
3041
/* sendmmsg/recvmmsg emulation: process up to @vlen messages one at a
 * time through do_sendrecvmsg_locked(), storing each per-message byte
 * count into msg_len.  Partial success reports the message count. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp, as the kernel does. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3081
3082 /* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* flags carries SOCK_CLOEXEC/SOCK_NONBLOCK in target encoding. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL address: the caller doesn't want the peer name. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy the peer address and its (possibly shrunk) length back. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3118
3119 /* do_getpeername() Must return target values and target errnos. */
3120 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3121 abi_ulong target_addrlen_addr)
3122 {
3123 socklen_t addrlen;
3124 void *addr;
3125 abi_long ret;
3126
3127 if (get_user_u32(addrlen, target_addrlen_addr))
3128 return -TARGET_EFAULT;
3129
3130 if ((int)addrlen < 0) {
3131 return -TARGET_EINVAL;
3132 }
3133
3134 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3135 return -TARGET_EFAULT;
3136
3137 addr = alloca(addrlen);
3138
3139 ret = get_errno(getpeername(fd, addr, &addrlen));
3140 if (!is_error(ret)) {
3141 host_to_target_sockaddr(target_addr, addr, addrlen);
3142 if (put_user_u32(addrlen, target_addrlen_addr))
3143 ret = -TARGET_EFAULT;
3144 }
3145 return ret;
3146 }
3147
3148 /* do_getsockname() Must return target values and target errnos. */
3149 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3150 abi_ulong target_addrlen_addr)
3151 {
3152 socklen_t addrlen;
3153 void *addr;
3154 abi_long ret;
3155
3156 if (get_user_u32(addrlen, target_addrlen_addr))
3157 return -TARGET_EFAULT;
3158
3159 if ((int)addrlen < 0) {
3160 return -TARGET_EINVAL;
3161 }
3162
3163 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3164 return -TARGET_EFAULT;
3165
3166 addr = alloca(addrlen);
3167
3168 ret = get_errno(getsockname(fd, addr, &addrlen));
3169 if (!is_error(ret)) {
3170 host_to_target_sockaddr(target_addr, addr, addrlen);
3171 if (put_user_u32(addrlen, target_addrlen_addr))
3172 ret = -TARGET_EFAULT;
3173 }
3174 return ret;
3175 }
3176
3177 /* do_socketpair() Must return target values and target errnos. */
3178 static abi_long do_socketpair(int domain, int type, int protocol,
3179 abi_ulong target_tab_addr)
3180 {
3181 int tab[2];
3182 abi_long ret;
3183
3184 target_to_host_sock_type(&type);
3185
3186 ret = get_errno(socketpair(domain, type, protocol, tab));
3187 if (!is_error(ret)) {
3188 if (put_user_s32(tab[0], target_tab_addr)
3189 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3190 ret = -TARGET_EFAULT;
3191 }
3192 return ret;
3193 }
3194
3195 /* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /* Apply any fd-specific payload conversion (e.g. netlink byteswap)
     * in place before sending. */
    if (fd_trans_target_to_host_data(fd)) {
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
    }
    if (target_addr) {
        /* Destination supplied: convert the sockaddr first. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3231
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* Value-result address length supplied by the guest. */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        /* NOTE(review): addrlen is guest-controlled; a very large value
         * makes this alloca() risky -- confirm it is bounded upstream. */
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            /* Copy the peer address and updated length back to the guest. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: mark 'len' bytes dirty so they reach guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
    fail:
        /* Error paths discard the buffer contents (dirty length 0). */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3276
3277 #ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the socketcall(2) syscall used on targets without
 * separate socket syscalls: reads up to 6 abi_long arguments from the
 * guest array at vptr, then dispatches to the per-call helper. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }
    /* An out-of-range num leaves a[] unread; the default case below
     * rejects it before any element is used. */

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
3363 #endif
3364
#define N_SHM_REGIONS 32

/* Book-keeping for guest shmat() mappings so that do_shmdt() can find a
 * segment's size and clear its page flags.  Fixed-size table: attaches
 * beyond N_SHM_REGIONS concurrent segments go untracked. */
static struct shm_region {
    abi_ulong start;   /* guest address the segment is attached at */
    abi_ulong size;    /* segment size in bytes (from IPC_STAT) */
    bool in_use;
} shm_regions[N_SHM_REGIONS];
3372
/* Target-ABI layout of struct semid_ds for semctl(IPC_STAT/IPC_SET).
 * The __unusedN words after the time fields mirror the padding in the
 * 32-bit kernel ABI; PPC64 has none there. */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;            /* last semop() time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;            /* last change time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;            /* number of semaphores in the set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
3388
/* Byteswap a guest ipc_perm (embedded at the head of a semid_ds-shaped
 * struct at target_addr) into host layout.  Alpha/MIPS/PPC use 32-bit
 * mode/seq fields where other targets use 16-bit ones.
 * NOTE(review): callers also pass msqid_ds/shmid_ds addresses; this
 * relies on the perm struct being the first member of all three --
 * confirm the target struct definitions agree. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3416
3417 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3418 struct ipc_perm *host_ip)
3419 {
3420 struct target_ipc_perm *target_ip;
3421 struct target_semid_ds *target_sd;
3422
3423 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3424 return -TARGET_EFAULT;
3425 target_ip = &(target_sd->sem_perm);
3426 target_ip->__key = tswap32(host_ip->__key);
3427 target_ip->uid = tswap32(host_ip->uid);
3428 target_ip->gid = tswap32(host_ip->gid);
3429 target_ip->cuid = tswap32(host_ip->cuid);
3430 target_ip->cgid = tswap32(host_ip->cgid);
3431 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3432 target_ip->mode = tswap32(host_ip->mode);
3433 #else
3434 target_ip->mode = tswap16(host_ip->mode);
3435 #endif
3436 #if defined(TARGET_PPC)
3437 target_ip->__seq = tswap32(host_ip->__seq);
3438 #else
3439 target_ip->__seq = tswap16(host_ip->__seq);
3440 #endif
3441 unlock_user_struct(target_sd, target_addr, 1);
3442 return 0;
3443 }
3444
3445 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3446 abi_ulong target_addr)
3447 {
3448 struct target_semid_ds *target_sd;
3449
3450 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3451 return -TARGET_EFAULT;
3452 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3453 return -TARGET_EFAULT;
3454 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3455 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3456 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3457 unlock_user_struct(target_sd, target_addr, 0);
3458 return 0;
3459 }
3460
3461 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3462 struct semid_ds *host_sd)
3463 {
3464 struct target_semid_ds *target_sd;
3465
3466 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3467 return -TARGET_EFAULT;
3468 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3469 return -TARGET_EFAULT;
3470 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3471 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3472 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3473 unlock_user_struct(target_sd, target_addr, 1);
3474 return 0;
3475 }
3476
/* Target-ABI struct seminfo for semctl(IPC_INFO/SEM_INFO); all fields
 * are plain ints, matching the host glibc layout field-for-field. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3489
/* Copy a host struct seminfo to the guest at target_addr, swapping each
 * field with __put_user.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
3509
/* Host-side semun: glibc deliberately does not define this union, so
 * each program (including us) must declare it for semctl(2). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side semun: the pointer members are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3523
/* Read the GETALL/SETALL semaphore-value array from guest memory into a
 * freshly allocated host array (*host_array, owned by the caller and
 * normally released by host_to_target_semarray()).  The element count
 * is taken from a host IPC_STAT, not from the guest. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Query the set size from the host kernel. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3559
3560 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3561 unsigned short **host_array)
3562 {
3563 int nsems;
3564 unsigned short *array;
3565 union semun semun;
3566 struct semid_ds semid_ds;
3567 int i, ret;
3568
3569 semun.buf = &semid_ds;
3570
3571 ret = semctl(semid, 0, IPC_STAT, semun);
3572 if (ret == -1)
3573 return get_errno(ret);
3574
3575 nsems = semid_ds.sem_nsems;
3576
3577 array = lock_user(VERIFY_WRITE, target_addr,
3578 nsems*sizeof(unsigned short), 0);
3579 if (!array)
3580 return -TARGET_EFAULT;
3581
3582 for(i=0; i<nsems; i++) {
3583 __put_user((*host_array)[i], &array[i]);
3584 }
3585 g_free(*host_array);
3586 unlock_user(array, target_addr, 1);
3587
3588 return 0;
3589 }
3590
/* Emulate semctl(2): translate the guest semun argument (passed here as
 * the raw abi_ulong value), run the host semctl, and convert any output
 * back.  Returns target errnos. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip target-specific high flag bits (e.g. IPC_64). */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Round-trip the semaphore-value array through a host buffer. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Round-trip semid_ds; IPC_STAT/SEM_STAT only need the copy-out,
         * but the copy-in is harmless for them. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These ignore the semun argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
3660
/* Target-ABI struct sembuf (one semop operation); layout matches the
 * host struct field-for-field, only byte order may differ. */
struct target_sembuf {
    unsigned short sem_num;     /* semaphore index in the set */
    short sem_op;               /* operation: +n, -n or 0 (wait-for-zero) */
    short sem_flg;              /* IPC_NOWAIT, SEM_UNDO */
};
3666
/* Copy nsops guest sembuf entries from target_addr into the caller's
 * host array, byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
3689
3690 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3691 {
3692 struct sembuf sops[nsops];
3693
3694 if (target_to_host_sembuf(sops, ptr, nsops))
3695 return -TARGET_EFAULT;
3696
3697 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3698 }
3699
/* Target-ABI layout of struct msqid_ds for msgctl(IPC_STAT/IPC_SET).
 * On 32-bit targets each time field is followed by a padding word to
 * match the kernel's 32-bit layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;            /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;            /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;            /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;         /* current bytes on queue */
    abi_ulong msg_qnum;             /* current messages on queue */
    abi_ulong msg_qbytes;           /* max bytes allowed on queue */
    abi_ulong msg_lspid;            /* pid of last msgsnd() */
    abi_ulong msg_lrpid;            /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3723
3724 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3725 abi_ulong target_addr)
3726 {
3727 struct target_msqid_ds *target_md;
3728
3729 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3730 return -TARGET_EFAULT;
3731 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3732 return -TARGET_EFAULT;
3733 host_md->msg_stime = tswapal(target_md->msg_stime);
3734 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3735 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3736 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3737 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3738 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3739 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3740 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3741 unlock_user_struct(target_md, target_addr, 0);
3742 return 0;
3743 }
3744
3745 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3746 struct msqid_ds *host_md)
3747 {
3748 struct target_msqid_ds *target_md;
3749
3750 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3751 return -TARGET_EFAULT;
3752 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3753 return -TARGET_EFAULT;
3754 target_md->msg_stime = tswapal(host_md->msg_stime);
3755 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3756 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3757 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3758 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3759 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3760 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3761 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3762 unlock_user_struct(target_md, target_addr, 1);
3763 return 0;
3764 }
3765
/* Target-ABI struct msginfo for msgctl(IPC_INFO/MSG_INFO); matches the
 * host glibc layout field-for-field. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3776
/* Copy a host struct msginfo to the guest at target_addr, swapping each
 * field with __put_user.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
3794
/* Emulate msgctl(2): convert the command argument structure in and/or
 * out as required per command.  Returns target errnos. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip target-specific high flag bits (e.g. IPC_64). */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip msqid_ds; only IPC_SET needs the copy-in, but it is
         * harmless for the STAT variants. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
3826
/* Target-ABI msgbuf header: mtype followed by the message payload.
 * mtext[1] is the traditional pre-C99 stand-in for a flexible array. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
3831
/* Emulate msgsnd(2): copy the guest msgbuf into a host buffer (swapping
 * mtype) and send it.  Returns target errnos. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* The kernel rejects negative sizes with EINVAL. */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: long mtype header plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    /* NOTE(review): lock_user_struct() above only validated
     * sizeof(*target_mb) bytes, yet msgsz bytes are read from
     * target_mb->mtext here -- confirm the full range is accessible. */
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
3858
/* Emulate msgrcv(2): receive into a host buffer, then copy the payload
 * and byteswapped mtype back to the guest msgbuf.  Returns the number
 * of payload bytes received or a target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* The kernel rejects negative sizes with EINVAL. */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: long mtype header plus up to msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Lock exactly the received payload range for copy-out.
         * NOTE(review): the mtext offset is computed as
         * msgp + sizeof(abi_ulong), which assumes mtype occupies exactly
         * one abi_ulong with no padding -- confirm against the target
         * msgbuf layout. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always locked if we reach here via any goto. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3901
/* Convert a guest shmid_ds at target_addr into host layout (fields
 * swapped via __get_user).  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): if target_to_host_ipc_perm() faults, the outer
 * lock_user_struct mapping is not released -- same pattern as the
 * semid/msqid converters; verify whether this leak matters here. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3921
/* Convert a host shmid_ds back into the guest struct at target_addr
 * (fields swapped via __put_user).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3941
/* Target-ABI struct shminfo for shmctl(IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
3949
/* Copy a host struct shminfo to the guest at target_addr, swapping each
 * field with __put_user.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
3964
/* Target-ABI struct shm_info for shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* currently existing segments */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;    /* unused since Linux 2.4 */
    abi_ulong swap_successes;   /* unused since Linux 2.4 */
};
3973
/* Copy a host struct shm_info to the guest at target_addr, swapping
 * each field with __put_user.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
3989
/* Emulate shmctl(2): convert the command argument structure in and/or
 * out as required per command.  Returns target errnos. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip target-specific high flag bits (e.g. IPC_64). */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip shmid_ds; only IPC_SET needs the copy-in, but it is
         * harmless for the STAT variants. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel returns a shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise for shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These ignore the buf argument entirely. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4028
4029 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
4030 {
4031 abi_long raddr;
4032 void *host_raddr;
4033 struct shmid_ds shm_info;
4034 int i,ret;
4035
4036 /* find out the length of the shared memory segment */
4037 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4038 if (is_error(ret)) {
4039 /* can't get length, bail out */
4040 return ret;
4041 }
4042
4043 mmap_lock();
4044
4045 if (shmaddr)
4046 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4047 else {
4048 abi_ulong mmap_start;
4049
4050 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4051
4052 if (mmap_start == -1) {
4053 errno = ENOMEM;
4054 host_raddr = (void *)-1;
4055 } else
4056 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4057 }
4058
4059 if (host_raddr == (void *)-1) {
4060 mmap_unlock();
4061 return get_errno((long)host_raddr);
4062 }
4063 raddr=h2g((unsigned long)host_raddr);
4064
4065 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4066 PAGE_VALID | PAGE_READ |
4067 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4068
4069 for (i = 0; i < N_SHM_REGIONS; i++) {
4070 if (!shm_regions[i].in_use) {
4071 shm_regions[i].in_use = true;
4072 shm_regions[i].start = raddr;
4073 shm_regions[i].size = shm_info.shm_segsz;
4074 break;
4075 }
4076 }
4077
4078 mmap_unlock();
4079 return raddr;
4080
4081 }
4082
4083 static inline abi_long do_shmdt(abi_ulong shmaddr)
4084 {
4085 int i;
4086
4087 for (i = 0; i < N_SHM_REGIONS; ++i) {
4088 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4089 shm_regions[i].in_use = false;
4090 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4091 break;
4092 }
4093 }
4094
4095 return get_errno(shmdt(g2h(shmaddr)));
4096 }
4097
4098 #ifdef TARGET_NR_ipc
4099 /* ??? This only works with linear mappings. */
4100 /* do_ipc() must return target values and target errnos. */
4101 static abi_long do_ipc(unsigned int call, abi_long first,
4102 abi_long second, abi_long third,
4103 abi_long ptr, abi_long fifth)
4104 {
4105 int version;
4106 abi_long ret = 0;
4107
4108 version = call >> 16;
4109 call &= 0xffff;
4110
4111 switch (call) {
4112 case IPCOP_semop:
4113 ret = do_semop(first, ptr, second);
4114 break;
4115
4116 case IPCOP_semget:
4117 ret = get_errno(semget(first, second, third));
4118 break;
4119
4120 case IPCOP_semctl: {
4121 /* The semun argument to semctl is passed by value, so dereference the
4122 * ptr argument. */
4123 abi_ulong atptr;
4124 get_user_ual(atptr, ptr);
4125 ret = do_semctl(first, second, third, atptr);
4126 break;
4127 }
4128
4129 case IPCOP_msgget:
4130 ret = get_errno(msgget(first, second));
4131 break;
4132
4133 case IPCOP_msgsnd:
4134 ret = do_msgsnd(first, ptr, second, third);
4135 break;
4136
4137 case IPCOP_msgctl:
4138 ret = do_msgctl(first, second, ptr);
4139 break;
4140
4141 case IPCOP_msgrcv:
4142 switch (version) {
4143 case 0:
4144 {
4145 struct target_ipc_kludge {
4146 abi_long msgp;
4147 abi_long msgtyp;
4148 } *tmp;
4149
4150 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4151 ret = -TARGET_EFAULT;
4152 break;
4153 }
4154
4155 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4156
4157 unlock_user_struct(tmp, ptr, 0);
4158 break;
4159 }
4160 default:
4161 ret = do_msgrcv(first, ptr, second, fifth, third);
4162 }
4163 break;
4164
4165 case IPCOP_shmat:
4166 switch (version) {
4167 default:
4168 {
4169 abi_ulong raddr;
4170 raddr = do_shmat(first, ptr, second);
4171 if (is_error(raddr))
4172 return get_errno(raddr);
4173 if (put_user_ual(raddr, third))
4174 return -TARGET_EFAULT;
4175 break;
4176 }
4177 case 1:
4178 ret = -TARGET_EINVAL;
4179 break;
4180 }
4181 break;
4182 case IPCOP_shmdt:
4183 ret = do_shmdt(ptr);
4184 break;
4185
4186 case IPCOP_shmget:
4187 /* IPC_* flag values are the same on all linux platforms */
4188 ret = get_errno(shmget(first, second, third));
4189 break;
4190
4191 /* IPC_* and SHM_* command values are the same on all linux platforms */
4192 case IPCOP_shmctl:
4193 ret = do_shmctl(first, second, ptr);
4194 break;
4195 default:
4196 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4197 ret = -TARGET_ENOSYS;
4198 break;
4199 }
4200 return ret;
4201 }
4202 #endif
4203
/* kernel structure types definitions */

/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * identifiers, one per structure description, ending with STRUCT_MAX. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit one thunk type-description array per structure.
 * STRUCT_SPECIAL entries have hand-written descriptions elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
4220
typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler hook for ioctls whose arguments need more than the
 * generic thunk conversion. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;             /* ioctl number in the guest ABI */
    unsigned int host_cmd;      /* corresponding host ioctl number */
    const char *name;           /* for logging/diagnostics */
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* custom handler, NULL for generic path */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001    /* copy data back to the guest after the call */
#define IOC_W 0x0002    /* copy data from the guest before the call */
#define IOC_RW (IOC_R | IOC_W)

/* Upper bound on a thunk-converted ioctl argument held in buf_temp. */
#define MAX_STRUCT_SIZE 4096
4240
4241 #ifdef CONFIG_FIEMAP
4242 /* So fiemap access checks don't overflow on 32 bit systems.
4243 * This is very slightly smaller than the limit imposed by
4244 * the underlying kernel.
4245 */
4246 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4247 / sizeof(struct fiemap_extent))
4248
/* Custom handler for FS_IOC_FIEMAP: converts the variable-length
 * fiemap request/reply between target and host layouts.
 * Returns the host ioctl result, or a target errno on failure.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;   /* set when fm points at a heap buffer we must free */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so outbufsz below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4328 #endif
4329
4330 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4331 int fd, int cmd, abi_long arg)
4332 {
4333 const argtype *arg_type = ie->arg_type;
4334 int target_size;
4335 void *argptr;
4336 int ret;
4337 struct ifconf *host_ifconf;
4338 uint32_t outbufsz;
4339 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4340 int target_ifreq_size;
4341 int nb_ifreq;
4342 int free_buf = 0;
4343 int i;
4344 int target_ifc_len;
4345 abi_long target_ifc_buf;
4346 int host_ifc_len;
4347 char *host_ifc_buf;
4348
4349 assert(arg_type[0] == TYPE_PTR);
4350 assert(ie->access == IOC_RW);
4351
4352 arg_type++;
4353 target_size = thunk_type_size(arg_type, 0);
4354
4355 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4356 if (!argptr)
4357 return -TARGET_EFAULT;
4358 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4359 unlock_user(argptr, arg, 0);
4360
4361 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4362 target_ifc_len = host_ifconf->ifc_len;
4363 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4364
4365 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4366 nb_ifreq = target_ifc_len / target_ifreq_size;
4367 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4368
4369 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4370 if (outbufsz > MAX_STRUCT_SIZE) {
4371 /* We can't fit all the extents into the fixed size buffer.
4372 * Allocate one that is large enough and use it instead.
4373 */
4374 host_ifconf = malloc(outbufsz);
4375 if (!host_ifconf) {
4376 return -TARGET_ENOMEM;
4377 }
4378 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4379 free_buf = 1;
4380 }
4381 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4382
4383 host_ifconf->ifc_len = host_ifc_len;
4384 host_ifconf->ifc_buf = host_ifc_buf;
4385
4386 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4387 if (!is_error(ret)) {
4388 /* convert host ifc_len to target ifc_len */
4389
4390 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4391 target_ifc_len = nb_ifreq * target_ifreq_size;
4392 host_ifconf->ifc_len = target_ifc_len;
4393
4394 /* restore target ifc_buf */
4395
4396 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4397
4398 /* copy struct ifconf to target user */
4399
4400 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4401 if (!argptr)
4402 return -TARGET_EFAULT;
4403 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4404 unlock_user(argptr, arg, target_size);
4405
4406 /* copy ifreq[] to target user */
4407
4408 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4409 for (i = 0; i < nb_ifreq ; i++) {
4410 thunk_convert(argptr + i * target_ifreq_size,
4411 host_ifc_buf + i * sizeof(struct ifreq),
4412 ifreq_arg_type, THUNK_TARGET);
4413 }
4414 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4415 }
4416
4417 if (free_buf) {
4418 free(host_ifconf);
4419 }
4420
4421 return ret;
4422 }
4423
/* Custom handler for device-mapper ioctls (DM_*): the payload is a
 * struct dm_ioctl header followed by command-specific variable-length
 * data, with internal next/offset links that must be rewritten when
 * the record sizes change between target and host.
 * Returns the host ioctl result, or a target errno on failure.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed dm_ioctl header from the guest. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    /* NOTE(review): data_size comes from the guest and is trusted here;
     * the *2 slack presumably covers host records growing — confirm. */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        /* NOTE(review): plain -EINVAL here, unlike the -TARGET_EINVAL
         * used elsewhere in this function — looks inconsistent. */
        ret = -EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* Convert the command-specific input payload, if any. */
    /* NOTE(review): this lock_user result is not checked for NULL
     * before the memcpy/thunk_convert uses below — TODO confirm/fix. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* leading u64 sector number needs byte-swapping; the rest is text */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Array of dm_target_spec records, each followed by a parameter
         * string; 'next' links are rewritten for the host record size. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* Convert the command-specific output payload back to the guest. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): this lock_user result is also unchecked — the
         * per-command writers below would dereference NULL. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Linked list of dm_name_list records; re-link for target size. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Array of dm_target_spec + status string per target. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* u32 count (+ 4 bytes padding) followed by u64 dev numbers. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Linked list of dm_target_versions; re-link like DM_LIST_DEVICES. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Write the (possibly updated) dm_ioctl header back to the guest. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
4648
4649 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4650 int cmd, abi_long arg)
4651 {
4652 void *argptr;
4653 int target_size;
4654 const argtype *arg_type = ie->arg_type;
4655 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4656 abi_long ret;
4657
4658 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4659 struct blkpg_partition host_part;
4660
4661 /* Read and convert blkpg */
4662 arg_type++;
4663 target_size = thunk_type_size(arg_type, 0);
4664 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4665 if (!argptr) {
4666 ret = -TARGET_EFAULT;
4667 goto out;
4668 }
4669 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4670 unlock_user(argptr, arg, 0);
4671
4672 switch (host_blkpg->op) {
4673 case BLKPG_ADD_PARTITION:
4674 case BLKPG_DEL_PARTITION:
4675 /* payload is struct blkpg_partition */
4676 break;
4677 default:
4678 /* Unknown opcode */
4679 ret = -TARGET_EINVAL;
4680 goto out;
4681 }
4682
4683 /* Read and convert blkpg->data */
4684 arg = (abi_long)(uintptr_t)host_blkpg->data;
4685 target_size = thunk_type_size(part_arg_type, 0);
4686 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4687 if (!argptr) {
4688 ret = -TARGET_EFAULT;
4689 goto out;
4690 }
4691 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4692 unlock_user(argptr, arg, 0);
4693
4694 /* Swizzle the data pointer to our local copy and call! */
4695 host_blkpg->data = &host_part;
4696 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4697
4698 out:
4699 return ret;
4700 }
4701
4702 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4703 int fd, int cmd, abi_long arg)
4704 {
4705 const argtype *arg_type = ie->arg_type;
4706 const StructEntry *se;
4707 const argtype *field_types;
4708 const int *dst_offsets, *src_offsets;
4709 int target_size;
4710 void *argptr;
4711 abi_ulong *target_rt_dev_ptr;
4712 unsigned long *host_rt_dev_ptr;
4713 abi_long ret;
4714 int i;
4715
4716 assert(ie->access == IOC_W);
4717 assert(*arg_type == TYPE_PTR);
4718 arg_type++;
4719 assert(*arg_type == TYPE_STRUCT);
4720 target_size = thunk_type_size(arg_type, 0);
4721 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4722 if (!argptr) {
4723 return -TARGET_EFAULT;
4724 }
4725 arg_type++;
4726 assert(*arg_type == (int)STRUCT_rtentry);
4727 se = struct_entries + *arg_type++;
4728 assert(se->convert[0] == NULL);
4729 /* convert struct here to be able to catch rt_dev string */
4730 field_types = se->field_types;
4731 dst_offsets = se->field_offsets[THUNK_HOST];
4732 src_offsets = se->field_offsets[THUNK_TARGET];
4733 for (i = 0; i < se->nb_fields; i++) {
4734 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4735 assert(*field_types == TYPE_PTRVOID);
4736 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4737 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4738 if (*target_rt_dev_ptr != 0) {
4739 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4740 tswapal(*target_rt_dev_ptr));
4741 if (!*host_rt_dev_ptr) {
4742 unlock_user(argptr, arg, 0);
4743 return -TARGET_EFAULT;
4744 }
4745 } else {
4746 *host_rt_dev_ptr = 0;
4747 }
4748 field_types++;
4749 continue;
4750 }
4751 field_types = thunk_convert(buf_temp + dst_offsets[i],
4752 argptr + src_offsets[i],
4753 field_types, THUNK_HOST);
4754 }
4755 unlock_user(argptr, arg, 0);
4756
4757 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4758 if (*host_rt_dev_ptr != 0) {
4759 unlock_user((void *)*host_rt_dev_ptr,
4760 *target_rt_dev_ptr, 0);
4761 }
4762 return ret;
4763 }
4764
4765 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4766 int fd, int cmd, abi_long arg)
4767 {
4768 int sig = target_to_host_signal(arg);
4769 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4770 }
4771
/* Translation table for supported ioctls, generated from ioctls.h.
 * Plain IOCTL() rows use generic thunk conversion; IOCTL_SPECIAL() rows
 * name a custom do_ioctl_fn handler.  The table ends with a zero
 * target_cmd sentinel, which do_ioctl() uses to stop scanning.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
4780
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos.
 *
 * Generic ioctl dispatcher: looks the target command up in
 * ioctl_entries[], defers to a custom handler when one is registered,
 * and otherwise converts the argument according to the entry's argtype
 * description and access direction before/after calling the host ioctl.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan; the zero target_cmd sentinel terminates the table. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Entry has a dedicated handler; it does all conversion itself. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Scalar argument: pass through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer to a struct: convert via buf_temp according to the
         * declared data direction. */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel fills the struct; convert host -> target afterwards. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads the struct; convert target -> host first. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
            /* fallthrough: unknown access treated as read-write */
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
4867
/* termios c_iflag (input modes) target<->host bit translation.
 * Rows are { target_mask, target_bits, host_mask, host_bits }. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
4885
/* termios c_oflag (output modes) target<->host bit translation,
 * including the multi-bit delay fields (NLDLY, CRDLY, TABDLY, ...). */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
4913
/* termios c_cflag (control modes) target<->host bit translation,
 * covering baud rates (CBAUD field), character size (CSIZE) and the
 * single-bit control flags. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
4948
/* termios c_lflag (local modes) target<->host bit translation. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
4967
4968 static void target_to_host_termios (void *dst, const void *src)
4969 {
4970 struct host_termios *host = dst;
4971 const struct target_termios *target = src;
4972
4973 host->c_iflag =
4974 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4975 host->c_oflag =
4976 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4977 host->c_cflag =
4978 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4979 host->c_lflag =
4980 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4981 host->c_line = target->c_line;
4982
4983 memset(host->c_cc, 0, sizeof(host->c_cc));
4984 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4985 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4986 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4987 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4988 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4989 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4990 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4991 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4992 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4993 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4994 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4995 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4996 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4997 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4998 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4999 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5000 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5001 }
5002
5003 static void host_to_target_termios (void *dst, const void *src)
5004 {
5005 struct target_termios *target = dst;
5006 const struct host_termios *host = src;
5007
5008 target->c_iflag =
5009 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5010 target->c_oflag =
5011 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5012 target->c_cflag =
5013 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5014 target->c_lflag =
5015 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5016 target->c_line = host->c_line;
5017
5018 memset(target->c_cc, 0, sizeof(target->c_cc));
5019 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5020 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5021 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5022 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5023 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5024 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5025 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5026 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5027 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5028 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5029 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5030 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5031 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5032 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5033 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5034 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5035 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5036 }
5037
/* STRUCT_SPECIAL(termios) descriptor: termios is converted by the
 * hand-written functions above instead of a generic field list. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5043
/* mmap() flags target<->host bit translation. */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
	  MAP_NORESERVE },
	{ 0, 0, 0, 0 }
};
5057
5058 #if defined(TARGET_I386)
5059
/* NOTE: there is really one LDT for all the threads.
 * Lazily allocated (via target_mmap) by write_ldt() on first use. */
static uint8_t *ldt_table;
5062
5063 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5064 {
5065 int size;
5066 void *p;
5067
5068 if (!ldt_table)
5069 return 0;
5070 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5071 if (size > bytecount)
5072 size = bytecount;
5073 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5074 if (!p)
5075 return -TARGET_EFAULT;
5076 /* ??? Should this by byteswapped? */
5077 memcpy(p, ldt_table, size);
5078 unlock_user(p, ptr, size);
5079 return size;
5080 }
5081
/* XXX: add locking support */
/* Install one LDT entry from a guest struct modify_ldt_ldt_s at 'ptr'.
 * 'oldmode' selects the legacy modify_ldt(1) semantics (no 'useable'
 * bit, stricter validation).  Allocates the LDT backing store on first
 * use.  Returns 0 on success or a target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    /* Fetch and byteswap the guest descriptor request. */
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor into the two 32-bit halves of an x86
     * segment descriptor (base/limit split across both words). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5173
5174 /* specific and weird i386 syscalls */
5175 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5176 unsigned long bytecount)
5177 {
5178 abi_long ret;
5179
5180 switch (func) {
5181 case 0:
5182 ret = read_ldt(ptr, bytecount);
5183 break;
5184 case 1:
5185 ret = write_ldt(env, ptr, bytecount, 1);
5186 break;
5187 case 0x11:
5188 ret = write_ldt(env, ptr, bytecount, 0);
5189 break;
5190 default:
5191 ret = -TARGET_ENOSYS;
5192 break;
5193 }
5194 return ret;
5195 }
5196
5197 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into one of the
 * guest's GDT TLS slots.  Mirrors the descriptor-packing logic of the
 * Linux kernel's set_thread_area.  Returns 0 or a target errno. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: the struct is read and, if entry_number == -1, the
     * chosen slot index is written back to the guest. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* entry_number == -1 means "pick a free TLS slot for me". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the modify_ldt-style flags word into individual fields. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* 32-bit ABI has no long-mode bit (this branch is the one compiled
     * here, since the enclosing #if requires TARGET_ABI32). */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    /* contents == 3 would be a gate descriptor; only allowed as
     * "not present" (again matching the kernel's check). */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two 32-bit halves of an x86
     * segment descriptor (0x7000 = DPL 3, present, type bits). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    /* Write the descriptor into the guest GDT in guest byte order. */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5282
/* Emulate get_thread_area(2): read back a TLS descriptor from the guest
 * GDT and unpack it into the user_desc layout the guest expects.
 * Inverse of do_set_thread_area() above. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Fetch the two descriptor halves, converting from guest order. */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor bits; note read_exec_only and
     * seg_not_present are stored inverted in the descriptor. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the modify_ldt-style flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5329 #endif /* TARGET_I386 && TARGET_ABI32 */
5330
5331 #ifndef TARGET_ABI32
/* Emulate x86-64 arch_prctl(2): get or set the FS/GS segment base.
 * SET loads a null selector then overwrites the cached base; GET copies
 * the cached base out to the guest pointer.  Returns 0 or target errno. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Load a null selector first, then set the base directly. */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        /* addr is an out-pointer here: store the base to guest memory. */
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
5364 #endif
5365
5366 #endif /* defined(TARGET_I386) */
5367
5368 #define NEW_STACK_SIZE 0x40000
5369
5370
/* Serializes thread creation/teardown so a new thread's setup appears
 * atomic with respect to other clone() calls (see do_fork/clone_func). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake packet passed from do_fork() to clone_func(): the parent
 * fills in env/tidptrs/sigmask, the child reports its tid and signals
 * the condvar when it has finished initializing. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;           /* filled in by the child via gettid() */
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;       /* parent's pre-block signal mask to restore */
} new_thread_info;
5382
/* Entry point for a CLONE_VM child thread created by do_fork().
 * Performs per-thread setup, writes tids where requested, signals the
 * parent that setup is done, then enters the guest cpu_loop() forever. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* CLONE_CHILD_SETTID / CLONE_PARENT_SETTID handling. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  (The parent blocked everything before
     * pthread_create; restore its mask here.) */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finshed initializing the tls state.
     * (The parent holds clone_lock until do_fork() is done.) */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
5415
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Implements clone()/fork()/vfork() for the guest.  CLONE_VM requests
 * become host pthreads sharing this process; anything else becomes a
 * host fork().  Returns the child tid (host value) or a target errno. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* Thread creation path: share the address space, spawn a host
         * pthread running clone_func() on a fresh CPU state copy. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        /* Remember the NPTL flags before stripping them for the host. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed past its barrier
         * in clone_func(). */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
5537
/* warning : doesn't handle linux specific flags... */
/* Map a target fcntl command to the host command, promoting the plain
 * lock commands to their 64-bit variants so large offsets survive.
 * Returns -TARGET_EINVAL for commands we don't translate. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    /* Use the *64 host commands so struct flock64 can be used. */
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Not reached: every case above returns. */
    return -TARGET_EINVAL;
}
5593
/* Build a bitmask_transtbl row where the target and host values share
 * the same symbol name (mask fields of -1 mean "match all bits"). */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* Target <-> host translation table for flock l_type values. */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }  /* terminator */
};
5603
/* Copy a guest `struct flock` (the non-64 layout) into a host
 * struct flock64, translating l_type through flock_tbl.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
5623
/* Copy a host struct flock64 out to a guest `struct flock` (non-64
 * layout), translating l_type back through flock_tbl.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5643
/* Function-pointer types for selecting a flock64 converter pair at run
 * time (plain layout vs the ARM EABI padded layout below); presumably
 * consumed by the fcntl64 path later in the file -- not visible here. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5646
5647 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI variant: same as copy_from_user_flock64 but reads the
 * target_eabi_flock64 layout (different alignment/padding). */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
5667
/* ARM EABI variant: same as copy_to_user_flock64 but writes the
 * target_eabi_flock64 layout (different alignment/padding). */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5687 #endif
5688
/* Copy a guest `struct flock64` into a host struct flock64,
 * translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
5708
/* Copy a host struct flock64 out to a guest `struct flock64`,
 * translating l_type back through flock_tbl.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5728
/* Emulate fcntl(2): translate the command and any pointed-to structures
 * between guest and host layouts, issue the host call, and translate
 * the result back.  Returns the host result or a target errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* arg points at a guest struct flock, read/modify/write. */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        /* 64-bit flock layout variants of the three cases above. */
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Returned flag bits need host -> target translation. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        /* Incoming flag bits need target -> host translation. */
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Plain integer argument, pass through unchanged. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass the untranslated cmd to the host. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
5834
#ifdef USE_UID16

/* Targets with 16-bit uid_t: clamp a 32-bit uid to the overflow value
 * 65534 (matching the kernel's high2lowuid behaviour). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* 16-bit gid counterpart of high2lowuid(). */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the special -1 ("no change") value. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* Widen a 16-bit gid, preserving the special -1 ("no change") value. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
/* Byte-swap an id at the target's id width (16 bits here). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* Targets with 32-bit uid_t: all of these are identity conversions. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
5900
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread syscall wrappers generated by the _syscallN macros. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
5934
/* One-time syscall layer initialization: register thunk struct types,
 * build the errno reverse-mapping table, and patch ioctl size fields
 * whose size must be computed from the argument type at runtime. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            /* Splice the computed size into the command's size field. */
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
5986
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset passed as two 32-bit register halves;
 * which half holds the high word depends on target endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset in a single register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
6002
6003 #ifdef TARGET_NR_truncate64
/* Emulate truncate64: some ABIs require 64-bit register pairs to be
 * aligned, inserting a pad register; shift the halves down if so, then
 * reassemble the 64-bit length from the two remaining words. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
6015 #endif
6016
6017 #ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64: same register-pair alignment handling as
 * target_truncate64() above, but operating on an fd. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
6029 #endif
6030
/* Copy a guest struct timespec into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
6043
/* Copy a host struct timespec out to a guest one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6056
6057 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6058 abi_ulong target_addr)
6059 {
6060 struct target_itimerspec *target_itspec;
6061
6062 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6063 return -TARGET_EFAULT;
6064 }
6065
6066 host_itspec->it_interval.tv_sec =
6067 tswapal(target_itspec->it_interval.tv_sec);
6068 host_itspec->it_interval.tv_nsec =
6069 tswapal(target_itspec->it_interval.tv_nsec);
6070 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6071 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6072
6073 unlock_user_struct(target_itspec, target_addr, 1);
6074 return 0;
6075 }
6076
6077 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6078 struct itimerspec *host_its)
6079 {
6080 struct target_itimerspec *target_itspec;
6081
6082 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6083 return -TARGET_EFAULT;
6084 }
6085
6086 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6087 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6088
6089 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6090 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6091
6092 unlock_user_struct(target_itspec, target_addr, 0);
6093 return 0;
6094 }
6095
6096 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6097 abi_ulong target_addr)
6098 {
6099 struct target_sigevent *target_sevp;
6100
6101 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6102 return -TARGET_EFAULT;
6103 }
6104
6105 /* This union is awkward on 64 bit systems because it has a 32 bit
6106 * integer and a pointer in it; we follow the conversion approach
6107 * used for handling sigval types in signal.c so the guest should get
6108 * the correct value back even if we did a 64 bit byteswap and it's
6109 * using the 32 bit integer.
6110 */
6111 host_sevp->sigev_value.sival_ptr =
6112 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6113 host_sevp->sigev_signo =
6114 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6115 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6116 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6117
6118 unlock_user_struct(target_sevp, target_addr, 1);
6119 return 0;
6120 }
6121
6122 #if defined(TARGET_NR_mlockall)
6123 static inline int target_to_host_mlockall_arg(int arg)
6124 {
6125 int result = 0;
6126
6127 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6128 result |= MCL_CURRENT;
6129 }
6130 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6131 result |= MCL_FUTURE;
6132 }
6133 return result;
6134 }
6135 #endif
6136
/* Copy a host struct stat out to the guest's stat64 layout (or the ARM
 * EABI variant when the guest binary is EABI).  Returns 0 or
 * -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        /* ARM EABI layout differs in field sizes/padding. */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unwritten fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry a duplicate 32-bit __st_ino field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6199
6200 /* ??? Using host futex calls even when target atomic operations
6201 are not really atomic probably breaks things. However implementing
6202 futexes locally would make futexes shared between multiple processes
6203 tricky. However they're probably useless because guest atomic
6204 operations won't work either. */
6205 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6206 target_ulong uaddr2, int val3)
6207 {
6208 struct timespec ts, *pts;
6209 int base_op;
6210
6211 /* ??? We assume FUTEX_* constants are the same on both host
6212 and target. */
6213 #ifdef FUTEX_CMD_MASK
6214 base_op = op & FUTEX_CMD_MASK;
6215 #else
6216 base_op = op;
6217 #endif
6218 switch (base_op) {
6219 case FUTEX_WAIT:
6220 case FUTEX_WAIT_BITSET:
6221 if (timeout) {
6222 pts = &ts;
6223 target_to_host_timespec(pts, timeout);
6224 } else {
6225 pts = NULL;
6226 }
6227 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6228 pts, NULL, val3));
6229 case FUTEX_WAKE:
6230 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6231 case FUTEX_FD:
6232 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6233 case FUTEX_REQUEUE:
6234 case FUTEX_CMP_REQUEUE:
6235 case FUTEX_WAKE_OP:
6236 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6237 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6238 But the prototype takes a `struct timespec *'; insert casts
6239 to satisfy the compiler. We do not need to tswap TIMEOUT
6240 since it's not compared to guest memory. */
6241 pts = (struct timespec *)(uintptr_t) timeout;
6242 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6243 g2h(uaddr2),
6244 (base_op == FUTEX_CMP_REQUEUE
6245 ? tswap32(val3)
6246 : val3)));
6247 default:
6248 return -TARGET_ENOSYS;
6249 }
6250 }
6251 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): translate the guest pathname and
 * struct file_handle buffer into host objects, run the host syscall,
 * then copy the opaque handle (header fields byte-swapped) and the
 * mount id back to the guest.
 *
 * Returns the host syscall result via get_errno(), or -TARGET_EFAULT
 * if any guest buffer cannot be accessed.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle, so this
       reads the caller-supplied handle buffer size. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size comes from the guest unchecked; the host
     * kernel rejects handle_bytes > MAX_HANDLE_SZ with EINVAL, but
     * total_size could in principle wrap here — TODO confirm lock_user
     * fails for such sizes. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Copy the whole handle verbatim, then fix up the two header words
       that the guest interprets itself. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    /* The mount id is written back even if the host call failed, which
       mirrors the state the kernel leaves behind. */
    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
6303 #endif
6304
6305 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest's file_handle into a
 * host buffer (byte-swapping the header fields to host order),
 * translate the open flags, and issue the host syscall.
 *
 * Returns the new host fd via get_errno(), or -TARGET_EFAULT if the
 * guest handle buffer cannot be accessed.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* The handle payload is opaque and copied verbatim; only the two
       header words need converting to host byte order. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
6337 #endif
6338
6339 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6340
/* signalfd siginfo conversion */

/*
 * Convert one host signalfd_siginfo record to guest byte order.
 * tinfo and info may alias (the only caller passes the same buffer
 * for both); therefore every field is read from the const source
 * `info' before the destination is overwritten.  The original code
 * read ssi_signo/ssi_code/ssi_errno through `tinfo', which only
 * worked because of that aliasing.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Memory-error SIGBUS records carry an extra 16-bit LSB count just
       past ssi_addr; swap it too.  Read from the (still host-order)
       source fields. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        const uint16_t *ssi_addr_lsb = (const uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6380
6381 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6382 {
6383 int i;
6384
6385 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6386 host_to_target_signalfd_siginfo(buf + i, buf + i);
6387 }
6388
6389 return len;
6390 }
6391
/* fd translator for signalfd fds: every record read from such an fd is
 * byte-swapped to the guest's endianness before being returned. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
6395
6396 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6397 {
6398 int host_flags;
6399 target_sigset_t *target_mask;
6400 sigset_t host_mask;
6401 abi_long ret;
6402
6403 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6404 return -TARGET_EINVAL;
6405 }
6406 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6407 return -TARGET_EFAULT;
6408 }
6409
6410 target_to_host_sigset(&host_mask, target_mask);
6411
6412 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6413
6414 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6415 if (ret >= 0) {
6416 fd_trans_register(ret, &target_signalfd_trans);
6417 }
6418
6419 unlock_user_struct(target_mask, mask, 0);
6420
6421 return ret;
6422 }
6423 #endif
6424
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal. */
        int termsig = host_to_target_signal(WTERMSIG(status));
        return termsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        int stopsig = host_to_target_signal(WSTOPSIG(status));
        return (stopsig << 8) | (status & 0xff);
    }
    /* Normal exit and continue reports carry no signal number. */
    return status;
}
6438
/*
 * Back the guest's view of /proc/self/cmdline: copy the host cmdline
 * into fd, dropping the first argv entry (the path of the qemu binary
 * itself) so the guest sees its own command line.
 *
 * Returns 0 on success, -1 with errno set on read/write failure.
 * cpu_env is unused but required by the fake_open fill() signature.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command.  Only search the bytes
               actually read: searching the whole buffer could match a
               stale NUL beyond nb_read and make nb_read go negative. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6488
6489 static int open_self_maps(void *cpu_env, int fd)
6490 {
6491 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6492 TaskState *ts = cpu->opaque;
6493 FILE *fp;
6494 char *line = NULL;
6495 size_t len = 0;
6496 ssize_t read;
6497
6498 fp = fopen("/proc/self/maps", "r");
6499 if (fp == NULL) {
6500 return -1;
6501 }
6502
6503 while ((read = getline(&line, &len, fp)) != -1) {
6504 int fields, dev_maj, dev_min, inode;
6505 uint64_t min, max, offset;
6506 char flag_r, flag_w, flag_x, flag_p;
6507 char path[512] = "";
6508 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6509 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6510 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6511
6512 if ((fields < 10) || (fields > 11)) {
6513 continue;
6514 }
6515 if (h2g_valid(min)) {
6516 int flags = page_get_flags(h2g(min));
6517 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6518 if (page_check_range(h2g(min), max - min, flags) == -1) {
6519 continue;
6520 }
6521 if (h2g(min) == ts->info->stack_limit) {
6522 pstrcpy(path, sizeof(path), " [stack]");
6523 }
6524 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6525 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6526 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6527 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6528 path[0] ? " " : "", path);
6529 }
6530 }
6531
6532 free(line);
6533 fclose(fp);
6534
6535 return 0;
6536 }
6537
6538 static int open_self_stat(void *cpu_env, int fd)
6539 {
6540 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6541 TaskState *ts = cpu->opaque;
6542 abi_ulong start_stack = ts->info->start_stack;
6543 int i;
6544
6545 for (i = 0; i < 44; i++) {
6546 char buf[128];
6547 int len;
6548 uint64_t val = 0;
6549
6550 if (i == 0) {
6551 /* pid */
6552 val = getpid();
6553 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6554 } else if (i == 1) {
6555 /* app name */
6556 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6557 } else if (i == 27) {
6558 /* stack bottom */
6559 val = start_stack;
6560 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6561 } else {
6562 /* for the rest, there is MasterCard */
6563 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6564 }
6565
6566 len = strlen(buf);
6567 if (write(fd, buf, len) != len) {
6568 return -1;
6569 }
6570 }
6571
6572 return 0;
6573 }
6574
6575 static int open_self_auxv(void *cpu_env, int fd)
6576 {
6577 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6578 TaskState *ts = cpu->opaque;
6579 abi_ulong auxv = ts->info->saved_auxv;
6580 abi_ulong len = ts->info->auxv_len;
6581 char *ptr;
6582
6583 /*
6584 * Auxiliary vector is stored in target process stack.
6585 * read in whole auxv vector and copy it to file
6586 */
6587 ptr = lock_user(VERIFY_READ, auxv, len, 0);
6588 if (ptr != NULL) {
6589 while (len > 0) {
6590 ssize_t r;
6591 r = write(fd, ptr, len);
6592 if (r <= 0) {
6593 break;
6594 }
6595 len -= r;
6596 ptr += r;
6597 }
6598 lseek(fd, 0, SEEK_SET);
6599 unlock_user(ptr, auxv, len);
6600 }
6601
6602 return 0;
6603 }
6604
/*
 * Return 1 if filename names `entry' inside this process's /proc
 * directory — i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>" with
 * <pid> equal to getpid() — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Match a literal "<pid>/" prefix against our own pid. */
        char mypid[80];
        snprintf(mypid, sizeof(mypid), "%d/", getpid());
        if (strncmp(filename, mypid, strlen(mypid)) != 0) {
            return 0;
        }
        filename += strlen(mypid);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
6628
6629 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator for fake_open entries whose table name is a
 * full pathname (e.g. "/proc/net/route"). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6634
6635 static int open_net_route(void *cpu_env, int fd)
6636 {
6637 FILE *fp;
6638 char *line = NULL;
6639 size_t len = 0;
6640 ssize_t read;
6641
6642 fp = fopen("/proc/net/route", "r");
6643 if (fp == NULL) {
6644 return -1;
6645 }
6646
6647 /* read header */
6648
6649 read = getline(&line, &len, fp);
6650 dprintf(fd, "%s", line);
6651
6652 /* read routes */
6653
6654 while ((read = getline(&line, &len, fp)) != -1) {
6655 char iface[16];
6656 uint32_t dest, gw, mask;
6657 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6658 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6659 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6660 &mask, &mtu, &window, &irtt);
6661 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6662 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6663 metric, tswap32(mask), mtu, window, irtt);
6664 }
6665
6666 free(line);
6667 fclose(fp);
6668
6669 return 0;
6670 }
6671 #endif
6672
/*
 * openat(2) for the guest, with special handling for paths the guest
 * must not see the host's version of:
 *  - /proc/self/exe (or /proc/<pid>/exe) opens the guest executable,
 *  - selected /proc entries are synthesized into an unlinked temp file
 *    by a fill() callback,
 *  - everything else is passed through to the host via safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                   /* name the cmp() matches   */
        int (*fill)(void *cpu_env, int fd);     /* writes fake content      */
        int (*cmp)(const char *s1, const char *s2); /* path comparator      */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        /* only needed when host and guest endianness differ */
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the loader was given at exec time; fall back
           to reopening the recorded executable path. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the fd keeps the file alive */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* preserve the fill callback's errno across close() */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* rewind so the guest reads from the start */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
6732
/* POSIX timer IDs handed to the guest carry this magic in the high
 * 16 bits; the low 16 bits index the g_posix_timers table. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs that do not carry the magic marker. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* The remaining bits must index a slot in the timer table. */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
6753
6754 /* do_syscall() should always have a single exit point at the end so
6755 that actions, such as logging of syscall results, can be performed.
6756 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6757 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6758 abi_long arg2, abi_long arg3, abi_long arg4,
6759 abi_long arg5, abi_long arg6, abi_long arg7,
6760 abi_long arg8)
6761 {
6762 CPUState *cpu = ENV_GET_CPU(cpu_env);
6763 abi_long ret;
6764 struct stat st;
6765 struct statfs stfs;
6766 void *p;
6767
6768 #if defined(DEBUG_ERESTARTSYS)
6769 /* Debug-only code for exercising the syscall-restart code paths
6770 * in the per-architecture cpu main loops: restart every syscall
6771 * the guest makes once before letting it through.
6772 */
6773 {
6774 static int flag;
6775
6776 flag = !flag;
6777 if (flag) {
6778 return -TARGET_ERESTARTSYS;
6779 }
6780 }
6781 #endif
6782
6783 #ifdef DEBUG
6784 gemu_log("syscall %d", num);
6785 #endif
6786 if(do_strace)
6787 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6788
6789 switch(num) {
6790 case TARGET_NR_exit:
6791 /* In old applications this may be used to implement _exit(2).
6792 However in threaded applictions it is used for thread termination,
6793 and _exit_group is used for application termination.
6794 Do thread termination if we have more then one thread. */
6795
6796 if (block_signals()) {
6797 ret = -TARGET_ERESTARTSYS;
6798 break;
6799 }
6800
6801 if (CPU_NEXT(first_cpu)) {
6802 TaskState *ts;
6803
6804 cpu_list_lock();
6805 /* Remove the CPU from the list. */
6806 QTAILQ_REMOVE(&cpus, cpu, node);
6807 cpu_list_unlock();
6808 ts = cpu->opaque;
6809 if (ts->child_tidptr) {
6810 put_user_u32(0, ts->child_tidptr);
6811 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6812 NULL, NULL, 0);
6813 }
6814 thread_cpu = NULL;
6815 object_unref(OBJECT(cpu));
6816 g_free(ts);
6817 rcu_unregister_thread();
6818 pthread_exit(NULL);
6819 }
6820 #ifdef TARGET_GPROF
6821 _mcleanup();
6822 #endif
6823 gdb_exit(cpu_env, arg1);
6824 _exit(arg1);
6825 ret = 0; /* avoid warning */
6826 break;
6827 case TARGET_NR_read:
6828 if (arg3 == 0)
6829 ret = 0;
6830 else {
6831 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6832 goto efault;
6833 ret = get_errno(safe_read(arg1, p, arg3));
6834 if (ret >= 0 &&
6835 fd_trans_host_to_target_data(arg1)) {
6836 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6837 }
6838 unlock_user(p, arg2, ret);
6839 }
6840 break;
6841 case TARGET_NR_write:
6842 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6843 goto efault;
6844 ret = get_errno(safe_write(arg1, p, arg3));
6845 unlock_user(p, arg2, 0);
6846 break;
6847 #ifdef TARGET_NR_open
6848 case TARGET_NR_open:
6849 if (!(p = lock_user_string(arg1)))
6850 goto efault;
6851 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6852 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6853 arg3));
6854 fd_trans_unregister(ret);
6855 unlock_user(p, arg1, 0);
6856 break;
6857 #endif
6858 case TARGET_NR_openat:
6859 if (!(p = lock_user_string(arg2)))
6860 goto efault;
6861 ret = get_errno(do_openat(cpu_env, arg1, p,
6862 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6863 arg4));
6864 fd_trans_unregister(ret);
6865 unlock_user(p, arg2, 0);
6866 break;
6867 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6868 case TARGET_NR_name_to_handle_at:
6869 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6870 break;
6871 #endif
6872 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6873 case TARGET_NR_open_by_handle_at:
6874 ret = do_open_by_handle_at(arg1, arg2, arg3);
6875 fd_trans_unregister(ret);
6876 break;
6877 #endif
6878 case TARGET_NR_close:
6879 fd_trans_unregister(arg1);
6880 ret = get_errno(close(arg1));
6881 break;
6882 case TARGET_NR_brk:
6883 ret = do_brk(arg1);
6884 break;
6885 #ifdef TARGET_NR_fork
6886 case TARGET_NR_fork:
6887 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6888 break;
6889 #endif
6890 #ifdef TARGET_NR_waitpid
6891 case TARGET_NR_waitpid:
6892 {
6893 int status;
6894 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6895 if (!is_error(ret) && arg2 && ret
6896 && put_user_s32(host_to_target_waitstatus(status), arg2))
6897 goto efault;
6898 }
6899 break;
6900 #endif
6901 #ifdef TARGET_NR_waitid
6902 case TARGET_NR_waitid:
6903 {
6904 siginfo_t info;
6905 info.si_pid = 0;
6906 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6907 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6908 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6909 goto efault;
6910 host_to_target_siginfo(p, &info);
6911 unlock_user(p, arg3, sizeof(target_siginfo_t));
6912 }
6913 }
6914 break;
6915 #endif
6916 #ifdef TARGET_NR_creat /* not on alpha */
6917 case TARGET_NR_creat:
6918 if (!(p = lock_user_string(arg1)))
6919 goto efault;
6920 ret = get_errno(creat(p, arg2));
6921 fd_trans_unregister(ret);
6922 unlock_user(p, arg1, 0);
6923 break;
6924 #endif
6925 #ifdef TARGET_NR_link
6926 case TARGET_NR_link:
6927 {
6928 void * p2;
6929 p = lock_user_string(arg1);
6930 p2 = lock_user_string(arg2);
6931 if (!p || !p2)
6932 ret = -TARGET_EFAULT;
6933 else
6934 ret = get_errno(link(p, p2));
6935 unlock_user(p2, arg2, 0);
6936 unlock_user(p, arg1, 0);
6937 }
6938 break;
6939 #endif
6940 #if defined(TARGET_NR_linkat)
6941 case TARGET_NR_linkat:
6942 {
6943 void * p2 = NULL;
6944 if (!arg2 || !arg4)
6945 goto efault;
6946 p = lock_user_string(arg2);
6947 p2 = lock_user_string(arg4);
6948 if (!p || !p2)
6949 ret = -TARGET_EFAULT;
6950 else
6951 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6952 unlock_user(p, arg2, 0);
6953 unlock_user(p2, arg4, 0);
6954 }
6955 break;
6956 #endif
6957 #ifdef TARGET_NR_unlink
6958 case TARGET_NR_unlink:
6959 if (!(p = lock_user_string(arg1)))
6960 goto efault;
6961 ret = get_errno(unlink(p));
6962 unlock_user(p, arg1, 0);
6963 break;
6964 #endif
6965 #if defined(TARGET_NR_unlinkat)
6966 case TARGET_NR_unlinkat:
6967 if (!(p = lock_user_string(arg2)))
6968 goto efault;
6969 ret = get_errno(unlinkat(arg1, p, arg3));
6970 unlock_user(p, arg2, 0);
6971 break;
6972 #endif
6973 case TARGET_NR_execve:
6974 {
6975 char **argp, **envp;
6976 int argc, envc;
6977 abi_ulong gp;
6978 abi_ulong guest_argp;
6979 abi_ulong guest_envp;
6980 abi_ulong addr;
6981 char **q;
6982 int total_size = 0;
6983
6984 argc = 0;
6985 guest_argp = arg2;
6986 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6987 if (get_user_ual(addr, gp))
6988 goto efault;
6989 if (!addr)
6990 break;
6991 argc++;
6992 }
6993 envc = 0;
6994 guest_envp = arg3;
6995 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6996 if (get_user_ual(addr, gp))
6997 goto efault;
6998 if (!addr)
6999 break;
7000 envc++;
7001 }
7002
7003 argp = alloca((argc + 1) * sizeof(void *));
7004 envp = alloca((envc + 1) * sizeof(void *));
7005
7006 for (gp = guest_argp, q = argp; gp;
7007 gp += sizeof(abi_ulong), q++) {
7008 if (get_user_ual(addr, gp))
7009 goto execve_efault;
7010 if (!addr)
7011 break;
7012 if (!(*q = lock_user_string(addr)))
7013 goto execve_efault;
7014 total_size += strlen(*q) + 1;
7015 }
7016 *q = NULL;
7017
7018 for (gp = guest_envp, q = envp; gp;
7019 gp += sizeof(abi_ulong), q++) {
7020 if (get_user_ual(addr, gp))
7021 goto execve_efault;
7022 if (!addr)
7023 break;
7024 if (!(*q = lock_user_string(addr)))
7025 goto execve_efault;
7026 total_size += strlen(*q) + 1;
7027 }
7028 *q = NULL;
7029
7030 if (!(p = lock_user_string(arg1)))
7031 goto execve_efault;
7032 /* Although execve() is not an interruptible syscall it is
7033 * a special case where we must use the safe_syscall wrapper:
7034 * if we allow a signal to happen before we make the host
7035 * syscall then we will 'lose' it, because at the point of
7036 * execve the process leaves QEMU's control. So we use the
7037 * safe syscall wrapper to ensure that we either take the
7038 * signal as a guest signal, or else it does not happen
7039 * before the execve completes and makes it the other
7040 * program's problem.
7041 */
7042 ret = get_errno(safe_execve(p, argp, envp));
7043 unlock_user(p, arg1, 0);
7044
7045 goto execve_end;
7046
7047 execve_efault:
7048 ret = -TARGET_EFAULT;
7049
7050 execve_end:
7051 for (gp = guest_argp, q = argp; *q;
7052 gp += sizeof(abi_ulong), q++) {
7053 if (get_user_ual(addr, gp)
7054 || !addr)
7055 break;
7056 unlock_user(*q, addr, 0);
7057 }
7058 for (gp = guest_envp, q = envp; *q;
7059 gp += sizeof(abi_ulong), q++) {
7060 if (get_user_ual(addr, gp)
7061 || !addr)
7062 break;
7063 unlock_user(*q, addr, 0);
7064 }
7065 }
7066 break;
7067 case TARGET_NR_chdir:
7068 if (!(p = lock_user_string(arg1)))
7069 goto efault;
7070 ret = get_errno(chdir(p));
7071 unlock_user(p, arg1, 0);
7072 break;
7073 #ifdef TARGET_NR_time
7074 case TARGET_NR_time:
7075 {
7076 time_t host_time;
7077 ret = get_errno(time(&host_time));
7078 if (!is_error(ret)
7079 && arg1
7080 && put_user_sal(host_time, arg1))
7081 goto efault;
7082 }
7083 break;
7084 #endif
7085 #ifdef TARGET_NR_mknod
7086 case TARGET_NR_mknod:
7087 if (!(p = lock_user_string(arg1)))
7088 goto efault;
7089 ret = get_errno(mknod(p, arg2, arg3));
7090 unlock_user(p, arg1, 0);
7091 break;
7092 #endif
7093 #if defined(TARGET_NR_mknodat)
7094 case TARGET_NR_mknodat:
7095 if (!(p = lock_user_string(arg2)))
7096 goto efault;
7097 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7098 unlock_user(p, arg2, 0);
7099 break;
7100 #endif
7101 #ifdef TARGET_NR_chmod
7102 case TARGET_NR_chmod:
7103 if (!(p = lock_user_string(arg1)))
7104 goto efault;
7105 ret = get_errno(chmod(p, arg2));
7106 unlock_user(p, arg1, 0);
7107 break;
7108 #endif
7109 #ifdef TARGET_NR_break
7110 case TARGET_NR_break:
7111 goto unimplemented;
7112 #endif
7113 #ifdef TARGET_NR_oldstat
7114 case TARGET_NR_oldstat:
7115 goto unimplemented;
7116 #endif
7117 case TARGET_NR_lseek:
7118 ret = get_errno(lseek(arg1, arg2, arg3));
7119 break;
7120 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7121 /* Alpha specific */
7122 case TARGET_NR_getxpid:
7123 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7124 ret = get_errno(getpid());
7125 break;
7126 #endif
7127 #ifdef TARGET_NR_getpid
7128 case TARGET_NR_getpid:
7129 ret = get_errno(getpid());
7130 break;
7131 #endif
7132 case TARGET_NR_mount:
7133 {
7134 /* need to look at the data field */
7135 void *p2, *p3;
7136
7137 if (arg1) {
7138 p = lock_user_string(arg1);
7139 if (!p) {
7140 goto efault;
7141 }
7142 } else {
7143 p = NULL;
7144 }
7145
7146 p2 = lock_user_string(arg2);
7147 if (!p2) {
7148 if (arg1) {
7149 unlock_user(p, arg1, 0);
7150 }
7151 goto efault;
7152 }
7153
7154 if (arg3) {
7155 p3 = lock_user_string(arg3);
7156 if (!p3) {
7157 if (arg1) {
7158 unlock_user(p, arg1, 0);
7159 }
7160 unlock_user(p2, arg2, 0);
7161 goto efault;
7162 }
7163 } else {
7164 p3 = NULL;
7165 }
7166
7167 /* FIXME - arg5 should be locked, but it isn't clear how to
7168 * do that since it's not guaranteed to be a NULL-terminated
7169 * string.
7170 */
7171 if (!arg5) {
7172 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7173 } else {
7174 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7175 }
7176 ret = get_errno(ret);
7177
7178 if (arg1) {
7179 unlock_user(p, arg1, 0);
7180 }
7181 unlock_user(p2, arg2, 0);
7182 if (arg3) {
7183 unlock_user(p3, arg3, 0);
7184 }
7185 }
7186 break;
7187 #ifdef TARGET_NR_umount
7188 case TARGET_NR_umount:
7189 if (!(p = lock_user_string(arg1)))
7190 goto efault;
7191 ret = get_errno(umount(p));
7192 unlock_user(p, arg1, 0);
7193 break;
7194 #endif
7195 #ifdef TARGET_NR_stime /* not on alpha */
7196 case TARGET_NR_stime:
7197 {
7198 time_t host_time;
7199 if (get_user_sal(host_time, arg1))
7200 goto efault;
7201 ret = get_errno(stime(&host_time));
7202 }
7203 break;
7204 #endif
7205 case TARGET_NR_ptrace:
7206 goto unimplemented;
7207 #ifdef TARGET_NR_alarm /* not on alpha */
7208 case TARGET_NR_alarm:
7209 ret = alarm(arg1);
7210 break;
7211 #endif
7212 #ifdef TARGET_NR_oldfstat
7213 case TARGET_NR_oldfstat:
7214 goto unimplemented;
7215 #endif
7216 #ifdef TARGET_NR_pause /* not on alpha */
7217 case TARGET_NR_pause:
7218 if (!block_signals()) {
7219 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7220 }
7221 ret = -TARGET_EINTR;
7222 break;
7223 #endif
7224 #ifdef TARGET_NR_utime
7225 case TARGET_NR_utime:
7226 {
7227 struct utimbuf tbuf, *host_tbuf;
7228 struct target_utimbuf *target_tbuf;
7229 if (arg2) {
7230 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7231 goto efault;
7232 tbuf.actime = tswapal(target_tbuf->actime);
7233 tbuf.modtime = tswapal(target_tbuf->modtime);
7234 unlock_user_struct(target_tbuf, arg2, 0);
7235 host_tbuf = &tbuf;
7236 } else {
7237 host_tbuf = NULL;
7238 }
7239 if (!(p = lock_user_string(arg1)))
7240 goto efault;
7241 ret = get_errno(utime(p, host_tbuf));
7242 unlock_user(p, arg1, 0);
7243 }
7244 break;
7245 #endif
7246 #ifdef TARGET_NR_utimes
7247 case TARGET_NR_utimes:
7248 {
7249 struct timeval *tvp, tv[2];
7250 if (arg2) {
7251 if (copy_from_user_timeval(&tv[0], arg2)
7252 || copy_from_user_timeval(&tv[1],
7253 arg2 + sizeof(struct target_timeval)))
7254 goto efault;
7255 tvp = tv;
7256 } else {
7257 tvp = NULL;
7258 }
7259 if (!(p = lock_user_string(arg1)))
7260 goto efault;
7261 ret = get_errno(utimes(p, tvp));
7262 unlock_user(p, arg1, 0);
7263 }
7264 break;
7265 #endif
7266 #if defined(TARGET_NR_futimesat)
7267 case TARGET_NR_futimesat:
7268 {
7269 struct timeval *tvp, tv[2];
7270 if (arg3) {
7271 if (copy_from_user_timeval(&tv[0], arg3)
7272 || copy_from_user_timeval(&tv[1],
7273 arg3 + sizeof(struct target_timeval)))
7274 goto efault;
7275 tvp = tv;
7276 } else {
7277 tvp = NULL;
7278 }
7279 if (!(p = lock_user_string(arg2)))
7280 goto efault;
7281 ret = get_errno(futimesat(arg1, path(p), tvp));
7282 unlock_user(p, arg2, 0);
7283 }
7284 break;
7285 #endif
7286 #ifdef TARGET_NR_stty
7287 case TARGET_NR_stty:
7288 goto unimplemented;
7289 #endif
7290 #ifdef TARGET_NR_gtty
7291 case TARGET_NR_gtty:
7292 goto unimplemented;
7293 #endif
7294 #ifdef TARGET_NR_access
7295 case TARGET_NR_access:
7296 if (!(p = lock_user_string(arg1)))
7297 goto efault;
7298 ret = get_errno(access(path(p), arg2));
7299 unlock_user(p, arg1, 0);
7300 break;
7301 #endif
7302 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7303 case TARGET_NR_faccessat:
7304 if (!(p = lock_user_string(arg2)))
7305 goto efault;
7306 ret = get_errno(faccessat(arg1, p, arg3, 0));
7307 unlock_user(p, arg2, 0);
7308 break;
7309 #endif
7310 #ifdef TARGET_NR_nice /* not on alpha */
7311 case TARGET_NR_nice:
7312 ret = get_errno(nice(arg1));
7313 break;
7314 #endif
7315 #ifdef TARGET_NR_ftime
7316 case TARGET_NR_ftime:
7317 goto unimplemented;
7318 #endif
7319 case TARGET_NR_sync:
7320 sync();
7321 ret = 0;
7322 break;
7323 case TARGET_NR_kill:
7324 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7325 break;
7326 #ifdef TARGET_NR_rename
7327 case TARGET_NR_rename:
7328 {
7329 void *p2;
7330 p = lock_user_string(arg1);
7331 p2 = lock_user_string(arg2);
7332 if (!p || !p2)
7333 ret = -TARGET_EFAULT;
7334 else
7335 ret = get_errno(rename(p, p2));
7336 unlock_user(p2, arg2, 0);
7337 unlock_user(p, arg1, 0);
7338 }
7339 break;
7340 #endif
7341 #if defined(TARGET_NR_renameat)
7342 case TARGET_NR_renameat:
7343 {
7344 void *p2;
7345 p = lock_user_string(arg2);
7346 p2 = lock_user_string(arg4);
7347 if (!p || !p2)
7348 ret = -TARGET_EFAULT;
7349 else
7350 ret = get_errno(renameat(arg1, p, arg3, p2));
7351 unlock_user(p2, arg4, 0);
7352 unlock_user(p, arg2, 0);
7353 }
7354 break;
7355 #endif
7356 #ifdef TARGET_NR_mkdir
7357 case TARGET_NR_mkdir:
7358 if (!(p = lock_user_string(arg1)))
7359 goto efault;
7360 ret = get_errno(mkdir(p, arg2));
7361 unlock_user(p, arg1, 0);
7362 break;
7363 #endif
7364 #if defined(TARGET_NR_mkdirat)
7365 case TARGET_NR_mkdirat:
7366 if (!(p = lock_user_string(arg2)))
7367 goto efault;
7368 ret = get_errno(mkdirat(arg1, p, arg3));
7369 unlock_user(p, arg2, 0);
7370 break;
7371 #endif
7372 #ifdef TARGET_NR_rmdir
7373 case TARGET_NR_rmdir:
7374 if (!(p = lock_user_string(arg1)))
7375 goto efault;
7376 ret = get_errno(rmdir(p));
7377 unlock_user(p, arg1, 0);
7378 break;
7379 #endif
7380 case TARGET_NR_dup:
7381 ret = get_errno(dup(arg1));
7382 if (ret >= 0) {
7383 fd_trans_dup(arg1, ret);
7384 }
7385 break;
7386 #ifdef TARGET_NR_pipe
7387 case TARGET_NR_pipe:
7388 ret = do_pipe(cpu_env, arg1, 0, 0);
7389 break;
7390 #endif
7391 #ifdef TARGET_NR_pipe2
7392 case TARGET_NR_pipe2:
7393 ret = do_pipe(cpu_env, arg1,
7394 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7395 break;
7396 #endif
7397 case TARGET_NR_times:
7398 {
7399 struct target_tms *tmsp;
7400 struct tms tms;
7401 ret = get_errno(times(&tms));
7402 if (arg1) {
7403 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7404 if (!tmsp)
7405 goto efault;
7406 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7407 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7408 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7409 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7410 }
7411 if (!is_error(ret))
7412 ret = host_to_target_clock_t(ret);
7413 }
7414 break;
7415 #ifdef TARGET_NR_prof
7416 case TARGET_NR_prof:
7417 goto unimplemented;
7418 #endif
7419 #ifdef TARGET_NR_signal
7420 case TARGET_NR_signal:
7421 goto unimplemented;
7422 #endif
7423 case TARGET_NR_acct:
7424 if (arg1 == 0) {
7425 ret = get_errno(acct(NULL));
7426 } else {
7427 if (!(p = lock_user_string(arg1)))
7428 goto efault;
7429 ret = get_errno(acct(path(p)));
7430 unlock_user(p, arg1, 0);
7431 }
7432 break;
7433 #ifdef TARGET_NR_umount2
7434 case TARGET_NR_umount2:
7435 if (!(p = lock_user_string(arg1)))
7436 goto efault;
7437 ret = get_errno(umount2(p, arg2));
7438 unlock_user(p, arg1, 0);
7439 break;
7440 #endif
7441 #ifdef TARGET_NR_lock
7442 case TARGET_NR_lock:
7443 goto unimplemented;
7444 #endif
7445 case TARGET_NR_ioctl:
7446 ret = do_ioctl(arg1, arg2, arg3);
7447 break;
7448 case TARGET_NR_fcntl:
7449 ret = do_fcntl(arg1, arg2, arg3);
7450 break;
7451 #ifdef TARGET_NR_mpx
7452 case TARGET_NR_mpx:
7453 goto unimplemented;
7454 #endif
7455 case TARGET_NR_setpgid:
7456 ret = get_errno(setpgid(arg1, arg2));
7457 break;
7458 #ifdef TARGET_NR_ulimit
7459 case TARGET_NR_ulimit:
7460 goto unimplemented;
7461 #endif
7462 #ifdef TARGET_NR_oldolduname
7463 case TARGET_NR_oldolduname:
7464 goto unimplemented;
7465 #endif
7466 case TARGET_NR_umask:
7467 ret = get_errno(umask(arg1));
7468 break;
7469 case TARGET_NR_chroot:
7470 if (!(p = lock_user_string(arg1)))
7471 goto efault;
7472 ret = get_errno(chroot(p));
7473 unlock_user(p, arg1, 0);
7474 break;
7475 #ifdef TARGET_NR_ustat
7476 case TARGET_NR_ustat:
7477 goto unimplemented;
7478 #endif
7479 #ifdef TARGET_NR_dup2
7480 case TARGET_NR_dup2:
7481 ret = get_errno(dup2(arg1, arg2));
7482 if (ret >= 0) {
7483 fd_trans_dup(arg1, arg2);
7484 }
7485 break;
7486 #endif
7487 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7488 case TARGET_NR_dup3:
7489 ret = get_errno(dup3(arg1, arg2, arg3));
7490 if (ret >= 0) {
7491 fd_trans_dup(arg1, arg2);
7492 }
7493 break;
7494 #endif
7495 #ifdef TARGET_NR_getppid /* not on alpha */
7496 case TARGET_NR_getppid:
7497 ret = get_errno(getppid());
7498 break;
7499 #endif
7500 #ifdef TARGET_NR_getpgrp
7501 case TARGET_NR_getpgrp:
7502 ret = get_errno(getpgrp());
7503 break;
7504 #endif
7505 case TARGET_NR_setsid:
7506 ret = get_errno(setsid());
7507 break;
7508 #ifdef TARGET_NR_sigaction
7509 case TARGET_NR_sigaction:
7510 {
7511 #if defined(TARGET_ALPHA)
7512 struct target_sigaction act, oact, *pact = 0;
7513 struct target_old_sigaction *old_act;
7514 if (arg2) {
7515 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7516 goto efault;
7517 act._sa_handler = old_act->_sa_handler;
7518 target_siginitset(&act.sa_mask, old_act->sa_mask);
7519 act.sa_flags = old_act->sa_flags;
7520 act.sa_restorer = 0;
7521 unlock_user_struct(old_act, arg2, 0);
7522 pact = &act;
7523 }
7524 ret = get_errno(do_sigaction(arg1, pact, &oact));
7525 if (!is_error(ret) && arg3) {
7526 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7527 goto efault;
7528 old_act->_sa_handler = oact._sa_handler;
7529 old_act->sa_mask = oact.sa_mask.sig[0];
7530 old_act->sa_flags = oact.sa_flags;
7531 unlock_user_struct(old_act, arg3, 1);
7532 }
7533 #elif defined(TARGET_MIPS)
7534 struct target_sigaction act, oact, *pact, *old_act;
7535
7536 if (arg2) {
7537 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7538 goto efault;
7539 act._sa_handler = old_act->_sa_handler;
7540 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7541 act.sa_flags = old_act->sa_flags;
7542 unlock_user_struct(old_act, arg2, 0);
7543 pact = &act;
7544 } else {
7545 pact = NULL;
7546 }
7547
7548 ret = get_errno(do_sigaction(arg1, pact, &oact));
7549
7550 if (!is_error(ret) && arg3) {
7551 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7552 goto efault;
7553 old_act->_sa_handler = oact._sa_handler;
7554 old_act->sa_flags = oact.sa_flags;
7555 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7556 old_act->sa_mask.sig[1] = 0;
7557 old_act->sa_mask.sig[2] = 0;
7558 old_act->sa_mask.sig[3] = 0;
7559 unlock_user_struct(old_act, arg3, 1);
7560 }
7561 #else
7562 struct target_old_sigaction *old_act;
7563 struct target_sigaction act, oact, *pact;
7564 if (arg2) {
7565 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7566 goto efault;
7567 act._sa_handler = old_act->_sa_handler;
7568 target_siginitset(&act.sa_mask, old_act->sa_mask);
7569 act.sa_flags = old_act->sa_flags;
7570 act.sa_restorer = old_act->sa_restorer;
7571 unlock_user_struct(old_act, arg2, 0);
7572 pact = &act;
7573 } else {
7574 pact = NULL;
7575 }
7576 ret = get_errno(do_sigaction(arg1, pact, &oact));
7577 if (!is_error(ret) && arg3) {
7578 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7579 goto efault;
7580 old_act->_sa_handler = oact._sa_handler;
7581 old_act->sa_mask = oact.sa_mask.sig[0];
7582 old_act->sa_flags = oact.sa_flags;
7583 old_act->sa_restorer = oact.sa_restorer;
7584 unlock_user_struct(old_act, arg3, 1);
7585 }
7586 #endif
7587 }
7588 break;
7589 #endif
7590 case TARGET_NR_rt_sigaction:
7591 {
7592 #if defined(TARGET_ALPHA)
7593 struct target_sigaction act, oact, *pact = 0;
7594 struct target_rt_sigaction *rt_act;
7595 /* ??? arg4 == sizeof(sigset_t). */
7596 if (arg2) {
7597 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7598 goto efault;
7599 act._sa_handler = rt_act->_sa_handler;
7600 act.sa_mask = rt_act->sa_mask;
7601 act.sa_flags = rt_act->sa_flags;
7602 act.sa_restorer = arg5;
7603 unlock_user_struct(rt_act, arg2, 0);
7604 pact = &act;
7605 }
7606 ret = get_errno(do_sigaction(arg1, pact, &oact));
7607 if (!is_error(ret) && arg3) {
7608 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7609 goto efault;
7610 rt_act->_sa_handler = oact._sa_handler;
7611 rt_act->sa_mask = oact.sa_mask;
7612 rt_act->sa_flags = oact.sa_flags;
7613 unlock_user_struct(rt_act, arg3, 1);
7614 }
7615 #else
7616 struct target_sigaction *act;
7617 struct target_sigaction *oact;
7618
7619 if (arg2) {
7620 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7621 goto efault;
7622 } else
7623 act = NULL;
7624 if (arg3) {
7625 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7626 ret = -TARGET_EFAULT;
7627 goto rt_sigaction_fail;
7628 }
7629 } else
7630 oact = NULL;
7631 ret = get_errno(do_sigaction(arg1, act, oact));
7632 rt_sigaction_fail:
7633 if (act)
7634 unlock_user_struct(act, arg2, 0);
7635 if (oact)
7636 unlock_user_struct(oact, arg3, 1);
7637 #endif
7638 }
7639 break;
7640 #ifdef TARGET_NR_sgetmask /* not on alpha */
7641 case TARGET_NR_sgetmask:
7642 {
7643 sigset_t cur_set;
7644 abi_ulong target_set;
7645 ret = do_sigprocmask(0, NULL, &cur_set);
7646 if (!ret) {
7647 host_to_target_old_sigset(&target_set, &cur_set);
7648 ret = target_set;
7649 }
7650 }
7651 break;
7652 #endif
7653 #ifdef TARGET_NR_ssetmask /* not on alpha */
7654 case TARGET_NR_ssetmask:
7655 {
7656 sigset_t set, oset, cur_set;
7657 abi_ulong target_set = arg1;
7658 /* We only have one word of the new mask so we must read
7659 * the rest of it with do_sigprocmask() and OR in this word.
7660 * We are guaranteed that a do_sigprocmask() that only queries
7661 * the signal mask will not fail.
7662 */
7663 ret = do_sigprocmask(0, NULL, &cur_set);
7664 assert(!ret);
7665 target_to_host_old_sigset(&set, &target_set);
7666 sigorset(&set, &set, &cur_set);
7667 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7668 if (!ret) {
7669 host_to_target_old_sigset(&target_set, &oset);
7670 ret = target_set;
7671 }
7672 }
7673 break;
7674 #endif
7675 #ifdef TARGET_NR_sigprocmask
7676 case TARGET_NR_sigprocmask:
7677 {
7678 #if defined(TARGET_ALPHA)
7679 sigset_t set, oldset;
7680 abi_ulong mask;
7681 int how;
7682
7683 switch (arg1) {
7684 case TARGET_SIG_BLOCK:
7685 how = SIG_BLOCK;
7686 break;
7687 case TARGET_SIG_UNBLOCK:
7688 how = SIG_UNBLOCK;
7689 break;
7690 case TARGET_SIG_SETMASK:
7691 how = SIG_SETMASK;
7692 break;
7693 default:
7694 ret = -TARGET_EINVAL;
7695 goto fail;
7696 }
7697 mask = arg2;
7698 target_to_host_old_sigset(&set, &mask);
7699
7700 ret = do_sigprocmask(how, &set, &oldset);
7701 if (!is_error(ret)) {
7702 host_to_target_old_sigset(&mask, &oldset);
7703 ret = mask;
7704 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7705 }
7706 #else
7707 sigset_t set, oldset, *set_ptr;
7708 int how;
7709
7710 if (arg2) {
7711 switch (arg1) {
7712 case TARGET_SIG_BLOCK:
7713 how = SIG_BLOCK;
7714 break;
7715 case TARGET_SIG_UNBLOCK:
7716 how = SIG_UNBLOCK;
7717 break;
7718 case TARGET_SIG_SETMASK:
7719 how = SIG_SETMASK;
7720 break;
7721 default:
7722 ret = -TARGET_EINVAL;
7723 goto fail;
7724 }
7725 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7726 goto efault;
7727 target_to_host_old_sigset(&set, p);
7728 unlock_user(p, arg2, 0);
7729 set_ptr = &set;
7730 } else {
7731 how = 0;
7732 set_ptr = NULL;
7733 }
7734 ret = do_sigprocmask(how, set_ptr, &oldset);
7735 if (!is_error(ret) && arg3) {
7736 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7737 goto efault;
7738 host_to_target_old_sigset(p, &oldset);
7739 unlock_user(p, arg3, sizeof(target_sigset_t));
7740 }
7741 #endif
7742 }
7743 break;
7744 #endif
7745 case TARGET_NR_rt_sigprocmask:
7746 {
7747 int how = arg1;
7748 sigset_t set, oldset, *set_ptr;
7749
7750 if (arg2) {
7751 switch(how) {
7752 case TARGET_SIG_BLOCK:
7753 how = SIG_BLOCK;
7754 break;
7755 case TARGET_SIG_UNBLOCK:
7756 how = SIG_UNBLOCK;
7757 break;
7758 case TARGET_SIG_SETMASK:
7759 how = SIG_SETMASK;
7760 break;
7761 default:
7762 ret = -TARGET_EINVAL;
7763 goto fail;
7764 }
7765 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7766 goto efault;
7767 target_to_host_sigset(&set, p);
7768 unlock_user(p, arg2, 0);
7769 set_ptr = &set;
7770 } else {
7771 how = 0;
7772 set_ptr = NULL;
7773 }
7774 ret = do_sigprocmask(how, set_ptr, &oldset);
7775 if (!is_error(ret) && arg3) {
7776 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7777 goto efault;
7778 host_to_target_sigset(p, &oldset);
7779 unlock_user(p, arg3, sizeof(target_sigset_t));
7780 }
7781 }
7782 break;
7783 #ifdef TARGET_NR_sigpending
7784 case TARGET_NR_sigpending:
7785 {
7786 sigset_t set;
7787 ret = get_errno(sigpending(&set));
7788 if (!is_error(ret)) {
7789 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7790 goto efault;
7791 host_to_target_old_sigset(p, &set);
7792 unlock_user(p, arg1, sizeof(target_sigset_t));
7793 }
7794 }
7795 break;
7796 #endif
7797 case TARGET_NR_rt_sigpending:
7798 {
7799 sigset_t set;
7800 ret = get_errno(sigpending(&set));
7801 if (!is_error(ret)) {
7802 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7803 goto efault;
7804 host_to_target_sigset(p, &set);
7805 unlock_user(p, arg1, sizeof(target_sigset_t));
7806 }
7807 }
7808 break;
7809 #ifdef TARGET_NR_sigsuspend
7810 case TARGET_NR_sigsuspend:
7811 {
7812 TaskState *ts = cpu->opaque;
7813 #if defined(TARGET_ALPHA)
7814 abi_ulong mask = arg1;
7815 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7816 #else
7817 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7818 goto efault;
7819 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7820 unlock_user(p, arg1, 0);
7821 #endif
7822 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7823 SIGSET_T_SIZE));
7824 if (ret != -TARGET_ERESTARTSYS) {
7825 ts->in_sigsuspend = 1;
7826 }
7827 }
7828 break;
7829 #endif
7830 case TARGET_NR_rt_sigsuspend:
7831 {
7832 TaskState *ts = cpu->opaque;
7833 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7834 goto efault;
7835 target_to_host_sigset(&ts->sigsuspend_mask, p);
7836 unlock_user(p, arg1, 0);
7837 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7838 SIGSET_T_SIZE));
7839 if (ret != -TARGET_ERESTARTSYS) {
7840 ts->in_sigsuspend = 1;
7841 }
7842 }
7843 break;
7844 case TARGET_NR_rt_sigtimedwait:
7845 {
7846 sigset_t set;
7847 struct timespec uts, *puts;
7848 siginfo_t uinfo;
7849
7850 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7851 goto efault;
7852 target_to_host_sigset(&set, p);
7853 unlock_user(p, arg1, 0);
7854 if (arg3) {
7855 puts = &uts;
7856 target_to_host_timespec(puts, arg3);
7857 } else {
7858 puts = NULL;
7859 }
7860 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7861 SIGSET_T_SIZE));
7862 if (!is_error(ret)) {
7863 if (arg2) {
7864 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7865 0);
7866 if (!p) {
7867 goto efault;
7868 }
7869 host_to_target_siginfo(p, &uinfo);
7870 unlock_user(p, arg2, sizeof(target_siginfo_t));
7871 }
7872 ret = host_to_target_signal(ret);
7873 }
7874 }
7875 break;
7876 case TARGET_NR_rt_sigqueueinfo:
7877 {
7878 siginfo_t uinfo;
7879
7880 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
7881 if (!p) {
7882 goto efault;
7883 }
7884 target_to_host_siginfo(&uinfo, p);
7885 unlock_user(p, arg1, 0);
7886 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7887 }
7888 break;
7889 #ifdef TARGET_NR_sigreturn
7890 case TARGET_NR_sigreturn:
7891 if (block_signals()) {
7892 ret = -TARGET_ERESTARTSYS;
7893 } else {
7894 ret = do_sigreturn(cpu_env);
7895 }
7896 break;
7897 #endif
7898 case TARGET_NR_rt_sigreturn:
7899 if (block_signals()) {
7900 ret = -TARGET_ERESTARTSYS;
7901 } else {
7902 ret = do_rt_sigreturn(cpu_env);
7903 }
7904 break;
7905 case TARGET_NR_sethostname:
7906 if (!(p = lock_user_string(arg1)))
7907 goto efault;
7908 ret = get_errno(sethostname(p, arg2));
7909 unlock_user(p, arg1, 0);
7910 break;
7911 case TARGET_NR_setrlimit:
7912 {
7913 int resource = target_to_host_resource(arg1);
7914 struct target_rlimit *target_rlim;
7915 struct rlimit rlim;
7916 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7917 goto efault;
7918 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7919 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7920 unlock_user_struct(target_rlim, arg2, 0);
7921 ret = get_errno(setrlimit(resource, &rlim));
7922 }
7923 break;
7924 case TARGET_NR_getrlimit:
7925 {
7926 int resource = target_to_host_resource(arg1);
7927 struct target_rlimit *target_rlim;
7928 struct rlimit rlim;
7929
7930 ret = get_errno(getrlimit(resource, &rlim));
7931 if (!is_error(ret)) {
7932 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7933 goto efault;
7934 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7935 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7936 unlock_user_struct(target_rlim, arg2, 1);
7937 }
7938 }
7939 break;
7940 case TARGET_NR_getrusage:
7941 {
7942 struct rusage rusage;
7943 ret = get_errno(getrusage(arg1, &rusage));
7944 if (!is_error(ret)) {
7945 ret = host_to_target_rusage(arg2, &rusage);
7946 }
7947 }
7948 break;
7949 case TARGET_NR_gettimeofday:
7950 {
7951 struct timeval tv;
7952 ret = get_errno(gettimeofday(&tv, NULL));
7953 if (!is_error(ret)) {
7954 if (copy_to_user_timeval(arg1, &tv))
7955 goto efault;
7956 }
7957 }
7958 break;
7959 case TARGET_NR_settimeofday:
7960 {
7961 struct timeval tv, *ptv = NULL;
7962 struct timezone tz, *ptz = NULL;
7963
7964 if (arg1) {
7965 if (copy_from_user_timeval(&tv, arg1)) {
7966 goto efault;
7967 }
7968 ptv = &tv;
7969 }
7970
7971 if (arg2) {
7972 if (copy_from_user_timezone(&tz, arg2)) {
7973 goto efault;
7974 }
7975 ptz = &tz;
7976 }
7977
7978 ret = get_errno(settimeofday(ptv, ptz));
7979 }
7980 break;
7981 #if defined(TARGET_NR_select)
7982 case TARGET_NR_select:
7983 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7984 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7985 #else
7986 {
7987 struct target_sel_arg_struct *sel;
7988 abi_ulong inp, outp, exp, tvp;
7989 long nsel;
7990
7991 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7992 goto efault;
7993 nsel = tswapal(sel->n);
7994 inp = tswapal(sel->inp);
7995 outp = tswapal(sel->outp);
7996 exp = tswapal(sel->exp);
7997 tvp = tswapal(sel->tvp);
7998 unlock_user_struct(sel, arg1, 0);
7999 ret = do_select(nsel, inp, outp, exp, tvp);
8000 }
8001 #endif
8002 break;
8003 #endif
8004 #ifdef TARGET_NR_pselect6
8005 case TARGET_NR_pselect6:
8006 {
8007 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8008 fd_set rfds, wfds, efds;
8009 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8010 struct timespec ts, *ts_ptr;
8011
8012 /*
8013 * The 6th arg is actually two args smashed together,
8014 * so we cannot use the C library.
8015 */
8016 sigset_t set;
8017 struct {
8018 sigset_t *set;
8019 size_t size;
8020 } sig, *sig_ptr;
8021
8022 abi_ulong arg_sigset, arg_sigsize, *arg7;
8023 target_sigset_t *target_sigset;
8024
8025 n = arg1;
8026 rfd_addr = arg2;
8027 wfd_addr = arg3;
8028 efd_addr = arg4;
8029 ts_addr = arg5;
8030
8031 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8032 if (ret) {
8033 goto fail;
8034 }
8035 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8036 if (ret) {
8037 goto fail;
8038 }
8039 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8040 if (ret) {
8041 goto fail;
8042 }
8043
8044 /*
8045 * This takes a timespec, and not a timeval, so we cannot
8046 * use the do_select() helper ...
8047 */
8048 if (ts_addr) {
8049 if (target_to_host_timespec(&ts, ts_addr)) {
8050 goto efault;
8051 }
8052 ts_ptr = &ts;
8053 } else {
8054 ts_ptr = NULL;
8055 }
8056
8057 /* Extract the two packed args for the sigset */
8058 if (arg6) {
8059 sig_ptr = &sig;
8060 sig.size = SIGSET_T_SIZE;
8061
8062 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8063 if (!arg7) {
8064 goto efault;
8065 }
8066 arg_sigset = tswapal(arg7[0]);
8067 arg_sigsize = tswapal(arg7[1]);
8068 unlock_user(arg7, arg6, 0);
8069
8070 if (arg_sigset) {
8071 sig.set = &set;
8072 if (arg_sigsize != sizeof(*target_sigset)) {
8073 /* Like the kernel, we enforce correct size sigsets */
8074 ret = -TARGET_EINVAL;
8075 goto fail;
8076 }
8077 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8078 sizeof(*target_sigset), 1);
8079 if (!target_sigset) {
8080 goto efault;
8081 }
8082 target_to_host_sigset(&set, target_sigset);
8083 unlock_user(target_sigset, arg_sigset, 0);
8084 } else {
8085 sig.set = NULL;
8086 }
8087 } else {
8088 sig_ptr = NULL;
8089 }
8090
8091 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8092 ts_ptr, sig_ptr));
8093
8094 if (!is_error(ret)) {
8095 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8096 goto efault;
8097 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8098 goto efault;
8099 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8100 goto efault;
8101
8102 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8103 goto efault;
8104 }
8105 }
8106 break;
8107 #endif
8108 #ifdef TARGET_NR_symlink
8109 case TARGET_NR_symlink:
8110 {
8111 void *p2;
8112 p = lock_user_string(arg1);
8113 p2 = lock_user_string(arg2);
8114 if (!p || !p2)
8115 ret = -TARGET_EFAULT;
8116 else
8117 ret = get_errno(symlink(p, p2));
8118 unlock_user(p2, arg2, 0);
8119 unlock_user(p, arg1, 0);
8120 }
8121 break;
8122 #endif
8123 #if defined(TARGET_NR_symlinkat)
8124 case TARGET_NR_symlinkat:
8125 {
8126 void *p2;
8127 p = lock_user_string(arg1);
8128 p2 = lock_user_string(arg3);
8129 if (!p || !p2)
8130 ret = -TARGET_EFAULT;
8131 else
8132 ret = get_errno(symlinkat(p, arg2, p2));
8133 unlock_user(p2, arg3, 0);
8134 unlock_user(p, arg1, 0);
8135 }
8136 break;
8137 #endif
8138 #ifdef TARGET_NR_oldlstat
8139 case TARGET_NR_oldlstat:
8140 goto unimplemented;
8141 #endif
8142 #ifdef TARGET_NR_readlink
8143 case TARGET_NR_readlink:
8144 {
8145 void *p2;
8146 p = lock_user_string(arg1);
8147 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8148 if (!p || !p2) {
8149 ret = -TARGET_EFAULT;
8150 } else if (!arg3) {
8151 /* Short circuit this for the magic exe check. */
8152 ret = -TARGET_EINVAL;
8153 } else if (is_proc_myself((const char *)p, "exe")) {
8154 char real[PATH_MAX], *temp;
8155 temp = realpath(exec_path, real);
8156 /* Return value is # of bytes that we wrote to the buffer. */
8157 if (temp == NULL) {
8158 ret = get_errno(-1);
8159 } else {
8160 /* Don't worry about sign mismatch as earlier mapping
8161 * logic would have thrown a bad address error. */
8162 ret = MIN(strlen(real), arg3);
8163 /* We cannot NUL terminate the string. */
8164 memcpy(p2, real, ret);
8165 }
8166 } else {
8167 ret = get_errno(readlink(path(p), p2, arg3));
8168 }
8169 unlock_user(p2, arg2, ret);
8170 unlock_user(p, arg1, 0);
8171 }
8172 break;
8173 #endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            /* arg1 = dirfd, arg2 = pathname, arg3 = buf, arg4 = bufsiz.
             * Mirrors the TARGET_NR_readlink case above, including the
             * special handling for /proc/self/exe.
             */
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    /* Don't use 'real' here: realpath() failed so it may
                     * be uninitialized; report the errno instead.
                     */
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg4);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
8195 #ifdef TARGET_NR_uselib
8196 case TARGET_NR_uselib:
8197 goto unimplemented;
8198 #endif
8199 #ifdef TARGET_NR_swapon
8200 case TARGET_NR_swapon:
8201 if (!(p = lock_user_string(arg1)))
8202 goto efault;
8203 ret = get_errno(swapon(p, arg2));
8204 unlock_user(p, arg1, 0);
8205 break;
8206 #endif
8207 case TARGET_NR_reboot:
8208 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8209 /* arg4 must be ignored in all other cases */
8210 p = lock_user_string(arg4);
8211 if (!p) {
8212 goto efault;
8213 }
8214 ret = get_errno(reboot(arg1, arg2, arg3, p));
8215 unlock_user(p, arg4, 0);
8216 } else {
8217 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8218 }
8219 break;
8220 #ifdef TARGET_NR_readdir
8221 case TARGET_NR_readdir:
8222 goto unimplemented;
8223 #endif
8224 #ifdef TARGET_NR_mmap
8225 case TARGET_NR_mmap:
8226 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8227 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8228 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8229 || defined(TARGET_S390X)
8230 {
8231 abi_ulong *v;
8232 abi_ulong v1, v2, v3, v4, v5, v6;
8233 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8234 goto efault;
8235 v1 = tswapal(v[0]);
8236 v2 = tswapal(v[1]);
8237 v3 = tswapal(v[2]);
8238 v4 = tswapal(v[3]);
8239 v5 = tswapal(v[4]);
8240 v6 = tswapal(v[5]);
8241 unlock_user(v, arg1, 0);
8242 ret = get_errno(target_mmap(v1, v2, v3,
8243 target_to_host_bitmask(v4, mmap_flags_tbl),
8244 v5, v6));
8245 }
8246 #else
8247 ret = get_errno(target_mmap(arg1, arg2, arg3,
8248 target_to_host_bitmask(arg4, mmap_flags_tbl),
8249 arg5,
8250 arg6));
8251 #endif
8252 break;
8253 #endif
8254 #ifdef TARGET_NR_mmap2
8255 case TARGET_NR_mmap2:
8256 #ifndef MMAP_SHIFT
8257 #define MMAP_SHIFT 12
8258 #endif
8259 ret = get_errno(target_mmap(arg1, arg2, arg3,
8260 target_to_host_bitmask(arg4, mmap_flags_tbl),
8261 arg5,
8262 arg6 << MMAP_SHIFT));
8263 break;
8264 #endif
8265 case TARGET_NR_munmap:
8266 ret = get_errno(target_munmap(arg1, arg2));
8267 break;
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.
             * When the guest requests PROT_GROWSDOWN on an address inside
             * the guest stack region [stack_limit, start_stack], strip
             * the flag and widen the range so it starts at stack_limit —
             * i.e. apply the protection change to the whole grow-down
             * area that the kernel would have covered.  NOTE(review):
             * presumably this is because target_mprotect() does not
             * implement PROT_GROWSDOWN itself — confirm against
             * target_mprotect's implementation.
             */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                /* Keep the same end address while moving the start down
                 * to the stack limit.
                 */
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
8282 #ifdef TARGET_NR_mremap
8283 case TARGET_NR_mremap:
8284 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8285 break;
8286 #endif
8287 /* ??? msync/mlock/munlock are broken for softmmu. */
8288 #ifdef TARGET_NR_msync
8289 case TARGET_NR_msync:
8290 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8291 break;
8292 #endif
8293 #ifdef TARGET_NR_mlock
8294 case TARGET_NR_mlock:
8295 ret = get_errno(mlock(g2h(arg1), arg2));
8296 break;
8297 #endif
8298 #ifdef TARGET_NR_munlock
8299 case TARGET_NR_munlock:
8300 ret = get_errno(munlock(g2h(arg1), arg2));
8301 break;
8302 #endif
8303 #ifdef TARGET_NR_mlockall
8304 case TARGET_NR_mlockall:
8305 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8306 break;
8307 #endif
8308 #ifdef TARGET_NR_munlockall
8309 case TARGET_NR_munlockall:
8310 ret = get_errno(munlockall());
8311 break;
8312 #endif
8313 case TARGET_NR_truncate:
8314 if (!(p = lock_user_string(arg1)))
8315 goto efault;
8316 ret = get_errno(truncate(p, arg2));
8317 unlock_user(p, arg1, 0);
8318 break;
8319 case TARGET_NR_ftruncate:
8320 ret = get_errno(ftruncate(arg1, arg2));
8321 break;
8322 case TARGET_NR_fchmod:
8323 ret = get_errno(fchmod(arg1, arg2));
8324 break;
8325 #if defined(TARGET_NR_fchmodat)
8326 case TARGET_NR_fchmodat:
8327 if (!(p = lock_user_string(arg2)))
8328 goto efault;
8329 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8330 unlock_user(p, arg2, 0);
8331 break;
8332 #endif
8333 case TARGET_NR_getpriority:
8334 /* Note that negative values are valid for getpriority, so we must
8335 differentiate based on errno settings. */
8336 errno = 0;
8337 ret = getpriority(arg1, arg2);
8338 if (ret == -1 && errno != 0) {
8339 ret = -host_to_target_errno(errno);
8340 break;
8341 }
8342 #ifdef TARGET_ALPHA
8343 /* Return value is the unbiased priority. Signal no error. */
8344 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8345 #else
8346 /* Return value is a biased priority to avoid negative numbers. */
8347 ret = 20 - ret;
8348 #endif
8349 break;
8350 case TARGET_NR_setpriority:
8351 ret = get_errno(setpriority(arg1, arg2, arg3));
8352 break;
8353 #ifdef TARGET_NR_profil
8354 case TARGET_NR_profil:
8355 goto unimplemented;
8356 #endif
8357 case TARGET_NR_statfs:
8358 if (!(p = lock_user_string(arg1)))
8359 goto efault;
8360 ret = get_errno(statfs(path(p), &stfs));
8361 unlock_user(p, arg1, 0);
8362 convert_statfs:
8363 if (!is_error(ret)) {
8364 struct target_statfs *target_stfs;
8365
8366 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8367 goto efault;
8368 __put_user(stfs.f_type, &target_stfs->f_type);
8369 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8370 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8371 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8372 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8373 __put_user(stfs.f_files, &target_stfs->f_files);
8374 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8375 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8376 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8377 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8378 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8379 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8380 unlock_user_struct(target_stfs, arg2, 1);
8381 }
8382 break;
8383 case TARGET_NR_fstatfs:
8384 ret = get_errno(fstatfs(arg1, &stfs));
8385 goto convert_statfs;
8386 #ifdef TARGET_NR_statfs64
8387 case TARGET_NR_statfs64:
8388 if (!(p = lock_user_string(arg1)))
8389 goto efault;
8390 ret = get_errno(statfs(path(p), &stfs));
8391 unlock_user(p, arg1, 0);
8392 convert_statfs64:
8393 if (!is_error(ret)) {
8394 struct target_statfs64 *target_stfs;
8395
8396 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8397 goto efault;
8398 __put_user(stfs.f_type, &target_stfs->f_type);
8399 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8400 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8401 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8402 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8403 __put_user(stfs.f_files, &target_stfs->f_files);
8404 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8405 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8406 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8407 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8408 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8409 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8410 unlock_user_struct(target_stfs, arg3, 1);
8411 }
8412 break;
8413 case TARGET_NR_fstatfs64:
8414 ret = get_errno(fstatfs(arg1, &stfs));
8415 goto convert_statfs64;
8416 #endif
8417 #ifdef TARGET_NR_ioperm
8418 case TARGET_NR_ioperm:
8419 goto unimplemented;
8420 #endif
8421 #ifdef TARGET_NR_socketcall
8422 case TARGET_NR_socketcall:
8423 ret = do_socketcall(arg1, arg2);
8424 break;
8425 #endif
8426 #ifdef TARGET_NR_accept
8427 case TARGET_NR_accept:
8428 ret = do_accept4(arg1, arg2, arg3, 0);
8429 break;
8430 #endif
8431 #ifdef TARGET_NR_accept4
8432 case TARGET_NR_accept4:
8433 ret = do_accept4(arg1, arg2, arg3, arg4);
8434 break;
8435 #endif
8436 #ifdef TARGET_NR_bind
8437 case TARGET_NR_bind:
8438 ret = do_bind(arg1, arg2, arg3);
8439 break;
8440 #endif
8441 #ifdef TARGET_NR_connect
8442 case TARGET_NR_connect:
8443 ret = do_connect(arg1, arg2, arg3);
8444 break;
8445 #endif
8446 #ifdef TARGET_NR_getpeername
8447 case TARGET_NR_getpeername:
8448 ret = do_getpeername(arg1, arg2, arg3);
8449 break;
8450 #endif
8451 #ifdef TARGET_NR_getsockname
8452 case TARGET_NR_getsockname:
8453 ret = do_getsockname(arg1, arg2, arg3);
8454 break;
8455 #endif
8456 #ifdef TARGET_NR_getsockopt
8457 case TARGET_NR_getsockopt:
8458 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8459 break;
8460 #endif
8461 #ifdef TARGET_NR_listen
8462 case TARGET_NR_listen:
8463 ret = get_errno(listen(arg1, arg2));
8464 break;
8465 #endif
8466 #ifdef TARGET_NR_recv
8467 case TARGET_NR_recv:
8468 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8469 break;
8470 #endif
8471 #ifdef TARGET_NR_recvfrom
8472 case TARGET_NR_recvfrom:
8473 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8474 break;
8475 #endif
8476 #ifdef TARGET_NR_recvmsg
8477 case TARGET_NR_recvmsg:
8478 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8479 break;
8480 #endif
8481 #ifdef TARGET_NR_send
8482 case TARGET_NR_send:
8483 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8484 break;
8485 #endif
8486 #ifdef TARGET_NR_sendmsg
8487 case TARGET_NR_sendmsg:
8488 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8489 break;
8490 #endif
8491 #ifdef TARGET_NR_sendmmsg
8492 case TARGET_NR_sendmmsg:
8493 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8494 break;
8495 case TARGET_NR_recvmmsg:
8496 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8497 break;
8498 #endif
8499 #ifdef TARGET_NR_sendto
8500 case TARGET_NR_sendto:
8501 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8502 break;
8503 #endif
8504 #ifdef TARGET_NR_shutdown
8505 case TARGET_NR_shutdown:
8506 ret = get_errno(shutdown(arg1, arg2));
8507 break;
8508 #endif
8509 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8510 case TARGET_NR_getrandom:
8511 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8512 if (!p) {
8513 goto efault;
8514 }
8515 ret = get_errno(getrandom(p, arg2, arg3));
8516 unlock_user(p, arg1, ret);
8517 break;
8518 #endif
8519 #ifdef TARGET_NR_socket
8520 case TARGET_NR_socket:
8521 ret = do_socket(arg1, arg2, arg3);
8522 fd_trans_unregister(ret);
8523 break;
8524 #endif
8525 #ifdef TARGET_NR_socketpair
8526 case TARGET_NR_socketpair:
8527 ret = do_socketpair(arg1, arg2, arg3, arg4);
8528 break;
8529 #endif
8530 #ifdef TARGET_NR_setsockopt
8531 case TARGET_NR_setsockopt:
8532 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8533 break;
8534 #endif
8535
8536 case TARGET_NR_syslog:
8537 if (!(p = lock_user_string(arg2)))
8538 goto efault;
8539 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8540 unlock_user(p, arg2, 0);
8541 break;
8542
8543 case TARGET_NR_setitimer:
8544 {
8545 struct itimerval value, ovalue, *pvalue;
8546
8547 if (arg2) {
8548 pvalue = &value;
8549 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8550 || copy_from_user_timeval(&pvalue->it_value,
8551 arg2 + sizeof(struct target_timeval)))
8552 goto efault;
8553 } else {
8554 pvalue = NULL;
8555 }
8556 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8557 if (!is_error(ret) && arg3) {
8558 if (copy_to_user_timeval(arg3,
8559 &ovalue.it_interval)
8560 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8561 &ovalue.it_value))
8562 goto efault;
8563 }
8564 }
8565 break;
8566 case TARGET_NR_getitimer:
8567 {
8568 struct itimerval value;
8569
8570 ret = get_errno(getitimer(arg1, &value));
8571 if (!is_error(ret) && arg2) {
8572 if (copy_to_user_timeval(arg2,
8573 &value.it_interval)
8574 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8575 &value.it_value))
8576 goto efault;
8577 }
8578 }
8579 break;
8580 #ifdef TARGET_NR_stat
8581 case TARGET_NR_stat:
8582 if (!(p = lock_user_string(arg1)))
8583 goto efault;
8584 ret = get_errno(stat(path(p), &st));
8585 unlock_user(p, arg1, 0);
8586 goto do_stat;
8587 #endif
8588 #ifdef TARGET_NR_lstat
8589 case TARGET_NR_lstat:
8590 if (!(p = lock_user_string(arg1)))
8591 goto efault;
8592 ret = get_errno(lstat(path(p), &st));
8593 unlock_user(p, arg1, 0);
8594 goto do_stat;
8595 #endif
8596 case TARGET_NR_fstat:
8597 {
8598 ret = get_errno(fstat(arg1, &st));
8599 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8600 do_stat:
8601 #endif
8602 if (!is_error(ret)) {
8603 struct target_stat *target_st;
8604
8605 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8606 goto efault;
8607 memset(target_st, 0, sizeof(*target_st));
8608 __put_user(st.st_dev, &target_st->st_dev);
8609 __put_user(st.st_ino, &target_st->st_ino);
8610 __put_user(st.st_mode, &target_st->st_mode);
8611 __put_user(st.st_uid, &target_st->st_uid);
8612 __put_user(st.st_gid, &target_st->st_gid);
8613 __put_user(st.st_nlink, &target_st->st_nlink);
8614 __put_user(st.st_rdev, &target_st->st_rdev);
8615 __put_user(st.st_size, &target_st->st_size);
8616 __put_user(st.st_blksize, &target_st->st_blksize);
8617 __put_user(st.st_blocks, &target_st->st_blocks);
8618 __put_user(st.st_atime, &target_st->target_st_atime);
8619 __put_user(st.st_mtime, &target_st->target_st_mtime);
8620 __put_user(st.st_ctime, &target_st->target_st_ctime);
8621 unlock_user_struct(target_st, arg2, 1);
8622 }
8623 }
8624 break;
8625 #ifdef TARGET_NR_olduname
8626 case TARGET_NR_olduname:
8627 goto unimplemented;
8628 #endif
8629 #ifdef TARGET_NR_iopl
8630 case TARGET_NR_iopl:
8631 goto unimplemented;
8632 #endif
8633 case TARGET_NR_vhangup:
8634 ret = get_errno(vhangup());
8635 break;
8636 #ifdef TARGET_NR_idle
8637 case TARGET_NR_idle:
8638 goto unimplemented;
8639 #endif
8640 #ifdef TARGET_NR_syscall
8641 case TARGET_NR_syscall:
8642 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8643 arg6, arg7, arg8, 0);
8644 break;
8645 #endif
8646 case TARGET_NR_wait4:
8647 {
8648 int status;
8649 abi_long status_ptr = arg2;
8650 struct rusage rusage, *rusage_ptr;
8651 abi_ulong target_rusage = arg4;
8652 abi_long rusage_err;
8653 if (target_rusage)
8654 rusage_ptr = &rusage;
8655 else
8656 rusage_ptr = NULL;
8657 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8658 if (!is_error(ret)) {
8659 if (status_ptr && ret) {
8660 status = host_to_target_waitstatus(status);
8661 if (put_user_s32(status, status_ptr))
8662 goto efault;
8663 }
8664 if (target_rusage) {
8665 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8666 if (rusage_err) {
8667 ret = rusage_err;
8668 }
8669 }
8670 }
8671 }
8672 break;
8673 #ifdef TARGET_NR_swapoff
8674 case TARGET_NR_swapoff:
8675 if (!(p = lock_user_string(arg1)))
8676 goto efault;
8677 ret = get_errno(swapoff(p));
8678 unlock_user(p, arg1, 0);
8679 break;
8680 #endif
8681 case TARGET_NR_sysinfo:
8682 {
8683 struct target_sysinfo *target_value;
8684 struct sysinfo value;
8685 ret = get_errno(sysinfo(&value));
8686 if (!is_error(ret) && arg1)
8687 {
8688 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8689 goto efault;
8690 __put_user(value.uptime, &target_value->uptime);
8691 __put_user(value.loads[0], &target_value->loads[0]);
8692 __put_user(value.loads[1], &target_value->loads[1]);
8693 __put_user(value.loads[2], &target_value->loads[2]);
8694 __put_user(value.totalram, &target_value->totalram);
8695 __put_user(value.freeram, &target_value->freeram);
8696 __put_user(value.sharedram, &target_value->sharedram);
8697 __put_user(value.bufferram, &target_value->bufferram);
8698 __put_user(value.totalswap, &target_value->totalswap);
8699 __put_user(value.freeswap, &target_value->freeswap);
8700 __put_user(value.procs, &target_value->procs);
8701 __put_user(value.totalhigh, &target_value->totalhigh);
8702 __put_user(value.freehigh, &target_value->freehigh);
8703 __put_user(value.mem_unit, &target_value->mem_unit);
8704 unlock_user_struct(target_value, arg1, 1);
8705 }
8706 }
8707 break;
8708 #ifdef TARGET_NR_ipc
8709 case TARGET_NR_ipc:
8710 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8711 break;
8712 #endif
8713 #ifdef TARGET_NR_semget
8714 case TARGET_NR_semget:
8715 ret = get_errno(semget(arg1, arg2, arg3));
8716 break;
8717 #endif
8718 #ifdef TARGET_NR_semop
8719 case TARGET_NR_semop:
8720 ret = do_semop(arg1, arg2, arg3);
8721 break;
8722 #endif
8723 #ifdef TARGET_NR_semctl
8724 case TARGET_NR_semctl:
8725 ret = do_semctl(arg1, arg2, arg3, arg4);
8726 break;
8727 #endif
8728 #ifdef TARGET_NR_msgctl
8729 case TARGET_NR_msgctl:
8730 ret = do_msgctl(arg1, arg2, arg3);
8731 break;
8732 #endif
8733 #ifdef TARGET_NR_msgget
8734 case TARGET_NR_msgget:
8735 ret = get_errno(msgget(arg1, arg2));
8736 break;
8737 #endif
8738 #ifdef TARGET_NR_msgrcv
8739 case TARGET_NR_msgrcv:
8740 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8741 break;
8742 #endif
8743 #ifdef TARGET_NR_msgsnd
8744 case TARGET_NR_msgsnd:
8745 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8746 break;
8747 #endif
8748 #ifdef TARGET_NR_shmget
8749 case TARGET_NR_shmget:
8750 ret = get_errno(shmget(arg1, arg2, arg3));
8751 break;
8752 #endif
8753 #ifdef TARGET_NR_shmctl
8754 case TARGET_NR_shmctl:
8755 ret = do_shmctl(arg1, arg2, arg3);
8756 break;
8757 #endif
8758 #ifdef TARGET_NR_shmat
8759 case TARGET_NR_shmat:
8760 ret = do_shmat(arg1, arg2, arg3);
8761 break;
8762 #endif
8763 #ifdef TARGET_NR_shmdt
8764 case TARGET_NR_shmdt:
8765 ret = do_shmdt(arg1);
8766 break;
8767 #endif
8768 case TARGET_NR_fsync:
8769 ret = get_errno(fsync(arg1));
8770 break;
8771 case TARGET_NR_clone:
8772 /* Linux manages to have three different orderings for its
8773 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8774 * match the kernel's CONFIG_CLONE_* settings.
8775 * Microblaze is further special in that it uses a sixth
8776 * implicit argument to clone for the TLS pointer.
8777 */
8778 #if defined(TARGET_MICROBLAZE)
8779 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8780 #elif defined(TARGET_CLONE_BACKWARDS)
8781 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8782 #elif defined(TARGET_CLONE_BACKWARDS2)
8783 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8784 #else
8785 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8786 #endif
8787 break;
8788 #ifdef __NR_exit_group
8789 /* new thread calls */
8790 case TARGET_NR_exit_group:
8791 #ifdef TARGET_GPROF
8792 _mcleanup();
8793 #endif
8794 gdb_exit(cpu_env, arg1);
8795 ret = get_errno(exit_group(arg1));
8796 break;
8797 #endif
8798 case TARGET_NR_setdomainname:
8799 if (!(p = lock_user_string(arg1)))
8800 goto efault;
8801 ret = get_errno(setdomainname(p, arg2));
8802 unlock_user(p, arg1, 0);
8803 break;
8804 case TARGET_NR_uname:
8805 /* no need to transcode because we use the linux syscall */
8806 {
8807 struct new_utsname * buf;
8808
8809 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8810 goto efault;
8811 ret = get_errno(sys_uname(buf));
8812 if (!is_error(ret)) {
8813 /* Overwrite the native machine name with whatever is being
8814 emulated. */
8815 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8816 /* Allow the user to override the reported release. */
8817 if (qemu_uname_release && *qemu_uname_release)
8818 strcpy (buf->release, qemu_uname_release);
8819 }
8820 unlock_user_struct(buf, arg1, 1);
8821 }
8822 break;
8823 #ifdef TARGET_I386
8824 case TARGET_NR_modify_ldt:
8825 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8826 break;
8827 #if !defined(TARGET_X86_64)
8828 case TARGET_NR_vm86old:
8829 goto unimplemented;
8830 case TARGET_NR_vm86:
8831 ret = do_vm86(cpu_env, arg1, arg2);
8832 break;
8833 #endif
8834 #endif
8835 case TARGET_NR_adjtimex:
8836 goto unimplemented;
8837 #ifdef TARGET_NR_create_module
8838 case TARGET_NR_create_module:
8839 #endif
8840 case TARGET_NR_init_module:
8841 case TARGET_NR_delete_module:
8842 #ifdef TARGET_NR_get_kernel_syms
8843 case TARGET_NR_get_kernel_syms:
8844 #endif
8845 goto unimplemented;
8846 case TARGET_NR_quotactl:
8847 goto unimplemented;
8848 case TARGET_NR_getpgid:
8849 ret = get_errno(getpgid(arg1));
8850 break;
8851 case TARGET_NR_fchdir:
8852 ret = get_errno(fchdir(arg1));
8853 break;
8854 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8855 case TARGET_NR_bdflush:
8856 goto unimplemented;
8857 #endif
8858 #ifdef TARGET_NR_sysfs
8859 case TARGET_NR_sysfs:
8860 goto unimplemented;
8861 #endif
8862 case TARGET_NR_personality:
8863 ret = get_errno(personality(arg1));
8864 break;
8865 #ifdef TARGET_NR_afs_syscall
8866 case TARGET_NR_afs_syscall:
8867 goto unimplemented;
8868 #endif
8869 #ifdef TARGET_NR__llseek /* Not on alpha */
8870 case TARGET_NR__llseek:
8871 {
8872 int64_t res;
8873 #if !defined(__NR_llseek)
8874 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8875 if (res == -1) {
8876 ret = get_errno(res);
8877 } else {
8878 ret = 0;
8879 }
8880 #else
8881 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8882 #endif
8883 if ((ret == 0) && put_user_s64(res, arg4)) {
8884 goto efault;
8885 }
8886 }
8887 break;
8888 #endif
8889 #ifdef TARGET_NR_getdents
8890 case TARGET_NR_getdents:
8891 #ifdef __NR_getdents
8892 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8893 {
8894 struct target_dirent *target_dirp;
8895 struct linux_dirent *dirp;
8896 abi_long count = arg3;
8897
8898 dirp = g_try_malloc(count);
8899 if (!dirp) {
8900 ret = -TARGET_ENOMEM;
8901 goto fail;
8902 }
8903
8904 ret = get_errno(sys_getdents(arg1, dirp, count));
8905 if (!is_error(ret)) {
8906 struct linux_dirent *de;
8907 struct target_dirent *tde;
8908 int len = ret;
8909 int reclen, treclen;
8910 int count1, tnamelen;
8911
8912 count1 = 0;
8913 de = dirp;
8914 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8915 goto efault;
8916 tde = target_dirp;
8917 while (len > 0) {
8918 reclen = de->d_reclen;
8919 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8920 assert(tnamelen >= 0);
8921 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8922 assert(count1 + treclen <= count);
8923 tde->d_reclen = tswap16(treclen);
8924 tde->d_ino = tswapal(de->d_ino);
8925 tde->d_off = tswapal(de->d_off);
8926 memcpy(tde->d_name, de->d_name, tnamelen);
8927 de = (struct linux_dirent *)((char *)de + reclen);
8928 len -= reclen;
8929 tde = (struct target_dirent *)((char *)tde + treclen);
8930 count1 += treclen;
8931 }
8932 ret = count1;
8933 unlock_user(target_dirp, arg2, ret);
8934 }
8935 g_free(dirp);
8936 }
8937 #else
8938 {
8939 struct linux_dirent *dirp;
8940 abi_long count = arg3;
8941
8942 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8943 goto efault;
8944 ret = get_errno(sys_getdents(arg1, dirp, count));
8945 if (!is_error(ret)) {
8946 struct linux_dirent *de;
8947 int len = ret;
8948 int reclen;
8949 de = dirp;
8950 while (len > 0) {
8951 reclen = de->d_reclen;
8952 if (reclen > len)
8953 break;
8954 de->d_reclen = tswap16(reclen);
8955 tswapls(&de->d_ino);
8956 tswapls(&de->d_off);
8957 de = (struct linux_dirent *)((char *)de + reclen);
8958 len -= reclen;
8959 }
8960 }
8961 unlock_user(dirp, arg2, ret);
8962 }
8963 #endif
8964 #else
8965 /* Implement getdents in terms of getdents64 */
8966 {
8967 struct linux_dirent64 *dirp;
8968 abi_long count = arg3;
8969
8970 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8971 if (!dirp) {
8972 goto efault;
8973 }
8974 ret = get_errno(sys_getdents64(arg1, dirp, count));
8975 if (!is_error(ret)) {
8976 /* Convert the dirent64 structs to target dirent. We do this
8977 * in-place, since we can guarantee that a target_dirent is no
8978 * larger than a dirent64; however this means we have to be
8979 * careful to read everything before writing in the new format.
8980 */
8981 struct linux_dirent64 *de;
8982 struct target_dirent *tde;
8983 int len = ret;
8984 int tlen = 0;
8985
8986 de = dirp;
8987 tde = (struct target_dirent *)dirp;
8988 while (len > 0) {
8989 int namelen, treclen;
8990 int reclen = de->d_reclen;
8991 uint64_t ino = de->d_ino;
8992 int64_t off = de->d_off;
8993 uint8_t type = de->d_type;
8994
8995 namelen = strlen(de->d_name);
8996 treclen = offsetof(struct target_dirent, d_name)
8997 + namelen + 2;
8998 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8999
9000 memmove(tde->d_name, de->d_name, namelen + 1);
9001 tde->d_ino = tswapal(ino);
9002 tde->d_off = tswapal(off);
9003 tde->d_reclen = tswap16(treclen);
9004 /* The target_dirent type is in what was formerly a padding
9005 * byte at the end of the structure:
9006 */
9007 *(((char *)tde) + treclen - 1) = type;
9008
9009 de = (struct linux_dirent64 *)((char *)de + reclen);
9010 tde = (struct target_dirent *)((char *)tde + treclen);
9011 len -= reclen;
9012 tlen += treclen;
9013 }
9014 ret = tlen;
9015 }
9016 unlock_user(dirp, arg2, ret);
9017 }
9018 #endif
9019 break;
9020 #endif /* TARGET_NR_getdents */
9021 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9022 case TARGET_NR_getdents64:
9023 {
9024 struct linux_dirent64 *dirp;
9025 abi_long count = arg3;
9026 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9027 goto efault;
9028 ret = get_errno(sys_getdents64(arg1, dirp, count));
9029 if (!is_error(ret)) {
9030 struct linux_dirent64 *de;
9031 int len = ret;
9032 int reclen;
9033 de = dirp;
9034 while (len > 0) {
9035 reclen = de->d_reclen;
9036 if (reclen > len)
9037 break;
9038 de->d_reclen = tswap16(reclen);
9039 tswap64s((uint64_t *)&de->d_ino);
9040 tswap64s((uint64_t *)&de->d_off);
9041 de = (struct linux_dirent64 *)((char *)de + reclen);
9042 len -= reclen;
9043 }
9044 }
9045 unlock_user(dirp, arg2, ret);
9046 }
9047 break;
9048 #endif /* TARGET_NR_getdents64 */
9049 #if defined(TARGET_NR__newselect)
9050 case TARGET_NR__newselect:
9051 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9052 break;
9053 #endif
9054 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9055 # ifdef TARGET_NR_poll
9056 case TARGET_NR_poll:
9057 # endif
9058 # ifdef TARGET_NR_ppoll
9059 case TARGET_NR_ppoll:
9060 # endif
9061 {
9062 struct target_pollfd *target_pfd;
9063 unsigned int nfds = arg2;
9064 struct pollfd *pfd;
9065 unsigned int i;
9066
9067 pfd = NULL;
9068 target_pfd = NULL;
9069 if (nfds) {
9070 target_pfd = lock_user(VERIFY_WRITE, arg1,
9071 sizeof(struct target_pollfd) * nfds, 1);
9072 if (!target_pfd) {
9073 goto efault;
9074 }
9075
9076 pfd = alloca(sizeof(struct pollfd) * nfds);
9077 for (i = 0; i < nfds; i++) {
9078 pfd[i].fd = tswap32(target_pfd[i].fd);
9079 pfd[i].events = tswap16(target_pfd[i].events);
9080 }
9081 }
9082
9083 switch (num) {
9084 # ifdef TARGET_NR_ppoll
9085 case TARGET_NR_ppoll:
9086 {
9087 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9088 target_sigset_t *target_set;
9089 sigset_t _set, *set = &_set;
9090
9091 if (arg3) {
9092 if (target_to_host_timespec(timeout_ts, arg3)) {
9093 unlock_user(target_pfd, arg1, 0);
9094 goto efault;
9095 }
9096 } else {
9097 timeout_ts = NULL;
9098 }
9099
9100 if (arg4) {
9101 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9102 if (!target_set) {
9103 unlock_user(target_pfd, arg1, 0);
9104 goto efault;
9105 }
9106 target_to_host_sigset(set, target_set);
9107 } else {
9108 set = NULL;
9109 }
9110
9111 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9112 set, SIGSET_T_SIZE));
9113
9114 if (!is_error(ret) && arg3) {
9115 host_to_target_timespec(arg3, timeout_ts);
9116 }
9117 if (arg4) {
9118 unlock_user(target_set, arg4, 0);
9119 }
9120 break;
9121 }
9122 # endif
9123 # ifdef TARGET_NR_poll
9124 case TARGET_NR_poll:
9125 {
9126 struct timespec ts, *pts;
9127
9128 if (arg3 >= 0) {
9129 /* Convert ms to secs, ns */
9130 ts.tv_sec = arg3 / 1000;
9131 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9132 pts = &ts;
9133 } else {
9134 /* -ve poll() timeout means "infinite" */
9135 pts = NULL;
9136 }
9137 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9138 break;
9139 }
9140 # endif
9141 default:
9142 g_assert_not_reached();
9143 }
9144
9145 if (!is_error(ret)) {
9146 for(i = 0; i < nfds; i++) {
9147 target_pfd[i].revents = tswap16(pfd[i].revents);
9148 }
9149 }
9150 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9151 }
9152 break;
9153 #endif
9154 case TARGET_NR_flock:
9155 /* NOTE: the flock constant seems to be the same for every
9156 Linux platform */
9157 ret = get_errno(safe_flock(arg1, arg2));
9158 break;
9159 case TARGET_NR_readv:
9160 {
9161 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9162 if (vec != NULL) {
9163 ret = get_errno(safe_readv(arg1, vec, arg3));
9164 unlock_iovec(vec, arg2, arg3, 1);
9165 } else {
9166 ret = -host_to_target_errno(errno);
9167 }
9168 }
9169 break;
9170 case TARGET_NR_writev:
9171 {
9172 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9173 if (vec != NULL) {
9174 ret = get_errno(safe_writev(arg1, vec, arg3));
9175 unlock_iovec(vec, arg2, arg3, 0);
9176 } else {
9177 ret = -host_to_target_errno(errno);
9178 }
9179 }
9180 break;
9181 case TARGET_NR_getsid:
9182 ret = get_errno(getsid(arg1));
9183 break;
9184 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9185 case TARGET_NR_fdatasync:
9186 ret = get_errno(fdatasync(arg1));
9187 break;
9188 #endif
9189 #ifdef TARGET_NR__sysctl
9190 case TARGET_NR__sysctl:
9191 /* We don't implement this, but ENOTDIR is always a safe
9192 return value. */
9193 ret = -TARGET_ENOTDIR;
9194 break;
9195 #endif
9196 case TARGET_NR_sched_getaffinity:
9197 {
9198 unsigned int mask_size;
9199 unsigned long *mask;
9200
9201 /*
9202 * sched_getaffinity needs multiples of ulong, so need to take
9203 * care of mismatches between target ulong and host ulong sizes.
9204 */
9205 if (arg2 & (sizeof(abi_ulong) - 1)) {
9206 ret = -TARGET_EINVAL;
9207 break;
9208 }
9209 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9210
9211 mask = alloca(mask_size);
9212 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9213
9214 if (!is_error(ret)) {
9215 if (ret > arg2) {
9216 /* More data returned than the caller's buffer will fit.
9217 * This only happens if sizeof(abi_long) < sizeof(long)
9218 * and the caller passed us a buffer holding an odd number
9219 * of abi_longs. If the host kernel is actually using the
9220 * extra 4 bytes then fail EINVAL; otherwise we can just
9221 * ignore them and only copy the interesting part.
9222 */
9223 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9224 if (numcpus > arg2 * 8) {
9225 ret = -TARGET_EINVAL;
9226 break;
9227 }
9228 ret = arg2;
9229 }
9230
9231 if (copy_to_user(arg3, mask, ret)) {
9232 goto efault;
9233 }
9234 }
9235 }
9236 break;
9237 case TARGET_NR_sched_setaffinity:
9238 {
9239 unsigned int mask_size;
9240 unsigned long *mask;
9241
9242 /*
9243 * sched_setaffinity needs multiples of ulong, so need to take
9244 * care of mismatches between target ulong and host ulong sizes.
9245 */
9246 if (arg2 & (sizeof(abi_ulong) - 1)) {
9247 ret = -TARGET_EINVAL;
9248 break;
9249 }
9250 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9251
9252 mask = alloca(mask_size);
9253 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9254 goto efault;
9255 }
9256 memcpy(mask, p, arg2);
9257 unlock_user_struct(p, arg2, 0);
9258
9259 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9260 }
9261 break;
9262 case TARGET_NR_sched_setparam:
9263 {
9264 struct sched_param *target_schp;
9265 struct sched_param schp;
9266
9267 if (arg2 == 0) {
9268 return -TARGET_EINVAL;
9269 }
9270 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9271 goto efault;
9272 schp.sched_priority = tswap32(target_schp->sched_priority);
9273 unlock_user_struct(target_schp, arg2, 0);
9274 ret = get_errno(sched_setparam(arg1, &schp));
9275 }
9276 break;
9277 case TARGET_NR_sched_getparam:
9278 {
9279 struct sched_param *target_schp;
9280 struct sched_param schp;
9281
9282 if (arg2 == 0) {
9283 return -TARGET_EINVAL;
9284 }
9285 ret = get_errno(sched_getparam(arg1, &schp));
9286 if (!is_error(ret)) {
9287 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9288 goto efault;
9289 target_schp->sched_priority = tswap32(schp.sched_priority);
9290 unlock_user_struct(target_schp, arg2, 1);
9291 }
9292 }
9293 break;
9294 case TARGET_NR_sched_setscheduler:
9295 {
9296 struct sched_param *target_schp;
9297 struct sched_param schp;
9298 if (arg3 == 0) {
9299 return -TARGET_EINVAL;
9300 }
9301 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9302 goto efault;
9303 schp.sched_priority = tswap32(target_schp->sched_priority);
9304 unlock_user_struct(target_schp, arg3, 0);
9305 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9306 }
9307 break;
9308 case TARGET_NR_sched_getscheduler:
9309 ret = get_errno(sched_getscheduler(arg1));
9310 break;
9311 case TARGET_NR_sched_yield:
9312 ret = get_errno(sched_yield());
9313 break;
9314 case TARGET_NR_sched_get_priority_max:
9315 ret = get_errno(sched_get_priority_max(arg1));
9316 break;
9317 case TARGET_NR_sched_get_priority_min:
9318 ret = get_errno(sched_get_priority_min(arg1));
9319 break;
9320 case TARGET_NR_sched_rr_get_interval:
9321 {
9322 struct timespec ts;
9323 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9324 if (!is_error(ret)) {
9325 ret = host_to_target_timespec(arg2, &ts);
9326 }
9327 }
9328 break;
9329 case TARGET_NR_nanosleep:
9330 {
9331 struct timespec req, rem;
9332 target_to_host_timespec(&req, arg1);
9333 ret = get_errno(safe_nanosleep(&req, &rem));
9334 if (is_error(ret) && arg2) {
9335 host_to_target_timespec(arg2, &rem);
9336 }
9337 }
9338 break;
9339 #ifdef TARGET_NR_query_module
9340 case TARGET_NR_query_module:
9341 goto unimplemented;
9342 #endif
9343 #ifdef TARGET_NR_nfsservctl
9344 case TARGET_NR_nfsservctl:
9345 goto unimplemented;
9346 #endif
9347 case TARGET_NR_prctl:
9348 switch (arg1) {
9349 case PR_GET_PDEATHSIG:
9350 {
9351 int deathsig;
9352 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9353 if (!is_error(ret) && arg2
9354 && put_user_ual(deathsig, arg2)) {
9355 goto efault;
9356 }
9357 break;
9358 }
9359 #ifdef PR_GET_NAME
9360 case PR_GET_NAME:
9361 {
9362 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9363 if (!name) {
9364 goto efault;
9365 }
9366 ret = get_errno(prctl(arg1, (unsigned long)name,
9367 arg3, arg4, arg5));
9368 unlock_user(name, arg2, 16);
9369 break;
9370 }
9371 case PR_SET_NAME:
9372 {
9373 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9374 if (!name) {
9375 goto efault;
9376 }
9377 ret = get_errno(prctl(arg1, (unsigned long)name,
9378 arg3, arg4, arg5));
9379 unlock_user(name, arg2, 0);
9380 break;
9381 }
9382 #endif
9383 default:
9384 /* Most prctl options have no pointer arguments */
9385 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9386 break;
9387 }
9388 break;
9389 #ifdef TARGET_NR_arch_prctl
9390 case TARGET_NR_arch_prctl:
9391 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9392 ret = do_arch_prctl(cpu_env, arg1, arg2);
9393 break;
9394 #else
9395 goto unimplemented;
9396 #endif
9397 #endif
9398 #ifdef TARGET_NR_pread64
9399 case TARGET_NR_pread64:
9400 if (regpairs_aligned(cpu_env)) {
9401 arg4 = arg5;
9402 arg5 = arg6;
9403 }
9404 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9405 goto efault;
9406 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9407 unlock_user(p, arg2, ret);
9408 break;
9409 case TARGET_NR_pwrite64:
9410 if (regpairs_aligned(cpu_env)) {
9411 arg4 = arg5;
9412 arg5 = arg6;
9413 }
9414 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9415 goto efault;
9416 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9417 unlock_user(p, arg2, 0);
9418 break;
9419 #endif
9420 case TARGET_NR_getcwd:
9421 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9422 goto efault;
9423 ret = get_errno(sys_getcwd1(p, arg2));
9424 unlock_user(p, arg1, ret);
9425 break;
9426 case TARGET_NR_capget:
9427 case TARGET_NR_capset:
9428 {
9429 struct target_user_cap_header *target_header;
9430 struct target_user_cap_data *target_data = NULL;
9431 struct __user_cap_header_struct header;
9432 struct __user_cap_data_struct data[2];
9433 struct __user_cap_data_struct *dataptr = NULL;
9434 int i, target_datalen;
9435 int data_items = 1;
9436
9437 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9438 goto efault;
9439 }
9440 header.version = tswap32(target_header->version);
9441 header.pid = tswap32(target_header->pid);
9442
9443 if (header.version != _LINUX_CAPABILITY_VERSION) {
9444 /* Version 2 and up takes pointer to two user_data structs */
9445 data_items = 2;
9446 }
9447
9448 target_datalen = sizeof(*target_data) * data_items;
9449
9450 if (arg2) {
9451 if (num == TARGET_NR_capget) {
9452 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9453 } else {
9454 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9455 }
9456 if (!target_data) {
9457 unlock_user_struct(target_header, arg1, 0);
9458 goto efault;
9459 }
9460
9461 if (num == TARGET_NR_capset) {
9462 for (i = 0; i < data_items; i++) {
9463 data[i].effective = tswap32(target_data[i].effective);
9464 data[i].permitted = tswap32(target_data[i].permitted);
9465 data[i].inheritable = tswap32(target_data[i].inheritable);
9466 }
9467 }
9468
9469 dataptr = data;
9470 }
9471
9472 if (num == TARGET_NR_capget) {
9473 ret = get_errno(capget(&header, dataptr));
9474 } else {
9475 ret = get_errno(capset(&header, dataptr));
9476 }
9477
9478 /* The kernel always updates version for both capget and capset */
9479 target_header->version = tswap32(header.version);
9480 unlock_user_struct(target_header, arg1, 1);
9481
9482 if (arg2) {
9483 if (num == TARGET_NR_capget) {
9484 for (i = 0; i < data_items; i++) {
9485 target_data[i].effective = tswap32(data[i].effective);
9486 target_data[i].permitted = tswap32(data[i].permitted);
9487 target_data[i].inheritable = tswap32(data[i].inheritable);
9488 }
9489 unlock_user(target_data, arg2, target_datalen);
9490 } else {
9491 unlock_user(target_data, arg2, 0);
9492 }
9493 }
9494 break;
9495 }
9496 case TARGET_NR_sigaltstack:
9497 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9498 break;
9499
9500 #ifdef CONFIG_SENDFILE
9501 case TARGET_NR_sendfile:
9502 {
9503 off_t *offp = NULL;
9504 off_t off;
9505 if (arg3) {
9506 ret = get_user_sal(off, arg3);
9507 if (is_error(ret)) {
9508 break;
9509 }
9510 offp = &off;
9511 }
9512 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9513 if (!is_error(ret) && arg3) {
9514 abi_long ret2 = put_user_sal(off, arg3);
9515 if (is_error(ret2)) {
9516 ret = ret2;
9517 }
9518 }
9519 break;
9520 }
9521 #ifdef TARGET_NR_sendfile64
9522 case TARGET_NR_sendfile64:
9523 {
9524 off_t *offp = NULL;
9525 off_t off;
9526 if (arg3) {
9527 ret = get_user_s64(off, arg3);
9528 if (is_error(ret)) {
9529 break;
9530 }
9531 offp = &off;
9532 }
9533 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9534 if (!is_error(ret) && arg3) {
9535 abi_long ret2 = put_user_s64(off, arg3);
9536 if (is_error(ret2)) {
9537 ret = ret2;
9538 }
9539 }
9540 break;
9541 }
9542 #endif
9543 #else
9544 case TARGET_NR_sendfile:
9545 #ifdef TARGET_NR_sendfile64
9546 case TARGET_NR_sendfile64:
9547 #endif
9548 goto unimplemented;
9549 #endif
9550
9551 #ifdef TARGET_NR_getpmsg
9552 case TARGET_NR_getpmsg:
9553 goto unimplemented;
9554 #endif
9555 #ifdef TARGET_NR_putpmsg
9556 case TARGET_NR_putpmsg:
9557 goto unimplemented;
9558 #endif
9559 #ifdef TARGET_NR_vfork
9560 case TARGET_NR_vfork:
9561 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9562 0, 0, 0, 0));
9563 break;
9564 #endif
9565 #ifdef TARGET_NR_ugetrlimit
9566 case TARGET_NR_ugetrlimit:
9567 {
9568 struct rlimit rlim;
9569 int resource = target_to_host_resource(arg1);
9570 ret = get_errno(getrlimit(resource, &rlim));
9571 if (!is_error(ret)) {
9572 struct target_rlimit *target_rlim;
9573 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9574 goto efault;
9575 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9576 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9577 unlock_user_struct(target_rlim, arg2, 1);
9578 }
9579 break;
9580 }
9581 #endif
9582 #ifdef TARGET_NR_truncate64
9583 case TARGET_NR_truncate64:
9584 if (!(p = lock_user_string(arg1)))
9585 goto efault;
9586 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9587 unlock_user(p, arg1, 0);
9588 break;
9589 #endif
9590 #ifdef TARGET_NR_ftruncate64
9591 case TARGET_NR_ftruncate64:
9592 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9593 break;
9594 #endif
9595 #ifdef TARGET_NR_stat64
9596 case TARGET_NR_stat64:
9597 if (!(p = lock_user_string(arg1)))
9598 goto efault;
9599 ret = get_errno(stat(path(p), &st));
9600 unlock_user(p, arg1, 0);
9601 if (!is_error(ret))
9602 ret = host_to_target_stat64(cpu_env, arg2, &st);
9603 break;
9604 #endif
9605 #ifdef TARGET_NR_lstat64
9606 case TARGET_NR_lstat64:
9607 if (!(p = lock_user_string(arg1)))
9608 goto efault;
9609 ret = get_errno(lstat(path(p), &st));
9610 unlock_user(p, arg1, 0);
9611 if (!is_error(ret))
9612 ret = host_to_target_stat64(cpu_env, arg2, &st);
9613 break;
9614 #endif
9615 #ifdef TARGET_NR_fstat64
9616 case TARGET_NR_fstat64:
9617 ret = get_errno(fstat(arg1, &st));
9618 if (!is_error(ret))
9619 ret = host_to_target_stat64(cpu_env, arg2, &st);
9620 break;
9621 #endif
9622 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9623 #ifdef TARGET_NR_fstatat64
9624 case TARGET_NR_fstatat64:
9625 #endif
9626 #ifdef TARGET_NR_newfstatat
9627 case TARGET_NR_newfstatat:
9628 #endif
9629 if (!(p = lock_user_string(arg2)))
9630 goto efault;
9631 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9632 if (!is_error(ret))
9633 ret = host_to_target_stat64(cpu_env, arg3, &st);
9634 break;
9635 #endif
9636 #ifdef TARGET_NR_lchown
9637 case TARGET_NR_lchown:
9638 if (!(p = lock_user_string(arg1)))
9639 goto efault;
9640 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9641 unlock_user(p, arg1, 0);
9642 break;
9643 #endif
9644 #ifdef TARGET_NR_getuid
9645 case TARGET_NR_getuid:
9646 ret = get_errno(high2lowuid(getuid()));
9647 break;
9648 #endif
9649 #ifdef TARGET_NR_getgid
9650 case TARGET_NR_getgid:
9651 ret = get_errno(high2lowgid(getgid()));
9652 break;
9653 #endif
9654 #ifdef TARGET_NR_geteuid
9655 case TARGET_NR_geteuid:
9656 ret = get_errno(high2lowuid(geteuid()));
9657 break;
9658 #endif
9659 #ifdef TARGET_NR_getegid
9660 case TARGET_NR_getegid:
9661 ret = get_errno(high2lowgid(getegid()));
9662 break;
9663 #endif
9664 case TARGET_NR_setreuid:
9665 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9666 break;
9667 case TARGET_NR_setregid:
9668 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9669 break;
9670 case TARGET_NR_getgroups:
9671 {
9672 int gidsetsize = arg1;
9673 target_id *target_grouplist;
9674 gid_t *grouplist;
9675 int i;
9676
9677 grouplist = alloca(gidsetsize * sizeof(gid_t));
9678 ret = get_errno(getgroups(gidsetsize, grouplist));
9679 if (gidsetsize == 0)
9680 break;
9681 if (!is_error(ret)) {
9682 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9683 if (!target_grouplist)
9684 goto efault;
9685 for(i = 0;i < ret; i++)
9686 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9687 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9688 }
9689 }
9690 break;
9691 case TARGET_NR_setgroups:
9692 {
9693 int gidsetsize = arg1;
9694 target_id *target_grouplist;
9695 gid_t *grouplist = NULL;
9696 int i;
9697 if (gidsetsize) {
9698 grouplist = alloca(gidsetsize * sizeof(gid_t));
9699 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9700 if (!target_grouplist) {
9701 ret = -TARGET_EFAULT;
9702 goto fail;
9703 }
9704 for (i = 0; i < gidsetsize; i++) {
9705 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9706 }
9707 unlock_user(target_grouplist, arg2, 0);
9708 }
9709 ret = get_errno(setgroups(gidsetsize, grouplist));
9710 }
9711 break;
9712 case TARGET_NR_fchown:
9713 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9714 break;
9715 #if defined(TARGET_NR_fchownat)
9716 case TARGET_NR_fchownat:
9717 if (!(p = lock_user_string(arg2)))
9718 goto efault;
9719 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9720 low2highgid(arg4), arg5));
9721 unlock_user(p, arg2, 0);
9722 break;
9723 #endif
9724 #ifdef TARGET_NR_setresuid
9725 case TARGET_NR_setresuid:
9726 ret = get_errno(sys_setresuid(low2highuid(arg1),
9727 low2highuid(arg2),
9728 low2highuid(arg3)));
9729 break;
9730 #endif
9731 #ifdef TARGET_NR_getresuid
9732 case TARGET_NR_getresuid:
9733 {
9734 uid_t ruid, euid, suid;
9735 ret = get_errno(getresuid(&ruid, &euid, &suid));
9736 if (!is_error(ret)) {
9737 if (put_user_id(high2lowuid(ruid), arg1)
9738 || put_user_id(high2lowuid(euid), arg2)
9739 || put_user_id(high2lowuid(suid), arg3))
9740 goto efault;
9741 }
9742 }
9743 break;
9744 #endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* Bug fix: this arm was previously guarded by
         * "#ifdef TARGET_NR_getresgid", so setresgid emulation was
         * compiled in (or out) based on the availability of the wrong
         * syscall number.  Guard on TARGET_NR_setresgid itself.
         * low2highgid() widens 16-bit target gids to host gid_t. */
        ret = get_errno(sys_setresgid(low2highgid(arg1),
                                      low2highgid(arg2),
                                      low2highgid(arg3)));
        break;
#endif
9752 #ifdef TARGET_NR_getresgid
9753 case TARGET_NR_getresgid:
9754 {
9755 gid_t rgid, egid, sgid;
9756 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9757 if (!is_error(ret)) {
9758 if (put_user_id(high2lowgid(rgid), arg1)
9759 || put_user_id(high2lowgid(egid), arg2)
9760 || put_user_id(high2lowgid(sgid), arg3))
9761 goto efault;
9762 }
9763 }
9764 break;
9765 #endif
9766 #ifdef TARGET_NR_chown
9767 case TARGET_NR_chown:
9768 if (!(p = lock_user_string(arg1)))
9769 goto efault;
9770 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9771 unlock_user(p, arg1, 0);
9772 break;
9773 #endif
9774 case TARGET_NR_setuid:
9775 ret = get_errno(sys_setuid(low2highuid(arg1)));
9776 break;
9777 case TARGET_NR_setgid:
9778 ret = get_errno(sys_setgid(low2highgid(arg1)));
9779 break;
9780 case TARGET_NR_setfsuid:
9781 ret = get_errno(setfsuid(arg1));
9782 break;
9783 case TARGET_NR_setfsgid:
9784 ret = get_errno(setfsgid(arg1));
9785 break;
9786
9787 #ifdef TARGET_NR_lchown32
9788 case TARGET_NR_lchown32:
9789 if (!(p = lock_user_string(arg1)))
9790 goto efault;
9791 ret = get_errno(lchown(p, arg2, arg3));
9792 unlock_user(p, arg1, 0);
9793 break;
9794 #endif
9795 #ifdef TARGET_NR_getuid32
9796 case TARGET_NR_getuid32:
9797 ret = get_errno(getuid());
9798 break;
9799 #endif
9800
9801 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9802 /* Alpha specific */
9803 case TARGET_NR_getxuid:
9804 {
9805 uid_t euid;
9806 euid=geteuid();
9807 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9808 }
9809 ret = get_errno(getuid());
9810 break;
9811 #endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxgid returns the real gid in v0 and the
     * effective gid in a4 (the second return-value register). */
    case TARGET_NR_getxgid:
        {
            /* Bug fix: was declared uid_t, but this variable holds a
             * group ID; use gid_t to match getegid()'s return type. */
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
9823 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9824 /* Alpha specific */
9825 case TARGET_NR_osf_getsysinfo:
9826 ret = -TARGET_EOPNOTSUPP;
9827 switch (arg1) {
9828 case TARGET_GSI_IEEE_FP_CONTROL:
9829 {
9830 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9831
9832 /* Copied from linux ieee_fpcr_to_swcr. */
9833 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9834 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9835 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9836 | SWCR_TRAP_ENABLE_DZE
9837 | SWCR_TRAP_ENABLE_OVF);
9838 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9839 | SWCR_TRAP_ENABLE_INE);
9840 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9841 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9842
9843 if (put_user_u64 (swcr, arg2))
9844 goto efault;
9845 ret = 0;
9846 }
9847 break;
9848
9849 /* case GSI_IEEE_STATE_AT_SIGNAL:
9850 -- Not implemented in linux kernel.
9851 case GSI_UACPROC:
9852 -- Retrieves current unaligned access state; not much used.
9853 case GSI_PROC_TYPE:
9854 -- Retrieves implver information; surely not used.
9855 case GSI_GET_HWRPB:
9856 -- Grabs a copy of the HWRPB; surely not used.
9857 */
9858 }
9859 break;
9860 #endif
9861 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9862 /* Alpha specific */
9863 case TARGET_NR_osf_setsysinfo:
9864 ret = -TARGET_EOPNOTSUPP;
9865 switch (arg1) {
9866 case TARGET_SSI_IEEE_FP_CONTROL:
9867 {
9868 uint64_t swcr, fpcr, orig_fpcr;
9869
9870 if (get_user_u64 (swcr, arg2)) {
9871 goto efault;
9872 }
9873 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9874 fpcr = orig_fpcr & FPCR_DYN_MASK;
9875
9876 /* Copied from linux ieee_swcr_to_fpcr. */
9877 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9878 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9879 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9880 | SWCR_TRAP_ENABLE_DZE
9881 | SWCR_TRAP_ENABLE_OVF)) << 48;
9882 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9883 | SWCR_TRAP_ENABLE_INE)) << 57;
9884 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9885 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9886
9887 cpu_alpha_store_fpcr(cpu_env, fpcr);
9888 ret = 0;
9889 }
9890 break;
9891
9892 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9893 {
9894 uint64_t exc, fpcr, orig_fpcr;
9895 int si_code;
9896
9897 if (get_user_u64(exc, arg2)) {
9898 goto efault;
9899 }
9900
9901 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9902
9903 /* We only add to the exception status here. */
9904 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9905
9906 cpu_alpha_store_fpcr(cpu_env, fpcr);
9907 ret = 0;
9908
9909 /* Old exceptions are not signaled. */
9910 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9911
9912 /* If any exceptions set by this call,
9913 and are unmasked, send a signal. */
9914 si_code = 0;
9915 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9916 si_code = TARGET_FPE_FLTRES;
9917 }
9918 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9919 si_code = TARGET_FPE_FLTUND;
9920 }
9921 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9922 si_code = TARGET_FPE_FLTOVF;
9923 }
9924 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9925 si_code = TARGET_FPE_FLTDIV;
9926 }
9927 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9928 si_code = TARGET_FPE_FLTINV;
9929 }
9930 if (si_code != 0) {
9931 target_siginfo_t info;
9932 info.si_signo = SIGFPE;
9933 info.si_errno = 0;
9934 info.si_code = si_code;
9935 info._sifields._sigfault._addr
9936 = ((CPUArchState *)cpu_env)->pc;
9937 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9938 }
9939 }
9940 break;
9941
9942 /* case SSI_NVPAIRS:
9943 -- Used with SSIN_UACPROC to enable unaligned accesses.
9944 case SSI_IEEE_STATE_AT_SIGNAL:
9945 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9946 -- Not implemented in linux kernel
9947 */
9948 }
9949 break;
9950 #endif
9951 #ifdef TARGET_NR_osf_sigprocmask
9952 /* Alpha specific. */
9953 case TARGET_NR_osf_sigprocmask:
9954 {
9955 abi_ulong mask;
9956 int how;
9957 sigset_t set, oldset;
9958
9959 switch(arg1) {
9960 case TARGET_SIG_BLOCK:
9961 how = SIG_BLOCK;
9962 break;
9963 case TARGET_SIG_UNBLOCK:
9964 how = SIG_UNBLOCK;
9965 break;
9966 case TARGET_SIG_SETMASK:
9967 how = SIG_SETMASK;
9968 break;
9969 default:
9970 ret = -TARGET_EINVAL;
9971 goto fail;
9972 }
9973 mask = arg2;
9974 target_to_host_old_sigset(&set, &mask);
9975 ret = do_sigprocmask(how, &set, &oldset);
9976 if (!ret) {
9977 host_to_target_old_sigset(&mask, &oldset);
9978 ret = mask;
9979 }
9980 }
9981 break;
9982 #endif
9983
9984 #ifdef TARGET_NR_getgid32
9985 case TARGET_NR_getgid32:
9986 ret = get_errno(getgid());
9987 break;
9988 #endif
9989 #ifdef TARGET_NR_geteuid32
9990 case TARGET_NR_geteuid32:
9991 ret = get_errno(geteuid());
9992 break;
9993 #endif
9994 #ifdef TARGET_NR_getegid32
9995 case TARGET_NR_getegid32:
9996 ret = get_errno(getegid());
9997 break;
9998 #endif
9999 #ifdef TARGET_NR_setreuid32
10000 case TARGET_NR_setreuid32:
10001 ret = get_errno(setreuid(arg1, arg2));
10002 break;
10003 #endif
10004 #ifdef TARGET_NR_setregid32
10005 case TARGET_NR_setregid32:
10006 ret = get_errno(setregid(arg1, arg2));
10007 break;
10008 #endif
10009 #ifdef TARGET_NR_getgroups32
10010 case TARGET_NR_getgroups32:
10011 {
10012 int gidsetsize = arg1;
10013 uint32_t *target_grouplist;
10014 gid_t *grouplist;
10015 int i;
10016
10017 grouplist = alloca(gidsetsize * sizeof(gid_t));
10018 ret = get_errno(getgroups(gidsetsize, grouplist));
10019 if (gidsetsize == 0)
10020 break;
10021 if (!is_error(ret)) {
10022 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10023 if (!target_grouplist) {
10024 ret = -TARGET_EFAULT;
10025 goto fail;
10026 }
10027 for(i = 0;i < ret; i++)
10028 target_grouplist[i] = tswap32(grouplist[i]);
10029 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10030 }
10031 }
10032 break;
10033 #endif
10034 #ifdef TARGET_NR_setgroups32
10035 case TARGET_NR_setgroups32:
10036 {
10037 int gidsetsize = arg1;
10038 uint32_t *target_grouplist;
10039 gid_t *grouplist;
10040 int i;
10041
10042 grouplist = alloca(gidsetsize * sizeof(gid_t));
10043 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10044 if (!target_grouplist) {
10045 ret = -TARGET_EFAULT;
10046 goto fail;
10047 }
10048 for(i = 0;i < gidsetsize; i++)
10049 grouplist[i] = tswap32(target_grouplist[i]);
10050 unlock_user(target_grouplist, arg2, 0);
10051 ret = get_errno(setgroups(gidsetsize, grouplist));
10052 }
10053 break;
10054 #endif
10055 #ifdef TARGET_NR_fchown32
10056 case TARGET_NR_fchown32:
10057 ret = get_errno(fchown(arg1, arg2, arg3));
10058 break;
10059 #endif
10060 #ifdef TARGET_NR_setresuid32
10061 case TARGET_NR_setresuid32:
10062 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10063 break;
10064 #endif
10065 #ifdef TARGET_NR_getresuid32
10066 case TARGET_NR_getresuid32:
10067 {
10068 uid_t ruid, euid, suid;
10069 ret = get_errno(getresuid(&ruid, &euid, &suid));
10070 if (!is_error(ret)) {
10071 if (put_user_u32(ruid, arg1)
10072 || put_user_u32(euid, arg2)
10073 || put_user_u32(suid, arg3))
10074 goto efault;
10075 }
10076 }
10077 break;
10078 #endif
10079 #ifdef TARGET_NR_setresgid32
10080 case TARGET_NR_setresgid32:
10081 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10082 break;
10083 #endif
10084 #ifdef TARGET_NR_getresgid32
10085 case TARGET_NR_getresgid32:
10086 {
10087 gid_t rgid, egid, sgid;
10088 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10089 if (!is_error(ret)) {
10090 if (put_user_u32(rgid, arg1)
10091 || put_user_u32(egid, arg2)
10092 || put_user_u32(sgid, arg3))
10093 goto efault;
10094 }
10095 }
10096 break;
10097 #endif
10098 #ifdef TARGET_NR_chown32
10099 case TARGET_NR_chown32:
10100 if (!(p = lock_user_string(arg1)))
10101 goto efault;
10102 ret = get_errno(chown(p, arg2, arg3));
10103 unlock_user(p, arg1, 0);
10104 break;
10105 #endif
10106 #ifdef TARGET_NR_setuid32
10107 case TARGET_NR_setuid32:
10108 ret = get_errno(sys_setuid(arg1));
10109 break;
10110 #endif
10111 #ifdef TARGET_NR_setgid32
10112 case TARGET_NR_setgid32:
10113 ret = get_errno(sys_setgid(arg1));
10114 break;
10115 #endif
10116 #ifdef TARGET_NR_setfsuid32
10117 case TARGET_NR_setfsuid32:
10118 ret = get_errno(setfsuid(arg1));
10119 break;
10120 #endif
10121 #ifdef TARGET_NR_setfsgid32
10122 case TARGET_NR_setfsgid32:
10123 ret = get_errno(setfsgid(arg1));
10124 break;
10125 #endif
10126
10127 case TARGET_NR_pivot_root:
10128 goto unimplemented;
10129 #ifdef TARGET_NR_mincore
10130 case TARGET_NR_mincore:
10131 {
10132 void *a;
10133 ret = -TARGET_EFAULT;
10134 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10135 goto efault;
10136 if (!(p = lock_user_string(arg3)))
10137 goto mincore_fail;
10138 ret = get_errno(mincore(a, arg2, p));
10139 unlock_user(p, arg3, ret);
10140 mincore_fail:
10141 unlock_user(a, arg1, 0);
10142 }
10143 break;
10144 #endif
10145 #ifdef TARGET_NR_arm_fadvise64_64
10146 case TARGET_NR_arm_fadvise64_64:
10147 /* arm_fadvise64_64 looks like fadvise64_64 but
10148 * with different argument order: fd, advice, offset, len
10149 * rather than the usual fd, offset, len, advice.
10150 * Note that offset and len are both 64-bit so appear as
10151 * pairs of 32-bit registers.
10152 */
10153 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10154 target_offset64(arg5, arg6), arg2);
10155 ret = -host_to_target_errno(ret);
10156 break;
10157 #endif
10158
10159 #if TARGET_ABI_BITS == 32
10160
10161 #ifdef TARGET_NR_fadvise64_64
10162 case TARGET_NR_fadvise64_64:
10163 /* 6 args: fd, offset (high, low), len (high, low), advice */
10164 if (regpairs_aligned(cpu_env)) {
10165 /* offset is in (3,4), len in (5,6) and advice in 7 */
10166 arg2 = arg3;
10167 arg3 = arg4;
10168 arg4 = arg5;
10169 arg5 = arg6;
10170 arg6 = arg7;
10171 }
10172 ret = -host_to_target_errno(posix_fadvise(arg1,
10173 target_offset64(arg2, arg3),
10174 target_offset64(arg4, arg5),
10175 arg6));
10176 break;
10177 #endif
10178
10179 #ifdef TARGET_NR_fadvise64
10180 case TARGET_NR_fadvise64:
10181 /* 5 args: fd, offset (high, low), len, advice */
10182 if (regpairs_aligned(cpu_env)) {
10183 /* offset is in (3,4), len in 5 and advice in 6 */
10184 arg2 = arg3;
10185 arg3 = arg4;
10186 arg4 = arg5;
10187 arg5 = arg6;
10188 }
10189 ret = -host_to_target_errno(posix_fadvise(arg1,
10190 target_offset64(arg2, arg3),
10191 arg4, arg5));
10192 break;
10193 #endif
10194
10195 #else /* not a 32-bit ABI */
10196 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10197 #ifdef TARGET_NR_fadvise64_64
10198 case TARGET_NR_fadvise64_64:
10199 #endif
10200 #ifdef TARGET_NR_fadvise64
10201 case TARGET_NR_fadvise64:
10202 #endif
10203 #ifdef TARGET_S390X
10204 switch (arg4) {
10205 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10206 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10207 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10208 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10209 default: break;
10210 }
10211 #endif
10212 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10213 break;
10214 #endif
10215 #endif /* end of 64-bit ABI fadvise handling */
10216
10217 #ifdef TARGET_NR_madvise
10218 case TARGET_NR_madvise:
10219 /* A straight passthrough may not be safe because qemu sometimes
10220 turns private file-backed mappings into anonymous mappings.
10221 This will break MADV_DONTNEED.
10222 This is a hint, so ignoring and returning success is ok. */
10223 ret = get_errno(0);
10224 break;
10225 #endif
10226 #if TARGET_ABI_BITS == 32
10227 case TARGET_NR_fcntl64:
10228 {
10229 int cmd;
10230 struct flock64 fl;
10231 from_flock64_fn *copyfrom = copy_from_user_flock64;
10232 to_flock64_fn *copyto = copy_to_user_flock64;
10233
10234 #ifdef TARGET_ARM
10235 if (((CPUARMState *)cpu_env)->eabi) {
10236 copyfrom = copy_from_user_eabi_flock64;
10237 copyto = copy_to_user_eabi_flock64;
10238 }
10239 #endif
10240
10241 cmd = target_to_host_fcntl_cmd(arg2);
10242 if (cmd == -TARGET_EINVAL) {
10243 ret = cmd;
10244 break;
10245 }
10246
10247 switch(arg2) {
10248 case TARGET_F_GETLK64:
10249 ret = copyfrom(&fl, arg3);
10250 if (ret) {
10251 break;
10252 }
10253 ret = get_errno(fcntl(arg1, cmd, &fl));
10254 if (ret == 0) {
10255 ret = copyto(arg3, &fl);
10256 }
10257 break;
10258
10259 case TARGET_F_SETLK64:
10260 case TARGET_F_SETLKW64:
10261 ret = copyfrom(&fl, arg3);
10262 if (ret) {
10263 break;
10264 }
10265 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10266 break;
10267 default:
10268 ret = do_fcntl(arg1, arg2, arg3);
10269 break;
10270 }
10271 break;
10272 }
10273 #endif
10274 #ifdef TARGET_NR_cacheflush
10275 case TARGET_NR_cacheflush:
10276 /* self-modifying code is handled automatically, so nothing needed */
10277 ret = 0;
10278 break;
10279 #endif
10280 #ifdef TARGET_NR_security
10281 case TARGET_NR_security:
10282 goto unimplemented;
10283 #endif
10284 #ifdef TARGET_NR_getpagesize
10285 case TARGET_NR_getpagesize:
10286 ret = TARGET_PAGE_SIZE;
10287 break;
10288 #endif
10289 case TARGET_NR_gettid:
10290 ret = get_errno(gettid());
10291 break;
10292 #ifdef TARGET_NR_readahead
10293 case TARGET_NR_readahead:
10294 #if TARGET_ABI_BITS == 32
10295 if (regpairs_aligned(cpu_env)) {
10296 arg2 = arg3;
10297 arg3 = arg4;
10298 arg4 = arg5;
10299 }
10300 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10301 #else
10302 ret = get_errno(readahead(arg1, arg2, arg3));
10303 #endif
10304 break;
10305 #endif
10306 #ifdef CONFIG_ATTR
10307 #ifdef TARGET_NR_setxattr
10308 case TARGET_NR_listxattr:
10309 case TARGET_NR_llistxattr:
10310 {
10311 void *p, *b = 0;
10312 if (arg2) {
10313 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10314 if (!b) {
10315 ret = -TARGET_EFAULT;
10316 break;
10317 }
10318 }
10319 p = lock_user_string(arg1);
10320 if (p) {
10321 if (num == TARGET_NR_listxattr) {
10322 ret = get_errno(listxattr(p, b, arg3));
10323 } else {
10324 ret = get_errno(llistxattr(p, b, arg3));
10325 }
10326 } else {
10327 ret = -TARGET_EFAULT;
10328 }
10329 unlock_user(p, arg1, 0);
10330 unlock_user(b, arg2, arg3);
10331 break;
10332 }
10333 case TARGET_NR_flistxattr:
10334 {
10335 void *b = 0;
10336 if (arg2) {
10337 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10338 if (!b) {
10339 ret = -TARGET_EFAULT;
10340 break;
10341 }
10342 }
10343 ret = get_errno(flistxattr(arg1, b, arg3));
10344 unlock_user(b, arg2, arg3);
10345 break;
10346 }
10347 case TARGET_NR_setxattr:
10348 case TARGET_NR_lsetxattr:
10349 {
10350 void *p, *n, *v = 0;
10351 if (arg3) {
10352 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10353 if (!v) {
10354 ret = -TARGET_EFAULT;
10355 break;
10356 }
10357 }
10358 p = lock_user_string(arg1);
10359 n = lock_user_string(arg2);
10360 if (p && n) {
10361 if (num == TARGET_NR_setxattr) {
10362 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10363 } else {
10364 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10365 }
10366 } else {
10367 ret = -TARGET_EFAULT;
10368 }
10369 unlock_user(p, arg1, 0);
10370 unlock_user(n, arg2, 0);
10371 unlock_user(v, arg3, 0);
10372 }
10373 break;
10374 case TARGET_NR_fsetxattr:
10375 {
10376 void *n, *v = 0;
10377 if (arg3) {
10378 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10379 if (!v) {
10380 ret = -TARGET_EFAULT;
10381 break;
10382 }
10383 }
10384 n = lock_user_string(arg2);
10385 if (n) {
10386 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10387 } else {
10388 ret = -TARGET_EFAULT;
10389 }
10390 unlock_user(n, arg2, 0);
10391 unlock_user(v, arg3, 0);
10392 }
10393 break;
10394 case TARGET_NR_getxattr:
10395 case TARGET_NR_lgetxattr:
10396 {
10397 void *p, *n, *v = 0;
10398 if (arg3) {
10399 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10400 if (!v) {
10401 ret = -TARGET_EFAULT;
10402 break;
10403 }
10404 }
10405 p = lock_user_string(arg1);
10406 n = lock_user_string(arg2);
10407 if (p && n) {
10408 if (num == TARGET_NR_getxattr) {
10409 ret = get_errno(getxattr(p, n, v, arg4));
10410 } else {
10411 ret = get_errno(lgetxattr(p, n, v, arg4));
10412 }
10413 } else {
10414 ret = -TARGET_EFAULT;
10415 }
10416 unlock_user(p, arg1, 0);
10417 unlock_user(n, arg2, 0);
10418 unlock_user(v, arg3, arg4);
10419 }
10420 break;
10421 case TARGET_NR_fgetxattr:
10422 {
10423 void *n, *v = 0;
10424 if (arg3) {
10425 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10426 if (!v) {
10427 ret = -TARGET_EFAULT;
10428 break;
10429 }
10430 }
10431 n = lock_user_string(arg2);
10432 if (n) {
10433 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10434 } else {
10435 ret = -TARGET_EFAULT;
10436 }
10437 unlock_user(n, arg2, 0);
10438 unlock_user(v, arg3, arg4);
10439 }
10440 break;
10441 case TARGET_NR_removexattr:
10442 case TARGET_NR_lremovexattr:
10443 {
10444 void *p, *n;
10445 p = lock_user_string(arg1);
10446 n = lock_user_string(arg2);
10447 if (p && n) {
10448 if (num == TARGET_NR_removexattr) {
10449 ret = get_errno(removexattr(p, n));
10450 } else {
10451 ret = get_errno(lremovexattr(p, n));
10452 }
10453 } else {
10454 ret = -TARGET_EFAULT;
10455 }
10456 unlock_user(p, arg1, 0);
10457 unlock_user(n, arg2, 0);
10458 }
10459 break;
10460 case TARGET_NR_fremovexattr:
10461 {
10462 void *n;
10463 n = lock_user_string(arg2);
10464 if (n) {
10465 ret = get_errno(fremovexattr(arg1, n));
10466 } else {
10467 ret = -TARGET_EFAULT;
10468 }
10469 unlock_user(n, arg2, 0);
10470 }
10471 break;
10472 #endif
10473 #endif /* CONFIG_ATTR */
10474 #ifdef TARGET_NR_set_thread_area
10475 case TARGET_NR_set_thread_area:
10476 #if defined(TARGET_MIPS)
10477 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10478 ret = 0;
10479 break;
10480 #elif defined(TARGET_CRIS)
10481 if (arg1 & 0xff)
10482 ret = -TARGET_EINVAL;
10483 else {
10484 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10485 ret = 0;
10486 }
10487 break;
10488 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10489 ret = do_set_thread_area(cpu_env, arg1);
10490 break;
10491 #elif defined(TARGET_M68K)
10492 {
10493 TaskState *ts = cpu->opaque;
10494 ts->tp_value = arg1;
10495 ret = 0;
10496 break;
10497 }
10498 #else
10499 goto unimplemented_nowarn;
10500 #endif
10501 #endif
10502 #ifdef TARGET_NR_get_thread_area
10503 case TARGET_NR_get_thread_area:
10504 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10505 ret = do_get_thread_area(cpu_env, arg1);
10506 break;
10507 #elif defined(TARGET_M68K)
10508 {
10509 TaskState *ts = cpu->opaque;
10510 ret = ts->tp_value;
10511 break;
10512 }
10513 #else
10514 goto unimplemented_nowarn;
10515 #endif
10516 #endif
10517 #ifdef TARGET_NR_getdomainname
10518 case TARGET_NR_getdomainname:
10519 goto unimplemented_nowarn;
10520 #endif
10521
10522 #ifdef TARGET_NR_clock_gettime
10523 case TARGET_NR_clock_gettime:
10524 {
10525 struct timespec ts;
10526 ret = get_errno(clock_gettime(arg1, &ts));
10527 if (!is_error(ret)) {
10528 host_to_target_timespec(arg2, &ts);
10529 }
10530 break;
10531 }
10532 #endif
10533 #ifdef TARGET_NR_clock_getres
10534 case TARGET_NR_clock_getres:
10535 {
10536 struct timespec ts;
10537 ret = get_errno(clock_getres(arg1, &ts));
10538 if (!is_error(ret)) {
10539 host_to_target_timespec(arg2, &ts);
10540 }
10541 break;
10542 }
10543 #endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
        {
            struct timespec ts;
            /* Bug fix: the return value of target_to_host_timespec() was
             * ignored, so a faulting guest pointer would sleep on
             * uninitialized stack contents instead of failing.  A bad
             * request pointer must yield EFAULT. */
            if (target_to_host_timespec(&ts, arg3)) {
                goto efault;
            }
            /* arg4 (remaining time) is optional; reuse ts for the output. */
            ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                                 &ts, arg4 ? &ts : NULL));
            if (arg4) {
                host_to_target_timespec(arg4, &ts);
            }

#if defined(TARGET_PPC)
            /* clock_nanosleep is odd in that it returns positive errno values.
             * On PPC, CR0 bit 3 should be set in such a situation. */
            if (ret && ret != -TARGET_ERESTARTSYS) {
                ((CPUPPCState *)cpu_env)->crf[0] |= 1;
            }
#endif
            break;
        }
#endif
10564
10565 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10566 case TARGET_NR_set_tid_address:
10567 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10568 break;
10569 #endif
10570
10571 case TARGET_NR_tkill:
10572 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10573 break;
10574
10575 case TARGET_NR_tgkill:
10576 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
10577 target_to_host_signal(arg3)));
10578 break;
10579
10580 #ifdef TARGET_NR_set_robust_list
10581 case TARGET_NR_set_robust_list:
10582 case TARGET_NR_get_robust_list:
10583 /* The ABI for supporting robust futexes has userspace pass
10584 * the kernel a pointer to a linked list which is updated by
10585 * userspace after the syscall; the list is walked by the kernel
10586 * when the thread exits. Since the linked list in QEMU guest
10587 * memory isn't a valid linked list for the host and we have
10588 * no way to reliably intercept the thread-death event, we can't
10589 * support these. Silently return ENOSYS so that guest userspace
10590 * falls back to a non-robust futex implementation (which should
10591 * be OK except in the corner case of the guest crashing while
10592 * holding a mutex that is shared with another process via
10593 * shared memory).
10594 */
10595 goto unimplemented_nowarn;
10596 #endif
10597
10598 #if defined(TARGET_NR_utimensat)
10599 case TARGET_NR_utimensat:
10600 {
10601 struct timespec *tsp, ts[2];
10602 if (!arg3) {
10603 tsp = NULL;
10604 } else {
10605 target_to_host_timespec(ts, arg3);
10606 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10607 tsp = ts;
10608 }
10609 if (!arg2)
10610 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10611 else {
10612 if (!(p = lock_user_string(arg2))) {
10613 ret = -TARGET_EFAULT;
10614 goto fail;
10615 }
10616 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10617 unlock_user(p, arg2, 0);
10618 }
10619 }
10620 break;
10621 #endif
10622 case TARGET_NR_futex:
10623 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10624 break;
10625 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10626 case TARGET_NR_inotify_init:
10627 ret = get_errno(sys_inotify_init());
10628 break;
10629 #endif
10630 #ifdef CONFIG_INOTIFY1
10631 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10632 case TARGET_NR_inotify_init1:
10633 ret = get_errno(sys_inotify_init1(arg1));
10634 break;
10635 #endif
10636 #endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Bug fix: lock_user_string() can return NULL for a bad guest
         * pointer; previously that NULL was passed straight to path(),
         * which dereferences it.  Fail with EFAULT instead. */
        p = lock_user_string(arg2);
        if (!p) {
            goto efault;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
10644 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10645 case TARGET_NR_inotify_rm_watch:
10646 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10647 break;
10648 #endif
10649
10650 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10651 case TARGET_NR_mq_open:
10652 {
10653 struct mq_attr posix_mq_attr, *attrp;
10654
10655 p = lock_user_string(arg1 - 1);
10656 if (arg4 != 0) {
10657 copy_from_user_mq_attr (&posix_mq_attr, arg4);
10658 attrp = &posix_mq_attr;
10659 } else {
10660 attrp = 0;
10661 }
10662 ret = get_errno(mq_open(p, arg2, arg3, attrp));
10663 unlock_user (p, arg1, 0);
10664 }
10665 break;
10666
10667 case TARGET_NR_mq_unlink:
10668 p = lock_user_string(arg1 - 1);
10669 ret = get_errno(mq_unlink(p));
10670 unlock_user (p, arg1, 0);
10671 break;
10672
10673 case TARGET_NR_mq_timedsend:
10674 {
10675 struct timespec ts;
10676
10677 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10678 if (arg5 != 0) {
10679 target_to_host_timespec(&ts, arg5);
10680 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10681 host_to_target_timespec(arg5, &ts);
10682 } else {
10683 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10684 }
10685 unlock_user (p, arg2, arg3);
10686 }
10687 break;
10688
10689 case TARGET_NR_mq_timedreceive:
10690 {
10691 struct timespec ts;
10692 unsigned int prio;
10693
10694 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10695 if (arg5 != 0) {
10696 target_to_host_timespec(&ts, arg5);
10697 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10698 &prio, &ts));
10699 host_to_target_timespec(arg5, &ts);
10700 } else {
10701 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10702 &prio, NULL));
10703 }
10704 unlock_user (p, arg2, arg3);
10705 if (arg4 != 0)
10706 put_user_u32(prio, arg4);
10707 }
10708 break;
10709
10710 /* Not implemented for now... */
10711 /* case TARGET_NR_mq_notify: */
10712 /* break; */
10713
10714 case TARGET_NR_mq_getsetattr:
10715 {
10716 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10717 ret = 0;
10718 if (arg3 != 0) {
10719 ret = mq_getattr(arg1, &posix_mq_attr_out);
10720 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10721 }
10722 if (arg2 != 0) {
10723 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10724 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
10725 }
10726
10727 }
10728 break;
10729 #endif
10730
10731 #ifdef CONFIG_SPLICE
10732 #ifdef TARGET_NR_tee
10733 case TARGET_NR_tee:
10734 {
10735 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10736 }
10737 break;
10738 #endif
10739 #ifdef TARGET_NR_splice
10740 case TARGET_NR_splice:
10741 {
10742 loff_t loff_in, loff_out;
10743 loff_t *ploff_in = NULL, *ploff_out = NULL;
10744 if (arg2) {
10745 if (get_user_u64(loff_in, arg2)) {
10746 goto efault;
10747 }
10748 ploff_in = &loff_in;
10749 }
10750 if (arg4) {
10751 if (get_user_u64(loff_out, arg4)) {
10752 goto efault;
10753 }
10754 ploff_out = &loff_out;
10755 }
10756 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10757 if (arg2) {
10758 if (put_user_u64(loff_in, arg2)) {
10759 goto efault;
10760 }
10761 }
10762 if (arg4) {
10763 if (put_user_u64(loff_out, arg4)) {
10764 goto efault;
10765 }
10766 }
10767 }
10768 break;
10769 #endif
10770 #ifdef TARGET_NR_vmsplice
10771 case TARGET_NR_vmsplice:
10772 {
10773 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10774 if (vec != NULL) {
10775 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10776 unlock_iovec(vec, arg2, arg3, 0);
10777 } else {
10778 ret = -host_to_target_errno(errno);
10779 }
10780 }
10781 break;
10782 #endif
10783 #endif /* CONFIG_SPLICE */
10784 #ifdef CONFIG_EVENTFD
10785 #if defined(TARGET_NR_eventfd)
10786 case TARGET_NR_eventfd:
10787 ret = get_errno(eventfd(arg1, 0));
10788 fd_trans_unregister(ret);
10789 break;
10790 #endif
10791 #if defined(TARGET_NR_eventfd2)
10792 case TARGET_NR_eventfd2:
10793 {
10794 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10795 if (arg2 & TARGET_O_NONBLOCK) {
10796 host_flags |= O_NONBLOCK;
10797 }
10798 if (arg2 & TARGET_O_CLOEXEC) {
10799 host_flags |= O_CLOEXEC;
10800 }
10801 ret = get_errno(eventfd(arg1, host_flags));
10802 fd_trans_unregister(ret);
10803 break;
10804 }
10805 #endif
10806 #endif /* CONFIG_EVENTFD */
10807 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10808 case TARGET_NR_fallocate:
10809 #if TARGET_ABI_BITS == 32
10810 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10811 target_offset64(arg5, arg6)));
10812 #else
10813 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10814 #endif
10815 break;
10816 #endif
10817 #if defined(CONFIG_SYNC_FILE_RANGE)
10818 #if defined(TARGET_NR_sync_file_range)
10819 case TARGET_NR_sync_file_range:
10820 #if TARGET_ABI_BITS == 32
10821 #if defined(TARGET_MIPS)
10822 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10823 target_offset64(arg5, arg6), arg7));
10824 #else
10825 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10826 target_offset64(arg4, arg5), arg6));
10827 #endif /* !TARGET_MIPS */
10828 #else
10829 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10830 #endif
10831 break;
10832 #endif
10833 #if defined(TARGET_NR_sync_file_range2)
10834 case TARGET_NR_sync_file_range2:
10835 /* This is like sync_file_range but the arguments are reordered */
10836 #if TARGET_ABI_BITS == 32
10837 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10838 target_offset64(arg5, arg6), arg2));
10839 #else
10840 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10841 #endif
10842 break;
10843 #endif
10844 #endif
10845 #if defined(TARGET_NR_signalfd4)
10846 case TARGET_NR_signalfd4:
10847 ret = do_signalfd4(arg1, arg2, arg4);
10848 break;
10849 #endif
10850 #if defined(TARGET_NR_signalfd)
10851 case TARGET_NR_signalfd:
10852 ret = do_signalfd4(arg1, arg2, 0);
10853 break;
10854 #endif
10855 #if defined(CONFIG_EPOLL)
10856 #if defined(TARGET_NR_epoll_create)
10857 case TARGET_NR_epoll_create:
10858 ret = get_errno(epoll_create(arg1));
10859 break;
10860 #endif
10861 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10862 case TARGET_NR_epoll_create1:
10863 ret = get_errno(epoll_create1(arg1));
10864 break;
10865 #endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        /* epoll_ctl(epfd, op, fd, event): convert the guest epoll_event,
         * if one was supplied, into host byte order before the call. */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        /* A NULL event pointer remains NULL here (legal for EPOLL_CTL_DEL). */
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

10890 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10891 #if defined(TARGET_NR_epoll_wait)
10892 case TARGET_NR_epoll_wait:
10893 #endif
10894 #if defined(TARGET_NR_epoll_pwait)
10895 case TARGET_NR_epoll_pwait:
10896 #endif
10897 {
10898 struct target_epoll_event *target_ep;
10899 struct epoll_event *ep;
10900 int epfd = arg1;
10901 int maxevents = arg3;
10902 int timeout = arg4;
10903
10904 target_ep = lock_user(VERIFY_WRITE, arg2,
10905 maxevents * sizeof(struct target_epoll_event), 1);
10906 if (!target_ep) {
10907 goto efault;
10908 }
10909
10910 ep = alloca(maxevents * sizeof(struct epoll_event));
10911
10912 switch (num) {
10913 #if defined(TARGET_NR_epoll_pwait)
10914 case TARGET_NR_epoll_pwait:
10915 {
10916 target_sigset_t *target_set;
10917 sigset_t _set, *set = &_set;
10918
10919 if (arg5) {
10920 target_set = lock_user(VERIFY_READ, arg5,
10921 sizeof(target_sigset_t), 1);
10922 if (!target_set) {
10923 unlock_user(target_ep, arg2, 0);
10924 goto efault;
10925 }
10926 target_to_host_sigset(set, target_set);
10927 unlock_user(target_set, arg5, 0);
10928 } else {
10929 set = NULL;
10930 }
10931
10932 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10933 set, SIGSET_T_SIZE));
10934 break;
10935 }
10936 #endif
10937 #if defined(TARGET_NR_epoll_wait)
10938 case TARGET_NR_epoll_wait:
10939 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10940 NULL, 0));
10941 break;
10942 #endif
10943 default:
10944 ret = -TARGET_ENOSYS;
10945 }
10946 if (!is_error(ret)) {
10947 int i;
10948 for (i = 0; i < ret; i++) {
10949 target_ep[i].events = tswap32(ep[i].events);
10950 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10951 }
10952 }
10953 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10954 break;
10955 }
10956 #endif
10957 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        /* Copy in the new limits only when the caller supplied them;
         * rnewp stays NULL otherwise ("query only"). */
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        /* Copy the previous limits back out, byte-swapped, but only on
         * success and only when the guest asked for them. */
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* gethostname(name, len): write the host name directly into the
         * guest buffer at arg1 (at most arg2 bytes). */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Emulated compare-and-exchange: if *(u32 *)arg6 == arg2, store
         * arg1 there; always return the old memory value.
         * Note this is not actually atomic with respect to other guest
         * threads -- should use start_exclusive from main.c. */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* The guest address is unmapped: raise SIGSEGV at arg6. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
            /* Stop here: falling through would compare the uninitialized
             * mem_value (undefined behavior) and could write through the
             * same faulting address again. */
            break;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Reserve a slot in the global host-timer table; negative means
         * the table is full. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            /* Convert the guest sigevent if one was supplied; NULL sevp
             * keeps the host default behavior. */
            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an opaque token: TIMER_MAGIC tag plus
                 * the table index (decoded later by get_timer_id()). */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() yields a negative error for a bad token. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            /* Check the copy-in: the previous code ignored this result
             * and called timer_settime() with a junk itimerspec when the
             * guest pointer was bad. */
            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
                /* The previous timer value belongs at *old_value (arg4),
                 * and only when the caller passed a non-NULL pointer.
                 * The previous code wrote it through arg2 -- the flags
                 * argument -- clobbering unrelated guest memory. */
                if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                    ret = -TARGET_EFAULT;
                }
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() yields a negative error for a bad token. */
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* Copy the current timer setting back out to the guest. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() yields a negative error for a bad token. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* Deliberately no fd_trans_unregister(ret) here: the previous
         * code called it, but timer_getoverrun() returns an overrun
         * count, not a file descriptor, so that call could wrongly drop
         * the fd translator of an unrelated open descriptor. */
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() yields a negative error for a bad token. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Clear the slot so next_free_host_timer() can reuse it. */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* timerfd_create(clockid, flags): translate guest open-style
         * flags (e.g. cloexec/nonblock bits) to host values. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        /* timerfd_gettime(fd, curr_value) */
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        /* Copy the current setting out if the guest supplied a buffer. */
        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            goto efault;
        }
    }
    break;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        /* timerfd_settime(fd, flags, new_value, old_value) */
        struct itimerspec its_new, its_old, *p_new;

        /* Convert the new setting if supplied; a NULL new_value is
         * passed through as NULL. */
        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        /* Copy the previous setting out if the guest asked for it. */
        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            goto efault;
        }
    }
    break;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* ioprio_get(which, who): plain integer args, passed through. */
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* ioprio_set(which, who, ioprio): plain integer args. */
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* setns(fd, nstype): guest fds are host fds, so pass through. */
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* unshare(flags): flags passed through unconverted. */
        ret = get_errno(unshare(arg1));
        break;
#endif

    default:
    unimplemented:
        /* Syscall number not handled by any case above: warn and fail
         * with ENOSYS so the guest sees an unimplemented syscall. */
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
        /* Same ENOSYS result, but jumped to for syscalls that are known
         * to be unimplemented and not worth logging about. */
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
    /* Common exit path: optionally trace the result, then return it. */
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    /* Common exit for guest-memory access failures. */
    ret = -TARGET_EFAULT;
    goto fail;
}