]> git.proxmox.com Git - mirror_qemu.git/blob - linux-user/syscall.c
Merge remote-tracking branch 'remotes/riku/tags/pull-linux-user-20160628' into staging
[mirror_qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <linux/netlink.h>
104 #ifdef CONFIG_RTNETLINK
105 #include <linux/rtnetlink.h>
106 #endif
107 #include <linux/audit.h>
108 #include "linux_loop.h"
109 #include "uname.h"
110
111 #include "qemu.h"
112
113 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
114 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
115
116 //#define DEBUG
117 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
118 * once. This exercises the codepaths for restart.
119 */
120 //#define DEBUG_ERESTARTSYS
121
122 //#include <linux/msdos_fs.h>
123 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
124 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
125
126 #undef _syscall0
127 #undef _syscall1
128 #undef _syscall2
129 #undef _syscall3
130 #undef _syscall4
131 #undef _syscall5
132 #undef _syscall6
133
134 #define _syscall0(type,name) \
135 static type name (void) \
136 { \
137 return syscall(__NR_##name); \
138 }
139
140 #define _syscall1(type,name,type1,arg1) \
141 static type name (type1 arg1) \
142 { \
143 return syscall(__NR_##name, arg1); \
144 }
145
146 #define _syscall2(type,name,type1,arg1,type2,arg2) \
147 static type name (type1 arg1,type2 arg2) \
148 { \
149 return syscall(__NR_##name, arg1, arg2); \
150 }
151
152 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
153 static type name (type1 arg1,type2 arg2,type3 arg3) \
154 { \
155 return syscall(__NR_##name, arg1, arg2, arg3); \
156 }
157
158 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
160 { \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
162 }
163
164 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 type5,arg5) \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
167 { \
168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
169 }
170
171
172 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
173 type5,arg5,type6,arg6) \
174 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
175 type6 arg6) \
176 { \
177 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
178 }
179
180
181 #define __NR_sys_uname __NR_uname
182 #define __NR_sys_getcwd1 __NR_getcwd
183 #define __NR_sys_getdents __NR_getdents
184 #define __NR_sys_getdents64 __NR_getdents64
185 #define __NR_sys_getpriority __NR_getpriority
186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
187 #define __NR_sys_syslog __NR_syslog
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
192
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 defined(__s390x__)
195 #define __NR__llseek __NR_lseek
196 #endif
197
198 /* Newer kernel ports have llseek() instead of _llseek() */
199 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
200 #define TARGET_NR__llseek TARGET_NR_llseek
201 #endif
202
203 #ifdef __NR_gettid
204 _syscall0(int, gettid)
205 #else
206 /* This is a replacement for the host gettid() and must return a host
207 errno. */
/* Host libc/kernel has no gettid: report failure the way a real host
 * syscall would, i.e. with a (negated) host errno. */
static int gettid(void)
{
    return -ENOSYS;
}
211 #endif
212 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
213 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
214 #endif
215 #if !defined(__NR_getdents) || \
216 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
217 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
218 #endif
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #ifdef __NR_exit_group
226 _syscall1(int,exit_group,int,error_code)
227 #endif
228 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
229 _syscall1(int,set_tid_address,int *,tidptr)
230 #endif
231 #if defined(TARGET_NR_futex) && defined(__NR_futex)
232 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
233 const struct timespec *,timeout,int *,uaddr2,int,val3)
234 #endif
235 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
236 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
237 unsigned long *, user_mask_ptr);
238 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
239 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
240 unsigned long *, user_mask_ptr);
241 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
242 void *, arg);
243 _syscall2(int, capget, struct __user_cap_header_struct *, header,
244 struct __user_cap_data_struct *, data);
245 _syscall2(int, capset, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
248 _syscall2(int, ioprio_get, int, which, int, who)
249 #endif
250 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
251 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
252 #endif
253 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
254 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
255 #endif
256
/* Translation table between target and host open()/fcntl() flag bits.
 * Each entry pairs a target mask/value with the equivalent host mask/value;
 * the list is terminated by an all-zero entry. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
  { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
  { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
  { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
  { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
  { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
  { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
  { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
  { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
  { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
  { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
289
/* Optional per-fd translation hooks; a NULL function pointer means no
 * translation is needed for that direction on that fd. */
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;   /* data flowing host -> guest */
    TargetFdDataFunc target_to_host_data;   /* data flowing guest -> host */
    TargetFdAddrFunc target_to_host_addr;   /* guest sockaddr-style buffers */
} TargetFdTrans;

/* Table indexed by fd; grown on demand by fd_trans_register(). */
static TargetFdTrans **target_fd_trans;

/* Number of entries currently allocated in target_fd_trans[]. */
static unsigned int target_fd_max;
301
302 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
303 {
304 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
305 return target_fd_trans[fd]->target_to_host_data;
306 }
307 return NULL;
308 }
309
310 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
311 {
312 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
313 return target_fd_trans[fd]->host_to_target_data;
314 }
315 return NULL;
316 }
317
318 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
319 {
320 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
321 return target_fd_trans[fd]->target_to_host_addr;
322 }
323 return NULL;
324 }
325
326 static void fd_trans_register(int fd, TargetFdTrans *trans)
327 {
328 unsigned int oldmax;
329
330 if (fd >= target_fd_max) {
331 oldmax = target_fd_max;
332 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
333 target_fd_trans = g_renew(TargetFdTrans *,
334 target_fd_trans, target_fd_max);
335 memset((void *)(target_fd_trans + oldmax), 0,
336 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
337 }
338 target_fd_trans[fd] = trans;
339 }
340
341 static void fd_trans_unregister(int fd)
342 {
343 if (fd >= 0 && fd < target_fd_max) {
344 target_fd_trans[fd] = NULL;
345 }
346 }
347
348 static void fd_trans_dup(int oldfd, int newfd)
349 {
350 fd_trans_unregister(newfd);
351 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
352 fd_trans_register(newfd, target_fd_trans[oldfd]);
353 }
354 }
355
/* getcwd() wrapper matching the kernel syscall convention: on success the
 * returned value is the string length including the trailing NUL; on
 * failure it is -1 with errno already set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) != NULL) {
        return strlen(buf) + 1;
    }
    /* getcwd() sets errno */
    return -1;
}
364
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc provides utimensat(): a NULL pathname means "operate on the
 * fd itself", which the libc expresses as futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper, but the host kernel has the syscall: invoke it raw. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
388
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Trivial wrappers around the host libc inotify functions, so the
 * syscall dispatch code can always call a sys_* name. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
425
426 #if defined(TARGET_NR_prlimit64)
427 #ifndef __NR_prlimit64
428 # define __NR_prlimit64 -1
429 #endif
430 #define __NR_sys_prlimit64 __NR_prlimit64
431 /* The glibc rlimit structure may not be that used by the underlying syscall */
432 struct host_rlimit64 {
433 uint64_t rlim_cur;
434 uint64_t rlim_max;
435 };
436 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
437 const struct host_rlimit64 *, new_limit,
438 struct host_rlimit64 *, old_limit)
439 #endif
440
441
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Find a free slot in g_posix_timers[] and reserve it by storing a dummy
 * non-zero value.  Returns the slot index, or -1 if all slots are busy. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* (timer_t)1 is a placeholder marking the slot as taken. */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
459
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers.
 * regpairs_aligned() returns non-zero when the guest ABI passes a 64-bit
 * syscall argument in an aligned (even/odd) register pair, so the
 * dispatcher must skip a register before reading it. */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Only the EABI variant aligns pairs; OABI does not. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
475
476 #define ERRNO_TABLE_SIZE 1200
477
478 /* target_to_host_errno_table[] is initialized from
479 * host_to_target_errno_table[] in syscall_init(). */
480 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
481 };
482
483 /*
484 * This list is the union of errno values overridden in asm-<arch>/errno.h
485 * minus the errnos that are not actually generic to all archs.
486 */
487 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
488 [EAGAIN] = TARGET_EAGAIN,
489 [EIDRM] = TARGET_EIDRM,
490 [ECHRNG] = TARGET_ECHRNG,
491 [EL2NSYNC] = TARGET_EL2NSYNC,
492 [EL3HLT] = TARGET_EL3HLT,
493 [EL3RST] = TARGET_EL3RST,
494 [ELNRNG] = TARGET_ELNRNG,
495 [EUNATCH] = TARGET_EUNATCH,
496 [ENOCSI] = TARGET_ENOCSI,
497 [EL2HLT] = TARGET_EL2HLT,
498 [EDEADLK] = TARGET_EDEADLK,
499 [ENOLCK] = TARGET_ENOLCK,
500 [EBADE] = TARGET_EBADE,
501 [EBADR] = TARGET_EBADR,
502 [EXFULL] = TARGET_EXFULL,
503 [ENOANO] = TARGET_ENOANO,
504 [EBADRQC] = TARGET_EBADRQC,
505 [EBADSLT] = TARGET_EBADSLT,
506 [EBFONT] = TARGET_EBFONT,
507 [ENOSTR] = TARGET_ENOSTR,
508 [ENODATA] = TARGET_ENODATA,
509 [ETIME] = TARGET_ETIME,
510 [ENOSR] = TARGET_ENOSR,
511 [ENONET] = TARGET_ENONET,
512 [ENOPKG] = TARGET_ENOPKG,
513 [EREMOTE] = TARGET_EREMOTE,
514 [ENOLINK] = TARGET_ENOLINK,
515 [EADV] = TARGET_EADV,
516 [ESRMNT] = TARGET_ESRMNT,
517 [ECOMM] = TARGET_ECOMM,
518 [EPROTO] = TARGET_EPROTO,
519 [EDOTDOT] = TARGET_EDOTDOT,
520 [EMULTIHOP] = TARGET_EMULTIHOP,
521 [EBADMSG] = TARGET_EBADMSG,
522 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
523 [EOVERFLOW] = TARGET_EOVERFLOW,
524 [ENOTUNIQ] = TARGET_ENOTUNIQ,
525 [EBADFD] = TARGET_EBADFD,
526 [EREMCHG] = TARGET_EREMCHG,
527 [ELIBACC] = TARGET_ELIBACC,
528 [ELIBBAD] = TARGET_ELIBBAD,
529 [ELIBSCN] = TARGET_ELIBSCN,
530 [ELIBMAX] = TARGET_ELIBMAX,
531 [ELIBEXEC] = TARGET_ELIBEXEC,
532 [EILSEQ] = TARGET_EILSEQ,
533 [ENOSYS] = TARGET_ENOSYS,
534 [ELOOP] = TARGET_ELOOP,
535 [ERESTART] = TARGET_ERESTART,
536 [ESTRPIPE] = TARGET_ESTRPIPE,
537 [ENOTEMPTY] = TARGET_ENOTEMPTY,
538 [EUSERS] = TARGET_EUSERS,
539 [ENOTSOCK] = TARGET_ENOTSOCK,
540 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
541 [EMSGSIZE] = TARGET_EMSGSIZE,
542 [EPROTOTYPE] = TARGET_EPROTOTYPE,
543 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
544 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
545 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
546 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
547 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
548 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
549 [EADDRINUSE] = TARGET_EADDRINUSE,
550 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
551 [ENETDOWN] = TARGET_ENETDOWN,
552 [ENETUNREACH] = TARGET_ENETUNREACH,
553 [ENETRESET] = TARGET_ENETRESET,
554 [ECONNABORTED] = TARGET_ECONNABORTED,
555 [ECONNRESET] = TARGET_ECONNRESET,
556 [ENOBUFS] = TARGET_ENOBUFS,
557 [EISCONN] = TARGET_EISCONN,
558 [ENOTCONN] = TARGET_ENOTCONN,
559 [EUCLEAN] = TARGET_EUCLEAN,
560 [ENOTNAM] = TARGET_ENOTNAM,
561 [ENAVAIL] = TARGET_ENAVAIL,
562 [EISNAM] = TARGET_EISNAM,
563 [EREMOTEIO] = TARGET_EREMOTEIO,
564 [ESHUTDOWN] = TARGET_ESHUTDOWN,
565 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
566 [ETIMEDOUT] = TARGET_ETIMEDOUT,
567 [ECONNREFUSED] = TARGET_ECONNREFUSED,
568 [EHOSTDOWN] = TARGET_EHOSTDOWN,
569 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
570 [EALREADY] = TARGET_EALREADY,
571 [EINPROGRESS] = TARGET_EINPROGRESS,
572 [ESTALE] = TARGET_ESTALE,
573 [ECANCELED] = TARGET_ECANCELED,
574 [ENOMEDIUM] = TARGET_ENOMEDIUM,
575 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
576 #ifdef ENOKEY
577 [ENOKEY] = TARGET_ENOKEY,
578 #endif
579 #ifdef EKEYEXPIRED
580 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
581 #endif
582 #ifdef EKEYREVOKED
583 [EKEYREVOKED] = TARGET_EKEYREVOKED,
584 #endif
585 #ifdef EKEYREJECTED
586 [EKEYREJECTED] = TARGET_EKEYREJECTED,
587 #endif
588 #ifdef EOWNERDEAD
589 [EOWNERDEAD] = TARGET_EOWNERDEAD,
590 #endif
591 #ifdef ENOTRECOVERABLE
592 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
593 #endif
594 };
595
596 static inline int host_to_target_errno(int err)
597 {
598 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
599 host_to_target_errno_table[err]) {
600 return host_to_target_errno_table[err];
601 }
602 return err;
603 }
604
605 static inline int target_to_host_errno(int err)
606 {
607 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
608 target_to_host_errno_table[err]) {
609 return target_to_host_errno_table[err];
610 }
611 return err;
612 }
613
614 static inline abi_long get_errno(abi_long ret)
615 {
616 if (ret == -1)
617 return -host_to_target_errno(errno);
618 else
619 return ret;
620 }
621
/* True if ret encodes an errno: following the kernel convention, return
 * values in the top 4096 of the (unsigned) range are error codes. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
626
627 const char *target_strerror(int err)
628 {
629 if (err == TARGET_ERESTARTSYS) {
630 return "To be restarted";
631 }
632 if (err == TARGET_QEMU_ESIGRETURN) {
633 return "Successful exit from sigreturn";
634 }
635
636 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
637 return NULL;
638 }
639 return strerror(target_to_host_errno(err));
640 }
641
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
644 { \
645 return safe_syscall(__NR_##name); \
646 }
647
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
650 { \
651 return safe_syscall(__NR_##name, arg1); \
652 }
653
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
656 { \
657 return safe_syscall(__NR_##name, arg1, arg2); \
658 }
659
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
662 { \
663 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
664 }
665
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
667 type4, arg4) \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
669 { \
670 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
671 }
672
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674 type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
676 type5 arg5) \
677 { \
678 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
679 }
680
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682 type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684 type5 arg5, type6 arg6) \
685 { \
686 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
687 }
688
689 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
690 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
691 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
692 int, flags, mode_t, mode)
693 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
694 struct rusage *, rusage)
695 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
696 int, options, struct rusage *, rusage)
697 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
698 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
699 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
700 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
701 struct timespec *, tsp, const sigset_t *, sigmask,
702 size_t, sigsetsize)
703 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
704 int, maxevents, int, timeout, const sigset_t *, sigmask,
705 size_t, sigsetsize)
706 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
707 const struct timespec *,timeout,int *,uaddr2,int,val3)
708 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
709 safe_syscall2(int, kill, pid_t, pid, int, sig)
710 safe_syscall2(int, tkill, int, tid, int, sig)
711 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
712 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
713 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
714 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
715 socklen_t, addrlen)
716 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
717 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
718 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
719 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
720 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
721 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
722 safe_syscall2(int, flock, int, fd, int, operation)
723 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
724 const struct timespec *, uts, size_t, sigsetsize)
725 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
726 int, flags)
727 safe_syscall2(int, nanosleep, const struct timespec *, req,
728 struct timespec *, rem)
729 #ifdef TARGET_NR_clock_nanosleep
730 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
731 const struct timespec *, req, struct timespec *, rem)
732 #endif
733 #ifdef __NR_msgsnd
734 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
735 int, flags)
736 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
737 long, msgtype, int, flags)
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739 unsigned, nsops, const struct timespec *, timeout)
740 #else
741 /* This host kernel architecture uses a single ipc syscall; fake up
742 * wrappers for the sub-operations to hide this implementation detail.
743 * Annoyingly we can't include linux/ipc.h to get the constant definitions
744 * for the call parameter because some structs in there conflict with the
745 * sys/ipc.h ones. So we just define them here, and rely on them being
746 * the same for all host architectures.
747 */
748 #define Q_SEMTIMEDOP 4
749 #define Q_MSGSND 11
750 #define Q_MSGRCV 12
751 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
752
753 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
754 void *, ptr, long, fifth)
/* msgsnd via the ipc multiplexer: note the kernel's argument order puts
 * sz and flags before the message pointer. */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv uses IPCCALL version 1, which passes the message type directly
 * in the 'fifth' slot (NOTE(review): version semantics taken from
 * linux/ipc.h conventions -- confirm against the host kernel). */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
/* semtimedop: the timeout pointer travels through the 'fifth' slot,
 * cast to long to match the ipc() prototype. */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
769 #endif
770 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
771 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
772 size_t, len, unsigned, prio, const struct timespec *, timeout)
773 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
774 size_t, len, unsigned *, prio, const struct timespec *, timeout)
775 #endif
776 /* We do ioctl like this rather than via safe_syscall3 to preserve the
777 * "third argument might be integer or pointer or not present" behaviour of
778 * the libc function.
779 */
780 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
781 /* Similarly for fcntl. Note that callers must always:
782 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
783 * use the flock64 struct rather than unsuffixed flock
784 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
785 */
786 #ifdef __NR_fcntl64
787 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
788 #else
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
790 #endif
791
792 static inline int host_to_target_sock_type(int host_type)
793 {
794 int target_type;
795
796 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
797 case SOCK_DGRAM:
798 target_type = TARGET_SOCK_DGRAM;
799 break;
800 case SOCK_STREAM:
801 target_type = TARGET_SOCK_STREAM;
802 break;
803 default:
804 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
805 break;
806 }
807
808 #if defined(SOCK_CLOEXEC)
809 if (host_type & SOCK_CLOEXEC) {
810 target_type |= TARGET_SOCK_CLOEXEC;
811 }
812 #endif
813
814 #if defined(SOCK_NONBLOCK)
815 if (host_type & SOCK_NONBLOCK) {
816 target_type |= TARGET_SOCK_NONBLOCK;
817 }
818 #endif
819
820 return target_type;
821 }
822
/* Guest heap state used by do_brk(). */
static abi_ulong target_brk;          /* current guest program break */
static abi_ulong target_original_brk; /* initial break; floor for shrinking */
static abi_ulong brk_page;            /* first page past the reserved heap */

/* Record the guest's initial program break (host-page aligned). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
832
833 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
834 #define DEBUGF_BRK(message, args...)
835
/* do_brk() must return target values and target errnos.
 *
 * Emulates the guest brk() syscall: grows (or queries) the guest heap,
 * mapping additional host memory when the request moves past the pages
 * already reserved.  On failure the previous break is returned (except
 * on Alpha, which reports a real errno). */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the traditional "query the current break" call. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Never shrink below the initial break; report the unchanged value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
913
/* Read an fd_set covering n descriptors from guest memory at
 * target_fds_addr into the host fd_set *fds.  Returns 0 on success or
 * -TARGET_EFAULT if the guest buffer cannot be locked for reading. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of abi_ulong words needed to hold n bits. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;   /* running fd number across all words */
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
945
946 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
947 abi_ulong target_fds_addr,
948 int n)
949 {
950 if (target_fds_addr) {
951 if (copy_from_user_fdset(fds, target_fds_addr, n))
952 return -TARGET_EFAULT;
953 *fds_ptr = fds;
954 } else {
955 *fds_ptr = NULL;
956 }
957 return 0;
958 }
959
960 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
961 const fd_set *fds,
962 int n)
963 {
964 int i, nw, j, k;
965 abi_long v;
966 abi_ulong *target_fds;
967
968 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
969 if (!(target_fds = lock_user(VERIFY_WRITE,
970 target_fds_addr,
971 sizeof(abi_ulong) * nw,
972 0)))
973 return -TARGET_EFAULT;
974
975 k = 0;
976 for (i = 0; i < nw; i++) {
977 v = 0;
978 for (j = 0; j < TARGET_ABI_BITS; j++) {
979 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
980 k++;
981 }
982 __put_user(v, &target_fds[i]);
983 }
984
985 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
986
987 return 0;
988 }
989
990 #if defined(__alpha__)
991 #define HOST_HZ 1024
992 #else
993 #define HOST_HZ 100
994 #endif
995
996 static inline abi_long host_to_target_clock_t(long ticks)
997 {
998 #if HOST_HZ == TARGET_HZ
999 return ticks;
1000 #else
1001 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1002 #endif
1003 }
1004
/* Copy a host struct rusage to guest memory at target_addr,
 * byte-swapping every field for the target ABI.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is
 * unwritable.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1034
1035 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1036 {
1037 abi_ulong target_rlim_swap;
1038 rlim_t result;
1039
1040 target_rlim_swap = tswapal(target_rlim);
1041 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1042 return RLIM_INFINITY;
1043
1044 result = target_rlim_swap;
1045 if (target_rlim_swap != (rlim_t)result)
1046 return RLIM_INFINITY;
1047
1048 return result;
1049 }
1050
1051 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1052 {
1053 abi_ulong target_rlim_swap;
1054 abi_ulong result;
1055
1056 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1057 target_rlim_swap = TARGET_RLIM_INFINITY;
1058 else
1059 target_rlim_swap = rlim;
1060 result = tswapal(target_rlim_swap);
1061
1062 return result;
1063 }
1064
1065 static inline int target_to_host_resource(int code)
1066 {
1067 switch (code) {
1068 case TARGET_RLIMIT_AS:
1069 return RLIMIT_AS;
1070 case TARGET_RLIMIT_CORE:
1071 return RLIMIT_CORE;
1072 case TARGET_RLIMIT_CPU:
1073 return RLIMIT_CPU;
1074 case TARGET_RLIMIT_DATA:
1075 return RLIMIT_DATA;
1076 case TARGET_RLIMIT_FSIZE:
1077 return RLIMIT_FSIZE;
1078 case TARGET_RLIMIT_LOCKS:
1079 return RLIMIT_LOCKS;
1080 case TARGET_RLIMIT_MEMLOCK:
1081 return RLIMIT_MEMLOCK;
1082 case TARGET_RLIMIT_MSGQUEUE:
1083 return RLIMIT_MSGQUEUE;
1084 case TARGET_RLIMIT_NICE:
1085 return RLIMIT_NICE;
1086 case TARGET_RLIMIT_NOFILE:
1087 return RLIMIT_NOFILE;
1088 case TARGET_RLIMIT_NPROC:
1089 return RLIMIT_NPROC;
1090 case TARGET_RLIMIT_RSS:
1091 return RLIMIT_RSS;
1092 case TARGET_RLIMIT_RTPRIO:
1093 return RLIMIT_RTPRIO;
1094 case TARGET_RLIMIT_SIGPENDING:
1095 return RLIMIT_SIGPENDING;
1096 case TARGET_RLIMIT_STACK:
1097 return RLIMIT_STACK;
1098 default:
1099 return code;
1100 }
1101 }
1102
1103 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1104 abi_ulong target_tv_addr)
1105 {
1106 struct target_timeval *target_tv;
1107
1108 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1109 return -TARGET_EFAULT;
1110
1111 __get_user(tv->tv_sec, &target_tv->tv_sec);
1112 __get_user(tv->tv_usec, &target_tv->tv_usec);
1113
1114 unlock_user_struct(target_tv, target_tv_addr, 0);
1115
1116 return 0;
1117 }
1118
1119 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1120 const struct timeval *tv)
1121 {
1122 struct target_timeval *target_tv;
1123
1124 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1125 return -TARGET_EFAULT;
1126
1127 __put_user(tv->tv_sec, &target_tv->tv_sec);
1128 __put_user(tv->tv_usec, &target_tv->tv_usec);
1129
1130 unlock_user_struct(target_tv, target_tv_addr, 1);
1131
1132 return 0;
1133 }
1134
1135 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1136 abi_ulong target_tz_addr)
1137 {
1138 struct target_timezone *target_tz;
1139
1140 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1141 return -TARGET_EFAULT;
1142 }
1143
1144 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1145 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1146
1147 unlock_user_struct(target_tz, target_tz_addr, 0);
1148
1149 return 0;
1150 }
1151
1152 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1153 #include <mqueue.h>
1154
1155 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1156 abi_ulong target_mq_attr_addr)
1157 {
1158 struct target_mq_attr *target_mq_attr;
1159
1160 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1161 target_mq_attr_addr, 1))
1162 return -TARGET_EFAULT;
1163
1164 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1165 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1166 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1167 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1168
1169 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1170
1171 return 0;
1172 }
1173
1174 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1175 const struct mq_attr *attr)
1176 {
1177 struct target_mq_attr *target_mq_attr;
1178
1179 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1180 target_mq_attr_addr, 0))
1181 return -TARGET_EFAULT;
1182
1183 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1184 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1185 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1186 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1187
1188 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1189
1190 return 0;
1191 }
1192 #endif
1193
1194 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1195 /* do_select() must return target values and target errnos. */
/* Emulate select(2) for the guest.  All four guest addresses may be 0
 * (meaning "not supplied").  The host syscall used is pselect6, so the
 * timeout is converted timeval -> timespec and back.
 * Returns the (possibly target-converted) syscall result, or a target
 * errno; must return target values and target errnos.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Each helper leaves the *_ptr NULL when the guest passed 0. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* sigmask argument is NULL: plain select has no signal mask */
    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the (kernel-modified) fd sets back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux updates the timeout with the remaining time; mirror
         * that back to the guest's timeval. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1251 #endif
1252
/* Create a pipe with flags via the host pipe2() when available.
 * Returns the raw host syscall result (-1 with errno set on failure),
 * or -ENOSYS when the host lacks pipe2 support.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1261
/* Common implementation of the pipe and pipe2 syscalls.
 * 'pipedes' is the guest address of the int[2] result array;
 * 'is_pipe2' distinguishes which guest syscall was invoked, since some
 * targets return the two fds in registers for plain pipe().
 * Returns a target errno on failure.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    /* pipe() is used directly when no flags were requested */
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* second fd goes in register a4, first fd is the return value */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Everyone else gets the two fds written to guest memory. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1295
1296 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1297 abi_ulong target_addr,
1298 socklen_t len)
1299 {
1300 struct target_ip_mreqn *target_smreqn;
1301
1302 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1303 if (!target_smreqn)
1304 return -TARGET_EFAULT;
1305 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1306 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1307 if (len == sizeof(struct target_ip_mreqn))
1308 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1309 unlock_user(target_smreqn, target_addr, 0);
1310
1311 return 0;
1312 }
1313
/* Convert a guest sockaddr at target_addr into a host struct sockaddr.
 * If the fd has a registered address translator (e.g. netlink fds),
 * that translator is used instead.  AF_UNIX paths get a missing NUL
 * terminator appended when room allows; AF_NETLINK and AF_PACKET get
 * their extra 16/32-bit fields byte-swapped.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* per-fd translator hook takes precedence over generic handling */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* last byte is non-NUL and the byte after is NUL:
             * extend len to include that terminator */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* already host byte order */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1370
1371 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1372 struct sockaddr *addr,
1373 socklen_t len)
1374 {
1375 struct target_sockaddr *target_saddr;
1376
1377 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1378 if (!target_saddr)
1379 return -TARGET_EFAULT;
1380 memcpy(target_saddr, addr, len);
1381 target_saddr->sa_family = tswap16(addr->sa_family);
1382 if (addr->sa_family == AF_NETLINK) {
1383 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1384 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1385 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1386 }
1387 unlock_user(target_saddr, target_addr, len);
1388
1389 return 0;
1390 }
1391
/* Convert the ancillary data (control messages) of a guest msghdr into
 * the host msghdr's control buffer.  Walks both cmsg chains in
 * lockstep, translating SCM_RIGHTS fd arrays and SCM_CREDENTIALS;
 * other payload types are copied verbatim with a warning.
 * On return msgh->msg_controllen holds the space actually used.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* nothing to do if there isn't even room for one header */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length as declared by the guest header */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* passed file descriptors: byte-swap each int */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               && cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* process credentials: convert each field */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                                        cmsg->cmsg_level, cmsg->cmsg_type);
            /* best effort: raw copy of an unknown payload */
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1473
/* Convert the ancillary data (control messages) of a host msghdr back
 * into the guest msghdr's control buffer.  Mirrors Linux put_cmsg()
 * truncation semantics: headers are never split, payloads may be cut
 * short, and MSG_CTRUNC is reported to the guest on truncation.
 * On return target_msgh->msg_controllen holds the space used (in
 * target byte order).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* guest buffer too small for even one header: report nothing */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* host payload length (header stripped) */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* host timeval and target timeval may differ in size */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        /* not enough room left in the guest buffer: truncate payload */
        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* passed file descriptors */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                                        cmsg->cmsg_level, cmsg->cmsg_type);
            /* raw copy, zero-padding when the target wants more */
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1617
/* Byte-swap every field of a netlink message header in place.
 * The swap is symmetric, so this works for both host->target and
 * target->host conversion.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1626
/* Walk a buffer of host-byte-order netlink messages, converting the
 * payload of each with the supplied callback and then byte-swapping
 * the header itself for the guest.  The payload callback runs while
 * the header is still in host order; nlmsg_len is saved beforehand so
 * the walk can advance after the header has been swapped.
 * Stops at NLMSG_DONE/NLMSG_ERROR.  Returns 0, or the first negative
 * error from the callback.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        /* malformed or truncated header: stop walking */
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                /* still swap the header so the guest sees it sanely */
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
1671
/* Walk a buffer of guest-byte-order netlink messages, byte-swapping
 * each header to host order and then converting the payload with the
 * supplied callback.  Note the length sanity check is done on the
 * still-swapped header, before tswap_nlmsghdr().
 * Stops at NLMSG_DONE/NLMSG_ERROR.  Returns 0, or the first negative
 * error from the callback.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* header fields are still target order here */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* nlmsg_len is host order from here on */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
1708
1709 #ifdef CONFIG_RTNETLINK
1710 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1711 size_t len,
1712 abi_long (*host_to_target_rtattr)
1713 (struct rtattr *))
1714 {
1715 unsigned short rta_len;
1716 abi_long ret;
1717
1718 while (len > sizeof(struct rtattr)) {
1719 rta_len = rtattr->rta_len;
1720 if (rta_len < sizeof(struct rtattr) ||
1721 rta_len > len) {
1722 break;
1723 }
1724 ret = host_to_target_rtattr(rtattr);
1725 rtattr->rta_len = tswap16(rtattr->rta_len);
1726 rtattr->rta_type = tswap16(rtattr->rta_type);
1727 if (ret < 0) {
1728 return ret;
1729 }
1730 len -= RTA_ALIGN(rta_len);
1731 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1732 }
1733 return 0;
1734 }
1735
/* Byte-swap the payload of one IFLA_* link attribute for the guest,
 * according to its type: opaque/binary and string payloads pass
 * through, scalar payloads are swapped, and the stats/map structs are
 * swapped field by field.  Nested attributes are not yet implemented.
 * Always returns 0.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;

    switch (rtattr->rta_type) {
    /* binary stream */
    case IFLA_ADDRESS:
    case IFLA_BROADCAST:
    /* string */
    case IFLA_IFNAME:
    case IFLA_QDISC:
        break;
    /* uint8_t: single bytes need no swapping */
    case IFLA_OPERSTATE:
    case IFLA_LINKMODE:
    case IFLA_CARRIER:
    case IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case IFLA_MTU:
    case IFLA_LINK:
    case IFLA_WEIGHT:
    case IFLA_TXQLEN:
    case IFLA_CARRIER_CHANGES:
    case IFLA_NUM_RX_QUEUES:
    case IFLA_NUM_TX_QUEUES:
    case IFLA_PROMISCUITY:
    case IFLA_EXT_MASK:
    case IFLA_LINK_NETNSID:
    case IFLA_GROUP:
    case IFLA_MASTER:
    case IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case IFLA_AF_SPEC:
    case IFLA_LINKINFO:
        /* FIXME: implement nested type */
        gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
        break;
    default:
        gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1860
/* Byte-swap the payload of one IFA_* address attribute for the guest.
 * Always returns 0.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    /* NOTE(review): IFA_BROADCAST is an address (kept in network byte
     * order by the kernel, like IFA_ADDRESS/IFA_LOCAL above), so
     * swapping it as a host-endian u32 looks wrong — confirm against
     * rtnetlink(7) before relying on this for cross-endian guests.
     */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1894
/* Byte-swap the payload of one RTA_* route attribute for the guest.
 * Address payloads pass through (network byte order); scalar ids are
 * swapped.  Always returns 0.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1917
/* Convert a chain of link (IFLA_*) attributes from host to target. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
1924
/* Convert a chain of address (IFA_*) attributes from host to target. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
1931
/* Convert a chain of route (RTA_*) attributes from host to target. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
1938
/* Convert the payload of one NETLINK_ROUTE message (link, addr or
 * route family) from host to target byte order, including its trailing
 * rtattr chain.  The nlmsg header itself is swapped by the caller.
 * Returns 0, or -TARGET_EINVAL for an unrecognised message type.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* header is still host order here */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* only convert the struct if the message really carries one */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
1986
/* Convert a buffer of NETLINK_ROUTE messages from host to target. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
1992
/* Walk a guest-byte-order rtattr chain of 'len' bytes, byte-swapping
 * each header to host order and then converting the payload with the
 * callback.  The length sanity check is done on the still-swapped
 * header, before the in-place tswap16().
 * Returns 0, or the first negative error from the callback.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* rta_len is still target order here */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2017
2018 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2019 {
2020 switch (rtattr->rta_type) {
2021 default:
2022 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
2023 break;
2024 }
2025 return 0;
2026 }
2027
2028 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2029 {
2030 switch (rtattr->rta_type) {
2031 /* binary: depends on family type */
2032 case IFA_LOCAL:
2033 case IFA_ADDRESS:
2034 break;
2035 default:
2036 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2037 break;
2038 }
2039 return 0;
2040 }
2041
2042 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2043 {
2044 uint32_t *u32;
2045 switch (rtattr->rta_type) {
2046 /* binary: depends on family type */
2047 case RTA_DST:
2048 case RTA_SRC:
2049 case RTA_GATEWAY:
2050 break;
2051 /* u32 */
2052 case RTA_OIF:
2053 u32 = RTA_DATA(rtattr);
2054 *u32 = tswap32(*u32);
2055 break;
2056 default:
2057 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2058 break;
2059 }
2060 return 0;
2061 }
2062
/* Convert an IFLA_* attribute chain in place (target -> host).
   NOTE(review): the walker's error result is discarded here, so
   attribute conversion is effectively best-effort. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
2069
/* Convert an IFA_* attribute chain in place (target -> host).
   NOTE(review): the walker's error result is discarded here, so
   attribute conversion is effectively best-effort. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
2076
/* Convert an RTA_* attribute chain in place (target -> host).
   NOTE(review): the walker's error result is discarded here, so
   attribute conversion is effectively best-effort. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
2083
/*
 * Byte-swap the fixed payload of one routing-netlink message
 * (target -> host order) and convert its trailing rtattr chain.
 *
 * GET* dump requests carry no payload that needs converting, so they
 * fall through untouched.  Unsupported types yield -TARGET_EOPNOTSUPP.
 *
 * NOTE(review): nlh->nlmsg_len is used in host order here, so the
 * header is presumably swapped before this runs
 * (target_to_host_for_each_nlmsg) — confirm against the walker.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        /* Only touch the payload when large enough for an ifinfomsg. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            /* ifa_index is the only multi-byte field in ifaddrmsg. */
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            /* rtm_flags is the only multi-byte field in struct rtmsg. */
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2131
/* Convert a buffer of routing-netlink messages from target to host
 * byte order by applying target_to_host_data_route() to each one. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2136 #endif /* CONFIG_RTNETLINK */
2137
2138 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2139 {
2140 switch (nlh->nlmsg_type) {
2141 default:
2142 gemu_log("Unknown host audit message type %d\n",
2143 nlh->nlmsg_type);
2144 return -TARGET_EINVAL;
2145 }
2146 return 0;
2147 }
2148
/* Convert a buffer of audit-netlink messages from host to target
 * byte order by applying host_to_target_data_audit() to each one. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2154
2155 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2156 {
2157 switch (nlh->nlmsg_type) {
2158 case AUDIT_USER:
2159 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2160 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2161 break;
2162 default:
2163 gemu_log("Unknown target audit message type %d\n",
2164 nlh->nlmsg_type);
2165 return -TARGET_EINVAL;
2166 }
2167
2168 return 0;
2169 }
2170
/* Convert a buffer of audit-netlink messages from target to host
 * byte order by applying target_to_host_data_audit() to each one. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2175
2176 /* do_setsockopt() Must return target values and target errnos. */
2177 static abi_long do_setsockopt(int sockfd, int level, int optname,
2178 abi_ulong optval_addr, socklen_t optlen)
2179 {
2180 abi_long ret;
2181 int val;
2182 struct ip_mreqn *ip_mreq;
2183 struct ip_mreq_source *ip_mreq_source;
2184
2185 switch(level) {
2186 case SOL_TCP:
2187 /* TCP options all take an 'int' value. */
2188 if (optlen < sizeof(uint32_t))
2189 return -TARGET_EINVAL;
2190
2191 if (get_user_u32(val, optval_addr))
2192 return -TARGET_EFAULT;
2193 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2194 break;
2195 case SOL_IP:
2196 switch(optname) {
2197 case IP_TOS:
2198 case IP_TTL:
2199 case IP_HDRINCL:
2200 case IP_ROUTER_ALERT:
2201 case IP_RECVOPTS:
2202 case IP_RETOPTS:
2203 case IP_PKTINFO:
2204 case IP_MTU_DISCOVER:
2205 case IP_RECVERR:
2206 case IP_RECVTOS:
2207 #ifdef IP_FREEBIND
2208 case IP_FREEBIND:
2209 #endif
2210 case IP_MULTICAST_TTL:
2211 case IP_MULTICAST_LOOP:
2212 val = 0;
2213 if (optlen >= sizeof(uint32_t)) {
2214 if (get_user_u32(val, optval_addr))
2215 return -TARGET_EFAULT;
2216 } else if (optlen >= 1) {
2217 if (get_user_u8(val, optval_addr))
2218 return -TARGET_EFAULT;
2219 }
2220 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2221 break;
2222 case IP_ADD_MEMBERSHIP:
2223 case IP_DROP_MEMBERSHIP:
2224 if (optlen < sizeof (struct target_ip_mreq) ||
2225 optlen > sizeof (struct target_ip_mreqn))
2226 return -TARGET_EINVAL;
2227
2228 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2229 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2230 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2231 break;
2232
2233 case IP_BLOCK_SOURCE:
2234 case IP_UNBLOCK_SOURCE:
2235 case IP_ADD_SOURCE_MEMBERSHIP:
2236 case IP_DROP_SOURCE_MEMBERSHIP:
2237 if (optlen != sizeof (struct target_ip_mreq_source))
2238 return -TARGET_EINVAL;
2239
2240 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2241 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2242 unlock_user (ip_mreq_source, optval_addr, 0);
2243 break;
2244
2245 default:
2246 goto unimplemented;
2247 }
2248 break;
2249 case SOL_IPV6:
2250 switch (optname) {
2251 case IPV6_MTU_DISCOVER:
2252 case IPV6_MTU:
2253 case IPV6_V6ONLY:
2254 case IPV6_RECVPKTINFO:
2255 val = 0;
2256 if (optlen < sizeof(uint32_t)) {
2257 return -TARGET_EINVAL;
2258 }
2259 if (get_user_u32(val, optval_addr)) {
2260 return -TARGET_EFAULT;
2261 }
2262 ret = get_errno(setsockopt(sockfd, level, optname,
2263 &val, sizeof(val)));
2264 break;
2265 default:
2266 goto unimplemented;
2267 }
2268 break;
2269 case SOL_RAW:
2270 switch (optname) {
2271 case ICMP_FILTER:
2272 /* struct icmp_filter takes an u32 value */
2273 if (optlen < sizeof(uint32_t)) {
2274 return -TARGET_EINVAL;
2275 }
2276
2277 if (get_user_u32(val, optval_addr)) {
2278 return -TARGET_EFAULT;
2279 }
2280 ret = get_errno(setsockopt(sockfd, level, optname,
2281 &val, sizeof(val)));
2282 break;
2283
2284 default:
2285 goto unimplemented;
2286 }
2287 break;
2288 case TARGET_SOL_SOCKET:
2289 switch (optname) {
2290 case TARGET_SO_RCVTIMEO:
2291 {
2292 struct timeval tv;
2293
2294 optname = SO_RCVTIMEO;
2295
2296 set_timeout:
2297 if (optlen != sizeof(struct target_timeval)) {
2298 return -TARGET_EINVAL;
2299 }
2300
2301 if (copy_from_user_timeval(&tv, optval_addr)) {
2302 return -TARGET_EFAULT;
2303 }
2304
2305 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2306 &tv, sizeof(tv)));
2307 return ret;
2308 }
2309 case TARGET_SO_SNDTIMEO:
2310 optname = SO_SNDTIMEO;
2311 goto set_timeout;
2312 case TARGET_SO_ATTACH_FILTER:
2313 {
2314 struct target_sock_fprog *tfprog;
2315 struct target_sock_filter *tfilter;
2316 struct sock_fprog fprog;
2317 struct sock_filter *filter;
2318 int i;
2319
2320 if (optlen != sizeof(*tfprog)) {
2321 return -TARGET_EINVAL;
2322 }
2323 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2324 return -TARGET_EFAULT;
2325 }
2326 if (!lock_user_struct(VERIFY_READ, tfilter,
2327 tswapal(tfprog->filter), 0)) {
2328 unlock_user_struct(tfprog, optval_addr, 1);
2329 return -TARGET_EFAULT;
2330 }
2331
2332 fprog.len = tswap16(tfprog->len);
2333 filter = g_try_new(struct sock_filter, fprog.len);
2334 if (filter == NULL) {
2335 unlock_user_struct(tfilter, tfprog->filter, 1);
2336 unlock_user_struct(tfprog, optval_addr, 1);
2337 return -TARGET_ENOMEM;
2338 }
2339 for (i = 0; i < fprog.len; i++) {
2340 filter[i].code = tswap16(tfilter[i].code);
2341 filter[i].jt = tfilter[i].jt;
2342 filter[i].jf = tfilter[i].jf;
2343 filter[i].k = tswap32(tfilter[i].k);
2344 }
2345 fprog.filter = filter;
2346
2347 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2348 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2349 g_free(filter);
2350
2351 unlock_user_struct(tfilter, tfprog->filter, 1);
2352 unlock_user_struct(tfprog, optval_addr, 1);
2353 return ret;
2354 }
2355 case TARGET_SO_BINDTODEVICE:
2356 {
2357 char *dev_ifname, *addr_ifname;
2358
2359 if (optlen > IFNAMSIZ - 1) {
2360 optlen = IFNAMSIZ - 1;
2361 }
2362 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2363 if (!dev_ifname) {
2364 return -TARGET_EFAULT;
2365 }
2366 optname = SO_BINDTODEVICE;
2367 addr_ifname = alloca(IFNAMSIZ);
2368 memcpy(addr_ifname, dev_ifname, optlen);
2369 addr_ifname[optlen] = 0;
2370 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2371 addr_ifname, optlen));
2372 unlock_user (dev_ifname, optval_addr, 0);
2373 return ret;
2374 }
2375 /* Options with 'int' argument. */
2376 case TARGET_SO_DEBUG:
2377 optname = SO_DEBUG;
2378 break;
2379 case TARGET_SO_REUSEADDR:
2380 optname = SO_REUSEADDR;
2381 break;
2382 case TARGET_SO_TYPE:
2383 optname = SO_TYPE;
2384 break;
2385 case TARGET_SO_ERROR:
2386 optname = SO_ERROR;
2387 break;
2388 case TARGET_SO_DONTROUTE:
2389 optname = SO_DONTROUTE;
2390 break;
2391 case TARGET_SO_BROADCAST:
2392 optname = SO_BROADCAST;
2393 break;
2394 case TARGET_SO_SNDBUF:
2395 optname = SO_SNDBUF;
2396 break;
2397 case TARGET_SO_SNDBUFFORCE:
2398 optname = SO_SNDBUFFORCE;
2399 break;
2400 case TARGET_SO_RCVBUF:
2401 optname = SO_RCVBUF;
2402 break;
2403 case TARGET_SO_RCVBUFFORCE:
2404 optname = SO_RCVBUFFORCE;
2405 break;
2406 case TARGET_SO_KEEPALIVE:
2407 optname = SO_KEEPALIVE;
2408 break;
2409 case TARGET_SO_OOBINLINE:
2410 optname = SO_OOBINLINE;
2411 break;
2412 case TARGET_SO_NO_CHECK:
2413 optname = SO_NO_CHECK;
2414 break;
2415 case TARGET_SO_PRIORITY:
2416 optname = SO_PRIORITY;
2417 break;
2418 #ifdef SO_BSDCOMPAT
2419 case TARGET_SO_BSDCOMPAT:
2420 optname = SO_BSDCOMPAT;
2421 break;
2422 #endif
2423 case TARGET_SO_PASSCRED:
2424 optname = SO_PASSCRED;
2425 break;
2426 case TARGET_SO_PASSSEC:
2427 optname = SO_PASSSEC;
2428 break;
2429 case TARGET_SO_TIMESTAMP:
2430 optname = SO_TIMESTAMP;
2431 break;
2432 case TARGET_SO_RCVLOWAT:
2433 optname = SO_RCVLOWAT;
2434 break;
2435 break;
2436 default:
2437 goto unimplemented;
2438 }
2439 if (optlen < sizeof(uint32_t))
2440 return -TARGET_EINVAL;
2441
2442 if (get_user_u32(val, optval_addr))
2443 return -TARGET_EFAULT;
2444 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2445 break;
2446 default:
2447 unimplemented:
2448 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2449 ret = -TARGET_ENOPROTOOPT;
2450 }
2451 return ret;
2452 }
2453
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2): reads the guest's optlen, calls the host
 * getsockopt(), and writes the (byte-swapped, possibly truncated)
 * result plus the updated length back to guest memory.  Options whose
 * values are not plain ints are either handled specially
 * (SO_PEERCRED) or reported as unimplemented.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* struct ucred result: convert field-by-field. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Never copy out more than the host produced. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* unmapped names are passed through numerically unchanged */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* lv is socklen_t (same size as the int the host writes) */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Honour the guest's requested size: 4 bytes or a single byte. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small byte-range values may be copied back as one byte. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2641
/*
 * Translate a guest iovec array at target_addr into a host iovec
 * array with each buffer locked into host memory.
 *
 * On success returns a g_malloc'd array of 'count' entries (freed by
 * unlock_iovec()).  On failure returns NULL with errno set; for
 * count == 0 it returns NULL with errno == 0, which callers must
 * distinguish from the error case.
 *
 * A bad buffer address in the FIRST entry is a fault; bad addresses
 * in later entries degrade to zero-length entries so the syscall can
 * perform a partial transfer, mirroring kernel behaviour.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one address is bad, all following entries are zeroed. */
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked before entry i. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2729
2730 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2731 int count, int copy)
2732 {
2733 struct target_iovec *target_vec;
2734 int i;
2735
2736 target_vec = lock_user(VERIFY_READ, target_addr,
2737 count * sizeof(struct target_iovec), 1);
2738 if (target_vec) {
2739 for (i = 0; i < count; i++) {
2740 abi_ulong base = tswapal(target_vec[i].iov_base);
2741 abi_long len = tswapal(target_vec[i].iov_len);
2742 if (len < 0) {
2743 break;
2744 }
2745 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2746 }
2747 unlock_user(target_vec, target_addr, 0);
2748 }
2749
2750 g_free(vec);
2751 }
2752
2753 static inline int target_to_host_sock_type(int *type)
2754 {
2755 int host_type = 0;
2756 int target_type = *type;
2757
2758 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2759 case TARGET_SOCK_DGRAM:
2760 host_type = SOCK_DGRAM;
2761 break;
2762 case TARGET_SOCK_STREAM:
2763 host_type = SOCK_STREAM;
2764 break;
2765 default:
2766 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2767 break;
2768 }
2769 if (target_type & TARGET_SOCK_CLOEXEC) {
2770 #if defined(SOCK_CLOEXEC)
2771 host_type |= SOCK_CLOEXEC;
2772 #else
2773 return -TARGET_EINVAL;
2774 #endif
2775 }
2776 if (target_type & TARGET_SOCK_NONBLOCK) {
2777 #if defined(SOCK_NONBLOCK)
2778 host_type |= SOCK_NONBLOCK;
2779 #elif !defined(O_NONBLOCK)
2780 return -TARGET_EINVAL;
2781 #endif
2782 }
2783 *type = host_type;
2784 return 0;
2785 }
2786
2787 /* Try to emulate socket type flags after socket creation. */
2788 static int sock_flags_fixup(int fd, int target_type)
2789 {
2790 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2791 if (target_type & TARGET_SOCK_NONBLOCK) {
2792 int flags = fcntl(fd, F_GETFL);
2793 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2794 close(fd);
2795 return -TARGET_EINVAL;
2796 }
2797 }
2798 #endif
2799 return fd;
2800 }
2801
2802 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2803 abi_ulong target_addr,
2804 socklen_t len)
2805 {
2806 struct sockaddr *addr = host_addr;
2807 struct target_sockaddr *target_saddr;
2808
2809 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2810 if (!target_saddr) {
2811 return -TARGET_EFAULT;
2812 }
2813
2814 memcpy(addr, target_saddr, len);
2815 addr->sa_family = tswap16(target_saddr->sa_family);
2816 /* spkt_protocol is big-endian */
2817
2818 unlock_user(target_saddr, target_addr, 0);
2819 return 0;
2820 }
2821
/* fd translator for SOCK_PACKET sockets: only the sockaddr needs
   special handling (see packet_target_to_host_sockaddr). */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
2825
2826 #ifdef CONFIG_RTNETLINK
/* fd-translator hook: convert outgoing NETLINK_ROUTE data
   (target -> host byte order). */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}
2831
/* fd-translator hook: convert incoming NETLINK_ROUTE data
   (host -> target byte order). */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}
2836
/* fd translator for NETLINK_ROUTE sockets: byte-swap message payloads
   in both directions. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
2841 #endif /* CONFIG_RTNETLINK */
2842
/* fd-translator hook: convert outgoing NETLINK_AUDIT data
   (target -> host byte order). */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_audit(buf, len);
}
2847
/* fd-translator hook: convert incoming NETLINK_AUDIT data
   (host -> target byte order). */
static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_audit(buf, len);
}
2852
/* fd translator for NETLINK_AUDIT sockets: byte-swap message payloads
   in both directions. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
2857
2858 /* do_socket() Must return target values and target errnos. */
2859 static abi_long do_socket(int domain, int type, int protocol)
2860 {
2861 int target_type = type;
2862 int ret;
2863
2864 ret = target_to_host_sock_type(&type);
2865 if (ret) {
2866 return ret;
2867 }
2868
2869 if (domain == PF_NETLINK && !(
2870 #ifdef CONFIG_RTNETLINK
2871 protocol == NETLINK_ROUTE ||
2872 #endif
2873 protocol == NETLINK_KOBJECT_UEVENT ||
2874 protocol == NETLINK_AUDIT)) {
2875 return -EPFNOSUPPORT;
2876 }
2877
2878 if (domain == AF_PACKET ||
2879 (domain == AF_INET && type == SOCK_PACKET)) {
2880 protocol = tswap16(protocol);
2881 }
2882
2883 ret = get_errno(socket(domain, type, protocol));
2884 if (ret >= 0) {
2885 ret = sock_flags_fixup(ret, target_type);
2886 if (type == SOCK_PACKET) {
2887 /* Manage an obsolete case :
2888 * if socket type is SOCK_PACKET, bind by name
2889 */
2890 fd_trans_register(ret, &target_packet_trans);
2891 } else if (domain == PF_NETLINK) {
2892 switch (protocol) {
2893 #ifdef CONFIG_RTNETLINK
2894 case NETLINK_ROUTE:
2895 fd_trans_register(ret, &target_netlink_route_trans);
2896 break;
2897 #endif
2898 case NETLINK_KOBJECT_UEVENT:
2899 /* nothing to do: messages are strings */
2900 break;
2901 case NETLINK_AUDIT:
2902 fd_trans_register(ret, &target_netlink_audit_trans);
2903 break;
2904 default:
2905 g_assert_not_reached();
2906 }
2907 }
2908 }
2909 return ret;
2910 }
2911
2912 /* do_bind() Must return target values and target errnos. */
2913 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2914 socklen_t addrlen)
2915 {
2916 void *addr;
2917 abi_long ret;
2918
2919 if ((int)addrlen < 0) {
2920 return -TARGET_EINVAL;
2921 }
2922
2923 addr = alloca(addrlen+1);
2924
2925 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2926 if (ret)
2927 return ret;
2928
2929 return get_errno(bind(sockfd, addr, addrlen));
2930 }
2931
2932 /* do_connect() Must return target values and target errnos. */
2933 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2934 socklen_t addrlen)
2935 {
2936 void *addr;
2937 abi_long ret;
2938
2939 if ((int)addrlen < 0) {
2940 return -TARGET_EINVAL;
2941 }
2942
2943 addr = alloca(addrlen+1);
2944
2945 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2946 if (ret)
2947 return ret;
2948
2949 return get_errno(safe_connect(sockfd, addr, addrlen));
2950 }
2951
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation for an already-locked guest
 * msghdr: converts name, control data and iovec to host form, calls
 * safe_sendmsg()/safe_recvmsg(), and converts results back on
 * receive.  'send' selects direction.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        /* +1 leaves room for NUL-terminating an AF_UNIX path. */
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg encoding can be larger than the target's; double the
       guest-supplied size to leave headroom for the conversion.
       NOTE(review): msg_controllen comes straight from the guest and
       feeds alloca() unbounded — a huge value could overflow the
       stack; consider clamping. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* fd translator (e.g. netlink) converts the payload;
               NOTE(review): only the first iovec entry is converted. */
            ret = fd_trans_target_to_host_data(fd)(msg.msg_iov->iov_base,
                                                   msg.msg_iov->iov_len);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
        }
        if (ret == 0) {
            ret = get_errno(safe_sendmsg(fd, &msg, flags));
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                /* Copy back the (possibly updated) peer name. */
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* Success: report the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3030
3031 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3032 int flags, int send)
3033 {
3034 abi_long ret;
3035 struct target_msghdr *msgp;
3036
3037 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3038 msgp,
3039 target_msg,
3040 send ? 1 : 0)) {
3041 return -TARGET_EFAULT;
3042 }
3043 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3044 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3045 return ret;
3046 }
3047
3048 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3049 * so it might not have this *mmsg-specific flag either.
3050 */
3051 #ifndef MSG_WAITFORONE
3052 #define MSG_WAITFORONE 0x10000
3053 #endif
3054
/*
 * Emulate sendmmsg(2)/recvmmsg(2): process up to 'vlen' message
 * headers from the guest array at target_msgvec, one at a time via
 * do_sendrecvmsg_locked().
 *
 * Per the mmsg contract, returns the number of datagrams transferred
 * if any succeeded before an error, otherwise the (target) error.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp oversized batches, as the kernel does. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Record per-message byte count in the guest array. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3094
3095 /* do_accept4() Must return target values and target errnos. */
3096 static abi_long do_accept4(int fd, abi_ulong target_addr,
3097 abi_ulong target_addrlen_addr, int flags)
3098 {
3099 socklen_t addrlen;
3100 void *addr;
3101 abi_long ret;
3102 int host_flags;
3103
3104 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3105
3106 if (target_addr == 0) {
3107 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3108 }
3109
3110 /* linux returns EINVAL if addrlen pointer is invalid */
3111 if (get_user_u32(addrlen, target_addrlen_addr))
3112 return -TARGET_EINVAL;
3113
3114 if ((int)addrlen < 0) {
3115 return -TARGET_EINVAL;
3116 }
3117
3118 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3119 return -TARGET_EINVAL;
3120
3121 addr = alloca(addrlen);
3122
3123 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3124 if (!is_error(ret)) {
3125 host_to_target_sockaddr(target_addr, addr, addrlen);
3126 if (put_user_u32(addrlen, target_addrlen_addr))
3127 ret = -TARGET_EFAULT;
3128 }
3129 return ret;
3130 }
3131
3132 /* do_getpeername() Must return target values and target errnos. */
3133 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3134 abi_ulong target_addrlen_addr)
3135 {
3136 socklen_t addrlen;
3137 void *addr;
3138 abi_long ret;
3139
3140 if (get_user_u32(addrlen, target_addrlen_addr))
3141 return -TARGET_EFAULT;
3142
3143 if ((int)addrlen < 0) {
3144 return -TARGET_EINVAL;
3145 }
3146
3147 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3148 return -TARGET_EFAULT;
3149
3150 addr = alloca(addrlen);
3151
3152 ret = get_errno(getpeername(fd, addr, &addrlen));
3153 if (!is_error(ret)) {
3154 host_to_target_sockaddr(target_addr, addr, addrlen);
3155 if (put_user_u32(addrlen, target_addrlen_addr))
3156 ret = -TARGET_EFAULT;
3157 }
3158 return ret;
3159 }
3160
3161 /* do_getsockname() Must return target values and target errnos. */
3162 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3163 abi_ulong target_addrlen_addr)
3164 {
3165 socklen_t addrlen;
3166 void *addr;
3167 abi_long ret;
3168
3169 if (get_user_u32(addrlen, target_addrlen_addr))
3170 return -TARGET_EFAULT;
3171
3172 if ((int)addrlen < 0) {
3173 return -TARGET_EINVAL;
3174 }
3175
3176 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3177 return -TARGET_EFAULT;
3178
3179 addr = alloca(addrlen);
3180
3181 ret = get_errno(getsockname(fd, addr, &addrlen));
3182 if (!is_error(ret)) {
3183 host_to_target_sockaddr(target_addr, addr, addrlen);
3184 if (put_user_u32(addrlen, target_addrlen_addr))
3185 ret = -TARGET_EFAULT;
3186 }
3187 return ret;
3188 }
3189
3190 /* do_socketpair() Must return target values and target errnos. */
3191 static abi_long do_socketpair(int domain, int type, int protocol,
3192 abi_ulong target_tab_addr)
3193 {
3194 int tab[2];
3195 abi_long ret;
3196
3197 target_to_host_sock_type(&type);
3198
3199 ret = get_errno(socketpair(domain, type, protocol, tab));
3200 if (!is_error(ret)) {
3201 if (put_user_s32(tab[0], target_tab_addr)
3202 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3203 ret = -TARGET_EFAULT;
3204 }
3205 return ret;
3206 }
3207
/* do_sendto() Must return target values and target errnos.
 *
 * Sends 'len' bytes from guest buffer 'msg' on fd.  When target_addr is
 * non-zero the guest sockaddr is converted to host form first; otherwise
 * the connected-socket form of sendto() is used.  The guest buffer stays
 * locked for the duration of the (blocking) send and is always unlocked
 * before returning.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A "negative" length (huge unsigned value) is invalid. */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /* Some fd types (registered via fd_trans) need the payload itself
     * translated from guest to host representation before sending. */
    if (fd_trans_target_to_host_data(fd)) {
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
    }
    if (target_addr) {
        /* +1: target_to_host_sockaddr may need one extra byte beyond the
         * guest-supplied length — TODO confirm against its AF_UNIX path. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
    /* Nothing was written into the guest buffer, so unlock with len 0. */
    unlock_user(host_msg, msg, 0);
    return ret;
}
3244
3245 /* do_recvfrom() Must return target values and target errnos. */
3246 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3247 abi_ulong target_addr,
3248 abi_ulong target_addrlen)
3249 {
3250 socklen_t addrlen;
3251 void *addr;
3252 void *host_msg;
3253 abi_long ret;
3254
3255 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3256 if (!host_msg)
3257 return -TARGET_EFAULT;
3258 if (target_addr) {
3259 if (get_user_u32(addrlen, target_addrlen)) {
3260 ret = -TARGET_EFAULT;
3261 goto fail;
3262 }
3263 if ((int)addrlen < 0) {
3264 ret = -TARGET_EINVAL;
3265 goto fail;
3266 }
3267 addr = alloca(addrlen);
3268 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3269 addr, &addrlen));
3270 } else {
3271 addr = NULL; /* To keep compiler quiet. */
3272 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3273 }
3274 if (!is_error(ret)) {
3275 if (target_addr) {
3276 host_to_target_sockaddr(target_addr, addr, addrlen);
3277 if (put_user_u32(addrlen, target_addrlen)) {
3278 ret = -TARGET_EFAULT;
3279 goto fail;
3280 }
3281 }
3282 unlock_user(host_msg, msg, len);
3283 } else {
3284 fail:
3285 unlock_user(host_msg, msg, 0);
3286 }
3287 return ret;
3288 }
3289
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the socketcall(2) syscall used by targets that do
 * not have separate per-operation socket syscalls.  'num' selects the
 * operation; 'vptr' points to an array of abi_long arguments in guest
 * memory, whose length depends on the operation (see ac[] below).
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }
    /* An unknown num falls through with a[] unfetched and is rejected
     * by the default case below. */

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3377
/* Fixed-size table tracking guest shmat() mappings so do_shmdt() can
 * find the size of a segment from its guest start address. */
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;   /* guest address the segment was attached at */
    abi_ulong size;    /* segment size in bytes (from IPC_STAT) */
    bool in_use;       /* slot occupied; freed again by do_shmdt() */
} shm_regions[N_SHM_REGIONS];
3385
/* Guest-ABI layout of struct semid_ds.  abi_ulong fields track the
 * target word size; the __unusedN words mirror the kernel's padding on
 * 32-bit ABIs (PPC64 has no padding around the time fields). */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
3401
/* Convert the guest struct ipc_perm embedded at the head of the guest
 * semid_ds at target_addr into host form.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unreadable.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* ipc_perm is the first member, so the semid_ds lock covers it. */
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 16-bit on most target ABIs but 32-bit on a few. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3429
/* Convert a host struct ipc_perm into the guest semid_ds at target_addr
 * (ipc_perm is its first member).  Inverse of target_to_host_ipc_perm().
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unwritable.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq are 16-bit on most target ABIs but 32-bit on a few. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3457
/* Convert a guest struct semid_ds at target_addr into host form,
 * including the embedded ipc_perm (which takes its own nested lock on
 * the same guest address).
 * Returns 0 on success, -TARGET_EFAULT on guest memory fault.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3473
/* Convert a host struct semid_ds into guest form at target_addr,
 * including the embedded ipc_perm.  Inverse of target_to_host_semid_ds().
 * Returns 0 on success, -TARGET_EFAULT on guest memory fault.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3489
/* Guest-ABI layout of struct seminfo (IPC_INFO/SEM_INFO result);
 * all fields are plain ints, matching the host layout field-for-field. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3502
/* Copy a host struct seminfo to guest memory at target_addr,
 * byte-swapping each field as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unwritable.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
3522
/* The semctl() value argument: glibc does not define union semun, so
 * declare the host version ourselves (per semctl(2)). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest view of union semun: the pointer members are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3536
/* For semctl SETALL: allocate a host array of semaphore values and fill
 * it from the guest array at target_addr.  The set size is discovered
 * via IPC_STAT on the semaphore set itself.
 * On success *host_array is a g_malloc'd buffer the caller must pass to
 * host_to_target_semarray() (which frees it); on failure nothing is
 * left allocated.  Returns 0 or a target errno.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Ask the kernel how many semaphores the set contains. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3572
3573 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3574 unsigned short **host_array)
3575 {
3576 int nsems;
3577 unsigned short *array;
3578 union semun semun;
3579 struct semid_ds semid_ds;
3580 int i, ret;
3581
3582 semun.buf = &semid_ds;
3583
3584 ret = semctl(semid, 0, IPC_STAT, semun);
3585 if (ret == -1)
3586 return get_errno(ret);
3587
3588 nsems = semid_ds.sem_nsems;
3589
3590 array = lock_user(VERIFY_WRITE, target_addr,
3591 nsems*sizeof(unsigned short), 0);
3592 if (!array)
3593 return -TARGET_EFAULT;
3594
3595 for(i=0; i<nsems; i++) {
3596 __put_user((*host_array)[i], &array[i]);
3597 }
3598 g_free(*host_array);
3599 unlock_user(array, target_addr, 1);
3600
3601 return 0;
3602 }
3603
/* Emulate semctl(2) for the guest.  'target_arg' is the guest's
 * union semun, passed by value (it arrives as the raw abi_ulong).
 * Returns the semctl result or a target errno; unknown commands yield
 * -TARGET_EINVAL via the initial value of 'ret'.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip flag bits (e.g. IPC_64) so the switch sees the bare command. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Values travel through a host array converted both ways around
         * the host semctl() call. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* semid_ds round-trips through a host copy. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        /* Output-only: kernel fills seminfo, copy it to the guest. */
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* No semun argument needed for these commands. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
3673
/* Guest-ABI layout of struct sembuf (one semop operation);
 * identical field widths to the host struct. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
3679
/* Copy an array of 'nsops' guest sembuf operations from target_addr
 * into the caller-provided host array, byte-swapping each field.
 * Returns 0 on success, -TARGET_EFAULT if the guest array is unreadable.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
3702
/* Emulate semop(2) by converting the guest sembuf array and calling
 * safe_semtimedop() with a NULL timeout.
 * NOTE(review): 'sops' is a VLA sized by the guest-controlled nsops; a
 * very large value could overflow the host stack before the kernel gets
 * a chance to reject it — consider bounding nsops; verify against the
 * kernel's SEMOPM limit before changing behavior.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
3712
/* Guest-ABI layout of struct msqid_ds.  On 32-bit ABIs each time field
 * is followed by a padding word, mirroring the kernel layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3736
/* Convert a guest struct msqid_ds at target_addr into host form,
 * including the embedded ipc_perm (nested lock on the same address).
 * Returns 0 on success, -TARGET_EFAULT on guest memory fault.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
3757
/* Convert a host struct msqid_ds into guest form at target_addr,
 * including the embedded ipc_perm.  Inverse of target_to_host_msqid_ds().
 * Returns 0 on success, -TARGET_EFAULT on guest memory fault.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
3778
/* Guest-ABI layout of struct msginfo (IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3789
/* Copy a host struct msginfo to guest memory at target_addr,
 * byte-swapping each field as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unwritable.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
3807
/* Emulate msgctl(2) for the guest; 'ptr' is the guest msqid_ds/msginfo
 * address (unused for IPC_RMID).  Returns the msgctl result or a target
 * errno; unknown commands yield -TARGET_EINVAL via the initial 'ret'.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip flag bits (e.g. IPC_64) so the switch sees the bare command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* msqid_ds round-trips through a host copy. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel takes a msqid_ds* but fills a msginfo for these. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
3839
/* Guest-ABI msgbuf header: an abi_long mtype followed by the message
 * text (declared [1]; the real text length is the msgsz argument). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
3844
/* Emulate msgsnd(2): copy the guest message into a freshly allocated
 * host msgbuf (host 'long' mtype + msgsz bytes of text) and send it.
 * Returns 0 or a target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: one 'long' for mtype plus msgsz text bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
3871
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the text and
 * the byte-swapped mtype back to the guest msgbuf at msgp.
 * Returns the number of bytes received or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext starts right after the abi-sized mtype field. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        /* Re-validate the text region for the actual received length. */
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3914
/* Convert a guest struct shmid_ds at target_addr into host form,
 * including the embedded ipc_perm (nested lock on the same address).
 * Returns 0 on success, -TARGET_EFAULT on guest memory fault.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3934
/* Convert a host struct shmid_ds into guest form at target_addr,
 * including the embedded ipc_perm.  Inverse of target_to_host_shmid_ds().
 * Returns 0 on success, -TARGET_EFAULT on guest memory fault.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3954
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
3962
/* Copy a host struct shminfo to guest memory at target_addr,
 * byte-swapping each field as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unwritable.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
3977
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO result). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
3986
/* Copy a host struct shm_info to guest memory at target_addr,
 * byte-swapping each field as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unwritable.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
4002
/* Emulate shmctl(2) for the guest; 'buf' is the guest address of the
 * command-specific structure (unused for RMID/LOCK/UNLOCK).
 * Returns the shmctl result or a target errno; unknown commands yield
 * -TARGET_EINVAL via the initial 'ret'.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip flag bits (e.g. IPC_64) so the switch sees the bare command. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* shmid_ds round-trips through a host copy. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel takes a shmid_ds* but fills a shminfo here. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* ...and a shm_info here. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4041
/* Emulate shmat(2): attach a SysV shared memory segment into the guest
 * address space.  If the guest supplied an address, attach there via the
 * guest->host mapping; otherwise pick a free guest region with
 * mmap_find_vma() and force the host mapping there with SHM_REMAP.
 * Updates QEMU's page flags and the shm_regions[] table (so do_shmdt()
 * can later find the segment size) under mmap_lock.
 * Returns the guest attach address or a target errno.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Hold the mmap lock across VMA search + attach + flag update. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace whatever mmap_find_vma reserved. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid, read-only if SHM_RDONLY was given. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the mapping in the first free bookkeeping slot; if the
     * table is full the attach still succeeds but shmdt() will not be
     * able to clear the page flags for it. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4095
4096 static inline abi_long do_shmdt(abi_ulong shmaddr)
4097 {
4098 int i;
4099
4100 for (i = 0; i < N_SHM_REGIONS; ++i) {
4101 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4102 shm_regions[i].in_use = false;
4103 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4104 break;
4105 }
4106 }
4107
4108 return get_errno(shmdt(g2h(shmaddr)));
4109 }
4110
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the single-entry-point SysV ipc(2) syscall used by
 * targets without separate sem/msg/shm syscalls.  The top 16 bits of
 * 'call' carry an ABI version; the low 16 bits select the operation.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        /* Bug fix: a fault from get_user_ual() was previously ignored,
         * silently passing a zeroed argument on to do_semctl(). */
        if (get_user_ual(atptr, ptr) != 0) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: ptr points to a {msgp, msgtyp} pair. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4216
4217 /* kernel structure types definitions */
4218
4219 #define STRUCT(name, ...) STRUCT_ ## name,
4220 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4221 enum {
4222 #include "syscall_types.h"
4223 STRUCT_MAX
4224 };
4225 #undef STRUCT
4226 #undef STRUCT_SPECIAL
4227
4228 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4229 #define STRUCT_SPECIAL(name)
4230 #include "syscall_types.h"
4231 #undef STRUCT
4232 #undef STRUCT_SPECIAL
4233
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a special-case ioctl converter.  buf_temp is scratch
 * space of MAX_STRUCT_SIZE bytes owned by do_ioctl().
 */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry of the guest->host ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;            /* ioctl number as seen by the guest */
    unsigned int host_cmd;     /* corresponding host ioctl number */
    const char *name;          /* stringified command, for logging */
    int access;                /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;     /* non-NULL for hand-converted ioctls */
    const argtype arg_type[5]; /* thunk description of the argument */
};

/* Direction of the third ioctl argument, from the guest's viewpoint. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed scratch buffer used for argument conversion. */
#define MAX_STRUCT_SIZE 4096
4253
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Convert and forward FS_IOC_FIEMAP: the argument is a struct fiemap
 * header followed by a variable-length array of struct fiemap_extent
 * filled in by the kernel, so both directions are converted by hand.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;   /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert just the fixed-size header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject extent counts whose buffer size would overflow (see
     * FIEMAP_MAX_EXTENTS above).
     */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4342
4343 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4344 int fd, int cmd, abi_long arg)
4345 {
4346 const argtype *arg_type = ie->arg_type;
4347 int target_size;
4348 void *argptr;
4349 int ret;
4350 struct ifconf *host_ifconf;
4351 uint32_t outbufsz;
4352 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4353 int target_ifreq_size;
4354 int nb_ifreq;
4355 int free_buf = 0;
4356 int i;
4357 int target_ifc_len;
4358 abi_long target_ifc_buf;
4359 int host_ifc_len;
4360 char *host_ifc_buf;
4361
4362 assert(arg_type[0] == TYPE_PTR);
4363 assert(ie->access == IOC_RW);
4364
4365 arg_type++;
4366 target_size = thunk_type_size(arg_type, 0);
4367
4368 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4369 if (!argptr)
4370 return -TARGET_EFAULT;
4371 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4372 unlock_user(argptr, arg, 0);
4373
4374 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4375 target_ifc_len = host_ifconf->ifc_len;
4376 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4377
4378 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4379 nb_ifreq = target_ifc_len / target_ifreq_size;
4380 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4381
4382 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4383 if (outbufsz > MAX_STRUCT_SIZE) {
4384 /* We can't fit all the extents into the fixed size buffer.
4385 * Allocate one that is large enough and use it instead.
4386 */
4387 host_ifconf = malloc(outbufsz);
4388 if (!host_ifconf) {
4389 return -TARGET_ENOMEM;
4390 }
4391 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4392 free_buf = 1;
4393 }
4394 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4395
4396 host_ifconf->ifc_len = host_ifc_len;
4397 host_ifconf->ifc_buf = host_ifc_buf;
4398
4399 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4400 if (!is_error(ret)) {
4401 /* convert host ifc_len to target ifc_len */
4402
4403 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4404 target_ifc_len = nb_ifreq * target_ifreq_size;
4405 host_ifconf->ifc_len = target_ifc_len;
4406
4407 /* restore target ifc_buf */
4408
4409 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4410
4411 /* copy struct ifconf to target user */
4412
4413 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4414 if (!argptr)
4415 return -TARGET_EFAULT;
4416 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4417 unlock_user(argptr, arg, target_size);
4418
4419 /* copy ifreq[] to target user */
4420
4421 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4422 for (i = 0; i < nb_ifreq ; i++) {
4423 thunk_convert(argptr + i * target_ifreq_size,
4424 host_ifc_buf + i * sizeof(struct ifreq),
4425 ifreq_arg_type, THUNK_TARGET);
4426 }
4427 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4428 }
4429
4430 if (free_buf) {
4431 free(host_ifconf);
4432 }
4433
4434 return ret;
4435 }
4436
4437 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4438 int cmd, abi_long arg)
4439 {
4440 void *argptr;
4441 struct dm_ioctl *host_dm;
4442 abi_long guest_data;
4443 uint32_t guest_data_size;
4444 int target_size;
4445 const argtype *arg_type = ie->arg_type;
4446 abi_long ret;
4447 void *big_buf = NULL;
4448 char *host_data;
4449
4450 arg_type++;
4451 target_size = thunk_type_size(arg_type, 0);
4452 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4453 if (!argptr) {
4454 ret = -TARGET_EFAULT;
4455 goto out;
4456 }
4457 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4458 unlock_user(argptr, arg, 0);
4459
4460 /* buf_temp is too small, so fetch things into a bigger buffer */
4461 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4462 memcpy(big_buf, buf_temp, target_size);
4463 buf_temp = big_buf;
4464 host_dm = big_buf;
4465
4466 guest_data = arg + host_dm->data_start;
4467 if ((guest_data - arg) < 0) {
4468 ret = -EINVAL;
4469 goto out;
4470 }
4471 guest_data_size = host_dm->data_size - host_dm->data_start;
4472 host_data = (char*)host_dm + host_dm->data_start;
4473
4474 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4475 switch (ie->host_cmd) {
4476 case DM_REMOVE_ALL:
4477 case DM_LIST_DEVICES:
4478 case DM_DEV_CREATE:
4479 case DM_DEV_REMOVE:
4480 case DM_DEV_SUSPEND:
4481 case DM_DEV_STATUS:
4482 case DM_DEV_WAIT:
4483 case DM_TABLE_STATUS:
4484 case DM_TABLE_CLEAR:
4485 case DM_TABLE_DEPS:
4486 case DM_LIST_VERSIONS:
4487 /* no input data */
4488 break;
4489 case DM_DEV_RENAME:
4490 case DM_DEV_SET_GEOMETRY:
4491 /* data contains only strings */
4492 memcpy(host_data, argptr, guest_data_size);
4493 break;
4494 case DM_TARGET_MSG:
4495 memcpy(host_data, argptr, guest_data_size);
4496 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4497 break;
4498 case DM_TABLE_LOAD:
4499 {
4500 void *gspec = argptr;
4501 void *cur_data = host_data;
4502 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4503 int spec_size = thunk_type_size(arg_type, 0);
4504 int i;
4505
4506 for (i = 0; i < host_dm->target_count; i++) {
4507 struct dm_target_spec *spec = cur_data;
4508 uint32_t next;
4509 int slen;
4510
4511 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4512 slen = strlen((char*)gspec + spec_size) + 1;
4513 next = spec->next;
4514 spec->next = sizeof(*spec) + slen;
4515 strcpy((char*)&spec[1], gspec + spec_size);
4516 gspec += next;
4517 cur_data += spec->next;
4518 }
4519 break;
4520 }
4521 default:
4522 ret = -TARGET_EINVAL;
4523 unlock_user(argptr, guest_data, 0);
4524 goto out;
4525 }
4526 unlock_user(argptr, guest_data, 0);
4527
4528 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4529 if (!is_error(ret)) {
4530 guest_data = arg + host_dm->data_start;
4531 guest_data_size = host_dm->data_size - host_dm->data_start;
4532 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4533 switch (ie->host_cmd) {
4534 case DM_REMOVE_ALL:
4535 case DM_DEV_CREATE:
4536 case DM_DEV_REMOVE:
4537 case DM_DEV_RENAME:
4538 case DM_DEV_SUSPEND:
4539 case DM_DEV_STATUS:
4540 case DM_TABLE_LOAD:
4541 case DM_TABLE_CLEAR:
4542 case DM_TARGET_MSG:
4543 case DM_DEV_SET_GEOMETRY:
4544 /* no return data */
4545 break;
4546 case DM_LIST_DEVICES:
4547 {
4548 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4549 uint32_t remaining_data = guest_data_size;
4550 void *cur_data = argptr;
4551 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4552 int nl_size = 12; /* can't use thunk_size due to alignment */
4553
4554 while (1) {
4555 uint32_t next = nl->next;
4556 if (next) {
4557 nl->next = nl_size + (strlen(nl->name) + 1);
4558 }
4559 if (remaining_data < nl->next) {
4560 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4561 break;
4562 }
4563 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4564 strcpy(cur_data + nl_size, nl->name);
4565 cur_data += nl->next;
4566 remaining_data -= nl->next;
4567 if (!next) {
4568 break;
4569 }
4570 nl = (void*)nl + next;
4571 }
4572 break;
4573 }
4574 case DM_DEV_WAIT:
4575 case DM_TABLE_STATUS:
4576 {
4577 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4578 void *cur_data = argptr;
4579 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4580 int spec_size = thunk_type_size(arg_type, 0);
4581 int i;
4582
4583 for (i = 0; i < host_dm->target_count; i++) {
4584 uint32_t next = spec->next;
4585 int slen = strlen((char*)&spec[1]) + 1;
4586 spec->next = (cur_data - argptr) + spec_size + slen;
4587 if (guest_data_size < spec->next) {
4588 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4589 break;
4590 }
4591 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4592 strcpy(cur_data + spec_size, (char*)&spec[1]);
4593 cur_data = argptr + spec->next;
4594 spec = (void*)host_dm + host_dm->data_start + next;
4595 }
4596 break;
4597 }
4598 case DM_TABLE_DEPS:
4599 {
4600 void *hdata = (void*)host_dm + host_dm->data_start;
4601 int count = *(uint32_t*)hdata;
4602 uint64_t *hdev = hdata + 8;
4603 uint64_t *gdev = argptr + 8;
4604 int i;
4605
4606 *(uint32_t*)argptr = tswap32(count);
4607 for (i = 0; i < count; i++) {
4608 *gdev = tswap64(*hdev);
4609 gdev++;
4610 hdev++;
4611 }
4612 break;
4613 }
4614 case DM_LIST_VERSIONS:
4615 {
4616 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4617 uint32_t remaining_data = guest_data_size;
4618 void *cur_data = argptr;
4619 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4620 int vers_size = thunk_type_size(arg_type, 0);
4621
4622 while (1) {
4623 uint32_t next = vers->next;
4624 if (next) {
4625 vers->next = vers_size + (strlen(vers->name) + 1);
4626 }
4627 if (remaining_data < vers->next) {
4628 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4629 break;
4630 }
4631 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4632 strcpy(cur_data + vers_size, vers->name);
4633 cur_data += vers->next;
4634 remaining_data -= vers->next;
4635 if (!next) {
4636 break;
4637 }
4638 vers = (void*)vers + next;
4639 }
4640 break;
4641 }
4642 default:
4643 unlock_user(argptr, guest_data, 0);
4644 ret = -TARGET_EINVAL;
4645 goto out;
4646 }
4647 unlock_user(argptr, guest_data, guest_data_size);
4648
4649 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4650 if (!argptr) {
4651 ret = -TARGET_EFAULT;
4652 goto out;
4653 }
4654 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4655 unlock_user(argptr, arg, target_size);
4656 }
4657 out:
4658 g_free(big_buf);
4659 return ret;
4660 }
4661
4662 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4663 int cmd, abi_long arg)
4664 {
4665 void *argptr;
4666 int target_size;
4667 const argtype *arg_type = ie->arg_type;
4668 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4669 abi_long ret;
4670
4671 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4672 struct blkpg_partition host_part;
4673
4674 /* Read and convert blkpg */
4675 arg_type++;
4676 target_size = thunk_type_size(arg_type, 0);
4677 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4678 if (!argptr) {
4679 ret = -TARGET_EFAULT;
4680 goto out;
4681 }
4682 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4683 unlock_user(argptr, arg, 0);
4684
4685 switch (host_blkpg->op) {
4686 case BLKPG_ADD_PARTITION:
4687 case BLKPG_DEL_PARTITION:
4688 /* payload is struct blkpg_partition */
4689 break;
4690 default:
4691 /* Unknown opcode */
4692 ret = -TARGET_EINVAL;
4693 goto out;
4694 }
4695
4696 /* Read and convert blkpg->data */
4697 arg = (abi_long)(uintptr_t)host_blkpg->data;
4698 target_size = thunk_type_size(part_arg_type, 0);
4699 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4700 if (!argptr) {
4701 ret = -TARGET_EFAULT;
4702 goto out;
4703 }
4704 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4705 unlock_user(argptr, arg, 0);
4706
4707 /* Swizzle the data pointer to our local copy and call! */
4708 host_blkpg->data = &host_part;
4709 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4710
4711 out:
4712 return ret;
4713 }
4714
4715 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4716 int fd, int cmd, abi_long arg)
4717 {
4718 const argtype *arg_type = ie->arg_type;
4719 const StructEntry *se;
4720 const argtype *field_types;
4721 const int *dst_offsets, *src_offsets;
4722 int target_size;
4723 void *argptr;
4724 abi_ulong *target_rt_dev_ptr;
4725 unsigned long *host_rt_dev_ptr;
4726 abi_long ret;
4727 int i;
4728
4729 assert(ie->access == IOC_W);
4730 assert(*arg_type == TYPE_PTR);
4731 arg_type++;
4732 assert(*arg_type == TYPE_STRUCT);
4733 target_size = thunk_type_size(arg_type, 0);
4734 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4735 if (!argptr) {
4736 return -TARGET_EFAULT;
4737 }
4738 arg_type++;
4739 assert(*arg_type == (int)STRUCT_rtentry);
4740 se = struct_entries + *arg_type++;
4741 assert(se->convert[0] == NULL);
4742 /* convert struct here to be able to catch rt_dev string */
4743 field_types = se->field_types;
4744 dst_offsets = se->field_offsets[THUNK_HOST];
4745 src_offsets = se->field_offsets[THUNK_TARGET];
4746 for (i = 0; i < se->nb_fields; i++) {
4747 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4748 assert(*field_types == TYPE_PTRVOID);
4749 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4750 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4751 if (*target_rt_dev_ptr != 0) {
4752 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4753 tswapal(*target_rt_dev_ptr));
4754 if (!*host_rt_dev_ptr) {
4755 unlock_user(argptr, arg, 0);
4756 return -TARGET_EFAULT;
4757 }
4758 } else {
4759 *host_rt_dev_ptr = 0;
4760 }
4761 field_types++;
4762 continue;
4763 }
4764 field_types = thunk_convert(buf_temp + dst_offsets[i],
4765 argptr + src_offsets[i],
4766 field_types, THUNK_HOST);
4767 }
4768 unlock_user(argptr, arg, 0);
4769
4770 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4771 if (*host_rt_dev_ptr != 0) {
4772 unlock_user((void *)*host_rt_dev_ptr,
4773 *target_rt_dev_ptr, 0);
4774 }
4775 return ret;
4776 }
4777
4778 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4779 int fd, int cmd, abi_long arg)
4780 {
4781 int sig = target_to_host_signal(arg);
4782 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4783 }
4784
/* The guest->host ioctl translation table, generated from ioctls.h via
 * the IOCTL/IOCTL_SPECIAL macros and terminated by an all-zero entry.
 * NOTE(review): deliberately not const — presumably some entries are
 * patched at startup elsewhere in this file; confirm before
 * const-qualifying.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
4793
4794 /* ??? Implement proper locking for ioctls. */
4795 /* do_ioctl() Must return target values and target errnos. */
4796 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4797 {
4798 const IOCTLEntry *ie;
4799 const argtype *arg_type;
4800 abi_long ret;
4801 uint8_t buf_temp[MAX_STRUCT_SIZE];
4802 int target_size;
4803 void *argptr;
4804
4805 ie = ioctl_entries;
4806 for(;;) {
4807 if (ie->target_cmd == 0) {
4808 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4809 return -TARGET_ENOSYS;
4810 }
4811 if (ie->target_cmd == cmd)
4812 break;
4813 ie++;
4814 }
4815 arg_type = ie->arg_type;
4816 #if defined(DEBUG)
4817 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
4818 #endif
4819 if (ie->do_ioctl) {
4820 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4821 }
4822
4823 switch(arg_type[0]) {
4824 case TYPE_NULL:
4825 /* no argument */
4826 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4827 break;
4828 case TYPE_PTRVOID:
4829 case TYPE_INT:
4830 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4831 break;
4832 case TYPE_PTR:
4833 arg_type++;
4834 target_size = thunk_type_size(arg_type, 0);
4835 switch(ie->access) {
4836 case IOC_R:
4837 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4838 if (!is_error(ret)) {
4839 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4840 if (!argptr)
4841 return -TARGET_EFAULT;
4842 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4843 unlock_user(argptr, arg, target_size);
4844 }
4845 break;
4846 case IOC_W:
4847 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4848 if (!argptr)
4849 return -TARGET_EFAULT;
4850 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4851 unlock_user(argptr, arg, 0);
4852 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4853 break;
4854 default:
4855 case IOC_RW:
4856 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4857 if (!argptr)
4858 return -TARGET_EFAULT;
4859 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4860 unlock_user(argptr, arg, 0);
4861 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4862 if (!is_error(ret)) {
4863 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4864 if (!argptr)
4865 return -TARGET_EFAULT;
4866 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4867 unlock_user(argptr, arg, target_size);
4868 }
4869 break;
4870 }
4871 break;
4872 default:
4873 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4874 (long)cmd, arg_type[0]);
4875 ret = -TARGET_ENOSYS;
4876 break;
4877 }
4878 return ret;
4879 }
4880
/* Guest <-> host translation table for termios c_iflag bits. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
4898
/* Guest <-> host translation table for termios c_oflag bits; the delay
 * fields (NLDLY etc.) are multi-bit masks with one entry per value.
 */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
4926
/* Guest <-> host translation table for termios c_cflag bits; CBAUD and
 * CSIZE are multi-bit masks with one entry per value.
 */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
4961
/* Guest <-> host translation table for termios c_lflag bits. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
4980
4981 static void target_to_host_termios (void *dst, const void *src)
4982 {
4983 struct host_termios *host = dst;
4984 const struct target_termios *target = src;
4985
4986 host->c_iflag =
4987 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4988 host->c_oflag =
4989 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4990 host->c_cflag =
4991 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4992 host->c_lflag =
4993 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4994 host->c_line = target->c_line;
4995
4996 memset(host->c_cc, 0, sizeof(host->c_cc));
4997 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4998 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4999 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5000 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5001 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5002 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5003 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5004 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5005 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5006 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5007 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5008 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5009 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5010 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5011 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5012 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5013 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5014 }
5015
5016 static void host_to_target_termios (void *dst, const void *src)
5017 {
5018 struct target_termios *target = dst;
5019 const struct host_termios *host = src;
5020
5021 target->c_iflag =
5022 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5023 target->c_oflag =
5024 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5025 target->c_cflag =
5026 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5027 target->c_lflag =
5028 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5029 target->c_line = host->c_line;
5030
5031 memset(target->c_cc, 0, sizeof(target->c_cc));
5032 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5033 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5034 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5035 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5036 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5037 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5038 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5039 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5040 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5041 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5042 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5043 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5044 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5045 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5046 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5047 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5048 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5049 }
5050
/* Whole-struct converter registered for termios: the thunk layer calls
 * host_to_target_termios / target_to_host_termios instead of doing a
 * generic field-by-field conversion.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5056
5057 static bitmask_transtbl mmap_flags_tbl[] = {
5058 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5059 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5060 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5061 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5062 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5063 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5064 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5065 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5066 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5067 MAP_NORESERVE },
5068 { 0, 0, 0, 0 }
5069 };
5070
5071 #if defined(TARGET_I386)
5072
/* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT; lazily allocated by write_ldt(). */
static uint8_t *ldt_table;
5075
5076 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5077 {
5078 int size;
5079 void *p;
5080
5081 if (!ldt_table)
5082 return 0;
5083 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5084 if (size > bytecount)
5085 size = bytecount;
5086 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5087 if (!p)
5088 return -TARGET_EFAULT;
5089 /* ??? Should this by byteswapped? */
5090 memcpy(p, ldt_table, size);
5091 unlock_user(p, ptr, size);
5092 return size;
5093 }
5094
/* XXX: add locking support */
/* modify_ldt(func=1/0x11): install or clear one LDT descriptor from a
 * guest struct target_modify_ldt_ldt_s.  oldmode selects the legacy
 * interface semantics (no 'useable' bit, segment type 3 rejected).
 * The backing ldt_table is mmap'd lazily on first write.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the descriptor attribute bits from the flags word. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/attributes into the two 32-bit descriptor words
     * (same bit layout as a hardware segment descriptor).
     */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5186
5187 /* specific and weird i386 syscalls */
5188 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5189 unsigned long bytecount)
5190 {
5191 abi_long ret;
5192
5193 switch (func) {
5194 case 0:
5195 ret = read_ldt(ptr, bytecount);
5196 break;
5197 case 1:
5198 ret = write_ldt(env, ptr, bytecount, 1);
5199 break;
5200 case 0x11:
5201 ret = write_ldt(env, ptr, bytecount, 0);
5202 break;
5203 default:
5204 ret = -TARGET_ENOSYS;
5205 break;
5206 }
5207 return ret;
5208 }
5209
5210 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate the i386 set_thread_area() syscall: build an x86 segment
 * descriptor from the guest's user_desc at @ptr and install it in one of
 * the TLS slots of the emulated GDT.  Returns 0, -TARGET_EFAULT on a bad
 * guest pointer, or -TARGET_EINVAL for an out-of-range entry number or
 * invalid flag combination.  The bit-packing below mirrors the Linux
 * kernel's own set_thread_area() implementation.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: we may write back an allocated entry number below.  */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* entry_number == -1 asks us to pick the first free TLS slot and
     * report it back to the guest (Linux semantics).  */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits.  */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* NOTE: this function is only compiled when TARGET_ABI32 is defined
     * (see the enclosing #if), so lm is always 0 here; the #else branch
     * is kept for symmetry with write_ldt().  */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Assemble the two 32-bit halves of the hardware descriptor:
     * entry_1 = base[15:0] | limit[15:0]; entry_2 packs the remaining
     * base/limit bits plus the type/present/granularity flags.  0x7000
     * sets DPL=3 and S=1 (user data/code segment).  */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5295
/* Emulate the i386 get_thread_area() syscall: read the GDT TLS descriptor
 * selected by the guest's user_desc.entry_number and unpack it back into
 * user_desc form (inverse of do_set_thread_area() above).  Returns 0,
 * -TARGET_EFAULT on a bad guest pointer, or -TARGET_EINVAL for an
 * out-of-range entry number.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack descriptor bits; the ^ 1 inversions undo the encoding used
     * when the entry was installed (readable/present bits are stored
     * inverted relative to the user_desc flags).  */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    /* Reassemble the 32-bit base from its three descriptor fragments.  */
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5342 #endif /* TARGET_I386 && TARGET_ABI32 */
5343
5344 #ifndef TARGET_ABI32
/* Emulate x86-64 arch_prctl(): get/set the FS or GS segment base of the
 * emulated CPU.  For SET, the selector is cleared and the base written
 * directly into the segment cache; for GET, the base is copied out to the
 * guest address @addr.  Returns 0, -TARGET_EFAULT on a bad guest pointer,
 * or -TARGET_EINVAL for an unknown code.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Load a null selector first, then set the hidden base.  */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
5377 #endif
5378
5379 #endif /* defined(TARGET_I386) */
5380
5381 #define NEW_STACK_SIZE 0x40000
5382
5383
/* Serializes guest thread creation: held by the parent in do_fork() until
 * the child's TLS state is fully initialized; the child briefly takes it
 * in clone_func() to wait for that initialization to finish.  */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent/child handshake data passed to clone_func() when emulating
 * clone() with CLONE_VM via a host pthread.  */
typedef struct {
    CPUArchState *env;          /* CPU state for the new thread */
    pthread_mutex_t mutex;      /* protects the cond handshake below */
    pthread_cond_t cond;        /* signalled by the child when ready */
    pthread_t thread;
    uint32_t tid;               /* host tid, filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;           /* signal mask the child restores */
} new_thread_info;
5395
/* Host pthread entry point for a guest thread created with CLONE_VM.
 * Publishes the child's tid, restores signals, signals readiness to the
 * parent, waits for the parent to finish TLS setup, then enters the CPU
 * emulation loop.  Never returns.  */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID if requested.  */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
5428
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Emulates clone()/fork()/vfork().  With CLONE_VM the new guest thread is
   backed by a detached host pthread sharing this address space; without
   it, a full host fork() is performed.  Returns the child tid/pid on
   success, -1 on pthread failure, or a negative target errno.  */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        /* Remember the NPTL-related flags, then strip them before they
         * are checked/used as plain clone flags below.  */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the return values of the three pthread_attr_*
         * calls are each overwritten by the next assignment and never
         * checked; only the pthread_create() result is used.  */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
5550
5551 /* warning : doesn't handle linux specific flags... */
5552 static int target_to_host_fcntl_cmd(int cmd)
5553 {
5554 switch(cmd) {
5555 case TARGET_F_DUPFD:
5556 case TARGET_F_GETFD:
5557 case TARGET_F_SETFD:
5558 case TARGET_F_GETFL:
5559 case TARGET_F_SETFL:
5560 return cmd;
5561 case TARGET_F_GETLK:
5562 return F_GETLK64;
5563 case TARGET_F_SETLK:
5564 return F_SETLK64;
5565 case TARGET_F_SETLKW:
5566 return F_SETLKW64;
5567 case TARGET_F_GETOWN:
5568 return F_GETOWN;
5569 case TARGET_F_SETOWN:
5570 return F_SETOWN;
5571 case TARGET_F_GETSIG:
5572 return F_GETSIG;
5573 case TARGET_F_SETSIG:
5574 return F_SETSIG;
5575 #if TARGET_ABI_BITS == 32
5576 case TARGET_F_GETLK64:
5577 return F_GETLK64;
5578 case TARGET_F_SETLK64:
5579 return F_SETLK64;
5580 case TARGET_F_SETLKW64:
5581 return F_SETLKW64;
5582 #endif
5583 case TARGET_F_SETLEASE:
5584 return F_SETLEASE;
5585 case TARGET_F_GETLEASE:
5586 return F_GETLEASE;
5587 #ifdef F_DUPFD_CLOEXEC
5588 case TARGET_F_DUPFD_CLOEXEC:
5589 return F_DUPFD_CLOEXEC;
5590 #endif
5591 case TARGET_F_NOTIFY:
5592 return F_NOTIFY;
5593 #ifdef F_GETOWN_EX
5594 case TARGET_F_GETOWN_EX:
5595 return F_GETOWN_EX;
5596 #endif
5597 #ifdef F_SETOWN_EX
5598 case TARGET_F_SETOWN_EX:
5599 return F_SETOWN_EX;
5600 #endif
5601 case TARGET_F_SETPIPE_SZ:
5602 return F_SETPIPE_SZ;
5603 case TARGET_F_GETPIPE_SZ:
5604 return F_GETPIPE_SZ;
5605 default:
5606 return -TARGET_EINVAL;
5607 }
5608 return -TARGET_EINVAL;
5609 }
5610
/* Bitmask translation table mapping guest flock l_type values
 * (TARGET_F_RDLCK etc.) to the host equivalents; used by the
 * copy_{from,to}_user_flock* helpers below.  The -1 masks mean the whole
 * value participates in the match.  */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }  /* terminator */
};
5620
/* Copy a guest struct flock (native ABI layout) into a host struct
 * flock64, translating l_type via flock_tbl.  Returns 0 or
 * -TARGET_EFAULT if the guest address is unreadable.  */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
5640
/* Copy a host struct flock64 out to a guest struct flock (native ABI
 * layout), translating l_type via flock_tbl.  Returns 0 or
 * -TARGET_EFAULT if the guest address is unwritable.  */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5660
5661 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5662 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5663
5664 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI variant: copy a guest struct flock64 (EABI alignment/padding)
 * into a host struct flock64.  Returns 0 or -TARGET_EFAULT.  */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
5684
/* ARM EABI variant: copy a host struct flock64 out to a guest struct
 * flock64 (EABI alignment/padding).  Returns 0 or -TARGET_EFAULT.  */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5704 #endif
5705
/* Copy a guest struct flock64 into a host struct flock64, translating
 * l_type via flock_tbl.  Returns 0 or -TARGET_EFAULT.  */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
5725
/* Copy a host struct flock64 out to a guest struct flock64, translating
 * l_type via flock_tbl.  Returns 0 or -TARGET_EFAULT.  */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5745
/* Emulate fcntl(): translate the command and its argument between guest
 * and host representations, perform the host fcntl, and translate results
 * back.  Returns the host fcntl result or a negative target errno.  */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    /* Record locks: arg is a guest struct flock (native layout);
     * converted through a host flock64 in both directions.  */
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    /* 64-bit record locks: arg is a guest struct flock64.  */
    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    /* File status flags need O_* bitmask translation.  */
    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    /* arg points at a guest struct f_owner_ex; byteswap field by field.  */
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Plain integer argument: pass straight through.  */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Untranslated command: try it with the guest number as-is.  */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
5853
5854 #ifdef USE_UID16
5855
/* Clamp a 32-bit uid into the 16-bit range for the legacy UID16 ABI:
 * anything that does not fit is reported as the overflow uid 65534.  */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
5863
/* Clamp a 32-bit gid into the 16-bit range for the legacy UID16 ABI:
 * anything that does not fit is reported as the overflow gid 65534.  */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
5871
/* Widen a 16-bit uid to 32 bits: the 16-bit "no change" sentinel 0xffff
 * maps to the 32-bit sentinel -1; everything else passes through.  */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
5879
/* Widen a 16-bit gid to 32 bits: the 16-bit "no change" sentinel 0xffff
 * maps to the 32-bit sentinel -1; everything else passes through.  */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between guest and host order.  */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Under USE_UID16, IDs are stored to guest memory with 16-bit accesses. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5893
5894 #else /* !USE_UID16 */
/* Without USE_UID16 the guest ABI uses full 32-bit uids/gids, so all the
 * narrowing/widening conversions are identity functions and IDs are
 * stored to guest memory with 32-bit accesses.  */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between guest and host order.  */
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5917
5918 #endif /* USE_UID16 */
5919
5920 /* We must do direct syscalls for setting UID/GID, because we want to
5921 * implement the Linux system call semantics of "change only for this thread",
5922 * not the libc/POSIX semantics of "change for all threads in process".
5923 * (See http://ewontfix.com/17/ for more details.)
5924 * We use the 32-bit version of the syscalls if present; if it is not
5925 * then either the host architecture supports 32-bit UIDs natively with
5926 * the standard syscall, or the 16-bit UID is the best we can do.
5927 */
5928 #ifdef __NR_setuid32
5929 #define __NR_sys_setuid __NR_setuid32
5930 #else
5931 #define __NR_sys_setuid __NR_setuid
5932 #endif
5933 #ifdef __NR_setgid32
5934 #define __NR_sys_setgid __NR_setgid32
5935 #else
5936 #define __NR_sys_setgid __NR_setgid
5937 #endif
5938 #ifdef __NR_setresuid32
5939 #define __NR_sys_setresuid __NR_setresuid32
5940 #else
5941 #define __NR_sys_setresuid __NR_setresuid
5942 #endif
5943 #ifdef __NR_setresgid32
5944 #define __NR_sys_setresgid __NR_setresgid32
5945 #else
5946 #define __NR_sys_setresgid __NR_setresgid
5947 #endif
5948
/* Raw syscall wrappers: calling the kernel directly (rather than the libc
 * wrappers) keeps the Linux per-thread semantics — the ID change applies
 * only to the calling thread, not to every thread in the process.  */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
5953
/* One-time initialization for the syscall layer: registers thunk struct
 * descriptors, builds the target->host errno table, and fixes up ioctl
 * command numbers whose size field must be computed at runtime.  */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        /* An all-ones size field marks entries whose size must be
         * derived from the thunk type description of the argument.  */
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
6005
6006 #if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit file offset as
 * passed by a 32-bit guest ABI, honouring guest endianness.  */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the whole offset arrives in one register; the second
 * word is unused.  */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
6021
6022 #ifdef TARGET_NR_truncate64
/* Emulate truncate64(): on ABIs that require 64-bit register pairs to be
 * aligned (regpairs_aligned), the offset pair is shifted up one argument
 * slot, so re-select the words before combining them.  */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
6034 #endif
6035
6036 #ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64(); same register-pair alignment handling as
 * target_truncate64() above, with arg1 being the file descriptor.  */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
6048 #endif
6049
/* Copy a guest struct timespec into the host representation.
 * Returns 0 or -TARGET_EFAULT if the guest address is unreadable.  */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
6062
/* Copy a host struct timespec out to the guest representation.
 * Returns 0 or -TARGET_EFAULT if the guest address is unwritable.  */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6075
6076 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6077 abi_ulong target_addr)
6078 {
6079 struct target_itimerspec *target_itspec;
6080
6081 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6082 return -TARGET_EFAULT;
6083 }
6084
6085 host_itspec->it_interval.tv_sec =
6086 tswapal(target_itspec->it_interval.tv_sec);
6087 host_itspec->it_interval.tv_nsec =
6088 tswapal(target_itspec->it_interval.tv_nsec);
6089 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6090 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6091
6092 unlock_user_struct(target_itspec, target_addr, 1);
6093 return 0;
6094 }
6095
6096 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6097 struct itimerspec *host_its)
6098 {
6099 struct target_itimerspec *target_itspec;
6100
6101 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6102 return -TARGET_EFAULT;
6103 }
6104
6105 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6106 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6107
6108 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6109 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6110
6111 unlock_user_struct(target_itspec, target_addr, 0);
6112 return 0;
6113 }
6114
6115 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6116 abi_ulong target_addr)
6117 {
6118 struct target_sigevent *target_sevp;
6119
6120 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6121 return -TARGET_EFAULT;
6122 }
6123
6124 /* This union is awkward on 64 bit systems because it has a 32 bit
6125 * integer and a pointer in it; we follow the conversion approach
6126 * used for handling sigval types in signal.c so the guest should get
6127 * the correct value back even if we did a 64 bit byteswap and it's
6128 * using the 32 bit integer.
6129 */
6130 host_sevp->sigev_value.sival_ptr =
6131 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6132 host_sevp->sigev_signo =
6133 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6134 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6135 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6136
6137 unlock_user_struct(target_sevp, target_addr, 1);
6138 return 0;
6139 }
6140
6141 #if defined(TARGET_NR_mlockall)
6142 static inline int target_to_host_mlockall_arg(int arg)
6143 {
6144 int result = 0;
6145
6146 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6147 result |= MCL_CURRENT;
6148 }
6149 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6150 result |= MCL_FUTURE;
6151 }
6152 return result;
6153 }
6154 #endif
6155
/* Copy a host struct stat out to the guest's 64-bit stat structure
 * (target_stat64, or target_eabi_stat64 for ARM EABI guests, or plain
 * target_stat if the target has no stat64).  Returns 0 or
 * -TARGET_EFAULT if the guest address is unwritable.  */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    /* ARM EABI guests use a differently padded/aligned layout.  */
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry the inode in a second, differently sized
         * field as well.  */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6218
6219 /* ??? Using host futex calls even when target atomic operations
6220 are not really atomic probably breaks things. However implementing
6221 futexes locally would make futexes shared between multiple processes
6222 tricky. However they're probably useless because guest atomic
6223 operations won't work either. */
6224 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6225 target_ulong uaddr2, int val3)
6226 {
6227 struct timespec ts, *pts;
6228 int base_op;
6229
6230 /* ??? We assume FUTEX_* constants are the same on both host
6231 and target. */
6232 #ifdef FUTEX_CMD_MASK
6233 base_op = op & FUTEX_CMD_MASK;
6234 #else
6235 base_op = op;
6236 #endif
6237 switch (base_op) {
6238 case FUTEX_WAIT:
6239 case FUTEX_WAIT_BITSET:
6240 if (timeout) {
6241 pts = &ts;
6242 target_to_host_timespec(pts, timeout);
6243 } else {
6244 pts = NULL;
6245 }
6246 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6247 pts, NULL, val3));
6248 case FUTEX_WAKE:
6249 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6250 case FUTEX_FD:
6251 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6252 case FUTEX_REQUEUE:
6253 case FUTEX_CMP_REQUEUE:
6254 case FUTEX_WAKE_OP:
6255 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6256 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6257 But the prototype takes a `struct timespec *'; insert casts
6258 to satisfy the compiler. We do not need to tswap TIMEOUT
6259 since it's not compared to guest memory. */
6260 pts = (struct timespec *)(uintptr_t) timeout;
6261 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6262 g2h(uaddr2),
6263 (base_op == FUTEX_CMP_REQUEUE
6264 ? tswap32(val3)
6265 : val3)));
6266 default:
6267 return -TARGET_ENOSYS;
6268 }
6269 }
6270 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6271 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6272 abi_long handle, abi_long mount_id,
6273 abi_long flags)
6274 {
6275 struct file_handle *target_fh;
6276 struct file_handle *fh;
6277 int mid = 0;
6278 abi_long ret;
6279 char *name;
6280 unsigned int size, total_size;
6281
6282 if (get_user_s32(size, handle)) {
6283 return -TARGET_EFAULT;
6284 }
6285
6286 name = lock_user_string(pathname);
6287 if (!name) {
6288 return -TARGET_EFAULT;
6289 }
6290
6291 total_size = sizeof(struct file_handle) + size;
6292 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6293 if (!target_fh) {
6294 unlock_user(name, pathname, 0);
6295 return -TARGET_EFAULT;
6296 }
6297
6298 fh = g_malloc0(total_size);
6299 fh->handle_bytes = size;
6300
6301 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6302 unlock_user(name, pathname, 0);
6303
6304 /* man name_to_handle_at(2):
6305 * Other than the use of the handle_bytes field, the caller should treat
6306 * the file_handle structure as an opaque data type
6307 */
6308
6309 memcpy(target_fh, fh, total_size);
6310 target_fh->handle_bytes = tswap32(fh->handle_bytes);
6311 target_fh->handle_type = tswap32(fh->handle_type);
6312 g_free(fh);
6313 unlock_user(target_fh, handle, total_size);
6314
6315 if (put_user_s32(mid, mount_id)) {
6316 return -TARGET_EFAULT;
6317 }
6318
6319 return ret;
6320
6321 }
6322 #endif
6323
6324 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6325 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6326 abi_long flags)
6327 {
6328 struct file_handle *target_fh;
6329 struct file_handle *fh;
6330 unsigned int size, total_size;
6331 abi_long ret;
6332
6333 if (get_user_s32(size, handle)) {
6334 return -TARGET_EFAULT;
6335 }
6336
6337 total_size = sizeof(struct file_handle) + size;
6338 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6339 if (!target_fh) {
6340 return -TARGET_EFAULT;
6341 }
6342
6343 fh = g_memdup(target_fh, total_size);
6344 fh->handle_bytes = size;
6345 fh->handle_type = tswap32(target_fh->handle_type);
6346
6347 ret = get_errno(open_by_handle_at(mount_fd, fh,
6348 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6349
6350 g_free(fh);
6351
6352 unlock_user(target_fh, handle, total_size);
6353
6354 return ret;
6355 }
6356 #endif
6357
6358 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6359
6360 /* signalfd siginfo conversion */
6361
/* Convert one signalfd_siginfo record from host layout/byte order to
 * guest layout/byte order.  NOTE: the only caller passes the same
 * buffer as both TINFO and INFO (in-place conversion), so every field
 * must be read from INFO *before* the corresponding TINFO store. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Test the *host* record (info): tinfo has not been filled in yet
     * at this point, so reading it would only be correct when the two
     * pointers alias. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        const uint16_t *ssi_addr_lsb = (const uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Source is info->ssi_errno; the old code read the (unfilled)
     * destination field, which only worked because tinfo == info. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6399
6400 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6401 {
6402 int i;
6403
6404 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6405 host_to_target_signalfd_siginfo(buf + i, buf + i);
6406 }
6407
6408 return len;
6409 }
6410
/* fd translator attached to guest signalfds: data read from the fd is
 * passed through host_to_target_data_signalfd() before reaching the
 * guest. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
6414
6415 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6416 {
6417 int host_flags;
6418 target_sigset_t *target_mask;
6419 sigset_t host_mask;
6420 abi_long ret;
6421
6422 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6423 return -TARGET_EINVAL;
6424 }
6425 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6426 return -TARGET_EFAULT;
6427 }
6428
6429 target_to_host_sigset(&host_mask, target_mask);
6430
6431 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6432
6433 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6434 if (ret >= 0) {
6435 fd_trans_register(ret, &target_signalfd_trans);
6436 }
6437
6438 unlock_user_struct(target_mask, mask, 0);
6439
6440 return ret;
6441 }
6442 #endif
6443
/* Translate the signal number embedded in a host wait status (as
 * produced by wait4/waitpid) into target numbering.  All other status
 * bits are assumed to have the same layout on host and target and are
 * passed through untouched. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits. */
        return (status & ~0x7f) | host_to_target_signal(WTERMSIG(status));
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal occupies bits 8..15. */
        return (status & 0xff)
               | (host_to_target_signal(WSTOPSIG(status)) << 8);
    }
    return status;
}
6457
/* Fill FD with the guest's view of /proc/self/cmdline: the host file
 * with its first NUL-terminated word (the qemu binary path) removed.
 * Returns 0 on success, -1 (with errno set) on I/O failure.
 * cpu_env is unused; it is part of the common fake_open fill() signature. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command.  Only scan the bytes that
               were actually read: scanning sizeof(buf) could match a
               stale NUL left over from a previous iteration. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6507
6508 static int open_self_maps(void *cpu_env, int fd)
6509 {
6510 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6511 TaskState *ts = cpu->opaque;
6512 FILE *fp;
6513 char *line = NULL;
6514 size_t len = 0;
6515 ssize_t read;
6516
6517 fp = fopen("/proc/self/maps", "r");
6518 if (fp == NULL) {
6519 return -1;
6520 }
6521
6522 while ((read = getline(&line, &len, fp)) != -1) {
6523 int fields, dev_maj, dev_min, inode;
6524 uint64_t min, max, offset;
6525 char flag_r, flag_w, flag_x, flag_p;
6526 char path[512] = "";
6527 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6528 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6529 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6530
6531 if ((fields < 10) || (fields > 11)) {
6532 continue;
6533 }
6534 if (h2g_valid(min)) {
6535 int flags = page_get_flags(h2g(min));
6536 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6537 if (page_check_range(h2g(min), max - min, flags) == -1) {
6538 continue;
6539 }
6540 if (h2g(min) == ts->info->stack_limit) {
6541 pstrcpy(path, sizeof(path), " [stack]");
6542 }
6543 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6544 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6545 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6546 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6547 path[0] ? " " : "", path);
6548 }
6549 }
6550
6551 free(line);
6552 fclose(fp);
6553
6554 return 0;
6555 }
6556
6557 static int open_self_stat(void *cpu_env, int fd)
6558 {
6559 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6560 TaskState *ts = cpu->opaque;
6561 abi_ulong start_stack = ts->info->start_stack;
6562 int i;
6563
6564 for (i = 0; i < 44; i++) {
6565 char buf[128];
6566 int len;
6567 uint64_t val = 0;
6568
6569 if (i == 0) {
6570 /* pid */
6571 val = getpid();
6572 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6573 } else if (i == 1) {
6574 /* app name */
6575 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6576 } else if (i == 27) {
6577 /* stack bottom */
6578 val = start_stack;
6579 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6580 } else {
6581 /* for the rest, there is MasterCard */
6582 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6583 }
6584
6585 len = strlen(buf);
6586 if (write(fd, buf, len) != len) {
6587 return -1;
6588 }
6589 }
6590
6591 return 0;
6592 }
6593
6594 static int open_self_auxv(void *cpu_env, int fd)
6595 {
6596 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6597 TaskState *ts = cpu->opaque;
6598 abi_ulong auxv = ts->info->saved_auxv;
6599 abi_ulong len = ts->info->auxv_len;
6600 char *ptr;
6601
6602 /*
6603 * Auxiliary vector is stored in target process stack.
6604 * read in whole auxv vector and copy it to file
6605 */
6606 ptr = lock_user(VERIFY_READ, auxv, len, 0);
6607 if (ptr != NULL) {
6608 while (len > 0) {
6609 ssize_t r;
6610 r = write(fd, ptr, len);
6611 if (r <= 0) {
6612 break;
6613 }
6614 len -= r;
6615 ptr += r;
6616 }
6617 lseek(fd, 0, SEEK_SET);
6618 unlock_user(ptr, auxv, len);
6619 }
6620
6621 return 0;
6622 }
6623
/* Return 1 when FILENAME refers to ENTRY inside this process's own
 * /proc directory, i.e. "/proc/self/ENTRY" or "/proc/<ourpid>/ENTRY";
 * 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest;
    char pid_prefix[80];

    /* Must live under /proc/ at all. */
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest = filename + strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric directory only refers to us if it is our own pid. */
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
6647
6648 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Matcher for fake_open entries keyed by a full /proc path: true only
 * on an exact string match. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6653
6654 static int open_net_route(void *cpu_env, int fd)
6655 {
6656 FILE *fp;
6657 char *line = NULL;
6658 size_t len = 0;
6659 ssize_t read;
6660
6661 fp = fopen("/proc/net/route", "r");
6662 if (fp == NULL) {
6663 return -1;
6664 }
6665
6666 /* read header */
6667
6668 read = getline(&line, &len, fp);
6669 dprintf(fd, "%s", line);
6670
6671 /* read routes */
6672
6673 while ((read = getline(&line, &len, fp)) != -1) {
6674 char iface[16];
6675 uint32_t dest, gw, mask;
6676 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6677 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6678 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6679 &mask, &mtu, &window, &irtt);
6680 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6681 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6682 metric, tswap32(mask), mtu, window, irtt);
6683 }
6684
6685 free(line);
6686 fclose(fp);
6687
6688 return 0;
6689 }
6690 #endif
6691
/* Implementation of open(2)/openat(2) for the guest.  Most paths are
 * forwarded to the host, but a few /proc entries are emulated by
 * materialising synthetic contents into an unlinked temporary file
 * whose fd is returned to the guest.  Returns an fd on success or a
 * negative host errno-style value on failure. */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                 /* name (or full path) to match */
        int (*fill)(void *cpu_env, int fd);   /* writes emulated contents into fd */
        int (*cmp)(const char *s1, const char *s2);  /* matcher for pathname */
    };
    const struct fake_open *fake_open;
    /* Table of /proc files that must be emulated rather than passed
     * through to the host; terminated by a NULL entry. */
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        /* Only needed when host and guest endianness differ. */
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must resolve to the guest binary, not to qemu. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the file lives only as long as the fd. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the synthetic contents from 0. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
6751
6752 #define TIMER_MAGIC 0x0caf0000
6753 #define TIMER_MAGIC_MASK 0xffff0000
6754
6755 /* Convert QEMU provided timer ID back to internal 16bit index format */
6756 static target_timer_t get_timer_id(abi_long arg)
6757 {
6758 target_timer_t timerid = arg;
6759
6760 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6761 return -TARGET_EINVAL;
6762 }
6763
6764 timerid &= 0xffff;
6765
6766 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6767 return -TARGET_EINVAL;
6768 }
6769
6770 return timerid;
6771 }
6772
6773 /* do_syscall() should always have a single exit point at the end so
6774 that actions, such as logging of syscall results, can be performed.
6775 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6776 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6777 abi_long arg2, abi_long arg3, abi_long arg4,
6778 abi_long arg5, abi_long arg6, abi_long arg7,
6779 abi_long arg8)
6780 {
6781 CPUState *cpu = ENV_GET_CPU(cpu_env);
6782 abi_long ret;
6783 struct stat st;
6784 struct statfs stfs;
6785 void *p;
6786
6787 #if defined(DEBUG_ERESTARTSYS)
6788 /* Debug-only code for exercising the syscall-restart code paths
6789 * in the per-architecture cpu main loops: restart every syscall
6790 * the guest makes once before letting it through.
6791 */
6792 {
6793 static int flag;
6794
6795 flag = !flag;
6796 if (flag) {
6797 return -TARGET_ERESTARTSYS;
6798 }
6799 }
6800 #endif
6801
6802 #ifdef DEBUG
6803 gemu_log("syscall %d", num);
6804 #endif
6805 if(do_strace)
6806 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6807
6808 switch(num) {
6809 case TARGET_NR_exit:
6810 /* In old applications this may be used to implement _exit(2).
6811 However in threaded applictions it is used for thread termination,
6812 and _exit_group is used for application termination.
6813 Do thread termination if we have more then one thread. */
6814
6815 if (block_signals()) {
6816 ret = -TARGET_ERESTARTSYS;
6817 break;
6818 }
6819
6820 if (CPU_NEXT(first_cpu)) {
6821 TaskState *ts;
6822
6823 cpu_list_lock();
6824 /* Remove the CPU from the list. */
6825 QTAILQ_REMOVE(&cpus, cpu, node);
6826 cpu_list_unlock();
6827 ts = cpu->opaque;
6828 if (ts->child_tidptr) {
6829 put_user_u32(0, ts->child_tidptr);
6830 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6831 NULL, NULL, 0);
6832 }
6833 thread_cpu = NULL;
6834 object_unref(OBJECT(cpu));
6835 g_free(ts);
6836 rcu_unregister_thread();
6837 pthread_exit(NULL);
6838 }
6839 #ifdef TARGET_GPROF
6840 _mcleanup();
6841 #endif
6842 gdb_exit(cpu_env, arg1);
6843 _exit(arg1);
6844 ret = 0; /* avoid warning */
6845 break;
6846 case TARGET_NR_read:
6847 if (arg3 == 0)
6848 ret = 0;
6849 else {
6850 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6851 goto efault;
6852 ret = get_errno(safe_read(arg1, p, arg3));
6853 if (ret >= 0 &&
6854 fd_trans_host_to_target_data(arg1)) {
6855 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6856 }
6857 unlock_user(p, arg2, ret);
6858 }
6859 break;
6860 case TARGET_NR_write:
6861 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6862 goto efault;
6863 ret = get_errno(safe_write(arg1, p, arg3));
6864 unlock_user(p, arg2, 0);
6865 break;
6866 #ifdef TARGET_NR_open
6867 case TARGET_NR_open:
6868 if (!(p = lock_user_string(arg1)))
6869 goto efault;
6870 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6871 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6872 arg3));
6873 fd_trans_unregister(ret);
6874 unlock_user(p, arg1, 0);
6875 break;
6876 #endif
6877 case TARGET_NR_openat:
6878 if (!(p = lock_user_string(arg2)))
6879 goto efault;
6880 ret = get_errno(do_openat(cpu_env, arg1, p,
6881 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6882 arg4));
6883 fd_trans_unregister(ret);
6884 unlock_user(p, arg2, 0);
6885 break;
6886 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6887 case TARGET_NR_name_to_handle_at:
6888 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6889 break;
6890 #endif
6891 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6892 case TARGET_NR_open_by_handle_at:
6893 ret = do_open_by_handle_at(arg1, arg2, arg3);
6894 fd_trans_unregister(ret);
6895 break;
6896 #endif
6897 case TARGET_NR_close:
6898 fd_trans_unregister(arg1);
6899 ret = get_errno(close(arg1));
6900 break;
6901 case TARGET_NR_brk:
6902 ret = do_brk(arg1);
6903 break;
6904 #ifdef TARGET_NR_fork
6905 case TARGET_NR_fork:
6906 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6907 break;
6908 #endif
6909 #ifdef TARGET_NR_waitpid
6910 case TARGET_NR_waitpid:
6911 {
6912 int status;
6913 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6914 if (!is_error(ret) && arg2 && ret
6915 && put_user_s32(host_to_target_waitstatus(status), arg2))
6916 goto efault;
6917 }
6918 break;
6919 #endif
6920 #ifdef TARGET_NR_waitid
6921 case TARGET_NR_waitid:
6922 {
6923 siginfo_t info;
6924 info.si_pid = 0;
6925 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6926 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6927 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6928 goto efault;
6929 host_to_target_siginfo(p, &info);
6930 unlock_user(p, arg3, sizeof(target_siginfo_t));
6931 }
6932 }
6933 break;
6934 #endif
6935 #ifdef TARGET_NR_creat /* not on alpha */
6936 case TARGET_NR_creat:
6937 if (!(p = lock_user_string(arg1)))
6938 goto efault;
6939 ret = get_errno(creat(p, arg2));
6940 fd_trans_unregister(ret);
6941 unlock_user(p, arg1, 0);
6942 break;
6943 #endif
6944 #ifdef TARGET_NR_link
6945 case TARGET_NR_link:
6946 {
6947 void * p2;
6948 p = lock_user_string(arg1);
6949 p2 = lock_user_string(arg2);
6950 if (!p || !p2)
6951 ret = -TARGET_EFAULT;
6952 else
6953 ret = get_errno(link(p, p2));
6954 unlock_user(p2, arg2, 0);
6955 unlock_user(p, arg1, 0);
6956 }
6957 break;
6958 #endif
6959 #if defined(TARGET_NR_linkat)
6960 case TARGET_NR_linkat:
6961 {
6962 void * p2 = NULL;
6963 if (!arg2 || !arg4)
6964 goto efault;
6965 p = lock_user_string(arg2);
6966 p2 = lock_user_string(arg4);
6967 if (!p || !p2)
6968 ret = -TARGET_EFAULT;
6969 else
6970 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6971 unlock_user(p, arg2, 0);
6972 unlock_user(p2, arg4, 0);
6973 }
6974 break;
6975 #endif
6976 #ifdef TARGET_NR_unlink
6977 case TARGET_NR_unlink:
6978 if (!(p = lock_user_string(arg1)))
6979 goto efault;
6980 ret = get_errno(unlink(p));
6981 unlock_user(p, arg1, 0);
6982 break;
6983 #endif
6984 #if defined(TARGET_NR_unlinkat)
6985 case TARGET_NR_unlinkat:
6986 if (!(p = lock_user_string(arg2)))
6987 goto efault;
6988 ret = get_errno(unlinkat(arg1, p, arg3));
6989 unlock_user(p, arg2, 0);
6990 break;
6991 #endif
6992 case TARGET_NR_execve:
6993 {
6994 char **argp, **envp;
6995 int argc, envc;
6996 abi_ulong gp;
6997 abi_ulong guest_argp;
6998 abi_ulong guest_envp;
6999 abi_ulong addr;
7000 char **q;
7001 int total_size = 0;
7002
7003 argc = 0;
7004 guest_argp = arg2;
7005 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7006 if (get_user_ual(addr, gp))
7007 goto efault;
7008 if (!addr)
7009 break;
7010 argc++;
7011 }
7012 envc = 0;
7013 guest_envp = arg3;
7014 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7015 if (get_user_ual(addr, gp))
7016 goto efault;
7017 if (!addr)
7018 break;
7019 envc++;
7020 }
7021
7022 argp = alloca((argc + 1) * sizeof(void *));
7023 envp = alloca((envc + 1) * sizeof(void *));
7024
7025 for (gp = guest_argp, q = argp; gp;
7026 gp += sizeof(abi_ulong), q++) {
7027 if (get_user_ual(addr, gp))
7028 goto execve_efault;
7029 if (!addr)
7030 break;
7031 if (!(*q = lock_user_string(addr)))
7032 goto execve_efault;
7033 total_size += strlen(*q) + 1;
7034 }
7035 *q = NULL;
7036
7037 for (gp = guest_envp, q = envp; gp;
7038 gp += sizeof(abi_ulong), q++) {
7039 if (get_user_ual(addr, gp))
7040 goto execve_efault;
7041 if (!addr)
7042 break;
7043 if (!(*q = lock_user_string(addr)))
7044 goto execve_efault;
7045 total_size += strlen(*q) + 1;
7046 }
7047 *q = NULL;
7048
7049 if (!(p = lock_user_string(arg1)))
7050 goto execve_efault;
7051 /* Although execve() is not an interruptible syscall it is
7052 * a special case where we must use the safe_syscall wrapper:
7053 * if we allow a signal to happen before we make the host
7054 * syscall then we will 'lose' it, because at the point of
7055 * execve the process leaves QEMU's control. So we use the
7056 * safe syscall wrapper to ensure that we either take the
7057 * signal as a guest signal, or else it does not happen
7058 * before the execve completes and makes it the other
7059 * program's problem.
7060 */
7061 ret = get_errno(safe_execve(p, argp, envp));
7062 unlock_user(p, arg1, 0);
7063
7064 goto execve_end;
7065
7066 execve_efault:
7067 ret = -TARGET_EFAULT;
7068
7069 execve_end:
7070 for (gp = guest_argp, q = argp; *q;
7071 gp += sizeof(abi_ulong), q++) {
7072 if (get_user_ual(addr, gp)
7073 || !addr)
7074 break;
7075 unlock_user(*q, addr, 0);
7076 }
7077 for (gp = guest_envp, q = envp; *q;
7078 gp += sizeof(abi_ulong), q++) {
7079 if (get_user_ual(addr, gp)
7080 || !addr)
7081 break;
7082 unlock_user(*q, addr, 0);
7083 }
7084 }
7085 break;
7086 case TARGET_NR_chdir:
7087 if (!(p = lock_user_string(arg1)))
7088 goto efault;
7089 ret = get_errno(chdir(p));
7090 unlock_user(p, arg1, 0);
7091 break;
7092 #ifdef TARGET_NR_time
7093 case TARGET_NR_time:
7094 {
7095 time_t host_time;
7096 ret = get_errno(time(&host_time));
7097 if (!is_error(ret)
7098 && arg1
7099 && put_user_sal(host_time, arg1))
7100 goto efault;
7101 }
7102 break;
7103 #endif
7104 #ifdef TARGET_NR_mknod
7105 case TARGET_NR_mknod:
7106 if (!(p = lock_user_string(arg1)))
7107 goto efault;
7108 ret = get_errno(mknod(p, arg2, arg3));
7109 unlock_user(p, arg1, 0);
7110 break;
7111 #endif
7112 #if defined(TARGET_NR_mknodat)
7113 case TARGET_NR_mknodat:
7114 if (!(p = lock_user_string(arg2)))
7115 goto efault;
7116 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7117 unlock_user(p, arg2, 0);
7118 break;
7119 #endif
7120 #ifdef TARGET_NR_chmod
7121 case TARGET_NR_chmod:
7122 if (!(p = lock_user_string(arg1)))
7123 goto efault;
7124 ret = get_errno(chmod(p, arg2));
7125 unlock_user(p, arg1, 0);
7126 break;
7127 #endif
7128 #ifdef TARGET_NR_break
7129 case TARGET_NR_break:
7130 goto unimplemented;
7131 #endif
7132 #ifdef TARGET_NR_oldstat
7133 case TARGET_NR_oldstat:
7134 goto unimplemented;
7135 #endif
7136 case TARGET_NR_lseek:
7137 ret = get_errno(lseek(arg1, arg2, arg3));
7138 break;
7139 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7140 /* Alpha specific */
7141 case TARGET_NR_getxpid:
7142 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7143 ret = get_errno(getpid());
7144 break;
7145 #endif
7146 #ifdef TARGET_NR_getpid
7147 case TARGET_NR_getpid:
7148 ret = get_errno(getpid());
7149 break;
7150 #endif
7151 case TARGET_NR_mount:
7152 {
7153 /* need to look at the data field */
7154 void *p2, *p3;
7155
7156 if (arg1) {
7157 p = lock_user_string(arg1);
7158 if (!p) {
7159 goto efault;
7160 }
7161 } else {
7162 p = NULL;
7163 }
7164
7165 p2 = lock_user_string(arg2);
7166 if (!p2) {
7167 if (arg1) {
7168 unlock_user(p, arg1, 0);
7169 }
7170 goto efault;
7171 }
7172
7173 if (arg3) {
7174 p3 = lock_user_string(arg3);
7175 if (!p3) {
7176 if (arg1) {
7177 unlock_user(p, arg1, 0);
7178 }
7179 unlock_user(p2, arg2, 0);
7180 goto efault;
7181 }
7182 } else {
7183 p3 = NULL;
7184 }
7185
7186 /* FIXME - arg5 should be locked, but it isn't clear how to
7187 * do that since it's not guaranteed to be a NULL-terminated
7188 * string.
7189 */
7190 if (!arg5) {
7191 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7192 } else {
7193 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7194 }
7195 ret = get_errno(ret);
7196
7197 if (arg1) {
7198 unlock_user(p, arg1, 0);
7199 }
7200 unlock_user(p2, arg2, 0);
7201 if (arg3) {
7202 unlock_user(p3, arg3, 0);
7203 }
7204 }
7205 break;
7206 #ifdef TARGET_NR_umount
7207 case TARGET_NR_umount:
7208 if (!(p = lock_user_string(arg1)))
7209 goto efault;
7210 ret = get_errno(umount(p));
7211 unlock_user(p, arg1, 0);
7212 break;
7213 #endif
7214 #ifdef TARGET_NR_stime /* not on alpha */
7215 case TARGET_NR_stime:
7216 {
7217 time_t host_time;
7218 if (get_user_sal(host_time, arg1))
7219 goto efault;
7220 ret = get_errno(stime(&host_time));
7221 }
7222 break;
7223 #endif
7224 case TARGET_NR_ptrace:
7225 goto unimplemented;
7226 #ifdef TARGET_NR_alarm /* not on alpha */
7227 case TARGET_NR_alarm:
7228 ret = alarm(arg1);
7229 break;
7230 #endif
7231 #ifdef TARGET_NR_oldfstat
7232 case TARGET_NR_oldfstat:
7233 goto unimplemented;
7234 #endif
7235 #ifdef TARGET_NR_pause /* not on alpha */
7236 case TARGET_NR_pause:
7237 if (!block_signals()) {
7238 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7239 }
7240 ret = -TARGET_EINTR;
7241 break;
7242 #endif
7243 #ifdef TARGET_NR_utime
7244 case TARGET_NR_utime:
7245 {
7246 struct utimbuf tbuf, *host_tbuf;
7247 struct target_utimbuf *target_tbuf;
7248 if (arg2) {
7249 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7250 goto efault;
7251 tbuf.actime = tswapal(target_tbuf->actime);
7252 tbuf.modtime = tswapal(target_tbuf->modtime);
7253 unlock_user_struct(target_tbuf, arg2, 0);
7254 host_tbuf = &tbuf;
7255 } else {
7256 host_tbuf = NULL;
7257 }
7258 if (!(p = lock_user_string(arg1)))
7259 goto efault;
7260 ret = get_errno(utime(p, host_tbuf));
7261 unlock_user(p, arg1, 0);
7262 }
7263 break;
7264 #endif
7265 #ifdef TARGET_NR_utimes
7266 case TARGET_NR_utimes:
7267 {
7268 struct timeval *tvp, tv[2];
7269 if (arg2) {
7270 if (copy_from_user_timeval(&tv[0], arg2)
7271 || copy_from_user_timeval(&tv[1],
7272 arg2 + sizeof(struct target_timeval)))
7273 goto efault;
7274 tvp = tv;
7275 } else {
7276 tvp = NULL;
7277 }
7278 if (!(p = lock_user_string(arg1)))
7279 goto efault;
7280 ret = get_errno(utimes(p, tvp));
7281 unlock_user(p, arg1, 0);
7282 }
7283 break;
7284 #endif
7285 #if defined(TARGET_NR_futimesat)
7286 case TARGET_NR_futimesat:
7287 {
7288 struct timeval *tvp, tv[2];
7289 if (arg3) {
7290 if (copy_from_user_timeval(&tv[0], arg3)
7291 || copy_from_user_timeval(&tv[1],
7292 arg3 + sizeof(struct target_timeval)))
7293 goto efault;
7294 tvp = tv;
7295 } else {
7296 tvp = NULL;
7297 }
7298 if (!(p = lock_user_string(arg2)))
7299 goto efault;
7300 ret = get_errno(futimesat(arg1, path(p), tvp));
7301 unlock_user(p, arg2, 0);
7302 }
7303 break;
7304 #endif
7305 #ifdef TARGET_NR_stty
7306 case TARGET_NR_stty:
7307 goto unimplemented;
7308 #endif
7309 #ifdef TARGET_NR_gtty
7310 case TARGET_NR_gtty:
7311 goto unimplemented;
7312 #endif
7313 #ifdef TARGET_NR_access
7314 case TARGET_NR_access:
7315 if (!(p = lock_user_string(arg1)))
7316 goto efault;
7317 ret = get_errno(access(path(p), arg2));
7318 unlock_user(p, arg1, 0);
7319 break;
7320 #endif
7321 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7322 case TARGET_NR_faccessat:
7323 if (!(p = lock_user_string(arg2)))
7324 goto efault;
7325 ret = get_errno(faccessat(arg1, p, arg3, 0));
7326 unlock_user(p, arg2, 0);
7327 break;
7328 #endif
7329 #ifdef TARGET_NR_nice /* not on alpha */
7330 case TARGET_NR_nice:
7331 ret = get_errno(nice(arg1));
7332 break;
7333 #endif
7334 #ifdef TARGET_NR_ftime
7335 case TARGET_NR_ftime:
7336 goto unimplemented;
7337 #endif
7338 case TARGET_NR_sync:
7339 sync();
7340 ret = 0;
7341 break;
7342 case TARGET_NR_kill:
7343 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7344 break;
7345 #ifdef TARGET_NR_rename
7346 case TARGET_NR_rename:
7347 {
7348 void *p2;
7349 p = lock_user_string(arg1);
7350 p2 = lock_user_string(arg2);
7351 if (!p || !p2)
7352 ret = -TARGET_EFAULT;
7353 else
7354 ret = get_errno(rename(p, p2));
7355 unlock_user(p2, arg2, 0);
7356 unlock_user(p, arg1, 0);
7357 }
7358 break;
7359 #endif
7360 #if defined(TARGET_NR_renameat)
7361 case TARGET_NR_renameat:
7362 {
7363 void *p2;
7364 p = lock_user_string(arg2);
7365 p2 = lock_user_string(arg4);
7366 if (!p || !p2)
7367 ret = -TARGET_EFAULT;
7368 else
7369 ret = get_errno(renameat(arg1, p, arg3, p2));
7370 unlock_user(p2, arg4, 0);
7371 unlock_user(p, arg2, 0);
7372 }
7373 break;
7374 #endif
7375 #ifdef TARGET_NR_mkdir
7376 case TARGET_NR_mkdir:
7377 if (!(p = lock_user_string(arg1)))
7378 goto efault;
7379 ret = get_errno(mkdir(p, arg2));
7380 unlock_user(p, arg1, 0);
7381 break;
7382 #endif
7383 #if defined(TARGET_NR_mkdirat)
7384 case TARGET_NR_mkdirat:
7385 if (!(p = lock_user_string(arg2)))
7386 goto efault;
7387 ret = get_errno(mkdirat(arg1, p, arg3));
7388 unlock_user(p, arg2, 0);
7389 break;
7390 #endif
7391 #ifdef TARGET_NR_rmdir
7392 case TARGET_NR_rmdir:
7393 if (!(p = lock_user_string(arg1)))
7394 goto efault;
7395 ret = get_errno(rmdir(p));
7396 unlock_user(p, arg1, 0);
7397 break;
7398 #endif
7399 case TARGET_NR_dup:
7400 ret = get_errno(dup(arg1));
7401 if (ret >= 0) {
7402 fd_trans_dup(arg1, ret);
7403 }
7404 break;
7405 #ifdef TARGET_NR_pipe
7406 case TARGET_NR_pipe:
7407 ret = do_pipe(cpu_env, arg1, 0, 0);
7408 break;
7409 #endif
7410 #ifdef TARGET_NR_pipe2
7411 case TARGET_NR_pipe2:
7412 ret = do_pipe(cpu_env, arg1,
7413 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7414 break;
7415 #endif
7416 case TARGET_NR_times:
7417 {
7418 struct target_tms *tmsp;
7419 struct tms tms;
7420 ret = get_errno(times(&tms));
7421 if (arg1) {
7422 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7423 if (!tmsp)
7424 goto efault;
7425 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7426 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7427 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7428 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7429 }
7430 if (!is_error(ret))
7431 ret = host_to_target_clock_t(ret);
7432 }
7433 break;
7434 #ifdef TARGET_NR_prof
7435 case TARGET_NR_prof:
7436 goto unimplemented;
7437 #endif
7438 #ifdef TARGET_NR_signal
7439 case TARGET_NR_signal:
7440 goto unimplemented;
7441 #endif
7442 case TARGET_NR_acct:
7443 if (arg1 == 0) {
7444 ret = get_errno(acct(NULL));
7445 } else {
7446 if (!(p = lock_user_string(arg1)))
7447 goto efault;
7448 ret = get_errno(acct(path(p)));
7449 unlock_user(p, arg1, 0);
7450 }
7451 break;
7452 #ifdef TARGET_NR_umount2
7453 case TARGET_NR_umount2:
7454 if (!(p = lock_user_string(arg1)))
7455 goto efault;
7456 ret = get_errno(umount2(p, arg2));
7457 unlock_user(p, arg1, 0);
7458 break;
7459 #endif
7460 #ifdef TARGET_NR_lock
7461 case TARGET_NR_lock:
7462 goto unimplemented;
7463 #endif
7464 case TARGET_NR_ioctl:
7465 ret = do_ioctl(arg1, arg2, arg3);
7466 break;
7467 case TARGET_NR_fcntl:
7468 ret = do_fcntl(arg1, arg2, arg3);
7469 break;
7470 #ifdef TARGET_NR_mpx
7471 case TARGET_NR_mpx:
7472 goto unimplemented;
7473 #endif
7474 case TARGET_NR_setpgid:
7475 ret = get_errno(setpgid(arg1, arg2));
7476 break;
7477 #ifdef TARGET_NR_ulimit
7478 case TARGET_NR_ulimit:
7479 goto unimplemented;
7480 #endif
7481 #ifdef TARGET_NR_oldolduname
7482 case TARGET_NR_oldolduname:
7483 goto unimplemented;
7484 #endif
7485 case TARGET_NR_umask:
7486 ret = get_errno(umask(arg1));
7487 break;
7488 case TARGET_NR_chroot:
7489 if (!(p = lock_user_string(arg1)))
7490 goto efault;
7491 ret = get_errno(chroot(p));
7492 unlock_user(p, arg1, 0);
7493 break;
7494 #ifdef TARGET_NR_ustat
7495 case TARGET_NR_ustat:
7496 goto unimplemented;
7497 #endif
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        /* dup2(oldfd=arg1, newfd=arg2).  Unlike dup, the new fd is the
         * caller-chosen arg2, so the translator state is copied onto arg2,
         * not onto ret (ret == arg2 on success anyway).
         */
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        break;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        /* dup3(oldfd=arg1, newfd=arg2, flags=arg3); same translator
         * handling as dup2.  Flags are passed through to the host
         * unconverted here.
         */
        ret = get_errno(dup3(arg1, arg2, arg3));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        break;
#endif
7514 #ifdef TARGET_NR_getppid /* not on alpha */
7515 case TARGET_NR_getppid:
7516 ret = get_errno(getppid());
7517 break;
7518 #endif
7519 #ifdef TARGET_NR_getpgrp
7520 case TARGET_NR_getpgrp:
7521 ret = get_errno(getpgrp());
7522 break;
7523 #endif
7524 case TARGET_NR_setsid:
7525 ret = get_errno(setsid());
7526 break;
7527 #ifdef TARGET_NR_sigaction
7528 case TARGET_NR_sigaction:
7529 {
7530 #if defined(TARGET_ALPHA)
7531 struct target_sigaction act, oact, *pact = 0;
7532 struct target_old_sigaction *old_act;
7533 if (arg2) {
7534 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7535 goto efault;
7536 act._sa_handler = old_act->_sa_handler;
7537 target_siginitset(&act.sa_mask, old_act->sa_mask);
7538 act.sa_flags = old_act->sa_flags;
7539 act.sa_restorer = 0;
7540 unlock_user_struct(old_act, arg2, 0);
7541 pact = &act;
7542 }
7543 ret = get_errno(do_sigaction(arg1, pact, &oact));
7544 if (!is_error(ret) && arg3) {
7545 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7546 goto efault;
7547 old_act->_sa_handler = oact._sa_handler;
7548 old_act->sa_mask = oact.sa_mask.sig[0];
7549 old_act->sa_flags = oact.sa_flags;
7550 unlock_user_struct(old_act, arg3, 1);
7551 }
7552 #elif defined(TARGET_MIPS)
7553 struct target_sigaction act, oact, *pact, *old_act;
7554
7555 if (arg2) {
7556 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7557 goto efault;
7558 act._sa_handler = old_act->_sa_handler;
7559 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7560 act.sa_flags = old_act->sa_flags;
7561 unlock_user_struct(old_act, arg2, 0);
7562 pact = &act;
7563 } else {
7564 pact = NULL;
7565 }
7566
7567 ret = get_errno(do_sigaction(arg1, pact, &oact));
7568
7569 if (!is_error(ret) && arg3) {
7570 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7571 goto efault;
7572 old_act->_sa_handler = oact._sa_handler;
7573 old_act->sa_flags = oact.sa_flags;
7574 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7575 old_act->sa_mask.sig[1] = 0;
7576 old_act->sa_mask.sig[2] = 0;
7577 old_act->sa_mask.sig[3] = 0;
7578 unlock_user_struct(old_act, arg3, 1);
7579 }
7580 #else
7581 struct target_old_sigaction *old_act;
7582 struct target_sigaction act, oact, *pact;
7583 if (arg2) {
7584 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7585 goto efault;
7586 act._sa_handler = old_act->_sa_handler;
7587 target_siginitset(&act.sa_mask, old_act->sa_mask);
7588 act.sa_flags = old_act->sa_flags;
7589 act.sa_restorer = old_act->sa_restorer;
7590 unlock_user_struct(old_act, arg2, 0);
7591 pact = &act;
7592 } else {
7593 pact = NULL;
7594 }
7595 ret = get_errno(do_sigaction(arg1, pact, &oact));
7596 if (!is_error(ret) && arg3) {
7597 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7598 goto efault;
7599 old_act->_sa_handler = oact._sa_handler;
7600 old_act->sa_mask = oact.sa_mask.sig[0];
7601 old_act->sa_flags = oact.sa_flags;
7602 old_act->sa_restorer = oact.sa_restorer;
7603 unlock_user_struct(old_act, arg3, 1);
7604 }
7605 #endif
7606 }
7607 break;
7608 #endif
7609 case TARGET_NR_rt_sigaction:
7610 {
7611 #if defined(TARGET_ALPHA)
7612 struct target_sigaction act, oact, *pact = 0;
7613 struct target_rt_sigaction *rt_act;
7614 /* ??? arg4 == sizeof(sigset_t). */
7615 if (arg2) {
7616 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7617 goto efault;
7618 act._sa_handler = rt_act->_sa_handler;
7619 act.sa_mask = rt_act->sa_mask;
7620 act.sa_flags = rt_act->sa_flags;
7621 act.sa_restorer = arg5;
7622 unlock_user_struct(rt_act, arg2, 0);
7623 pact = &act;
7624 }
7625 ret = get_errno(do_sigaction(arg1, pact, &oact));
7626 if (!is_error(ret) && arg3) {
7627 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7628 goto efault;
7629 rt_act->_sa_handler = oact._sa_handler;
7630 rt_act->sa_mask = oact.sa_mask;
7631 rt_act->sa_flags = oact.sa_flags;
7632 unlock_user_struct(rt_act, arg3, 1);
7633 }
7634 #else
7635 struct target_sigaction *act;
7636 struct target_sigaction *oact;
7637
7638 if (arg2) {
7639 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7640 goto efault;
7641 } else
7642 act = NULL;
7643 if (arg3) {
7644 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7645 ret = -TARGET_EFAULT;
7646 goto rt_sigaction_fail;
7647 }
7648 } else
7649 oact = NULL;
7650 ret = get_errno(do_sigaction(arg1, act, oact));
7651 rt_sigaction_fail:
7652 if (act)
7653 unlock_user_struct(act, arg2, 0);
7654 if (oact)
7655 unlock_user_struct(oact, arg3, 1);
7656 #endif
7657 }
7658 break;
7659 #ifdef TARGET_NR_sgetmask /* not on alpha */
7660 case TARGET_NR_sgetmask:
7661 {
7662 sigset_t cur_set;
7663 abi_ulong target_set;
7664 ret = do_sigprocmask(0, NULL, &cur_set);
7665 if (!ret) {
7666 host_to_target_old_sigset(&target_set, &cur_set);
7667 ret = target_set;
7668 }
7669 }
7670 break;
7671 #endif
7672 #ifdef TARGET_NR_ssetmask /* not on alpha */
7673 case TARGET_NR_ssetmask:
7674 {
7675 sigset_t set, oset, cur_set;
7676 abi_ulong target_set = arg1;
7677 /* We only have one word of the new mask so we must read
7678 * the rest of it with do_sigprocmask() and OR in this word.
7679 * We are guaranteed that a do_sigprocmask() that only queries
7680 * the signal mask will not fail.
7681 */
7682 ret = do_sigprocmask(0, NULL, &cur_set);
7683 assert(!ret);
7684 target_to_host_old_sigset(&set, &target_set);
7685 sigorset(&set, &set, &cur_set);
7686 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7687 if (!ret) {
7688 host_to_target_old_sigset(&target_set, &oset);
7689 ret = target_set;
7690 }
7691 }
7692 break;
7693 #endif
7694 #ifdef TARGET_NR_sigprocmask
7695 case TARGET_NR_sigprocmask:
7696 {
7697 #if defined(TARGET_ALPHA)
7698 sigset_t set, oldset;
7699 abi_ulong mask;
7700 int how;
7701
7702 switch (arg1) {
7703 case TARGET_SIG_BLOCK:
7704 how = SIG_BLOCK;
7705 break;
7706 case TARGET_SIG_UNBLOCK:
7707 how = SIG_UNBLOCK;
7708 break;
7709 case TARGET_SIG_SETMASK:
7710 how = SIG_SETMASK;
7711 break;
7712 default:
7713 ret = -TARGET_EINVAL;
7714 goto fail;
7715 }
7716 mask = arg2;
7717 target_to_host_old_sigset(&set, &mask);
7718
7719 ret = do_sigprocmask(how, &set, &oldset);
7720 if (!is_error(ret)) {
7721 host_to_target_old_sigset(&mask, &oldset);
7722 ret = mask;
7723 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7724 }
7725 #else
7726 sigset_t set, oldset, *set_ptr;
7727 int how;
7728
7729 if (arg2) {
7730 switch (arg1) {
7731 case TARGET_SIG_BLOCK:
7732 how = SIG_BLOCK;
7733 break;
7734 case TARGET_SIG_UNBLOCK:
7735 how = SIG_UNBLOCK;
7736 break;
7737 case TARGET_SIG_SETMASK:
7738 how = SIG_SETMASK;
7739 break;
7740 default:
7741 ret = -TARGET_EINVAL;
7742 goto fail;
7743 }
7744 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7745 goto efault;
7746 target_to_host_old_sigset(&set, p);
7747 unlock_user(p, arg2, 0);
7748 set_ptr = &set;
7749 } else {
7750 how = 0;
7751 set_ptr = NULL;
7752 }
7753 ret = do_sigprocmask(how, set_ptr, &oldset);
7754 if (!is_error(ret) && arg3) {
7755 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7756 goto efault;
7757 host_to_target_old_sigset(p, &oldset);
7758 unlock_user(p, arg3, sizeof(target_sigset_t));
7759 }
7760 #endif
7761 }
7762 break;
7763 #endif
7764 case TARGET_NR_rt_sigprocmask:
7765 {
7766 int how = arg1;
7767 sigset_t set, oldset, *set_ptr;
7768
7769 if (arg2) {
7770 switch(how) {
7771 case TARGET_SIG_BLOCK:
7772 how = SIG_BLOCK;
7773 break;
7774 case TARGET_SIG_UNBLOCK:
7775 how = SIG_UNBLOCK;
7776 break;
7777 case TARGET_SIG_SETMASK:
7778 how = SIG_SETMASK;
7779 break;
7780 default:
7781 ret = -TARGET_EINVAL;
7782 goto fail;
7783 }
7784 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7785 goto efault;
7786 target_to_host_sigset(&set, p);
7787 unlock_user(p, arg2, 0);
7788 set_ptr = &set;
7789 } else {
7790 how = 0;
7791 set_ptr = NULL;
7792 }
7793 ret = do_sigprocmask(how, set_ptr, &oldset);
7794 if (!is_error(ret) && arg3) {
7795 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7796 goto efault;
7797 host_to_target_sigset(p, &oldset);
7798 unlock_user(p, arg3, sizeof(target_sigset_t));
7799 }
7800 }
7801 break;
7802 #ifdef TARGET_NR_sigpending
7803 case TARGET_NR_sigpending:
7804 {
7805 sigset_t set;
7806 ret = get_errno(sigpending(&set));
7807 if (!is_error(ret)) {
7808 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7809 goto efault;
7810 host_to_target_old_sigset(p, &set);
7811 unlock_user(p, arg1, sizeof(target_sigset_t));
7812 }
7813 }
7814 break;
7815 #endif
7816 case TARGET_NR_rt_sigpending:
7817 {
7818 sigset_t set;
7819 ret = get_errno(sigpending(&set));
7820 if (!is_error(ret)) {
7821 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7822 goto efault;
7823 host_to_target_sigset(p, &set);
7824 unlock_user(p, arg1, sizeof(target_sigset_t));
7825 }
7826 }
7827 break;
7828 #ifdef TARGET_NR_sigsuspend
7829 case TARGET_NR_sigsuspend:
7830 {
7831 TaskState *ts = cpu->opaque;
7832 #if defined(TARGET_ALPHA)
7833 abi_ulong mask = arg1;
7834 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7835 #else
7836 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7837 goto efault;
7838 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7839 unlock_user(p, arg1, 0);
7840 #endif
7841 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7842 SIGSET_T_SIZE));
7843 if (ret != -TARGET_ERESTARTSYS) {
7844 ts->in_sigsuspend = 1;
7845 }
7846 }
7847 break;
7848 #endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
            /* Read the temporary signal mask from guest memory into the
             * per-task state; the suspend uses it and (presumably) the
             * signal-delivery path consults it while in_sigsuspend is set
             * -- see the signal handling code.
             */
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            /* -TARGET_ERESTARTSYS means the syscall will be restarted, so
             * the suspend has not actually taken effect yet and the flag
             * must not be set.
             */
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        break;
7863 case TARGET_NR_rt_sigtimedwait:
7864 {
7865 sigset_t set;
7866 struct timespec uts, *puts;
7867 siginfo_t uinfo;
7868
7869 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7870 goto efault;
7871 target_to_host_sigset(&set, p);
7872 unlock_user(p, arg1, 0);
7873 if (arg3) {
7874 puts = &uts;
7875 target_to_host_timespec(puts, arg3);
7876 } else {
7877 puts = NULL;
7878 }
7879 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7880 SIGSET_T_SIZE));
7881 if (!is_error(ret)) {
7882 if (arg2) {
7883 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7884 0);
7885 if (!p) {
7886 goto efault;
7887 }
7888 host_to_target_siginfo(p, &uinfo);
7889 unlock_user(p, arg2, sizeof(target_siginfo_t));
7890 }
7891 ret = host_to_target_signal(ret);
7892 }
7893 }
7894 break;
7895 case TARGET_NR_rt_sigqueueinfo:
7896 {
7897 siginfo_t uinfo;
7898
7899 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
7900 if (!p) {
7901 goto efault;
7902 }
7903 target_to_host_siginfo(&uinfo, p);
7904 unlock_user(p, arg1, 0);
7905 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7906 }
7907 break;
7908 #ifdef TARGET_NR_sigreturn
7909 case TARGET_NR_sigreturn:
7910 if (block_signals()) {
7911 ret = -TARGET_ERESTARTSYS;
7912 } else {
7913 ret = do_sigreturn(cpu_env);
7914 }
7915 break;
7916 #endif
7917 case TARGET_NR_rt_sigreturn:
7918 if (block_signals()) {
7919 ret = -TARGET_ERESTARTSYS;
7920 } else {
7921 ret = do_rt_sigreturn(cpu_env);
7922 }
7923 break;
7924 case TARGET_NR_sethostname:
7925 if (!(p = lock_user_string(arg1)))
7926 goto efault;
7927 ret = get_errno(sethostname(p, arg2));
7928 unlock_user(p, arg1, 0);
7929 break;
7930 case TARGET_NR_setrlimit:
7931 {
7932 int resource = target_to_host_resource(arg1);
7933 struct target_rlimit *target_rlim;
7934 struct rlimit rlim;
7935 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7936 goto efault;
7937 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7938 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7939 unlock_user_struct(target_rlim, arg2, 0);
7940 ret = get_errno(setrlimit(resource, &rlim));
7941 }
7942 break;
7943 case TARGET_NR_getrlimit:
7944 {
7945 int resource = target_to_host_resource(arg1);
7946 struct target_rlimit *target_rlim;
7947 struct rlimit rlim;
7948
7949 ret = get_errno(getrlimit(resource, &rlim));
7950 if (!is_error(ret)) {
7951 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7952 goto efault;
7953 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7954 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7955 unlock_user_struct(target_rlim, arg2, 1);
7956 }
7957 }
7958 break;
7959 case TARGET_NR_getrusage:
7960 {
7961 struct rusage rusage;
7962 ret = get_errno(getrusage(arg1, &rusage));
7963 if (!is_error(ret)) {
7964 ret = host_to_target_rusage(arg2, &rusage);
7965 }
7966 }
7967 break;
7968 case TARGET_NR_gettimeofday:
7969 {
7970 struct timeval tv;
7971 ret = get_errno(gettimeofday(&tv, NULL));
7972 if (!is_error(ret)) {
7973 if (copy_to_user_timeval(arg1, &tv))
7974 goto efault;
7975 }
7976 }
7977 break;
7978 case TARGET_NR_settimeofday:
7979 {
7980 struct timeval tv, *ptv = NULL;
7981 struct timezone tz, *ptz = NULL;
7982
7983 if (arg1) {
7984 if (copy_from_user_timeval(&tv, arg1)) {
7985 goto efault;
7986 }
7987 ptv = &tv;
7988 }
7989
7990 if (arg2) {
7991 if (copy_from_user_timezone(&tz, arg2)) {
7992 goto efault;
7993 }
7994 ptz = &tz;
7995 }
7996
7997 ret = get_errno(settimeofday(ptv, ptz));
7998 }
7999 break;
8000 #if defined(TARGET_NR_select)
8001 case TARGET_NR_select:
8002 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8003 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8004 #else
8005 {
8006 struct target_sel_arg_struct *sel;
8007 abi_ulong inp, outp, exp, tvp;
8008 long nsel;
8009
8010 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8011 goto efault;
8012 nsel = tswapal(sel->n);
8013 inp = tswapal(sel->inp);
8014 outp = tswapal(sel->outp);
8015 exp = tswapal(sel->exp);
8016 tvp = tswapal(sel->tvp);
8017 unlock_user_struct(sel, arg1, 0);
8018 ret = do_select(nsel, inp, outp, exp, tvp);
8019 }
8020 #endif
8021 break;
8022 #endif
8023 #ifdef TARGET_NR_pselect6
8024 case TARGET_NR_pselect6:
8025 {
8026 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8027 fd_set rfds, wfds, efds;
8028 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8029 struct timespec ts, *ts_ptr;
8030
8031 /*
8032 * The 6th arg is actually two args smashed together,
8033 * so we cannot use the C library.
8034 */
8035 sigset_t set;
8036 struct {
8037 sigset_t *set;
8038 size_t size;
8039 } sig, *sig_ptr;
8040
8041 abi_ulong arg_sigset, arg_sigsize, *arg7;
8042 target_sigset_t *target_sigset;
8043
8044 n = arg1;
8045 rfd_addr = arg2;
8046 wfd_addr = arg3;
8047 efd_addr = arg4;
8048 ts_addr = arg5;
8049
8050 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8051 if (ret) {
8052 goto fail;
8053 }
8054 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8055 if (ret) {
8056 goto fail;
8057 }
8058 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8059 if (ret) {
8060 goto fail;
8061 }
8062
8063 /*
8064 * This takes a timespec, and not a timeval, so we cannot
8065 * use the do_select() helper ...
8066 */
8067 if (ts_addr) {
8068 if (target_to_host_timespec(&ts, ts_addr)) {
8069 goto efault;
8070 }
8071 ts_ptr = &ts;
8072 } else {
8073 ts_ptr = NULL;
8074 }
8075
8076 /* Extract the two packed args for the sigset */
8077 if (arg6) {
8078 sig_ptr = &sig;
8079 sig.size = SIGSET_T_SIZE;
8080
8081 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8082 if (!arg7) {
8083 goto efault;
8084 }
8085 arg_sigset = tswapal(arg7[0]);
8086 arg_sigsize = tswapal(arg7[1]);
8087 unlock_user(arg7, arg6, 0);
8088
8089 if (arg_sigset) {
8090 sig.set = &set;
8091 if (arg_sigsize != sizeof(*target_sigset)) {
8092 /* Like the kernel, we enforce correct size sigsets */
8093 ret = -TARGET_EINVAL;
8094 goto fail;
8095 }
8096 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8097 sizeof(*target_sigset), 1);
8098 if (!target_sigset) {
8099 goto efault;
8100 }
8101 target_to_host_sigset(&set, target_sigset);
8102 unlock_user(target_sigset, arg_sigset, 0);
8103 } else {
8104 sig.set = NULL;
8105 }
8106 } else {
8107 sig_ptr = NULL;
8108 }
8109
8110 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8111 ts_ptr, sig_ptr));
8112
8113 if (!is_error(ret)) {
8114 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8115 goto efault;
8116 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8117 goto efault;
8118 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8119 goto efault;
8120
8121 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8122 goto efault;
8123 }
8124 }
8125 break;
8126 #endif
8127 #ifdef TARGET_NR_symlink
8128 case TARGET_NR_symlink:
8129 {
8130 void *p2;
8131 p = lock_user_string(arg1);
8132 p2 = lock_user_string(arg2);
8133 if (!p || !p2)
8134 ret = -TARGET_EFAULT;
8135 else
8136 ret = get_errno(symlink(p, p2));
8137 unlock_user(p2, arg2, 0);
8138 unlock_user(p, arg1, 0);
8139 }
8140 break;
8141 #endif
8142 #if defined(TARGET_NR_symlinkat)
8143 case TARGET_NR_symlinkat:
8144 {
8145 void *p2;
8146 p = lock_user_string(arg1);
8147 p2 = lock_user_string(arg3);
8148 if (!p || !p2)
8149 ret = -TARGET_EFAULT;
8150 else
8151 ret = get_errno(symlinkat(p, arg2, p2));
8152 unlock_user(p2, arg3, 0);
8153 unlock_user(p, arg1, 0);
8154 }
8155 break;
8156 #endif
8157 #ifdef TARGET_NR_oldlstat
8158 case TARGET_NR_oldlstat:
8159 goto unimplemented;
8160 #endif
8161 #ifdef TARGET_NR_readlink
8162 case TARGET_NR_readlink:
8163 {
8164 void *p2;
8165 p = lock_user_string(arg1);
8166 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8167 if (!p || !p2) {
8168 ret = -TARGET_EFAULT;
8169 } else if (!arg3) {
8170 /* Short circuit this for the magic exe check. */
8171 ret = -TARGET_EINVAL;
8172 } else if (is_proc_myself((const char *)p, "exe")) {
8173 char real[PATH_MAX], *temp;
8174 temp = realpath(exec_path, real);
8175 /* Return value is # of bytes that we wrote to the buffer. */
8176 if (temp == NULL) {
8177 ret = get_errno(-1);
8178 } else {
8179 /* Don't worry about sign mismatch as earlier mapping
8180 * logic would have thrown a bad address error. */
8181 ret = MIN(strlen(real), arg3);
8182 /* We cannot NUL terminate the string. */
8183 memcpy(p2, real, ret);
8184 }
8185 } else {
8186 ret = get_errno(readlink(path(p), p2, arg3));
8187 }
8188 unlock_user(p2, arg2, ret);
8189 unlock_user(p, arg1, 0);
8190 }
8191 break;
8192 #endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        /* readlinkat(dirfd=arg1, path=arg2, buf=arg3, bufsiz=arg4).
         * The /proc/self/exe special case must behave like readlink(2):
         * return the number of bytes placed in the buffer, truncate
         * without NUL termination.  Previously this branch snprintf'd
         * (NUL-terminating, truncating) yet returned the full strlen of
         * the resolved path, and did not reject a zero-length buffer --
         * now aligned with the TARGET_NR_readlink handling above.
         */
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg4);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
8214 #ifdef TARGET_NR_uselib
8215 case TARGET_NR_uselib:
8216 goto unimplemented;
8217 #endif
8218 #ifdef TARGET_NR_swapon
8219 case TARGET_NR_swapon:
8220 if (!(p = lock_user_string(arg1)))
8221 goto efault;
8222 ret = get_errno(swapon(p, arg2));
8223 unlock_user(p, arg1, 0);
8224 break;
8225 #endif
8226 case TARGET_NR_reboot:
8227 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8228 /* arg4 must be ignored in all other cases */
8229 p = lock_user_string(arg4);
8230 if (!p) {
8231 goto efault;
8232 }
8233 ret = get_errno(reboot(arg1, arg2, arg3, p));
8234 unlock_user(p, arg4, 0);
8235 } else {
8236 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8237 }
8238 break;
8239 #ifdef TARGET_NR_readdir
8240 case TARGET_NR_readdir:
8241 goto unimplemented;
8242 #endif
8243 #ifdef TARGET_NR_mmap
8244 case TARGET_NR_mmap:
8245 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8246 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8247 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8248 || defined(TARGET_S390X)
8249 {
8250 abi_ulong *v;
8251 abi_ulong v1, v2, v3, v4, v5, v6;
8252 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8253 goto efault;
8254 v1 = tswapal(v[0]);
8255 v2 = tswapal(v[1]);
8256 v3 = tswapal(v[2]);
8257 v4 = tswapal(v[3]);
8258 v5 = tswapal(v[4]);
8259 v6 = tswapal(v[5]);
8260 unlock_user(v, arg1, 0);
8261 ret = get_errno(target_mmap(v1, v2, v3,
8262 target_to_host_bitmask(v4, mmap_flags_tbl),
8263 v5, v6));
8264 }
8265 #else
8266 ret = get_errno(target_mmap(arg1, arg2, arg3,
8267 target_to_host_bitmask(arg4, mmap_flags_tbl),
8268 arg5,
8269 arg6));
8270 #endif
8271 break;
8272 #endif
8273 #ifdef TARGET_NR_mmap2
8274 case TARGET_NR_mmap2:
8275 #ifndef MMAP_SHIFT
8276 #define MMAP_SHIFT 12
8277 #endif
8278 ret = get_errno(target_mmap(arg1, arg2, arg3,
8279 target_to_host_bitmask(arg4, mmap_flags_tbl),
8280 arg5,
8281 arg6 << MMAP_SHIFT));
8282 break;
8283 #endif
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
        break;
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable:
             * if PROT_GROWSDOWN is requested on an address inside the
             * guest's initial stack region, drop the flag (the host
             * mapping doesn't grow the same way) and widen the request to
             * start at the recorded stack limit instead.
             */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
8301 #ifdef TARGET_NR_mremap
8302 case TARGET_NR_mremap:
8303 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8304 break;
8305 #endif
8306 /* ??? msync/mlock/munlock are broken for softmmu. */
8307 #ifdef TARGET_NR_msync
8308 case TARGET_NR_msync:
8309 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8310 break;
8311 #endif
8312 #ifdef TARGET_NR_mlock
8313 case TARGET_NR_mlock:
8314 ret = get_errno(mlock(g2h(arg1), arg2));
8315 break;
8316 #endif
8317 #ifdef TARGET_NR_munlock
8318 case TARGET_NR_munlock:
8319 ret = get_errno(munlock(g2h(arg1), arg2));
8320 break;
8321 #endif
8322 #ifdef TARGET_NR_mlockall
8323 case TARGET_NR_mlockall:
8324 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8325 break;
8326 #endif
8327 #ifdef TARGET_NR_munlockall
8328 case TARGET_NR_munlockall:
8329 ret = get_errno(munlockall());
8330 break;
8331 #endif
8332 case TARGET_NR_truncate:
8333 if (!(p = lock_user_string(arg1)))
8334 goto efault;
8335 ret = get_errno(truncate(p, arg2));
8336 unlock_user(p, arg1, 0);
8337 break;
8338 case TARGET_NR_ftruncate:
8339 ret = get_errno(ftruncate(arg1, arg2));
8340 break;
8341 case TARGET_NR_fchmod:
8342 ret = get_errno(fchmod(arg1, arg2));
8343 break;
8344 #if defined(TARGET_NR_fchmodat)
8345 case TARGET_NR_fchmodat:
8346 if (!(p = lock_user_string(arg2)))
8347 goto efault;
8348 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8349 unlock_user(p, arg2, 0);
8350 break;
8351 #endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  We therefore clear errno
           and call the libc wrapper directly rather than using get_errno().
         */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
            break;
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority (20 - nice) to avoid negative
           numbers, matching the raw kernel syscall ABI rather than the
           libc wrapper's convention.
         */
        ret = 20 - ret;
#endif
        break;
8369 case TARGET_NR_setpriority:
8370 ret = get_errno(setpriority(arg1, arg2, arg3));
8371 break;
8372 #ifdef TARGET_NR_profil
8373 case TARGET_NR_profil:
8374 goto unimplemented;
8375 #endif
8376 case TARGET_NR_statfs:
8377 if (!(p = lock_user_string(arg1)))
8378 goto efault;
8379 ret = get_errno(statfs(path(p), &stfs));
8380 unlock_user(p, arg1, 0);
8381 convert_statfs:
8382 if (!is_error(ret)) {
8383 struct target_statfs *target_stfs;
8384
8385 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8386 goto efault;
8387 __put_user(stfs.f_type, &target_stfs->f_type);
8388 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8389 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8390 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8391 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8392 __put_user(stfs.f_files, &target_stfs->f_files);
8393 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8394 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8395 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8396 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8397 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8398 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8399 unlock_user_struct(target_stfs, arg2, 1);
8400 }
8401 break;
8402 case TARGET_NR_fstatfs:
8403 ret = get_errno(fstatfs(arg1, &stfs));
8404 goto convert_statfs;
8405 #ifdef TARGET_NR_statfs64
8406 case TARGET_NR_statfs64:
8407 if (!(p = lock_user_string(arg1)))
8408 goto efault;
8409 ret = get_errno(statfs(path(p), &stfs));
8410 unlock_user(p, arg1, 0);
8411 convert_statfs64:
8412 if (!is_error(ret)) {
8413 struct target_statfs64 *target_stfs;
8414
8415 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8416 goto efault;
8417 __put_user(stfs.f_type, &target_stfs->f_type);
8418 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8419 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8420 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8421 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8422 __put_user(stfs.f_files, &target_stfs->f_files);
8423 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8424 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8425 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8426 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8427 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8428 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8429 unlock_user_struct(target_stfs, arg3, 1);
8430 }
8431 break;
8432 case TARGET_NR_fstatfs64:
8433 ret = get_errno(fstatfs(arg1, &stfs));
8434 goto convert_statfs64;
8435 #endif
8436 #ifdef TARGET_NR_ioperm
8437 case TARGET_NR_ioperm:
8438 goto unimplemented;
8439 #endif
8440 #ifdef TARGET_NR_socketcall
8441 case TARGET_NR_socketcall:
8442 ret = do_socketcall(arg1, arg2);
8443 break;
8444 #endif
8445 #ifdef TARGET_NR_accept
8446 case TARGET_NR_accept:
8447 ret = do_accept4(arg1, arg2, arg3, 0);
8448 break;
8449 #endif
8450 #ifdef TARGET_NR_accept4
8451 case TARGET_NR_accept4:
8452 ret = do_accept4(arg1, arg2, arg3, arg4);
8453 break;
8454 #endif
8455 #ifdef TARGET_NR_bind
8456 case TARGET_NR_bind:
8457 ret = do_bind(arg1, arg2, arg3);
8458 break;
8459 #endif
8460 #ifdef TARGET_NR_connect
8461 case TARGET_NR_connect:
8462 ret = do_connect(arg1, arg2, arg3);
8463 break;
8464 #endif
8465 #ifdef TARGET_NR_getpeername
8466 case TARGET_NR_getpeername:
8467 ret = do_getpeername(arg1, arg2, arg3);
8468 break;
8469 #endif
8470 #ifdef TARGET_NR_getsockname
8471 case TARGET_NR_getsockname:
8472 ret = do_getsockname(arg1, arg2, arg3);
8473 break;
8474 #endif
8475 #ifdef TARGET_NR_getsockopt
8476 case TARGET_NR_getsockopt:
8477 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8478 break;
8479 #endif
8480 #ifdef TARGET_NR_listen
8481 case TARGET_NR_listen:
8482 ret = get_errno(listen(arg1, arg2));
8483 break;
8484 #endif
8485 #ifdef TARGET_NR_recv
8486 case TARGET_NR_recv:
8487 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8488 break;
8489 #endif
8490 #ifdef TARGET_NR_recvfrom
8491 case TARGET_NR_recvfrom:
8492 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8493 break;
8494 #endif
8495 #ifdef TARGET_NR_recvmsg
8496 case TARGET_NR_recvmsg:
8497 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8498 break;
8499 #endif
8500 #ifdef TARGET_NR_send
8501 case TARGET_NR_send:
8502 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8503 break;
8504 #endif
8505 #ifdef TARGET_NR_sendmsg
8506 case TARGET_NR_sendmsg:
8507 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8508 break;
8509 #endif
8510 #ifdef TARGET_NR_sendmmsg
8511 case TARGET_NR_sendmmsg:
8512 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8513 break;
8514 case TARGET_NR_recvmmsg:
8515 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8516 break;
8517 #endif
8518 #ifdef TARGET_NR_sendto
8519 case TARGET_NR_sendto:
8520 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8521 break;
8522 #endif
8523 #ifdef TARGET_NR_shutdown
8524 case TARGET_NR_shutdown:
8525 ret = get_errno(shutdown(arg1, arg2));
8526 break;
8527 #endif
8528 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8529 case TARGET_NR_getrandom:
8530 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8531 if (!p) {
8532 goto efault;
8533 }
8534 ret = get_errno(getrandom(p, arg2, arg3));
8535 unlock_user(p, arg1, ret);
8536 break;
8537 #endif
8538 #ifdef TARGET_NR_socket
8539 case TARGET_NR_socket:
8540 ret = do_socket(arg1, arg2, arg3);
8541 fd_trans_unregister(ret);
8542 break;
8543 #endif
8544 #ifdef TARGET_NR_socketpair
8545 case TARGET_NR_socketpair:
8546 ret = do_socketpair(arg1, arg2, arg3, arg4);
8547 break;
8548 #endif
8549 #ifdef TARGET_NR_setsockopt
8550 case TARGET_NR_setsockopt:
8551 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8552 break;
8553 #endif
8554
    case TARGET_NR_syslog:
        /* syslog(type=arg1, buf=arg2, len=arg3).
         * NOTE(review): lock_user_string(arg2) treats arg2 as a readable
         * NUL-terminated string, but per syslog(2) the read actions
         * (types 2-4) have the kernel *write* into the buffer, and several
         * action types take no buffer at all (arg2 == NULL would fault
         * here) -- verify against syslog(2); a per-action lock with the
         * correct VERIFY_READ/VERIFY_WRITE mode looks required.
         */
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
        unlock_user(p, arg2, 0);
        break;
8561
8562 case TARGET_NR_setitimer:
8563 {
8564 struct itimerval value, ovalue, *pvalue;
8565
8566 if (arg2) {
8567 pvalue = &value;
8568 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8569 || copy_from_user_timeval(&pvalue->it_value,
8570 arg2 + sizeof(struct target_timeval)))
8571 goto efault;
8572 } else {
8573 pvalue = NULL;
8574 }
8575 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8576 if (!is_error(ret) && arg3) {
8577 if (copy_to_user_timeval(arg3,
8578 &ovalue.it_interval)
8579 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8580 &ovalue.it_value))
8581 goto efault;
8582 }
8583 }
8584 break;
8585 case TARGET_NR_getitimer:
8586 {
8587 struct itimerval value;
8588
8589 ret = get_errno(getitimer(arg1, &value));
8590 if (!is_error(ret) && arg2) {
8591 if (copy_to_user_timeval(arg2,
8592 &value.it_interval)
8593 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8594 &value.it_value))
8595 goto efault;
8596 }
8597 }
8598 break;
8599 #ifdef TARGET_NR_stat
8600 case TARGET_NR_stat:
8601 if (!(p = lock_user_string(arg1)))
8602 goto efault;
8603 ret = get_errno(stat(path(p), &st));
8604 unlock_user(p, arg1, 0);
8605 goto do_stat;
8606 #endif
8607 #ifdef TARGET_NR_lstat
8608 case TARGET_NR_lstat:
8609 if (!(p = lock_user_string(arg1)))
8610 goto efault;
8611 ret = get_errno(lstat(path(p), &st));
8612 unlock_user(p, arg1, 0);
8613 goto do_stat;
8614 #endif
8615 case TARGET_NR_fstat:
8616 {
8617 ret = get_errno(fstat(arg1, &st));
8618 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8619 do_stat:
8620 #endif
8621 if (!is_error(ret)) {
8622 struct target_stat *target_st;
8623
8624 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8625 goto efault;
8626 memset(target_st, 0, sizeof(*target_st));
8627 __put_user(st.st_dev, &target_st->st_dev);
8628 __put_user(st.st_ino, &target_st->st_ino);
8629 __put_user(st.st_mode, &target_st->st_mode);
8630 __put_user(st.st_uid, &target_st->st_uid);
8631 __put_user(st.st_gid, &target_st->st_gid);
8632 __put_user(st.st_nlink, &target_st->st_nlink);
8633 __put_user(st.st_rdev, &target_st->st_rdev);
8634 __put_user(st.st_size, &target_st->st_size);
8635 __put_user(st.st_blksize, &target_st->st_blksize);
8636 __put_user(st.st_blocks, &target_st->st_blocks);
8637 __put_user(st.st_atime, &target_st->target_st_atime);
8638 __put_user(st.st_mtime, &target_st->target_st_mtime);
8639 __put_user(st.st_ctime, &target_st->target_st_ctime);
8640 unlock_user_struct(target_st, arg2, 1);
8641 }
8642 }
8643 break;
8644 #ifdef TARGET_NR_olduname
8645 case TARGET_NR_olduname:
8646 goto unimplemented;
8647 #endif
8648 #ifdef TARGET_NR_iopl
8649 case TARGET_NR_iopl:
8650 goto unimplemented;
8651 #endif
8652 case TARGET_NR_vhangup:
8653 ret = get_errno(vhangup());
8654 break;
8655 #ifdef TARGET_NR_idle
8656 case TARGET_NR_idle:
8657 goto unimplemented;
8658 #endif
8659 #ifdef TARGET_NR_syscall
8660 case TARGET_NR_syscall:
8661 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8662 arg6, arg7, arg8, 0);
8663 break;
8664 #endif
8665 case TARGET_NR_wait4:
8666 {
8667 int status;
8668 abi_long status_ptr = arg2;
8669 struct rusage rusage, *rusage_ptr;
8670 abi_ulong target_rusage = arg4;
8671 abi_long rusage_err;
8672 if (target_rusage)
8673 rusage_ptr = &rusage;
8674 else
8675 rusage_ptr = NULL;
8676 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8677 if (!is_error(ret)) {
8678 if (status_ptr && ret) {
8679 status = host_to_target_waitstatus(status);
8680 if (put_user_s32(status, status_ptr))
8681 goto efault;
8682 }
8683 if (target_rusage) {
8684 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8685 if (rusage_err) {
8686 ret = rusage_err;
8687 }
8688 }
8689 }
8690 }
8691 break;
8692 #ifdef TARGET_NR_swapoff
8693 case TARGET_NR_swapoff:
8694 if (!(p = lock_user_string(arg1)))
8695 goto efault;
8696 ret = get_errno(swapoff(p));
8697 unlock_user(p, arg1, 0);
8698 break;
8699 #endif
8700 case TARGET_NR_sysinfo:
8701 {
8702 struct target_sysinfo *target_value;
8703 struct sysinfo value;
8704 ret = get_errno(sysinfo(&value));
8705 if (!is_error(ret) && arg1)
8706 {
8707 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8708 goto efault;
8709 __put_user(value.uptime, &target_value->uptime);
8710 __put_user(value.loads[0], &target_value->loads[0]);
8711 __put_user(value.loads[1], &target_value->loads[1]);
8712 __put_user(value.loads[2], &target_value->loads[2]);
8713 __put_user(value.totalram, &target_value->totalram);
8714 __put_user(value.freeram, &target_value->freeram);
8715 __put_user(value.sharedram, &target_value->sharedram);
8716 __put_user(value.bufferram, &target_value->bufferram);
8717 __put_user(value.totalswap, &target_value->totalswap);
8718 __put_user(value.freeswap, &target_value->freeswap);
8719 __put_user(value.procs, &target_value->procs);
8720 __put_user(value.totalhigh, &target_value->totalhigh);
8721 __put_user(value.freehigh, &target_value->freehigh);
8722 __put_user(value.mem_unit, &target_value->mem_unit);
8723 unlock_user_struct(target_value, arg1, 1);
8724 }
8725 }
8726 break;
8727 #ifdef TARGET_NR_ipc
8728 case TARGET_NR_ipc:
8729 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8730 break;
8731 #endif
8732 #ifdef TARGET_NR_semget
8733 case TARGET_NR_semget:
8734 ret = get_errno(semget(arg1, arg2, arg3));
8735 break;
8736 #endif
8737 #ifdef TARGET_NR_semop
8738 case TARGET_NR_semop:
8739 ret = do_semop(arg1, arg2, arg3);
8740 break;
8741 #endif
8742 #ifdef TARGET_NR_semctl
8743 case TARGET_NR_semctl:
8744 ret = do_semctl(arg1, arg2, arg3, arg4);
8745 break;
8746 #endif
8747 #ifdef TARGET_NR_msgctl
8748 case TARGET_NR_msgctl:
8749 ret = do_msgctl(arg1, arg2, arg3);
8750 break;
8751 #endif
8752 #ifdef TARGET_NR_msgget
8753 case TARGET_NR_msgget:
8754 ret = get_errno(msgget(arg1, arg2));
8755 break;
8756 #endif
8757 #ifdef TARGET_NR_msgrcv
8758 case TARGET_NR_msgrcv:
8759 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8760 break;
8761 #endif
8762 #ifdef TARGET_NR_msgsnd
8763 case TARGET_NR_msgsnd:
8764 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8765 break;
8766 #endif
8767 #ifdef TARGET_NR_shmget
8768 case TARGET_NR_shmget:
8769 ret = get_errno(shmget(arg1, arg2, arg3));
8770 break;
8771 #endif
8772 #ifdef TARGET_NR_shmctl
8773 case TARGET_NR_shmctl:
8774 ret = do_shmctl(arg1, arg2, arg3);
8775 break;
8776 #endif
8777 #ifdef TARGET_NR_shmat
8778 case TARGET_NR_shmat:
8779 ret = do_shmat(arg1, arg2, arg3);
8780 break;
8781 #endif
8782 #ifdef TARGET_NR_shmdt
8783 case TARGET_NR_shmdt:
8784 ret = do_shmdt(arg1);
8785 break;
8786 #endif
8787 case TARGET_NR_fsync:
8788 ret = get_errno(fsync(arg1));
8789 break;
8790 case TARGET_NR_clone:
8791 /* Linux manages to have three different orderings for its
8792 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8793 * match the kernel's CONFIG_CLONE_* settings.
8794 * Microblaze is further special in that it uses a sixth
8795 * implicit argument to clone for the TLS pointer.
8796 */
8797 #if defined(TARGET_MICROBLAZE)
8798 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8799 #elif defined(TARGET_CLONE_BACKWARDS)
8800 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8801 #elif defined(TARGET_CLONE_BACKWARDS2)
8802 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8803 #else
8804 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8805 #endif
8806 break;
8807 #ifdef __NR_exit_group
8808 /* new thread calls */
8809 case TARGET_NR_exit_group:
8810 #ifdef TARGET_GPROF
8811 _mcleanup();
8812 #endif
8813 gdb_exit(cpu_env, arg1);
8814 ret = get_errno(exit_group(arg1));
8815 break;
8816 #endif
8817 case TARGET_NR_setdomainname:
8818 if (!(p = lock_user_string(arg1)))
8819 goto efault;
8820 ret = get_errno(setdomainname(p, arg2));
8821 unlock_user(p, arg1, 0);
8822 break;
8823 case TARGET_NR_uname:
8824 /* no need to transcode because we use the linux syscall */
8825 {
8826 struct new_utsname * buf;
8827
8828 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8829 goto efault;
8830 ret = get_errno(sys_uname(buf));
8831 if (!is_error(ret)) {
8832 /* Overwrite the native machine name with whatever is being
8833 emulated. */
8834 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8835 /* Allow the user to override the reported release. */
8836 if (qemu_uname_release && *qemu_uname_release)
8837 strcpy (buf->release, qemu_uname_release);
8838 }
8839 unlock_user_struct(buf, arg1, 1);
8840 }
8841 break;
8842 #ifdef TARGET_I386
8843 case TARGET_NR_modify_ldt:
8844 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8845 break;
8846 #if !defined(TARGET_X86_64)
8847 case TARGET_NR_vm86old:
8848 goto unimplemented;
8849 case TARGET_NR_vm86:
8850 ret = do_vm86(cpu_env, arg1, arg2);
8851 break;
8852 #endif
8853 #endif
8854 case TARGET_NR_adjtimex:
8855 goto unimplemented;
8856 #ifdef TARGET_NR_create_module
8857 case TARGET_NR_create_module:
8858 #endif
8859 case TARGET_NR_init_module:
8860 case TARGET_NR_delete_module:
8861 #ifdef TARGET_NR_get_kernel_syms
8862 case TARGET_NR_get_kernel_syms:
8863 #endif
8864 goto unimplemented;
8865 case TARGET_NR_quotactl:
8866 goto unimplemented;
8867 case TARGET_NR_getpgid:
8868 ret = get_errno(getpgid(arg1));
8869 break;
8870 case TARGET_NR_fchdir:
8871 ret = get_errno(fchdir(arg1));
8872 break;
8873 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8874 case TARGET_NR_bdflush:
8875 goto unimplemented;
8876 #endif
8877 #ifdef TARGET_NR_sysfs
8878 case TARGET_NR_sysfs:
8879 goto unimplemented;
8880 #endif
8881 case TARGET_NR_personality:
8882 ret = get_errno(personality(arg1));
8883 break;
8884 #ifdef TARGET_NR_afs_syscall
8885 case TARGET_NR_afs_syscall:
8886 goto unimplemented;
8887 #endif
8888 #ifdef TARGET_NR__llseek /* Not on alpha */
8889 case TARGET_NR__llseek:
8890 {
8891 int64_t res;
8892 #if !defined(__NR_llseek)
8893 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8894 if (res == -1) {
8895 ret = get_errno(res);
8896 } else {
8897 ret = 0;
8898 }
8899 #else
8900 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8901 #endif
8902 if ((ret == 0) && put_user_s64(res, arg4)) {
8903 goto efault;
8904 }
8905 }
8906 break;
8907 #endif
8908 #ifdef TARGET_NR_getdents
8909 case TARGET_NR_getdents:
8910 #ifdef __NR_getdents
8911 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8912 {
8913 struct target_dirent *target_dirp;
8914 struct linux_dirent *dirp;
8915 abi_long count = arg3;
8916
8917 dirp = g_try_malloc(count);
8918 if (!dirp) {
8919 ret = -TARGET_ENOMEM;
8920 goto fail;
8921 }
8922
8923 ret = get_errno(sys_getdents(arg1, dirp, count));
8924 if (!is_error(ret)) {
8925 struct linux_dirent *de;
8926 struct target_dirent *tde;
8927 int len = ret;
8928 int reclen, treclen;
8929 int count1, tnamelen;
8930
8931 count1 = 0;
8932 de = dirp;
8933 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8934 goto efault;
8935 tde = target_dirp;
8936 while (len > 0) {
8937 reclen = de->d_reclen;
8938 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8939 assert(tnamelen >= 0);
8940 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8941 assert(count1 + treclen <= count);
8942 tde->d_reclen = tswap16(treclen);
8943 tde->d_ino = tswapal(de->d_ino);
8944 tde->d_off = tswapal(de->d_off);
8945 memcpy(tde->d_name, de->d_name, tnamelen);
8946 de = (struct linux_dirent *)((char *)de + reclen);
8947 len -= reclen;
8948 tde = (struct target_dirent *)((char *)tde + treclen);
8949 count1 += treclen;
8950 }
8951 ret = count1;
8952 unlock_user(target_dirp, arg2, ret);
8953 }
8954 g_free(dirp);
8955 }
8956 #else
8957 {
8958 struct linux_dirent *dirp;
8959 abi_long count = arg3;
8960
8961 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8962 goto efault;
8963 ret = get_errno(sys_getdents(arg1, dirp, count));
8964 if (!is_error(ret)) {
8965 struct linux_dirent *de;
8966 int len = ret;
8967 int reclen;
8968 de = dirp;
8969 while (len > 0) {
8970 reclen = de->d_reclen;
8971 if (reclen > len)
8972 break;
8973 de->d_reclen = tswap16(reclen);
8974 tswapls(&de->d_ino);
8975 tswapls(&de->d_off);
8976 de = (struct linux_dirent *)((char *)de + reclen);
8977 len -= reclen;
8978 }
8979 }
8980 unlock_user(dirp, arg2, ret);
8981 }
8982 #endif
8983 #else
8984 /* Implement getdents in terms of getdents64 */
8985 {
8986 struct linux_dirent64 *dirp;
8987 abi_long count = arg3;
8988
8989 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8990 if (!dirp) {
8991 goto efault;
8992 }
8993 ret = get_errno(sys_getdents64(arg1, dirp, count));
8994 if (!is_error(ret)) {
8995 /* Convert the dirent64 structs to target dirent. We do this
8996 * in-place, since we can guarantee that a target_dirent is no
8997 * larger than a dirent64; however this means we have to be
8998 * careful to read everything before writing in the new format.
8999 */
9000 struct linux_dirent64 *de;
9001 struct target_dirent *tde;
9002 int len = ret;
9003 int tlen = 0;
9004
9005 de = dirp;
9006 tde = (struct target_dirent *)dirp;
9007 while (len > 0) {
9008 int namelen, treclen;
9009 int reclen = de->d_reclen;
9010 uint64_t ino = de->d_ino;
9011 int64_t off = de->d_off;
9012 uint8_t type = de->d_type;
9013
9014 namelen = strlen(de->d_name);
9015 treclen = offsetof(struct target_dirent, d_name)
9016 + namelen + 2;
9017 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9018
9019 memmove(tde->d_name, de->d_name, namelen + 1);
9020 tde->d_ino = tswapal(ino);
9021 tde->d_off = tswapal(off);
9022 tde->d_reclen = tswap16(treclen);
9023 /* The target_dirent type is in what was formerly a padding
9024 * byte at the end of the structure:
9025 */
9026 *(((char *)tde) + treclen - 1) = type;
9027
9028 de = (struct linux_dirent64 *)((char *)de + reclen);
9029 tde = (struct target_dirent *)((char *)tde + treclen);
9030 len -= reclen;
9031 tlen += treclen;
9032 }
9033 ret = tlen;
9034 }
9035 unlock_user(dirp, arg2, ret);
9036 }
9037 #endif
9038 break;
9039 #endif /* TARGET_NR_getdents */
9040 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9041 case TARGET_NR_getdents64:
9042 {
9043 struct linux_dirent64 *dirp;
9044 abi_long count = arg3;
9045 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9046 goto efault;
9047 ret = get_errno(sys_getdents64(arg1, dirp, count));
9048 if (!is_error(ret)) {
9049 struct linux_dirent64 *de;
9050 int len = ret;
9051 int reclen;
9052 de = dirp;
9053 while (len > 0) {
9054 reclen = de->d_reclen;
9055 if (reclen > len)
9056 break;
9057 de->d_reclen = tswap16(reclen);
9058 tswap64s((uint64_t *)&de->d_ino);
9059 tswap64s((uint64_t *)&de->d_off);
9060 de = (struct linux_dirent64 *)((char *)de + reclen);
9061 len -= reclen;
9062 }
9063 }
9064 unlock_user(dirp, arg2, ret);
9065 }
9066 break;
9067 #endif /* TARGET_NR_getdents64 */
9068 #if defined(TARGET_NR__newselect)
9069 case TARGET_NR__newselect:
9070 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9071 break;
9072 #endif
9073 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9074 # ifdef TARGET_NR_poll
9075 case TARGET_NR_poll:
9076 # endif
9077 # ifdef TARGET_NR_ppoll
9078 case TARGET_NR_ppoll:
9079 # endif
9080 {
9081 struct target_pollfd *target_pfd;
9082 unsigned int nfds = arg2;
9083 struct pollfd *pfd;
9084 unsigned int i;
9085
9086 pfd = NULL;
9087 target_pfd = NULL;
9088 if (nfds) {
9089 target_pfd = lock_user(VERIFY_WRITE, arg1,
9090 sizeof(struct target_pollfd) * nfds, 1);
9091 if (!target_pfd) {
9092 goto efault;
9093 }
9094
9095 pfd = alloca(sizeof(struct pollfd) * nfds);
9096 for (i = 0; i < nfds; i++) {
9097 pfd[i].fd = tswap32(target_pfd[i].fd);
9098 pfd[i].events = tswap16(target_pfd[i].events);
9099 }
9100 }
9101
9102 switch (num) {
9103 # ifdef TARGET_NR_ppoll
9104 case TARGET_NR_ppoll:
9105 {
9106 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9107 target_sigset_t *target_set;
9108 sigset_t _set, *set = &_set;
9109
9110 if (arg3) {
9111 if (target_to_host_timespec(timeout_ts, arg3)) {
9112 unlock_user(target_pfd, arg1, 0);
9113 goto efault;
9114 }
9115 } else {
9116 timeout_ts = NULL;
9117 }
9118
9119 if (arg4) {
9120 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9121 if (!target_set) {
9122 unlock_user(target_pfd, arg1, 0);
9123 goto efault;
9124 }
9125 target_to_host_sigset(set, target_set);
9126 } else {
9127 set = NULL;
9128 }
9129
9130 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9131 set, SIGSET_T_SIZE));
9132
9133 if (!is_error(ret) && arg3) {
9134 host_to_target_timespec(arg3, timeout_ts);
9135 }
9136 if (arg4) {
9137 unlock_user(target_set, arg4, 0);
9138 }
9139 break;
9140 }
9141 # endif
9142 # ifdef TARGET_NR_poll
9143 case TARGET_NR_poll:
9144 {
9145 struct timespec ts, *pts;
9146
9147 if (arg3 >= 0) {
9148 /* Convert ms to secs, ns */
9149 ts.tv_sec = arg3 / 1000;
9150 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9151 pts = &ts;
9152 } else {
9153 /* -ve poll() timeout means "infinite" */
9154 pts = NULL;
9155 }
9156 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9157 break;
9158 }
9159 # endif
9160 default:
9161 g_assert_not_reached();
9162 }
9163
9164 if (!is_error(ret)) {
9165 for(i = 0; i < nfds; i++) {
9166 target_pfd[i].revents = tswap16(pfd[i].revents);
9167 }
9168 }
9169 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9170 }
9171 break;
9172 #endif
9173 case TARGET_NR_flock:
9174 /* NOTE: the flock constant seems to be the same for every
9175 Linux platform */
9176 ret = get_errno(safe_flock(arg1, arg2));
9177 break;
9178 case TARGET_NR_readv:
9179 {
9180 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9181 if (vec != NULL) {
9182 ret = get_errno(safe_readv(arg1, vec, arg3));
9183 unlock_iovec(vec, arg2, arg3, 1);
9184 } else {
9185 ret = -host_to_target_errno(errno);
9186 }
9187 }
9188 break;
9189 case TARGET_NR_writev:
9190 {
9191 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9192 if (vec != NULL) {
9193 ret = get_errno(safe_writev(arg1, vec, arg3));
9194 unlock_iovec(vec, arg2, arg3, 0);
9195 } else {
9196 ret = -host_to_target_errno(errno);
9197 }
9198 }
9199 break;
9200 case TARGET_NR_getsid:
9201 ret = get_errno(getsid(arg1));
9202 break;
9203 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9204 case TARGET_NR_fdatasync:
9205 ret = get_errno(fdatasync(arg1));
9206 break;
9207 #endif
9208 #ifdef TARGET_NR__sysctl
9209 case TARGET_NR__sysctl:
9210 /* We don't implement this, but ENOTDIR is always a safe
9211 return value. */
9212 ret = -TARGET_ENOTDIR;
9213 break;
9214 #endif
9215 case TARGET_NR_sched_getaffinity:
9216 {
9217 unsigned int mask_size;
9218 unsigned long *mask;
9219
9220 /*
9221 * sched_getaffinity needs multiples of ulong, so need to take
9222 * care of mismatches between target ulong and host ulong sizes.
9223 */
9224 if (arg2 & (sizeof(abi_ulong) - 1)) {
9225 ret = -TARGET_EINVAL;
9226 break;
9227 }
9228 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9229
9230 mask = alloca(mask_size);
9231 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9232
9233 if (!is_error(ret)) {
9234 if (ret > arg2) {
9235 /* More data returned than the caller's buffer will fit.
9236 * This only happens if sizeof(abi_long) < sizeof(long)
9237 * and the caller passed us a buffer holding an odd number
9238 * of abi_longs. If the host kernel is actually using the
9239 * extra 4 bytes then fail EINVAL; otherwise we can just
9240 * ignore them and only copy the interesting part.
9241 */
9242 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9243 if (numcpus > arg2 * 8) {
9244 ret = -TARGET_EINVAL;
9245 break;
9246 }
9247 ret = arg2;
9248 }
9249
9250 if (copy_to_user(arg3, mask, ret)) {
9251 goto efault;
9252 }
9253 }
9254 }
9255 break;
9256 case TARGET_NR_sched_setaffinity:
9257 {
9258 unsigned int mask_size;
9259 unsigned long *mask;
9260
9261 /*
9262 * sched_setaffinity needs multiples of ulong, so need to take
9263 * care of mismatches between target ulong and host ulong sizes.
9264 */
9265 if (arg2 & (sizeof(abi_ulong) - 1)) {
9266 ret = -TARGET_EINVAL;
9267 break;
9268 }
9269 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9270
9271 mask = alloca(mask_size);
9272 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9273 goto efault;
9274 }
9275 memcpy(mask, p, arg2);
9276 unlock_user_struct(p, arg2, 0);
9277
9278 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9279 }
9280 break;
9281 case TARGET_NR_sched_setparam:
9282 {
9283 struct sched_param *target_schp;
9284 struct sched_param schp;
9285
9286 if (arg2 == 0) {
9287 return -TARGET_EINVAL;
9288 }
9289 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9290 goto efault;
9291 schp.sched_priority = tswap32(target_schp->sched_priority);
9292 unlock_user_struct(target_schp, arg2, 0);
9293 ret = get_errno(sched_setparam(arg1, &schp));
9294 }
9295 break;
9296 case TARGET_NR_sched_getparam:
9297 {
9298 struct sched_param *target_schp;
9299 struct sched_param schp;
9300
9301 if (arg2 == 0) {
9302 return -TARGET_EINVAL;
9303 }
9304 ret = get_errno(sched_getparam(arg1, &schp));
9305 if (!is_error(ret)) {
9306 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9307 goto efault;
9308 target_schp->sched_priority = tswap32(schp.sched_priority);
9309 unlock_user_struct(target_schp, arg2, 1);
9310 }
9311 }
9312 break;
9313 case TARGET_NR_sched_setscheduler:
9314 {
9315 struct sched_param *target_schp;
9316 struct sched_param schp;
9317 if (arg3 == 0) {
9318 return -TARGET_EINVAL;
9319 }
9320 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9321 goto efault;
9322 schp.sched_priority = tswap32(target_schp->sched_priority);
9323 unlock_user_struct(target_schp, arg3, 0);
9324 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9325 }
9326 break;
9327 case TARGET_NR_sched_getscheduler:
9328 ret = get_errno(sched_getscheduler(arg1));
9329 break;
9330 case TARGET_NR_sched_yield:
9331 ret = get_errno(sched_yield());
9332 break;
9333 case TARGET_NR_sched_get_priority_max:
9334 ret = get_errno(sched_get_priority_max(arg1));
9335 break;
9336 case TARGET_NR_sched_get_priority_min:
9337 ret = get_errno(sched_get_priority_min(arg1));
9338 break;
9339 case TARGET_NR_sched_rr_get_interval:
9340 {
9341 struct timespec ts;
9342 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9343 if (!is_error(ret)) {
9344 ret = host_to_target_timespec(arg2, &ts);
9345 }
9346 }
9347 break;
9348 case TARGET_NR_nanosleep:
9349 {
9350 struct timespec req, rem;
9351 target_to_host_timespec(&req, arg1);
9352 ret = get_errno(safe_nanosleep(&req, &rem));
9353 if (is_error(ret) && arg2) {
9354 host_to_target_timespec(arg2, &rem);
9355 }
9356 }
9357 break;
9358 #ifdef TARGET_NR_query_module
9359 case TARGET_NR_query_module:
9360 goto unimplemented;
9361 #endif
9362 #ifdef TARGET_NR_nfsservctl
9363 case TARGET_NR_nfsservctl:
9364 goto unimplemented;
9365 #endif
9366 case TARGET_NR_prctl:
9367 switch (arg1) {
9368 case PR_GET_PDEATHSIG:
9369 {
9370 int deathsig;
9371 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9372 if (!is_error(ret) && arg2
9373 && put_user_ual(deathsig, arg2)) {
9374 goto efault;
9375 }
9376 break;
9377 }
9378 #ifdef PR_GET_NAME
9379 case PR_GET_NAME:
9380 {
9381 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9382 if (!name) {
9383 goto efault;
9384 }
9385 ret = get_errno(prctl(arg1, (unsigned long)name,
9386 arg3, arg4, arg5));
9387 unlock_user(name, arg2, 16);
9388 break;
9389 }
9390 case PR_SET_NAME:
9391 {
9392 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9393 if (!name) {
9394 goto efault;
9395 }
9396 ret = get_errno(prctl(arg1, (unsigned long)name,
9397 arg3, arg4, arg5));
9398 unlock_user(name, arg2, 0);
9399 break;
9400 }
9401 #endif
9402 default:
9403 /* Most prctl options have no pointer arguments */
9404 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9405 break;
9406 }
9407 break;
9408 #ifdef TARGET_NR_arch_prctl
9409 case TARGET_NR_arch_prctl:
9410 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9411 ret = do_arch_prctl(cpu_env, arg1, arg2);
9412 break;
9413 #else
9414 goto unimplemented;
9415 #endif
9416 #endif
9417 #ifdef TARGET_NR_pread64
9418 case TARGET_NR_pread64:
9419 if (regpairs_aligned(cpu_env)) {
9420 arg4 = arg5;
9421 arg5 = arg6;
9422 }
9423 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9424 goto efault;
9425 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9426 unlock_user(p, arg2, ret);
9427 break;
9428 case TARGET_NR_pwrite64:
9429 if (regpairs_aligned(cpu_env)) {
9430 arg4 = arg5;
9431 arg5 = arg6;
9432 }
9433 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9434 goto efault;
9435 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9436 unlock_user(p, arg2, 0);
9437 break;
9438 #endif
9439 case TARGET_NR_getcwd:
9440 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9441 goto efault;
9442 ret = get_errno(sys_getcwd1(p, arg2));
9443 unlock_user(p, arg1, ret);
9444 break;
9445 case TARGET_NR_capget:
9446 case TARGET_NR_capset:
9447 {
9448 struct target_user_cap_header *target_header;
9449 struct target_user_cap_data *target_data = NULL;
9450 struct __user_cap_header_struct header;
9451 struct __user_cap_data_struct data[2];
9452 struct __user_cap_data_struct *dataptr = NULL;
9453 int i, target_datalen;
9454 int data_items = 1;
9455
9456 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9457 goto efault;
9458 }
9459 header.version = tswap32(target_header->version);
9460 header.pid = tswap32(target_header->pid);
9461
9462 if (header.version != _LINUX_CAPABILITY_VERSION) {
9463 /* Version 2 and up takes pointer to two user_data structs */
9464 data_items = 2;
9465 }
9466
9467 target_datalen = sizeof(*target_data) * data_items;
9468
9469 if (arg2) {
9470 if (num == TARGET_NR_capget) {
9471 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9472 } else {
9473 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9474 }
9475 if (!target_data) {
9476 unlock_user_struct(target_header, arg1, 0);
9477 goto efault;
9478 }
9479
9480 if (num == TARGET_NR_capset) {
9481 for (i = 0; i < data_items; i++) {
9482 data[i].effective = tswap32(target_data[i].effective);
9483 data[i].permitted = tswap32(target_data[i].permitted);
9484 data[i].inheritable = tswap32(target_data[i].inheritable);
9485 }
9486 }
9487
9488 dataptr = data;
9489 }
9490
9491 if (num == TARGET_NR_capget) {
9492 ret = get_errno(capget(&header, dataptr));
9493 } else {
9494 ret = get_errno(capset(&header, dataptr));
9495 }
9496
9497 /* The kernel always updates version for both capget and capset */
9498 target_header->version = tswap32(header.version);
9499 unlock_user_struct(target_header, arg1, 1);
9500
9501 if (arg2) {
9502 if (num == TARGET_NR_capget) {
9503 for (i = 0; i < data_items; i++) {
9504 target_data[i].effective = tswap32(data[i].effective);
9505 target_data[i].permitted = tswap32(data[i].permitted);
9506 target_data[i].inheritable = tswap32(data[i].inheritable);
9507 }
9508 unlock_user(target_data, arg2, target_datalen);
9509 } else {
9510 unlock_user(target_data, arg2, 0);
9511 }
9512 }
9513 break;
9514 }
9515 case TARGET_NR_sigaltstack:
9516 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9517 break;
9518
9519 #ifdef CONFIG_SENDFILE
9520 case TARGET_NR_sendfile:
9521 {
9522 off_t *offp = NULL;
9523 off_t off;
9524 if (arg3) {
9525 ret = get_user_sal(off, arg3);
9526 if (is_error(ret)) {
9527 break;
9528 }
9529 offp = &off;
9530 }
9531 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9532 if (!is_error(ret) && arg3) {
9533 abi_long ret2 = put_user_sal(off, arg3);
9534 if (is_error(ret2)) {
9535 ret = ret2;
9536 }
9537 }
9538 break;
9539 }
9540 #ifdef TARGET_NR_sendfile64
9541 case TARGET_NR_sendfile64:
9542 {
9543 off_t *offp = NULL;
9544 off_t off;
9545 if (arg3) {
9546 ret = get_user_s64(off, arg3);
9547 if (is_error(ret)) {
9548 break;
9549 }
9550 offp = &off;
9551 }
9552 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9553 if (!is_error(ret) && arg3) {
9554 abi_long ret2 = put_user_s64(off, arg3);
9555 if (is_error(ret2)) {
9556 ret = ret2;
9557 }
9558 }
9559 break;
9560 }
9561 #endif
9562 #else
9563 case TARGET_NR_sendfile:
9564 #ifdef TARGET_NR_sendfile64
9565 case TARGET_NR_sendfile64:
9566 #endif
9567 goto unimplemented;
9568 #endif
9569
9570 #ifdef TARGET_NR_getpmsg
9571 case TARGET_NR_getpmsg:
9572 goto unimplemented;
9573 #endif
9574 #ifdef TARGET_NR_putpmsg
9575 case TARGET_NR_putpmsg:
9576 goto unimplemented;
9577 #endif
9578 #ifdef TARGET_NR_vfork
9579 case TARGET_NR_vfork:
9580 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9581 0, 0, 0, 0));
9582 break;
9583 #endif
9584 #ifdef TARGET_NR_ugetrlimit
9585 case TARGET_NR_ugetrlimit:
9586 {
9587 struct rlimit rlim;
9588 int resource = target_to_host_resource(arg1);
9589 ret = get_errno(getrlimit(resource, &rlim));
9590 if (!is_error(ret)) {
9591 struct target_rlimit *target_rlim;
9592 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9593 goto efault;
9594 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9595 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9596 unlock_user_struct(target_rlim, arg2, 1);
9597 }
9598 break;
9599 }
9600 #endif
9601 #ifdef TARGET_NR_truncate64
9602 case TARGET_NR_truncate64:
9603 if (!(p = lock_user_string(arg1)))
9604 goto efault;
9605 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9606 unlock_user(p, arg1, 0);
9607 break;
9608 #endif
9609 #ifdef TARGET_NR_ftruncate64
9610 case TARGET_NR_ftruncate64:
9611 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9612 break;
9613 #endif
9614 #ifdef TARGET_NR_stat64
9615 case TARGET_NR_stat64:
9616 if (!(p = lock_user_string(arg1)))
9617 goto efault;
9618 ret = get_errno(stat(path(p), &st));
9619 unlock_user(p, arg1, 0);
9620 if (!is_error(ret))
9621 ret = host_to_target_stat64(cpu_env, arg2, &st);
9622 break;
9623 #endif
9624 #ifdef TARGET_NR_lstat64
9625 case TARGET_NR_lstat64:
9626 if (!(p = lock_user_string(arg1)))
9627 goto efault;
9628 ret = get_errno(lstat(path(p), &st));
9629 unlock_user(p, arg1, 0);
9630 if (!is_error(ret))
9631 ret = host_to_target_stat64(cpu_env, arg2, &st);
9632 break;
9633 #endif
9634 #ifdef TARGET_NR_fstat64
9635 case TARGET_NR_fstat64:
9636 ret = get_errno(fstat(arg1, &st));
9637 if (!is_error(ret))
9638 ret = host_to_target_stat64(cpu_env, arg2, &st);
9639 break;
9640 #endif
9641 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9642 #ifdef TARGET_NR_fstatat64
9643 case TARGET_NR_fstatat64:
9644 #endif
9645 #ifdef TARGET_NR_newfstatat
9646 case TARGET_NR_newfstatat:
9647 #endif
9648 if (!(p = lock_user_string(arg2)))
9649 goto efault;
9650 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9651 if (!is_error(ret))
9652 ret = host_to_target_stat64(cpu_env, arg3, &st);
9653 break;
9654 #endif
9655 #ifdef TARGET_NR_lchown
9656 case TARGET_NR_lchown:
9657 if (!(p = lock_user_string(arg1)))
9658 goto efault;
9659 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9660 unlock_user(p, arg1, 0);
9661 break;
9662 #endif
9663 #ifdef TARGET_NR_getuid
9664 case TARGET_NR_getuid:
9665 ret = get_errno(high2lowuid(getuid()));
9666 break;
9667 #endif
9668 #ifdef TARGET_NR_getgid
9669 case TARGET_NR_getgid:
9670 ret = get_errno(high2lowgid(getgid()));
9671 break;
9672 #endif
9673 #ifdef TARGET_NR_geteuid
9674 case TARGET_NR_geteuid:
9675 ret = get_errno(high2lowuid(geteuid()));
9676 break;
9677 #endif
9678 #ifdef TARGET_NR_getegid
9679 case TARGET_NR_getegid:
9680 ret = get_errno(high2lowgid(getegid()));
9681 break;
9682 #endif
9683 case TARGET_NR_setreuid:
9684 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9685 break;
9686 case TARGET_NR_setregid:
9687 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9688 break;
    case TARGET_NR_getgroups:
        {
            /* getgroups(size, list): fetch the supplementary group IDs,
             * converting each host gid to the target's (possibly 16-bit)
             * id type via high2lowgid/tswapid.
             */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            /* NOTE(review): gidsetsize comes straight from the guest and
             * sizes an alloca() with no bounds check -- a negative or huge
             * value is not rejected here; confirm the guest cannot abuse
             * this to smash the host stack. */
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            /* size 0 only queries the group count; nothing to copy out */
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    goto efault;
                /* the kernel filled in only 'ret' entries */
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
            }
        }
        break;
    case TARGET_NR_setgroups:
        {
            /* setgroups(size, list): install supplementary group IDs,
             * widening each target id to a host gid_t. A zero size with a
             * NULL grouplist clears the supplementary set.
             */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                /* NOTE(review): guest-controlled gidsetsize sizes an
                 * alloca() with no upper bound -- confirm this cannot
                 * overflow the host stack. */
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
9731 case TARGET_NR_fchown:
9732 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9733 break;
9734 #if defined(TARGET_NR_fchownat)
9735 case TARGET_NR_fchownat:
9736 if (!(p = lock_user_string(arg2)))
9737 goto efault;
9738 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9739 low2highgid(arg4), arg5));
9740 unlock_user(p, arg2, 0);
9741 break;
9742 #endif
9743 #ifdef TARGET_NR_setresuid
9744 case TARGET_NR_setresuid:
9745 ret = get_errno(sys_setresuid(low2highuid(arg1),
9746 low2highuid(arg2),
9747 low2highuid(arg3)));
9748 break;
9749 #endif
9750 #ifdef TARGET_NR_getresuid
9751 case TARGET_NR_getresuid:
9752 {
9753 uid_t ruid, euid, suid;
9754 ret = get_errno(getresuid(&ruid, &euid, &suid));
9755 if (!is_error(ret)) {
9756 if (put_user_id(high2lowuid(ruid), arg1)
9757 || put_user_id(high2lowuid(euid), arg2)
9758 || put_user_id(high2lowuid(suid), arg3))
9759 goto efault;
9760 }
9761 }
9762 break;
9763 #endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* setresgid(rgid, egid, sgid): set real/effective/saved group ids,
         * widening each 16-bit target id to a host gid_t.
         * Fix: this arm was guarded by #ifdef TARGET_NR_getresgid, so a
         * target defining setresgid but not getresgid silently lost the
         * syscall; guard with the macro for the case label actually used.
         */
        ret = get_errno(sys_setresgid(low2highgid(arg1),
                                      low2highgid(arg2),
                                      low2highgid(arg3)));
        break;
#endif
9771 #ifdef TARGET_NR_getresgid
9772 case TARGET_NR_getresgid:
9773 {
9774 gid_t rgid, egid, sgid;
9775 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9776 if (!is_error(ret)) {
9777 if (put_user_id(high2lowgid(rgid), arg1)
9778 || put_user_id(high2lowgid(egid), arg2)
9779 || put_user_id(high2lowgid(sgid), arg3))
9780 goto efault;
9781 }
9782 }
9783 break;
9784 #endif
9785 #ifdef TARGET_NR_chown
9786 case TARGET_NR_chown:
9787 if (!(p = lock_user_string(arg1)))
9788 goto efault;
9789 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9790 unlock_user(p, arg1, 0);
9791 break;
9792 #endif
9793 case TARGET_NR_setuid:
9794 ret = get_errno(sys_setuid(low2highuid(arg1)));
9795 break;
9796 case TARGET_NR_setgid:
9797 ret = get_errno(sys_setgid(low2highgid(arg1)));
9798 break;
9799 case TARGET_NR_setfsuid:
9800 ret = get_errno(setfsuid(arg1));
9801 break;
9802 case TARGET_NR_setfsgid:
9803 ret = get_errno(setfsgid(arg1));
9804 break;
9805
9806 #ifdef TARGET_NR_lchown32
9807 case TARGET_NR_lchown32:
9808 if (!(p = lock_user_string(arg1)))
9809 goto efault;
9810 ret = get_errno(lchown(p, arg2, arg3));
9811 unlock_user(p, arg1, 0);
9812 break;
9813 #endif
9814 #ifdef TARGET_NR_getuid32
9815 case TARGET_NR_getuid32:
9816 ret = get_errno(getuid());
9817 break;
9818 #endif
9819
9820 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9821 /* Alpha specific */
9822 case TARGET_NR_getxuid:
9823 {
9824 uid_t euid;
9825 euid=geteuid();
9826 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9827 }
9828 ret = get_errno(getuid());
9829 break;
9830 #endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxgid returns the real gid in v0 and the
     * effective gid in a4. */
    case TARGET_NR_getxgid:
        {
            /* Fix: the effective group id was declared uid_t; use gid_t to
             * match getegid()'s return type (no behavior change on Linux,
             * where both are unsigned int, but correct and self-documenting).
             */
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
9842 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9843 /* Alpha specific */
9844 case TARGET_NR_osf_getsysinfo:
9845 ret = -TARGET_EOPNOTSUPP;
9846 switch (arg1) {
9847 case TARGET_GSI_IEEE_FP_CONTROL:
9848 {
9849 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9850
9851 /* Copied from linux ieee_fpcr_to_swcr. */
9852 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9853 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9854 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9855 | SWCR_TRAP_ENABLE_DZE
9856 | SWCR_TRAP_ENABLE_OVF);
9857 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9858 | SWCR_TRAP_ENABLE_INE);
9859 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9860 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9861
9862 if (put_user_u64 (swcr, arg2))
9863 goto efault;
9864 ret = 0;
9865 }
9866 break;
9867
9868 /* case GSI_IEEE_STATE_AT_SIGNAL:
9869 -- Not implemented in linux kernel.
9870 case GSI_UACPROC:
9871 -- Retrieves current unaligned access state; not much used.
9872 case GSI_PROC_TYPE:
9873 -- Retrieves implver information; surely not used.
9874 case GSI_GET_HWRPB:
9875 -- Grabs a copy of the HWRPB; surely not used.
9876 */
9877 }
9878 break;
9879 #endif
9880 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9881 /* Alpha specific */
9882 case TARGET_NR_osf_setsysinfo:
9883 ret = -TARGET_EOPNOTSUPP;
9884 switch (arg1) {
9885 case TARGET_SSI_IEEE_FP_CONTROL:
9886 {
9887 uint64_t swcr, fpcr, orig_fpcr;
9888
9889 if (get_user_u64 (swcr, arg2)) {
9890 goto efault;
9891 }
9892 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9893 fpcr = orig_fpcr & FPCR_DYN_MASK;
9894
9895 /* Copied from linux ieee_swcr_to_fpcr. */
9896 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9897 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9898 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9899 | SWCR_TRAP_ENABLE_DZE
9900 | SWCR_TRAP_ENABLE_OVF)) << 48;
9901 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9902 | SWCR_TRAP_ENABLE_INE)) << 57;
9903 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9904 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9905
9906 cpu_alpha_store_fpcr(cpu_env, fpcr);
9907 ret = 0;
9908 }
9909 break;
9910
9911 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9912 {
9913 uint64_t exc, fpcr, orig_fpcr;
9914 int si_code;
9915
9916 if (get_user_u64(exc, arg2)) {
9917 goto efault;
9918 }
9919
9920 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9921
9922 /* We only add to the exception status here. */
9923 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9924
9925 cpu_alpha_store_fpcr(cpu_env, fpcr);
9926 ret = 0;
9927
9928 /* Old exceptions are not signaled. */
9929 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9930
9931 /* If any exceptions set by this call,
9932 and are unmasked, send a signal. */
9933 si_code = 0;
9934 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9935 si_code = TARGET_FPE_FLTRES;
9936 }
9937 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9938 si_code = TARGET_FPE_FLTUND;
9939 }
9940 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9941 si_code = TARGET_FPE_FLTOVF;
9942 }
9943 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9944 si_code = TARGET_FPE_FLTDIV;
9945 }
9946 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9947 si_code = TARGET_FPE_FLTINV;
9948 }
9949 if (si_code != 0) {
9950 target_siginfo_t info;
9951 info.si_signo = SIGFPE;
9952 info.si_errno = 0;
9953 info.si_code = si_code;
9954 info._sifields._sigfault._addr
9955 = ((CPUArchState *)cpu_env)->pc;
9956 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9957 }
9958 }
9959 break;
9960
9961 /* case SSI_NVPAIRS:
9962 -- Used with SSIN_UACPROC to enable unaligned accesses.
9963 case SSI_IEEE_STATE_AT_SIGNAL:
9964 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9965 -- Not implemented in linux kernel
9966 */
9967 }
9968 break;
9969 #endif
9970 #ifdef TARGET_NR_osf_sigprocmask
9971 /* Alpha specific. */
9972 case TARGET_NR_osf_sigprocmask:
9973 {
9974 abi_ulong mask;
9975 int how;
9976 sigset_t set, oldset;
9977
9978 switch(arg1) {
9979 case TARGET_SIG_BLOCK:
9980 how = SIG_BLOCK;
9981 break;
9982 case TARGET_SIG_UNBLOCK:
9983 how = SIG_UNBLOCK;
9984 break;
9985 case TARGET_SIG_SETMASK:
9986 how = SIG_SETMASK;
9987 break;
9988 default:
9989 ret = -TARGET_EINVAL;
9990 goto fail;
9991 }
9992 mask = arg2;
9993 target_to_host_old_sigset(&set, &mask);
9994 ret = do_sigprocmask(how, &set, &oldset);
9995 if (!ret) {
9996 host_to_target_old_sigset(&mask, &oldset);
9997 ret = mask;
9998 }
9999 }
10000 break;
10001 #endif
10002
10003 #ifdef TARGET_NR_getgid32
10004 case TARGET_NR_getgid32:
10005 ret = get_errno(getgid());
10006 break;
10007 #endif
10008 #ifdef TARGET_NR_geteuid32
10009 case TARGET_NR_geteuid32:
10010 ret = get_errno(geteuid());
10011 break;
10012 #endif
10013 #ifdef TARGET_NR_getegid32
10014 case TARGET_NR_getegid32:
10015 ret = get_errno(getegid());
10016 break;
10017 #endif
10018 #ifdef TARGET_NR_setreuid32
10019 case TARGET_NR_setreuid32:
10020 ret = get_errno(setreuid(arg1, arg2));
10021 break;
10022 #endif
10023 #ifdef TARGET_NR_setregid32
10024 case TARGET_NR_setregid32:
10025 ret = get_errno(setregid(arg1, arg2));
10026 break;
10027 #endif
10028 #ifdef TARGET_NR_getgroups32
10029 case TARGET_NR_getgroups32:
10030 {
10031 int gidsetsize = arg1;
10032 uint32_t *target_grouplist;
10033 gid_t *grouplist;
10034 int i;
10035
10036 grouplist = alloca(gidsetsize * sizeof(gid_t));
10037 ret = get_errno(getgroups(gidsetsize, grouplist));
10038 if (gidsetsize == 0)
10039 break;
10040 if (!is_error(ret)) {
10041 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10042 if (!target_grouplist) {
10043 ret = -TARGET_EFAULT;
10044 goto fail;
10045 }
10046 for(i = 0;i < ret; i++)
10047 target_grouplist[i] = tswap32(grouplist[i]);
10048 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10049 }
10050 }
10051 break;
10052 #endif
10053 #ifdef TARGET_NR_setgroups32
10054 case TARGET_NR_setgroups32:
10055 {
10056 int gidsetsize = arg1;
10057 uint32_t *target_grouplist;
10058 gid_t *grouplist;
10059 int i;
10060
10061 grouplist = alloca(gidsetsize * sizeof(gid_t));
10062 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10063 if (!target_grouplist) {
10064 ret = -TARGET_EFAULT;
10065 goto fail;
10066 }
10067 for(i = 0;i < gidsetsize; i++)
10068 grouplist[i] = tswap32(target_grouplist[i]);
10069 unlock_user(target_grouplist, arg2, 0);
10070 ret = get_errno(setgroups(gidsetsize, grouplist));
10071 }
10072 break;
10073 #endif
10074 #ifdef TARGET_NR_fchown32
10075 case TARGET_NR_fchown32:
10076 ret = get_errno(fchown(arg1, arg2, arg3));
10077 break;
10078 #endif
10079 #ifdef TARGET_NR_setresuid32
10080 case TARGET_NR_setresuid32:
10081 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10082 break;
10083 #endif
10084 #ifdef TARGET_NR_getresuid32
10085 case TARGET_NR_getresuid32:
10086 {
10087 uid_t ruid, euid, suid;
10088 ret = get_errno(getresuid(&ruid, &euid, &suid));
10089 if (!is_error(ret)) {
10090 if (put_user_u32(ruid, arg1)
10091 || put_user_u32(euid, arg2)
10092 || put_user_u32(suid, arg3))
10093 goto efault;
10094 }
10095 }
10096 break;
10097 #endif
10098 #ifdef TARGET_NR_setresgid32
10099 case TARGET_NR_setresgid32:
10100 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10101 break;
10102 #endif
10103 #ifdef TARGET_NR_getresgid32
10104 case TARGET_NR_getresgid32:
10105 {
10106 gid_t rgid, egid, sgid;
10107 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10108 if (!is_error(ret)) {
10109 if (put_user_u32(rgid, arg1)
10110 || put_user_u32(egid, arg2)
10111 || put_user_u32(sgid, arg3))
10112 goto efault;
10113 }
10114 }
10115 break;
10116 #endif
10117 #ifdef TARGET_NR_chown32
10118 case TARGET_NR_chown32:
10119 if (!(p = lock_user_string(arg1)))
10120 goto efault;
10121 ret = get_errno(chown(p, arg2, arg3));
10122 unlock_user(p, arg1, 0);
10123 break;
10124 #endif
10125 #ifdef TARGET_NR_setuid32
10126 case TARGET_NR_setuid32:
10127 ret = get_errno(sys_setuid(arg1));
10128 break;
10129 #endif
10130 #ifdef TARGET_NR_setgid32
10131 case TARGET_NR_setgid32:
10132 ret = get_errno(sys_setgid(arg1));
10133 break;
10134 #endif
10135 #ifdef TARGET_NR_setfsuid32
10136 case TARGET_NR_setfsuid32:
10137 ret = get_errno(setfsuid(arg1));
10138 break;
10139 #endif
10140 #ifdef TARGET_NR_setfsgid32
10141 case TARGET_NR_setfsgid32:
10142 ret = get_errno(setfsgid(arg1));
10143 break;
10144 #endif
10145
10146 case TARGET_NR_pivot_root:
10147 goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* mincore(addr, length, vec): report page residency for the
             * guest range [arg1, arg1 + arg2) into the byte vector at arg3.
             */
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            /* NOTE(review): on success ret is 0, so this unlock copies zero
             * bytes back -- on configurations where unlock_user() must copy,
             * the residency vector may never reach the guest. Also,
             * lock_user_string() on an *output* buffer looks suspect;
             * confirm both against upstream. */
            unlock_user(p, arg3, ret);
        mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
10164 #ifdef TARGET_NR_arm_fadvise64_64
10165 case TARGET_NR_arm_fadvise64_64:
10166 /* arm_fadvise64_64 looks like fadvise64_64 but
10167 * with different argument order: fd, advice, offset, len
10168 * rather than the usual fd, offset, len, advice.
10169 * Note that offset and len are both 64-bit so appear as
10170 * pairs of 32-bit registers.
10171 */
10172 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10173 target_offset64(arg5, arg6), arg2);
10174 ret = -host_to_target_errno(ret);
10175 break;
10176 #endif
10177
10178 #if TARGET_ABI_BITS == 32
10179
10180 #ifdef TARGET_NR_fadvise64_64
10181 case TARGET_NR_fadvise64_64:
10182 /* 6 args: fd, offset (high, low), len (high, low), advice */
10183 if (regpairs_aligned(cpu_env)) {
10184 /* offset is in (3,4), len in (5,6) and advice in 7 */
10185 arg2 = arg3;
10186 arg3 = arg4;
10187 arg4 = arg5;
10188 arg5 = arg6;
10189 arg6 = arg7;
10190 }
10191 ret = -host_to_target_errno(posix_fadvise(arg1,
10192 target_offset64(arg2, arg3),
10193 target_offset64(arg4, arg5),
10194 arg6));
10195 break;
10196 #endif
10197
10198 #ifdef TARGET_NR_fadvise64
10199 case TARGET_NR_fadvise64:
10200 /* 5 args: fd, offset (high, low), len, advice */
10201 if (regpairs_aligned(cpu_env)) {
10202 /* offset is in (3,4), len in 5 and advice in 6 */
10203 arg2 = arg3;
10204 arg3 = arg4;
10205 arg4 = arg5;
10206 arg5 = arg6;
10207 }
10208 ret = -host_to_target_errno(posix_fadvise(arg1,
10209 target_offset64(arg2, arg3),
10210 arg4, arg5));
10211 break;
10212 #endif
10213
10214 #else /* not a 32-bit ABI */
10215 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10216 #ifdef TARGET_NR_fadvise64_64
10217 case TARGET_NR_fadvise64_64:
10218 #endif
10219 #ifdef TARGET_NR_fadvise64
10220 case TARGET_NR_fadvise64:
10221 #endif
10222 #ifdef TARGET_S390X
10223 switch (arg4) {
10224 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10225 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10226 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10227 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10228 default: break;
10229 }
10230 #endif
10231 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10232 break;
10233 #endif
10234 #endif /* end of 64-bit ABI fadvise handling */
10235
10236 #ifdef TARGET_NR_madvise
10237 case TARGET_NR_madvise:
10238 /* A straight passthrough may not be safe because qemu sometimes
10239 turns private file-backed mappings into anonymous mappings.
10240 This will break MADV_DONTNEED.
10241 This is a hint, so ignoring and returning success is ok. */
10242 ret = get_errno(0);
10243 break;
10244 #endif
10245 #if TARGET_ABI_BITS == 32
10246 case TARGET_NR_fcntl64:
10247 {
10248 int cmd;
10249 struct flock64 fl;
10250 from_flock64_fn *copyfrom = copy_from_user_flock64;
10251 to_flock64_fn *copyto = copy_to_user_flock64;
10252
10253 #ifdef TARGET_ARM
10254 if (((CPUARMState *)cpu_env)->eabi) {
10255 copyfrom = copy_from_user_eabi_flock64;
10256 copyto = copy_to_user_eabi_flock64;
10257 }
10258 #endif
10259
10260 cmd = target_to_host_fcntl_cmd(arg2);
10261 if (cmd == -TARGET_EINVAL) {
10262 ret = cmd;
10263 break;
10264 }
10265
10266 switch(arg2) {
10267 case TARGET_F_GETLK64:
10268 ret = copyfrom(&fl, arg3);
10269 if (ret) {
10270 break;
10271 }
10272 ret = get_errno(fcntl(arg1, cmd, &fl));
10273 if (ret == 0) {
10274 ret = copyto(arg3, &fl);
10275 }
10276 break;
10277
10278 case TARGET_F_SETLK64:
10279 case TARGET_F_SETLKW64:
10280 ret = copyfrom(&fl, arg3);
10281 if (ret) {
10282 break;
10283 }
10284 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10285 break;
10286 default:
10287 ret = do_fcntl(arg1, arg2, arg3);
10288 break;
10289 }
10290 break;
10291 }
10292 #endif
10293 #ifdef TARGET_NR_cacheflush
10294 case TARGET_NR_cacheflush:
10295 /* self-modifying code is handled automatically, so nothing needed */
10296 ret = 0;
10297 break;
10298 #endif
10299 #ifdef TARGET_NR_security
10300 case TARGET_NR_security:
10301 goto unimplemented;
10302 #endif
10303 #ifdef TARGET_NR_getpagesize
10304 case TARGET_NR_getpagesize:
10305 ret = TARGET_PAGE_SIZE;
10306 break;
10307 #endif
10308 case TARGET_NR_gettid:
10309 ret = get_errno(gettid());
10310 break;
10311 #ifdef TARGET_NR_readahead
10312 case TARGET_NR_readahead:
10313 #if TARGET_ABI_BITS == 32
10314 if (regpairs_aligned(cpu_env)) {
10315 arg2 = arg3;
10316 arg3 = arg4;
10317 arg4 = arg5;
10318 }
10319 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10320 #else
10321 ret = get_errno(readahead(arg1, arg2, arg3));
10322 #endif
10323 break;
10324 #endif
10325 #ifdef CONFIG_ATTR
10326 #ifdef TARGET_NR_setxattr
10327 case TARGET_NR_listxattr:
10328 case TARGET_NR_llistxattr:
10329 {
10330 void *p, *b = 0;
10331 if (arg2) {
10332 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10333 if (!b) {
10334 ret = -TARGET_EFAULT;
10335 break;
10336 }
10337 }
10338 p = lock_user_string(arg1);
10339 if (p) {
10340 if (num == TARGET_NR_listxattr) {
10341 ret = get_errno(listxattr(p, b, arg3));
10342 } else {
10343 ret = get_errno(llistxattr(p, b, arg3));
10344 }
10345 } else {
10346 ret = -TARGET_EFAULT;
10347 }
10348 unlock_user(p, arg1, 0);
10349 unlock_user(b, arg2, arg3);
10350 break;
10351 }
10352 case TARGET_NR_flistxattr:
10353 {
10354 void *b = 0;
10355 if (arg2) {
10356 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10357 if (!b) {
10358 ret = -TARGET_EFAULT;
10359 break;
10360 }
10361 }
10362 ret = get_errno(flistxattr(arg1, b, arg3));
10363 unlock_user(b, arg2, arg3);
10364 break;
10365 }
10366 case TARGET_NR_setxattr:
10367 case TARGET_NR_lsetxattr:
10368 {
10369 void *p, *n, *v = 0;
10370 if (arg3) {
10371 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10372 if (!v) {
10373 ret = -TARGET_EFAULT;
10374 break;
10375 }
10376 }
10377 p = lock_user_string(arg1);
10378 n = lock_user_string(arg2);
10379 if (p && n) {
10380 if (num == TARGET_NR_setxattr) {
10381 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10382 } else {
10383 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10384 }
10385 } else {
10386 ret = -TARGET_EFAULT;
10387 }
10388 unlock_user(p, arg1, 0);
10389 unlock_user(n, arg2, 0);
10390 unlock_user(v, arg3, 0);
10391 }
10392 break;
10393 case TARGET_NR_fsetxattr:
10394 {
10395 void *n, *v = 0;
10396 if (arg3) {
10397 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10398 if (!v) {
10399 ret = -TARGET_EFAULT;
10400 break;
10401 }
10402 }
10403 n = lock_user_string(arg2);
10404 if (n) {
10405 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10406 } else {
10407 ret = -TARGET_EFAULT;
10408 }
10409 unlock_user(n, arg2, 0);
10410 unlock_user(v, arg3, 0);
10411 }
10412 break;
10413 case TARGET_NR_getxattr:
10414 case TARGET_NR_lgetxattr:
10415 {
10416 void *p, *n, *v = 0;
10417 if (arg3) {
10418 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10419 if (!v) {
10420 ret = -TARGET_EFAULT;
10421 break;
10422 }
10423 }
10424 p = lock_user_string(arg1);
10425 n = lock_user_string(arg2);
10426 if (p && n) {
10427 if (num == TARGET_NR_getxattr) {
10428 ret = get_errno(getxattr(p, n, v, arg4));
10429 } else {
10430 ret = get_errno(lgetxattr(p, n, v, arg4));
10431 }
10432 } else {
10433 ret = -TARGET_EFAULT;
10434 }
10435 unlock_user(p, arg1, 0);
10436 unlock_user(n, arg2, 0);
10437 unlock_user(v, arg3, arg4);
10438 }
10439 break;
10440 case TARGET_NR_fgetxattr:
10441 {
10442 void *n, *v = 0;
10443 if (arg3) {
10444 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10445 if (!v) {
10446 ret = -TARGET_EFAULT;
10447 break;
10448 }
10449 }
10450 n = lock_user_string(arg2);
10451 if (n) {
10452 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10453 } else {
10454 ret = -TARGET_EFAULT;
10455 }
10456 unlock_user(n, arg2, 0);
10457 unlock_user(v, arg3, arg4);
10458 }
10459 break;
10460 case TARGET_NR_removexattr:
10461 case TARGET_NR_lremovexattr:
10462 {
10463 void *p, *n;
10464 p = lock_user_string(arg1);
10465 n = lock_user_string(arg2);
10466 if (p && n) {
10467 if (num == TARGET_NR_removexattr) {
10468 ret = get_errno(removexattr(p, n));
10469 } else {
10470 ret = get_errno(lremovexattr(p, n));
10471 }
10472 } else {
10473 ret = -TARGET_EFAULT;
10474 }
10475 unlock_user(p, arg1, 0);
10476 unlock_user(n, arg2, 0);
10477 }
10478 break;
10479 case TARGET_NR_fremovexattr:
10480 {
10481 void *n;
10482 n = lock_user_string(arg2);
10483 if (n) {
10484 ret = get_errno(fremovexattr(arg1, n));
10485 } else {
10486 ret = -TARGET_EFAULT;
10487 }
10488 unlock_user(n, arg2, 0);
10489 }
10490 break;
10491 #endif
10492 #endif /* CONFIG_ATTR */
10493 #ifdef TARGET_NR_set_thread_area
10494 case TARGET_NR_set_thread_area:
10495 #if defined(TARGET_MIPS)
10496 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10497 ret = 0;
10498 break;
10499 #elif defined(TARGET_CRIS)
10500 if (arg1 & 0xff)
10501 ret = -TARGET_EINVAL;
10502 else {
10503 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10504 ret = 0;
10505 }
10506 break;
10507 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10508 ret = do_set_thread_area(cpu_env, arg1);
10509 break;
10510 #elif defined(TARGET_M68K)
10511 {
10512 TaskState *ts = cpu->opaque;
10513 ts->tp_value = arg1;
10514 ret = 0;
10515 break;
10516 }
10517 #else
10518 goto unimplemented_nowarn;
10519 #endif
10520 #endif
10521 #ifdef TARGET_NR_get_thread_area
10522 case TARGET_NR_get_thread_area:
10523 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10524 ret = do_get_thread_area(cpu_env, arg1);
10525 break;
10526 #elif defined(TARGET_M68K)
10527 {
10528 TaskState *ts = cpu->opaque;
10529 ret = ts->tp_value;
10530 break;
10531 }
10532 #else
10533 goto unimplemented_nowarn;
10534 #endif
10535 #endif
10536 #ifdef TARGET_NR_getdomainname
10537 case TARGET_NR_getdomainname:
10538 goto unimplemented_nowarn;
10539 #endif
10540
10541 #ifdef TARGET_NR_clock_gettime
10542 case TARGET_NR_clock_gettime:
10543 {
10544 struct timespec ts;
10545 ret = get_errno(clock_gettime(arg1, &ts));
10546 if (!is_error(ret)) {
10547 host_to_target_timespec(arg2, &ts);
10548 }
10549 break;
10550 }
10551 #endif
10552 #ifdef TARGET_NR_clock_getres
10553 case TARGET_NR_clock_getres:
10554 {
10555 struct timespec ts;
10556 ret = get_errno(clock_getres(arg1, &ts));
10557 if (!is_error(ret)) {
10558 host_to_target_timespec(arg2, &ts);
10559 }
10560 break;
10561 }
10562 #endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        /* clock_nanosleep(clockid, flags, request, remain): sleep on the
         * given clock; if interrupted and arg4 is non-NULL, the remaining
         * time is written back to the guest. */
        struct timespec ts;
        /* NOTE(review): the return value of target_to_host_timespec() is
         * ignored, so a faulting arg3 pointer goes unreported -- confirm. */
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        break;
    }
#endif
10583
10584 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10585 case TARGET_NR_set_tid_address:
10586 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10587 break;
10588 #endif
10589
10590 case TARGET_NR_tkill:
10591 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10592 break;
10593
10594 case TARGET_NR_tgkill:
10595 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
10596 target_to_host_signal(arg3)));
10597 break;
10598
10599 #ifdef TARGET_NR_set_robust_list
10600 case TARGET_NR_set_robust_list:
10601 case TARGET_NR_get_robust_list:
10602 /* The ABI for supporting robust futexes has userspace pass
10603 * the kernel a pointer to a linked list which is updated by
10604 * userspace after the syscall; the list is walked by the kernel
10605 * when the thread exits. Since the linked list in QEMU guest
10606 * memory isn't a valid linked list for the host and we have
10607 * no way to reliably intercept the thread-death event, we can't
10608 * support these. Silently return ENOSYS so that guest userspace
10609 * falls back to a non-robust futex implementation (which should
10610 * be OK except in the corner case of the guest crashing while
10611 * holding a mutex that is shared with another process via
10612 * shared memory).
10613 */
10614 goto unimplemented_nowarn;
10615 #endif
10616
10617 #if defined(TARGET_NR_utimensat)
10618 case TARGET_NR_utimensat:
10619 {
10620 struct timespec *tsp, ts[2];
10621 if (!arg3) {
10622 tsp = NULL;
10623 } else {
10624 target_to_host_timespec(ts, arg3);
10625 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10626 tsp = ts;
10627 }
10628 if (!arg2)
10629 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10630 else {
10631 if (!(p = lock_user_string(arg2))) {
10632 ret = -TARGET_EFAULT;
10633 goto fail;
10634 }
10635 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10636 unlock_user(p, arg2, 0);
10637 }
10638 }
10639 break;
10640 #endif
10641 case TARGET_NR_futex:
10642 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10643 break;
10644 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10645 case TARGET_NR_inotify_init:
10646 ret = get_errno(sys_inotify_init());
10647 break;
10648 #endif
10649 #ifdef CONFIG_INOTIFY1
10650 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10651 case TARGET_NR_inotify_init1:
10652 ret = get_errno(sys_inotify_init1(arg1));
10653 break;
10654 #endif
10655 #endif
10656 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10657 case TARGET_NR_inotify_add_watch:
10658 p = lock_user_string(arg2);
10659 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10660 unlock_user(p, arg2, 0);
10661 break;
10662 #endif
10663 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10664 case TARGET_NR_inotify_rm_watch:
10665 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10666 break;
10667 #endif
10668
10669 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10670 case TARGET_NR_mq_open:
10671 {
10672 struct mq_attr posix_mq_attr, *attrp;
10673
10674 p = lock_user_string(arg1 - 1);
10675 if (arg4 != 0) {
10676 copy_from_user_mq_attr (&posix_mq_attr, arg4);
10677 attrp = &posix_mq_attr;
10678 } else {
10679 attrp = 0;
10680 }
10681 ret = get_errno(mq_open(p, arg2, arg3, attrp));
10682 unlock_user (p, arg1, 0);
10683 }
10684 break;
10685
10686 case TARGET_NR_mq_unlink:
10687 p = lock_user_string(arg1 - 1);
10688 ret = get_errno(mq_unlink(p));
10689 unlock_user (p, arg1, 0);
10690 break;
10691
    case TARGET_NR_mq_timedsend:
        {
            /* mq_timedsend(mqdes, msg, len, prio, timeout): send a message,
             * with an optional absolute timeout converted from the target's
             * timespec layout. */
            struct timespec ts;

            /* NOTE(review): the lock_user() result is not NULL-checked
             * before use -- confirm a bad guest buffer cannot reach the
             * host mq_timedsend. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            /* mq_timedreceive(mqdes, msg, len, prio, timeout): receive a
             * message into the guest buffer at arg2; the message priority
             * is written back through arg4 if requested. */
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): lock_user() is unchecked, and VERIFY_READ is
             * used for a buffer the host *writes* into (the copy-back is
             * done by the unlock below) -- both look suspect; confirm. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            /* NOTE(review): put_user_u32() failure is ignored here. */
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;
10728
10729 /* Not implemented for now... */
10730 /* case TARGET_NR_mq_notify: */
10731 /* break; */
10732
    case TARGET_NR_mq_getsetattr:
        {
            /* mq_getsetattr(mqdes, newattr, oldattr): optionally fetch the
             * current queue attributes into arg3, then optionally install
             * new attributes from arg2. */
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                /* NOTE(review): mq_getattr/mq_setattr return -1 with host
                 * errno, not -TARGET_xxx values, and OR-ing the two results
                 * loses which call failed -- this error path looks suspect;
                 * confirm against the kernel's mq_getsetattr semantics. */
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }

        }
        break;
10748 #endif
10749
10750 #ifdef CONFIG_SPLICE
10751 #ifdef TARGET_NR_tee
10752 case TARGET_NR_tee:
10753 {
10754 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10755 }
10756 break;
10757 #endif
10758 #ifdef TARGET_NR_splice
10759 case TARGET_NR_splice:
10760 {
10761 loff_t loff_in, loff_out;
10762 loff_t *ploff_in = NULL, *ploff_out = NULL;
10763 if (arg2) {
10764 if (get_user_u64(loff_in, arg2)) {
10765 goto efault;
10766 }
10767 ploff_in = &loff_in;
10768 }
10769 if (arg4) {
10770 if (get_user_u64(loff_out, arg4)) {
10771 goto efault;
10772 }
10773 ploff_out = &loff_out;
10774 }
10775 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10776 if (arg2) {
10777 if (put_user_u64(loff_in, arg2)) {
10778 goto efault;
10779 }
10780 }
10781 if (arg4) {
10782 if (put_user_u64(loff_out, arg4)) {
10783 goto efault;
10784 }
10785 }
10786 }
10787 break;
10788 #endif
10789 #ifdef TARGET_NR_vmsplice
10790 case TARGET_NR_vmsplice:
10791 {
10792 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10793 if (vec != NULL) {
10794 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10795 unlock_iovec(vec, arg2, arg3, 0);
10796 } else {
10797 ret = -host_to_target_errno(errno);
10798 }
10799 }
10800 break;
10801 #endif
10802 #endif /* CONFIG_SPLICE */
10803 #ifdef CONFIG_EVENTFD
10804 #if defined(TARGET_NR_eventfd)
10805 case TARGET_NR_eventfd:
10806 ret = get_errno(eventfd(arg1, 0));
10807 fd_trans_unregister(ret);
10808 break;
10809 #endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* eventfd2(count, flags): translate the target's O_NONBLOCK and
         * O_CLOEXEC bits into the host's values, passing every other flag
         * bit through unchanged, then create the eventfd.
         */
        int host_flags = arg2 & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC);

        host_flags |= (arg2 & TARGET_O_NONBLOCK) ? O_NONBLOCK : 0;
        host_flags |= (arg2 & TARGET_O_CLOEXEC) ? O_CLOEXEC : 0;

        ret = get_errno(eventfd(arg1, host_flags));
        /* drop any fd translator registered for a recycled descriptor */
        fd_trans_unregister(ret);
        break;
    }
#endif
10825 #endif /* CONFIG_EVENTFD */
10826 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10827 case TARGET_NR_fallocate:
10828 #if TARGET_ABI_BITS == 32
10829 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10830 target_offset64(arg5, arg6)));
10831 #else
10832 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10833 #endif
10834 break;
10835 #endif
10836 #if defined(CONFIG_SYNC_FILE_RANGE)
10837 #if defined(TARGET_NR_sync_file_range)
10838 case TARGET_NR_sync_file_range:
10839 #if TARGET_ABI_BITS == 32
10840 #if defined(TARGET_MIPS)
10841 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10842 target_offset64(arg5, arg6), arg7));
10843 #else
10844 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10845 target_offset64(arg4, arg5), arg6));
10846 #endif /* !TARGET_MIPS */
10847 #else
10848 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10849 #endif
10850 break;
10851 #endif
10852 #if defined(TARGET_NR_sync_file_range2)
10853 case TARGET_NR_sync_file_range2:
10854 /* This is like sync_file_range but the arguments are reordered */
10855 #if TARGET_ABI_BITS == 32
10856 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10857 target_offset64(arg5, arg6), arg2));
10858 #else
10859 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10860 #endif
10861 break;
10862 #endif
10863 #endif
10864 #if defined(TARGET_NR_signalfd4)
10865 case TARGET_NR_signalfd4:
10866 ret = do_signalfd4(arg1, arg2, arg4);
10867 break;
10868 #endif
10869 #if defined(TARGET_NR_signalfd)
10870 case TARGET_NR_signalfd:
10871 ret = do_signalfd4(arg1, arg2, 0);
10872 break;
10873 #endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* The size argument is ignored by modern kernels but must be > 0;
         * pass it through unchanged. */
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        /* NOTE(review): arg1 flags (EPOLL_CLOEXEC) are passed through
         * untranslated — presumably the guest and host values match;
         * verify for all supported targets. */
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        /* epoll_ctl(epfd, op, fd, event): convert the optional guest
         * epoll_event before handing it to the host. */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        /* A NULL event pointer is forwarded as-is (valid for EPOLL_CTL_DEL). */
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
10908
10909 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10910 #if defined(TARGET_NR_epoll_wait)
10911 case TARGET_NR_epoll_wait:
10912 #endif
10913 #if defined(TARGET_NR_epoll_pwait)
10914 case TARGET_NR_epoll_pwait:
10915 #endif
10916 {
10917 struct target_epoll_event *target_ep;
10918 struct epoll_event *ep;
10919 int epfd = arg1;
10920 int maxevents = arg3;
10921 int timeout = arg4;
10922
10923 target_ep = lock_user(VERIFY_WRITE, arg2,
10924 maxevents * sizeof(struct target_epoll_event), 1);
10925 if (!target_ep) {
10926 goto efault;
10927 }
10928
10929 ep = alloca(maxevents * sizeof(struct epoll_event));
10930
10931 switch (num) {
10932 #if defined(TARGET_NR_epoll_pwait)
10933 case TARGET_NR_epoll_pwait:
10934 {
10935 target_sigset_t *target_set;
10936 sigset_t _set, *set = &_set;
10937
10938 if (arg5) {
10939 target_set = lock_user(VERIFY_READ, arg5,
10940 sizeof(target_sigset_t), 1);
10941 if (!target_set) {
10942 unlock_user(target_ep, arg2, 0);
10943 goto efault;
10944 }
10945 target_to_host_sigset(set, target_set);
10946 unlock_user(target_set, arg5, 0);
10947 } else {
10948 set = NULL;
10949 }
10950
10951 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10952 set, SIGSET_T_SIZE));
10953 break;
10954 }
10955 #endif
10956 #if defined(TARGET_NR_epoll_wait)
10957 case TARGET_NR_epoll_wait:
10958 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10959 NULL, 0));
10960 break;
10961 #endif
10962 default:
10963 ret = -TARGET_ENOSYS;
10964 }
10965 if (!is_error(ret)) {
10966 int i;
10967 for (i = 0; i < ret; i++) {
10968 target_ep[i].events = tswap32(ep[i].events);
10969 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10970 }
10971 }
10972 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10973 break;
10974 }
10975 #endif
10976 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit.
         * Either pointer may be NULL; the old limit is only copied back on
         * success. */
        struct target_rlimit64 *tnew, *told;
        struct host_rlimit64 hnew, hold;
        struct host_rlimit64 *hnewp = NULL;
        int resource = target_to_host_resource(arg2);

        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, tnew, arg3, 1)) {
                goto efault;
            }
            hnew.rlim_cur = tswap64(tnew->rlim_cur);
            hnew.rlim_max = tswap64(tnew->rlim_max);
            unlock_user_struct(tnew, arg3, 0);
            hnewp = &hnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, hnewp,
                                      arg4 ? &hold : NULL));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, told, arg4, 1)) {
                goto efault;
            }
            told->rlim_cur = tswap64(hold.rlim_cur);
            told->rlim_max = tswap64(hold.rlim_max);
            unlock_user_struct(told, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* gethostname(name, len): write the host's hostname into the
         * guest buffer at arg1 (arg2 bytes long). */
        char *buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!buf) {
            ret = -TARGET_EFAULT;
        } else {
            ret = get_errno(gethostname(buf, arg2));
            unlock_user(buf, arg1, arg2);
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Kernel-assisted 32-bit compare-and-exchange: compare *arg6 with
         * arg2 and, on match, store arg1; always return the old value.
         * should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* The address faulted: deliver SIGSEGV to the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
            /* Bug fix: bail out here. Falling through would compare the
             * never-loaded mem_value (undefined behaviour) and could even
             * attempt a store through the faulting address. */
            break;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        /* Always succeeds; no memory barrier is actually issued here. */
        ret = 0;
        break;
    }
#endif
11048
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Claim a slot in the g_posix_timers table; the guest-visible timer
         * id encodes this index. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            /* Timer table exhausted. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                /* NOTE(review): on conversion failure the reserved slot is
                 * abandoned without being released — confirm whether
                 * next_free_host_timer() can hand it out again. */
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an id tagged with TIMER_MAGIC so that
                 * get_timer_id() can validate it later. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
11084
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            /* Report a faulting new_value pointer instead of silently
             * arming the timer with a zeroed spec. */
            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Bug fix: the previous value must be copied back through arg4
             * (old_value) — arg2 is the flags word, not a pointer. Also
             * honour a NULL old_value and report a bad pointer. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
11108
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t tid = get_timer_id(arg1);

        if (tid < 0) {
            /* get_timer_id() already produced a target errno. */
            ret = tid;
        } else if (arg2 == 0) {
            ret = -TARGET_EFAULT;
        } else {
            struct itimerspec current;

            ret = get_errno(timer_gettime(g_posix_timers[tid], &current));
            if (host_to_target_itimerspec(arg2, &current)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
11131
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* Bug fix: dropped the stray fd_trans_unregister(ret) that was
         * copy-pasted here. The return value is an overrun count (or a
         * negative errno), not a file descriptor; unregistering it could
         * clobber the fd translator of an unrelated descriptor with the
         * same number (e.g. fd 0 for a zero overrun count). */
        break;
    }
#endif
11148
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t tid = get_timer_id(arg1);

        if (tid < 0) {
            ret = tid;
        } else {
            /* Delete the host timer and release the table slot. */
            ret = get_errno(timer_delete(g_posix_timers[tid]));
            g_posix_timers[tid] = 0;
        }
        break;
    }
#endif
11165
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* timerfd_create(clockid, flags): translate the guest O_* style
         * flags (TFD_NONBLOCK/TFD_CLOEXEC) through the fcntl flag table. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        /* NOTE(review): if timerfd_gettime() failed, its_curr is
         * unspecified yet is still converted when arg2 is set — confirm
         * whether the copy-back should be gated on !is_error(ret). */
        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            goto efault;
        }
    }
    break;
#endif
11186
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        /* timerfd_settime(fd, flags, new_value, old_value): new_value may
         * be NULL (forwarded as-is); old_value is copied back only if the
         * guest supplied a pointer. */
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            goto efault;
        }
    }
    break;
#endif
11209
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* ioprio_get(which, who): arguments are plain integers, so they
         * pass straight through to the host. */
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* ioprio_set(which, who, ioprio): integer passthrough. */
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* setns(fd, nstype): the fd is a host fd and nstype flags match
         * the host's, so no translation is needed. */
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* unshare(flags): CLONE_* flag values are identical on all Linux
         * targets, so pass them through. */
        ret = get_errno(unshare(arg1));
        break;
#endif
11232
    default:
    unimplemented:
        /* Syscall number not handled by any case above: warn so the user
         * can see why the guest received ENOSYS. */
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
        /* Jump target for syscalls that are knowingly unimplemented and
         * harmless enough not to warrant the warning above. */
#endif
        ret = -TARGET_ENOSYS;
        break;
11241 }
11242 fail:
11243 #ifdef DEBUG
11244 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11245 #endif
11246 if(do_strace)
11247 print_syscall_ret(num, ret);
11248 return ret;
11249 efault:
11250 ret = -TARGET_EFAULT;
11251 goto fail;
11252 }