mirror_qemu.git / linux-user / syscall.c (commit: "linux-user: Implement BLKPG ioctl")
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include <linux/filter.h>
110 #include <linux/blkpg.h>
111 #include "linux_loop.h"
112 #include "cpu-uname.h"
113
114 #include "qemu.h"
115
116 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
117 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118
119 //#define DEBUG
120
121 //#include <linux/msdos_fs.h>
122 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
123 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
124
125
126 #undef _syscall0
127 #undef _syscall1
128 #undef _syscall2
129 #undef _syscall3
130 #undef _syscall4
131 #undef _syscall5
132 #undef _syscall6
133
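/* Wrapper-generating macros: each _syscallN defines a static function
 * that invokes the host kernel directly via syscall(2), independent of
 * whatever wrapper (if any) the host libc provides. */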
134 #define _syscall0(type,name) \
135 static type name (void) \
136 { \
137 return syscall(__NR_##name); \
138 }
139
140 #define _syscall1(type,name,type1,arg1) \
141 static type name (type1 arg1) \
142 { \
143 return syscall(__NR_##name, arg1); \
144 }
145
146 #define _syscall2(type,name,type1,arg1,type2,arg2) \
147 static type name (type1 arg1,type2 arg2) \
148 { \
149 return syscall(__NR_##name, arg1, arg2); \
150 }
151
152 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
153 static type name (type1 arg1,type2 arg2,type3 arg3) \
154 { \
155 return syscall(__NR_##name, arg1, arg2, arg3); \
156 }
157
158 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
160 { \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
162 }
163
164 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 type5,arg5) \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
167 { \
168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
169 }
170
171
172 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
173 type5,arg5,type6,arg6) \
174 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
175 type6 arg6) \
176 { \
177 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
178 }
179
180
181 #define __NR_sys_uname __NR_uname
182 #define __NR_sys_getcwd1 __NR_getcwd
183 #define __NR_sys_getdents __NR_getdents
184 #define __NR_sys_getdents64 __NR_getdents64
185 #define __NR_sys_getpriority __NR_getpriority
186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
187 #define __NR_sys_syslog __NR_syslog
188 #define __NR_sys_tgkill __NR_tgkill
189 #define __NR_sys_tkill __NR_tkill
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
194
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 defined(__s390x__)
197 #define __NR__llseek __NR_lseek
198 #endif
199
200 #ifdef __NR_gettid
201 _syscall0(int, gettid)
202 #else
203 /* This is a replacement for the host gettid() and must return a host
204 errno. */
205 static int gettid(void) {
206 return -ENOSYS;
207 }
208 #endif
209 #ifdef __NR_getdents
210 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
211 #endif
212 #if !defined(__NR_getdents) || \
213 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
214 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
215 #endif
216 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
217 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
218 loff_t *, res, uint, wh);
219 #endif
220 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
221 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
222 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
223 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
224 #endif
225 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
226 _syscall2(int,sys_tkill,int,tid,int,sig)
227 #endif
228 #ifdef __NR_exit_group
229 _syscall1(int,exit_group,int,error_code)
230 #endif
231 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
232 _syscall1(int,set_tid_address,int *,tidptr)
233 #endif
234 #if defined(TARGET_NR_futex) && defined(__NR_futex)
235 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
236 const struct timespec *,timeout,int *,uaddr2,int,val3)
237 #endif
238 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
239 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
240 unsigned long *, user_mask_ptr);
241 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
242 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
243 unsigned long *, user_mask_ptr);
244 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
245 void *, arg);
246
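/* Translation table for open(2)/fcntl(2) flag bits: each entry maps a
 * target flag onto the corresponding host flag. */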
247 static bitmask_transtbl fcntl_flags_tbl[] = {
248 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
249 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
250 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
251 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
252 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
253 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
254 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
255 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
256 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
257 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
258 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
259 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
260 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
261 #if defined(O_DIRECT)
262 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
263 #endif
264 #if defined(O_NOATIME)
265 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
266 #endif
267 #if defined(O_CLOEXEC)
268 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
269 #endif
270 #if defined(O_PATH)
271 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
272 #endif
273 /* Don't terminate the list prematurely on 64-bit host+guest. */
274 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
275 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
276 #endif
277 { 0, 0, 0, 0 }
278 };
279
280 #define COPY_UTSNAME_FIELD(dest, src) \
281 do { \
282 /* __NEW_UTS_LEN doesn't include terminating null */ \
283 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
284 (dest)[__NEW_UTS_LEN] = '\0'; \
285 } while (0)
286
287 static int sys_uname(struct new_utsname *buf)
288 {
289 struct utsname uts_buf;
290
291 if (uname(&uts_buf) < 0)
292 return (-1);
293
294 /*
295 * Just in case these have some differences, we
296 * translate utsname to new_utsname (which is the
297 * struct the Linux kernel uses).
298 */
299
300 memset(buf, 0, sizeof(*buf));
301 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
302 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
303 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
304 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
305 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
306 #ifdef _GNU_SOURCE
307 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
308 #endif
309 return (0);
310
311 #undef COPY_UTSNAME_FIELD
312 }
313
314 static int sys_getcwd1(char *buf, size_t size)
315 {
316 if (getcwd(buf, size) == NULL) {
317 /* getcwd() sets errno */
318 return (-1);
319 }
320 return strlen(buf)+1;
321 }
322
323 #ifdef TARGET_NR_openat
324 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
325 {
326 /*
327 * open(2) has extra parameter 'mode' when called with
328 * flag O_CREAT.
329 */
330 if ((flags & O_CREAT) != 0) {
331 return (openat(dirfd, pathname, flags, mode));
332 }
333 return (openat(dirfd, pathname, flags));
334 }
335 #endif
336
337 #ifdef TARGET_NR_utimensat
338 #ifdef CONFIG_UTIMENSAT
339 static int sys_utimensat(int dirfd, const char *pathname,
340 const struct timespec times[2], int flags)
341 {
342 if (pathname == NULL)
343 return futimens(dirfd, times);
344 else
345 return utimensat(dirfd, pathname, times, flags);
346 }
347 #elif defined(__NR_utimensat)
348 #define __NR_sys_utimensat __NR_utimensat
349 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
350 const struct timespec *,tsp,int,flags)
351 #else
352 static int sys_utimensat(int dirfd, const char *pathname,
353 const struct timespec times[2], int flags)
354 {
355 errno = ENOSYS;
356 return -1;
357 }
358 #endif
359 #endif /* TARGET_NR_utimensat */
360
361 #ifdef CONFIG_INOTIFY
362 #include <sys/inotify.h>
363
364 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
365 static int sys_inotify_init(void)
366 {
367 return (inotify_init());
368 }
369 #endif
370 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
371 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
372 {
373 return (inotify_add_watch(fd, pathname, mask));
374 }
375 #endif
376 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
377 static int sys_inotify_rm_watch(int fd, int32_t wd)
378 {
379 return (inotify_rm_watch(fd, wd));
380 }
381 #endif
382 #ifdef CONFIG_INOTIFY1
383 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
384 static int sys_inotify_init1(int flags)
385 {
386 return (inotify_init1(flags));
387 }
388 #endif
389 #endif
390 #else
391 /* Userspace can usually survive runtime without inotify */
392 #undef TARGET_NR_inotify_init
393 #undef TARGET_NR_inotify_init1
394 #undef TARGET_NR_inotify_add_watch
395 #undef TARGET_NR_inotify_rm_watch
396 #endif /* CONFIG_INOTIFY */
397
398 #if defined(TARGET_NR_ppoll)
399 #ifndef __NR_ppoll
400 # define __NR_ppoll -1
401 #endif
402 #define __NR_sys_ppoll __NR_ppoll
403 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
404 struct timespec *, timeout, const __sigset_t *, sigmask,
405 size_t, sigsetsize)
406 #endif
407
408 #if defined(TARGET_NR_pselect6)
409 #ifndef __NR_pselect6
410 # define __NR_pselect6 -1
411 #endif
412 #define __NR_sys_pselect6 __NR_pselect6
413 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
414 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
415 #endif
416
417 #if defined(TARGET_NR_prlimit64)
418 #ifndef __NR_prlimit64
419 # define __NR_prlimit64 -1
420 #endif
421 #define __NR_sys_prlimit64 __NR_prlimit64
422 /* The glibc rlimit structure may not be that used by the underlying syscall */
423 struct host_rlimit64 {
424 uint64_t rlim_cur;
425 uint64_t rlim_max;
426 };
427 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
428 const struct host_rlimit64 *, new_limit,
429 struct host_rlimit64 *, old_limit)
430 #endif
431
432
433 #if defined(TARGET_NR_timer_create)
434 /* Maximum of 32 active POSIX timers allowed at any one time. */
435 static timer_t g_posix_timers[32] = { 0, } ;
436
437 static inline int next_free_host_timer(void)
438 {
439 int k ;
440 /* FIXME: Does finding the next free slot require a lock? */
441 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
442 if (g_posix_timers[k] == 0) {
443 g_posix_timers[k] = (timer_t) 1;
444 return k;
445 }
446 }
447 return -1;
448 }
449 #endif
450
451 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
452 #ifdef TARGET_ARM
453 static inline int regpairs_aligned(void *cpu_env) {
454 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
455 }
456 #elif defined(TARGET_MIPS)
457 static inline int regpairs_aligned(void *cpu_env) { return 1; }
458 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
459 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
460 * of registers which translates to the same as ARM/MIPS, because we start with
461 * r3 as arg1 */
462 static inline int regpairs_aligned(void *cpu_env) { return 1; }
463 #else
464 static inline int regpairs_aligned(void *cpu_env) { return 0; }
465 #endif
466
467 #define ERRNO_TABLE_SIZE 1200
468
469 /* target_to_host_errno_table[] is initialized from
470 * host_to_target_errno_table[] in syscall_init(). */
471 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
472 };
473
474 /*
475 * This list is the union of errno values overridden in asm-<arch>/errno.h
476 * minus the errnos that are not actually generic to all archs.
477 */
478 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
479 [EIDRM] = TARGET_EIDRM,
480 [ECHRNG] = TARGET_ECHRNG,
481 [EL2NSYNC] = TARGET_EL2NSYNC,
482 [EL3HLT] = TARGET_EL3HLT,
483 [EL3RST] = TARGET_EL3RST,
484 [ELNRNG] = TARGET_ELNRNG,
485 [EUNATCH] = TARGET_EUNATCH,
486 [ENOCSI] = TARGET_ENOCSI,
487 [EL2HLT] = TARGET_EL2HLT,
488 [EDEADLK] = TARGET_EDEADLK,
489 [ENOLCK] = TARGET_ENOLCK,
490 [EBADE] = TARGET_EBADE,
491 [EBADR] = TARGET_EBADR,
492 [EXFULL] = TARGET_EXFULL,
493 [ENOANO] = TARGET_ENOANO,
494 [EBADRQC] = TARGET_EBADRQC,
495 [EBADSLT] = TARGET_EBADSLT,
496 [EBFONT] = TARGET_EBFONT,
497 [ENOSTR] = TARGET_ENOSTR,
498 [ENODATA] = TARGET_ENODATA,
499 [ETIME] = TARGET_ETIME,
500 [ENOSR] = TARGET_ENOSR,
501 [ENONET] = TARGET_ENONET,
502 [ENOPKG] = TARGET_ENOPKG,
503 [EREMOTE] = TARGET_EREMOTE,
504 [ENOLINK] = TARGET_ENOLINK,
505 [EADV] = TARGET_EADV,
506 [ESRMNT] = TARGET_ESRMNT,
507 [ECOMM] = TARGET_ECOMM,
508 [EPROTO] = TARGET_EPROTO,
509 [EDOTDOT] = TARGET_EDOTDOT,
510 [EMULTIHOP] = TARGET_EMULTIHOP,
511 [EBADMSG] = TARGET_EBADMSG,
512 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
513 [EOVERFLOW] = TARGET_EOVERFLOW,
514 [ENOTUNIQ] = TARGET_ENOTUNIQ,
515 [EBADFD] = TARGET_EBADFD,
516 [EREMCHG] = TARGET_EREMCHG,
517 [ELIBACC] = TARGET_ELIBACC,
518 [ELIBBAD] = TARGET_ELIBBAD,
519 [ELIBSCN] = TARGET_ELIBSCN,
520 [ELIBMAX] = TARGET_ELIBMAX,
521 [ELIBEXEC] = TARGET_ELIBEXEC,
522 [EILSEQ] = TARGET_EILSEQ,
523 [ENOSYS] = TARGET_ENOSYS,
524 [ELOOP] = TARGET_ELOOP,
525 [ERESTART] = TARGET_ERESTART,
526 [ESTRPIPE] = TARGET_ESTRPIPE,
527 [ENOTEMPTY] = TARGET_ENOTEMPTY,
528 [EUSERS] = TARGET_EUSERS,
529 [ENOTSOCK] = TARGET_ENOTSOCK,
530 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
531 [EMSGSIZE] = TARGET_EMSGSIZE,
532 [EPROTOTYPE] = TARGET_EPROTOTYPE,
533 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
534 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
535 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
536 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
537 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
538 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
539 [EADDRINUSE] = TARGET_EADDRINUSE,
540 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
541 [ENETDOWN] = TARGET_ENETDOWN,
542 [ENETUNREACH] = TARGET_ENETUNREACH,
543 [ENETRESET] = TARGET_ENETRESET,
544 [ECONNABORTED] = TARGET_ECONNABORTED,
545 [ECONNRESET] = TARGET_ECONNRESET,
546 [ENOBUFS] = TARGET_ENOBUFS,
547 [EISCONN] = TARGET_EISCONN,
548 [ENOTCONN] = TARGET_ENOTCONN,
549 [EUCLEAN] = TARGET_EUCLEAN,
550 [ENOTNAM] = TARGET_ENOTNAM,
551 [ENAVAIL] = TARGET_ENAVAIL,
552 [EISNAM] = TARGET_EISNAM,
553 [EREMOTEIO] = TARGET_EREMOTEIO,
554 [ESHUTDOWN] = TARGET_ESHUTDOWN,
555 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
556 [ETIMEDOUT] = TARGET_ETIMEDOUT,
557 [ECONNREFUSED] = TARGET_ECONNREFUSED,
558 [EHOSTDOWN] = TARGET_EHOSTDOWN,
559 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
560 [EALREADY] = TARGET_EALREADY,
561 [EINPROGRESS] = TARGET_EINPROGRESS,
562 [ESTALE] = TARGET_ESTALE,
563 [ECANCELED] = TARGET_ECANCELED,
564 [ENOMEDIUM] = TARGET_ENOMEDIUM,
565 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
566 #ifdef ENOKEY
567 [ENOKEY] = TARGET_ENOKEY,
568 #endif
569 #ifdef EKEYEXPIRED
570 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
571 #endif
572 #ifdef EKEYREVOKED
573 [EKEYREVOKED] = TARGET_EKEYREVOKED,
574 #endif
575 #ifdef EKEYREJECTED
576 [EKEYREJECTED] = TARGET_EKEYREJECTED,
577 #endif
578 #ifdef EOWNERDEAD
579 [EOWNERDEAD] = TARGET_EOWNERDEAD,
580 #endif
581 #ifdef ENOTRECOVERABLE
582 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
583 #endif
584 };
585
586 static inline int host_to_target_errno(int err)
587 {
588 if(host_to_target_errno_table[err])
589 return host_to_target_errno_table[err];
590 return err;
591 }
592
593 static inline int target_to_host_errno(int err)
594 {
595 if (target_to_host_errno_table[err])
596 return target_to_host_errno_table[err];
597 return err;
598 }
599
600 static inline abi_long get_errno(abi_long ret)
601 {
602 if (ret == -1)
603 return -host_to_target_errno(errno);
604 else
605 return ret;
606 }
607
608 static inline int is_error(abi_long ret)
609 {
610 return (abi_ulong)ret >= (abi_ulong)(-4096);
611 }
612
613 char *target_strerror(int err)
614 {
615 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
616 return NULL;
617 }
618 return strerror(target_to_host_errno(err));
619 }
620
621 static abi_ulong target_brk;
622 static abi_ulong target_original_brk;
623 static abi_ulong brk_page;
624
625 void target_set_brk(abi_ulong new_brk)
626 {
627 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
628 brk_page = HOST_PAGE_ALIGN(target_brk);
629 }
630
631 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
632 #define DEBUGF_BRK(message, args...)
633
634 /* do_brk() must return target values and target errnos. */
635 abi_long do_brk(abi_ulong new_brk)
636 {
637 abi_long mapped_addr;
638 int new_alloc_size;
639
640 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
641
642 if (!new_brk) {
643 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
644 return target_brk;
645 }
646 if (new_brk < target_original_brk) {
647 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
648 target_brk);
649 return target_brk;
650 }
651
652 /* If the new brk is less than the highest page reserved to the
653 * target heap allocation, set it and we're almost done... */
654 if (new_brk <= brk_page) {
655 /* Heap contents are initialized to zero, as for anonymous
656 * mapped pages. */
657 if (new_brk > target_brk) {
658 memset(g2h(target_brk), 0, new_brk - target_brk);
659 }
660 target_brk = new_brk;
661 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
662 return target_brk;
663 }
664
665 /* We need to allocate more memory after the brk... Note that
666 * we don't use MAP_FIXED because that will map over the top of
667 * any existing mapping (like the one with the host libc or qemu
668 * itself); instead we treat "mapped but at wrong address" as
669 * a failure and unmap again.
670 */
671 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
672 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
673 PROT_READ|PROT_WRITE,
674 MAP_ANON|MAP_PRIVATE, 0, 0));
675
676 if (mapped_addr == brk_page) {
677 /* Heap contents are initialized to zero, as for anonymous
678 * mapped pages. Technically the new pages are already
679 * initialized to zero since they *are* anonymous mapped
680 * pages, however we have to take care with the contents that
681 * come from the remaining part of the previous page: it may
682 * contain garbage data due to a previous heap usage (grown
683 * then shrunken). */
684 memset(g2h(target_brk), 0, brk_page - target_brk);
685
686 target_brk = new_brk;
687 brk_page = HOST_PAGE_ALIGN(target_brk);
688 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
689 target_brk);
690 return target_brk;
691 } else if (mapped_addr != -1) {
692 /* Mapped but at wrong address, meaning there wasn't actually
693 * enough space for this brk.
694 */
695 target_munmap(mapped_addr, new_alloc_size);
696 mapped_addr = -1;
697 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
698 }
699 else {
700 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
701 }
702
703 #if defined(TARGET_ALPHA)
704 /* We (partially) emulate OSF/1 on Alpha, which requires we
705 return a proper errno, not an unchanged brk value. */
706 return -TARGET_ENOMEM;
707 #endif
708 /* For everything else, return the previous break. */
709 return target_brk;
710 }
711
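/* fd_set conversion helpers: the guest stores fd sets as an array of
 * abi_ulong words, which may differ from the host fd_set in word size
 * and byte order, so each bit is copied individually. */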
712 static inline abi_long copy_from_user_fdset(fd_set *fds,
713 abi_ulong target_fds_addr,
714 int n)
715 {
716 int i, nw, j, k;
717 abi_ulong b, *target_fds;
718
719 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
720 if (!(target_fds = lock_user(VERIFY_READ,
721 target_fds_addr,
722 sizeof(abi_ulong) * nw,
723 1)))
724 return -TARGET_EFAULT;
725
726 FD_ZERO(fds);
727 k = 0;
728 for (i = 0; i < nw; i++) {
729 /* grab the abi_ulong */
730 __get_user(b, &target_fds[i]);
731 for (j = 0; j < TARGET_ABI_BITS; j++) {
732 /* check the bit inside the abi_ulong */
733 if ((b >> j) & 1)
734 FD_SET(k, fds);
735 k++;
736 }
737 }
738
739 unlock_user(target_fds, target_fds_addr, 0);
740
741 return 0;
742 }
743
744 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
745 abi_ulong target_fds_addr,
746 int n)
747 {
748 if (target_fds_addr) {
749 if (copy_from_user_fdset(fds, target_fds_addr, n))
750 return -TARGET_EFAULT;
751 *fds_ptr = fds;
752 } else {
753 *fds_ptr = NULL;
754 }
755 return 0;
756 }
757
758 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
759 const fd_set *fds,
760 int n)
761 {
762 int i, nw, j, k;
763 abi_long v;
764 abi_ulong *target_fds;
765
766 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
767 if (!(target_fds = lock_user(VERIFY_WRITE,
768 target_fds_addr,
769 sizeof(abi_ulong) * nw,
770 0)))
771 return -TARGET_EFAULT;
772
773 k = 0;
774 for (i = 0; i < nw; i++) {
775 v = 0;
776 for (j = 0; j < TARGET_ABI_BITS; j++) {
777 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
778 k++;
779 }
780 __put_user(v, &target_fds[i]);
781 }
782
783 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
784
785 return 0;
786 }
787
788 #if defined(__alpha__)
789 #define HOST_HZ 1024
790 #else
791 #define HOST_HZ 100
792 #endif
793
794 static inline abi_long host_to_target_clock_t(long ticks)
795 {
796 #if HOST_HZ == TARGET_HZ
797 return ticks;
798 #else
799 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
800 #endif
801 }
802
803 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
804 const struct rusage *rusage)
805 {
806 struct target_rusage *target_rusage;
807
808 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
809 return -TARGET_EFAULT;
810 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
811 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
812 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
813 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
814 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
815 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
816 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
817 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
818 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
819 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
820 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
821 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
822 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
823 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
824 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
825 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
826 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
827 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
828 unlock_user_struct(target_rusage, target_addr, 1);
829
830 return 0;
831 }
832
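/* rlimit conversion helpers: RLIM_INFINITY differs between target and
 * host ABIs, and values that do not fit the destination type are
 * clamped to infinity. */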
833 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
834 {
835 abi_ulong target_rlim_swap;
836 rlim_t result;
837
838 target_rlim_swap = tswapal(target_rlim);
839 if (target_rlim_swap == TARGET_RLIM_INFINITY)
840 return RLIM_INFINITY;
841
842 result = target_rlim_swap;
843 if (target_rlim_swap != (rlim_t)result)
844 return RLIM_INFINITY;
845
846 return result;
847 }
848
849 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
850 {
851 abi_ulong target_rlim_swap;
852 abi_ulong result;
853
854 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
855 target_rlim_swap = TARGET_RLIM_INFINITY;
856 else
857 target_rlim_swap = rlim;
858 result = tswapal(target_rlim_swap);
859
860 return result;
861 }
862
863 static inline int target_to_host_resource(int code)
864 {
865 switch (code) {
866 case TARGET_RLIMIT_AS:
867 return RLIMIT_AS;
868 case TARGET_RLIMIT_CORE:
869 return RLIMIT_CORE;
870 case TARGET_RLIMIT_CPU:
871 return RLIMIT_CPU;
872 case TARGET_RLIMIT_DATA:
873 return RLIMIT_DATA;
874 case TARGET_RLIMIT_FSIZE:
875 return RLIMIT_FSIZE;
876 case TARGET_RLIMIT_LOCKS:
877 return RLIMIT_LOCKS;
878 case TARGET_RLIMIT_MEMLOCK:
879 return RLIMIT_MEMLOCK;
880 case TARGET_RLIMIT_MSGQUEUE:
881 return RLIMIT_MSGQUEUE;
882 case TARGET_RLIMIT_NICE:
883 return RLIMIT_NICE;
884 case TARGET_RLIMIT_NOFILE:
885 return RLIMIT_NOFILE;
886 case TARGET_RLIMIT_NPROC:
887 return RLIMIT_NPROC;
888 case TARGET_RLIMIT_RSS:
889 return RLIMIT_RSS;
890 case TARGET_RLIMIT_RTPRIO:
891 return RLIMIT_RTPRIO;
892 case TARGET_RLIMIT_SIGPENDING:
893 return RLIMIT_SIGPENDING;
894 case TARGET_RLIMIT_STACK:
895 return RLIMIT_STACK;
896 default:
897 return code;
898 }
899 }
900
901 static inline abi_long copy_from_user_timeval(struct timeval *tv,
902 abi_ulong target_tv_addr)
903 {
904 struct target_timeval *target_tv;
905
906 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
907 return -TARGET_EFAULT;
908
909 __get_user(tv->tv_sec, &target_tv->tv_sec);
910 __get_user(tv->tv_usec, &target_tv->tv_usec);
911
912 unlock_user_struct(target_tv, target_tv_addr, 0);
913
914 return 0;
915 }
916
917 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
918 const struct timeval *tv)
919 {
920 struct target_timeval *target_tv;
921
922 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
923 return -TARGET_EFAULT;
924
925 __put_user(tv->tv_sec, &target_tv->tv_sec);
926 __put_user(tv->tv_usec, &target_tv->tv_usec);
927
928 unlock_user_struct(target_tv, target_tv_addr, 1);
929
930 return 0;
931 }
932
933 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
934 #include <mqueue.h>
935
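/* POSIX message queue attribute conversion between the target's
 * struct mq_attr layout and the host's. */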
936 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
937 abi_ulong target_mq_attr_addr)
938 {
939 struct target_mq_attr *target_mq_attr;
940
941 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
942 target_mq_attr_addr, 1))
943 return -TARGET_EFAULT;
944
945 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
946 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
947 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
948 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
949
950 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
951
952 return 0;
953 }
954
955 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
956 const struct mq_attr *attr)
957 {
958 struct target_mq_attr *target_mq_attr;
959
960 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
961 target_mq_attr_addr, 0))
962 return -TARGET_EFAULT;
963
964 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
965 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
966 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
967 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
968
969 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
970
971 return 0;
972 }
973 #endif
974
975 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
976 /* do_select() must return target values and target errnos. */
977 static abi_long do_select(int n,
978 abi_ulong rfd_addr, abi_ulong wfd_addr,
979 abi_ulong efd_addr, abi_ulong target_tv_addr)
980 {
981 fd_set rfds, wfds, efds;
982 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
983 struct timeval tv, *tv_ptr;
984 abi_long ret;
985
986 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
987 if (ret) {
988 return ret;
989 }
990 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
991 if (ret) {
992 return ret;
993 }
994 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
995 if (ret) {
996 return ret;
997 }
998
999 if (target_tv_addr) {
1000 if (copy_from_user_timeval(&tv, target_tv_addr))
1001 return -TARGET_EFAULT;
1002 tv_ptr = &tv;
1003 } else {
1004 tv_ptr = NULL;
1005 }
1006
1007 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1008
1009 if (!is_error(ret)) {
1010 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1011 return -TARGET_EFAULT;
1012 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1013 return -TARGET_EFAULT;
1014 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1015 return -TARGET_EFAULT;
1016
1017 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1018 return -TARGET_EFAULT;
1019 }
1020
1021 return ret;
1022 }
1023 #endif
1024
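/* pipe2() is only usable when the host provides it; otherwise the
 * request fails with ENOSYS. */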
1025 static abi_long do_pipe2(int host_pipe[], int flags)
1026 {
1027 #ifdef CONFIG_PIPE2
1028 return pipe2(host_pipe, flags);
1029 #else
1030 return -ENOSYS;
1031 #endif
1032 }
1033
1034 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1035 int flags, int is_pipe2)
1036 {
1037 int host_pipe[2];
1038 abi_long ret;
1039 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1040
1041 if (is_error(ret))
1042 return get_errno(ret);
1043
1044 /* Several targets have special calling conventions for the original
1045 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1046 if (!is_pipe2) {
1047 #if defined(TARGET_ALPHA)
1048 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1049 return host_pipe[0];
1050 #elif defined(TARGET_MIPS)
1051 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1052 return host_pipe[0];
1053 #elif defined(TARGET_SH4)
1054 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1055 return host_pipe[0];
1056 #elif defined(TARGET_SPARC)
1057 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1058 return host_pipe[0];
1059 #endif
1060 }
1061
1062 if (put_user_s32(host_pipe[0], pipedes)
1063 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1064 return -TARGET_EFAULT;
1065 return get_errno(ret);
1066 }
1067
1068 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1069 abi_ulong target_addr,
1070 socklen_t len)
1071 {
1072 struct target_ip_mreqn *target_smreqn;
1073
1074 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1075 if (!target_smreqn)
1076 return -TARGET_EFAULT;
1077 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1078 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1079 if (len == sizeof(struct target_ip_mreqn))
1080 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1081 unlock_user(target_smreqn, target_addr, 0);
1082
1083 return 0;
1084 }
1085
1086 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1087 abi_ulong target_addr,
1088 socklen_t len)
1089 {
1090 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1091 sa_family_t sa_family;
1092 struct target_sockaddr *target_saddr;
1093
1094 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1095 if (!target_saddr)
1096 return -TARGET_EFAULT;
1097
1098 sa_family = tswap16(target_saddr->sa_family);
1099
1100 /* Oops. The caller might send an incomplete sun_path; sun_path
1101 * must be terminated by \0 (see the manual page), but
1102 * unfortunately it is quite common to specify sockaddr_un
1103 * length as "strlen(x->sun_path)" while it should be
1104 * "strlen(...) + 1". We'll fix that here if needed.
1105 * Linux kernel has a similar feature.
1106 */
1107
1108 if (sa_family == AF_UNIX) {
1109 if (len < unix_maxlen && len > 0) {
1110 char *cp = (char*)target_saddr;
1111
1112 if ( cp[len-1] && !cp[len] )
1113 len++;
1114 }
1115 if (len > unix_maxlen)
1116 len = unix_maxlen;
1117 }
1118
1119 memcpy(addr, target_saddr, len);
1120 addr->sa_family = sa_family;
1121 unlock_user(target_saddr, target_addr, 0);
1122
1123 return 0;
1124 }
1125
1126 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1127 struct sockaddr *addr,
1128 socklen_t len)
1129 {
1130 struct target_sockaddr *target_saddr;
1131
1132 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1133 if (!target_saddr)
1134 return -TARGET_EFAULT;
1135 memcpy(target_saddr, addr, len);
1136 target_saddr->sa_family = tswap16(addr->sa_family);
1137 unlock_user(target_saddr, target_addr, len);
1138
1139 return 0;
1140 }
1141
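/* Ancillary data (cmsg) conversion for sendmsg()/recvmsg(): SCM_RIGHTS
 * file descriptor arrays are byte-swapped element by element, the
 * SO_TIMESTAMP timeval is converted on the way back to the guest, and
 * any other control message type is copied through verbatim with a
 * log message. */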
1142 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1143 struct target_msghdr *target_msgh)
1144 {
1145 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1146 abi_long msg_controllen;
1147 abi_ulong target_cmsg_addr;
1148 struct target_cmsghdr *target_cmsg;
1149 socklen_t space = 0;
1150
1151 msg_controllen = tswapal(target_msgh->msg_controllen);
1152 if (msg_controllen < sizeof (struct target_cmsghdr))
1153 goto the_end;
1154 target_cmsg_addr = tswapal(target_msgh->msg_control);
1155 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1156 if (!target_cmsg)
1157 return -TARGET_EFAULT;
1158
1159 while (cmsg && target_cmsg) {
1160 void *data = CMSG_DATA(cmsg);
1161 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1162
1163 int len = tswapal(target_cmsg->cmsg_len)
1164 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1165
1166 space += CMSG_SPACE(len);
1167 if (space > msgh->msg_controllen) {
1168 space -= CMSG_SPACE(len);
1169 gemu_log("Host cmsg overflow\n");
1170 break;
1171 }
1172
1173 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1174 cmsg->cmsg_level = SOL_SOCKET;
1175 } else {
1176 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1177 }
1178 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1179 cmsg->cmsg_len = CMSG_LEN(len);
1180
1181 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1182 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1183 memcpy(data, target_data, len);
1184 } else {
1185 int *fd = (int *)data;
1186 int *target_fd = (int *)target_data;
1187 int i, numfds = len / sizeof(int);
1188
1189 for (i = 0; i < numfds; i++)
1190 fd[i] = tswap32(target_fd[i]);
1191 }
1192
1193 cmsg = CMSG_NXTHDR(msgh, cmsg);
1194 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1195 }
1196 unlock_user(target_cmsg, target_cmsg_addr, 0);
1197 the_end:
1198 msgh->msg_controllen = space;
1199 return 0;
1200 }
1201
1202 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1203 struct msghdr *msgh)
1204 {
1205 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1206 abi_long msg_controllen;
1207 abi_ulong target_cmsg_addr;
1208 struct target_cmsghdr *target_cmsg;
1209 socklen_t space = 0;
1210
1211 msg_controllen = tswapal(target_msgh->msg_controllen);
1212 if (msg_controllen < sizeof (struct target_cmsghdr))
1213 goto the_end;
1214 target_cmsg_addr = tswapal(target_msgh->msg_control);
1215 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1216 if (!target_cmsg)
1217 return -TARGET_EFAULT;
1218
1219 while (cmsg && target_cmsg) {
1220 void *data = CMSG_DATA(cmsg);
1221 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1222
1223 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1224
1225 space += TARGET_CMSG_SPACE(len);
1226 if (space > msg_controllen) {
1227 space -= TARGET_CMSG_SPACE(len);
1228 gemu_log("Target cmsg overflow\n");
1229 break;
1230 }
1231
1232 if (cmsg->cmsg_level == SOL_SOCKET) {
1233 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1234 } else {
1235 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1236 }
1237 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1238 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1239
1240 if ((cmsg->cmsg_level == SOL_SOCKET) &&
1241 (cmsg->cmsg_type == SCM_RIGHTS)) {
1242 int *fd = (int *)data;
1243 int *target_fd = (int *)target_data;
1244 int i, numfds = len / sizeof(int);
1245
1246 for (i = 0; i < numfds; i++)
1247 target_fd[i] = tswap32(fd[i]);
1248 } else if ((cmsg->cmsg_level == SOL_SOCKET) &&
1249 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1250 (len == sizeof(struct timeval))) {
1251 /* copy struct timeval to target */
1252 struct timeval *tv = (struct timeval *)data;
1253 struct target_timeval *target_tv =
1254 (struct target_timeval *)target_data;
1255
1256 target_tv->tv_sec = tswapal(tv->tv_sec);
1257 target_tv->tv_usec = tswapal(tv->tv_usec);
1258 } else {
1259 gemu_log("Unsupported ancillary data: %d/%d\n",
1260 cmsg->cmsg_level, cmsg->cmsg_type);
1261 memcpy(target_data, data, len);
1262 }
1263
1264 cmsg = CMSG_NXTHDR(msgh, cmsg);
1265 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1266 }
1267 unlock_user(target_cmsg, target_cmsg_addr, space);
1268 the_end:
1269 target_msgh->msg_controllen = tswapal(space);
1270 return 0;
1271 }
1272
1273 /* do_setsockopt() Must return target values and target errnos. */
1274 static abi_long do_setsockopt(int sockfd, int level, int optname,
1275 abi_ulong optval_addr, socklen_t optlen)
1276 {
1277 abi_long ret;
1278 int val;
1279 struct ip_mreqn *ip_mreq;
1280 struct ip_mreq_source *ip_mreq_source;
1281
1282 switch(level) {
1283 case SOL_TCP:
1284 /* TCP options all take an 'int' value. */
1285 if (optlen < sizeof(uint32_t))
1286 return -TARGET_EINVAL;
1287
1288 if (get_user_u32(val, optval_addr))
1289 return -TARGET_EFAULT;
1290 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1291 break;
1292 case SOL_IP:
1293 switch(optname) {
1294 case IP_TOS:
1295 case IP_TTL:
1296 case IP_HDRINCL:
1297 case IP_ROUTER_ALERT:
1298 case IP_RECVOPTS:
1299 case IP_RETOPTS:
1300 case IP_PKTINFO:
1301 case IP_MTU_DISCOVER:
1302 case IP_RECVERR:
1303 case IP_RECVTOS:
1304 #ifdef IP_FREEBIND
1305 case IP_FREEBIND:
1306 #endif
1307 case IP_MULTICAST_TTL:
1308 case IP_MULTICAST_LOOP:
1309 val = 0;
1310 if (optlen >= sizeof(uint32_t)) {
1311 if (get_user_u32(val, optval_addr))
1312 return -TARGET_EFAULT;
1313 } else if (optlen >= 1) {
1314 if (get_user_u8(val, optval_addr))
1315 return -TARGET_EFAULT;
1316 }
1317 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1318 break;
1319 case IP_ADD_MEMBERSHIP:
1320 case IP_DROP_MEMBERSHIP:
1321 if (optlen < sizeof (struct target_ip_mreq) ||
1322 optlen > sizeof (struct target_ip_mreqn))
1323 return -TARGET_EINVAL;
1324
1325 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1326 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1327 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1328 break;
1329
1330 case IP_BLOCK_SOURCE:
1331 case IP_UNBLOCK_SOURCE:
1332 case IP_ADD_SOURCE_MEMBERSHIP:
1333 case IP_DROP_SOURCE_MEMBERSHIP:
1334 if (optlen != sizeof (struct target_ip_mreq_source))
1335 return -TARGET_EINVAL;
1336
1337 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1338 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1339 unlock_user (ip_mreq_source, optval_addr, 0);
1340 break;
1341
1342 default:
1343 goto unimplemented;
1344 }
1345 break;
1346 case SOL_IPV6:
1347 switch (optname) {
1348 case IPV6_MTU_DISCOVER:
1349 case IPV6_MTU:
1350 case IPV6_V6ONLY:
1351 case IPV6_RECVPKTINFO:
1352 val = 0;
1353 if (optlen < sizeof(uint32_t)) {
1354 return -TARGET_EINVAL;
1355 }
1356 if (get_user_u32(val, optval_addr)) {
1357 return -TARGET_EFAULT;
1358 }
1359 ret = get_errno(setsockopt(sockfd, level, optname,
1360 &val, sizeof(val)));
1361 break;
1362 default:
1363 goto unimplemented;
1364 }
1365 break;
1366 case SOL_RAW:
1367 switch (optname) {
1368 case ICMP_FILTER:
1369 /* struct icmp_filter takes a u32 value */
1370 if (optlen < sizeof(uint32_t)) {
1371 return -TARGET_EINVAL;
1372 }
1373
1374 if (get_user_u32(val, optval_addr)) {
1375 return -TARGET_EFAULT;
1376 }
1377 ret = get_errno(setsockopt(sockfd, level, optname,
1378 &val, sizeof(val)));
1379 break;
1380
1381 default:
1382 goto unimplemented;
1383 }
1384 break;
1385 case TARGET_SOL_SOCKET:
1386 switch (optname) {
1387 case TARGET_SO_RCVTIMEO:
1388 {
1389 struct timeval tv;
1390
1391 optname = SO_RCVTIMEO;
1392
1393 set_timeout:
1394 if (optlen != sizeof(struct target_timeval)) {
1395 return -TARGET_EINVAL;
1396 }
1397
1398 if (copy_from_user_timeval(&tv, optval_addr)) {
1399 return -TARGET_EFAULT;
1400 }
1401
1402 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1403 &tv, sizeof(tv)));
1404 return ret;
1405 }
1406 case TARGET_SO_SNDTIMEO:
1407 optname = SO_SNDTIMEO;
1408 goto set_timeout;
1409 case TARGET_SO_ATTACH_FILTER:
1410 {
1411 struct target_sock_fprog *tfprog;
1412 struct target_sock_filter *tfilter;
1413 struct sock_fprog fprog;
1414 struct sock_filter *filter;
1415 int i;
1416
1417 if (optlen != sizeof(*tfprog)) {
1418 return -TARGET_EINVAL;
1419 }
1420 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1421 return -TARGET_EFAULT;
1422 }
1423 if (!lock_user_struct(VERIFY_READ, tfilter,
1424 tswapal(tfprog->filter), 0)) {
1425 unlock_user_struct(tfprog, optval_addr, 1);
1426 return -TARGET_EFAULT;
1427 }
1428
1429 fprog.len = tswap16(tfprog->len);
1430 filter = malloc(fprog.len * sizeof(*filter));
1431 if (filter == NULL) {
1432 unlock_user_struct(tfilter, tfprog->filter, 1);
1433 unlock_user_struct(tfprog, optval_addr, 1);
1434 return -TARGET_ENOMEM;
1435 }
1436 for (i = 0; i < fprog.len; i++) {
1437 filter[i].code = tswap16(tfilter[i].code);
1438 filter[i].jt = tfilter[i].jt;
1439 filter[i].jf = tfilter[i].jf;
1440 filter[i].k = tswap32(tfilter[i].k);
1441 }
1442 fprog.filter = filter;
1443
1444 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1445 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1446 free(filter);
1447
1448 unlock_user_struct(tfilter, tfprog->filter, 1);
1449 unlock_user_struct(tfprog, optval_addr, 1);
1450 return ret;
1451 }
1452 /* Options with 'int' argument. */
1453 case TARGET_SO_DEBUG:
1454 optname = SO_DEBUG;
1455 break;
1456 case TARGET_SO_REUSEADDR:
1457 optname = SO_REUSEADDR;
1458 break;
1459 case TARGET_SO_TYPE:
1460 optname = SO_TYPE;
1461 break;
1462 case TARGET_SO_ERROR:
1463 optname = SO_ERROR;
1464 break;
1465 case TARGET_SO_DONTROUTE:
1466 optname = SO_DONTROUTE;
1467 break;
1468 case TARGET_SO_BROADCAST:
1469 optname = SO_BROADCAST;
1470 break;
1471 case TARGET_SO_SNDBUF:
1472 optname = SO_SNDBUF;
1473 break;
1474 case TARGET_SO_RCVBUF:
1475 optname = SO_RCVBUF;
1476 break;
1477 case TARGET_SO_KEEPALIVE:
1478 optname = SO_KEEPALIVE;
1479 break;
1480 case TARGET_SO_OOBINLINE:
1481 optname = SO_OOBINLINE;
1482 break;
1483 case TARGET_SO_NO_CHECK:
1484 optname = SO_NO_CHECK;
1485 break;
1486 case TARGET_SO_PRIORITY:
1487 optname = SO_PRIORITY;
1488 break;
1489 #ifdef SO_BSDCOMPAT
1490 case TARGET_SO_BSDCOMPAT:
1491 optname = SO_BSDCOMPAT;
1492 break;
1493 #endif
1494 case TARGET_SO_PASSCRED:
1495 optname = SO_PASSCRED;
1496 break;
1497 case TARGET_SO_TIMESTAMP:
1498 optname = SO_TIMESTAMP;
1499 break;
1500 case TARGET_SO_RCVLOWAT:
1501 optname = SO_RCVLOWAT;
1502 break;
1504 default:
1505 goto unimplemented;
1506 }
1507 if (optlen < sizeof(uint32_t))
1508 return -TARGET_EINVAL;
1509
1510 if (get_user_u32(val, optval_addr))
1511 return -TARGET_EFAULT;
1512 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1513 break;
1514 default:
1515 unimplemented:
1516 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1517 ret = -TARGET_ENOPROTOOPT;
1518 }
1519 return ret;
1520 }
1521
1522 /* do_getsockopt() Must return target values and target errnos. */
1523 static abi_long do_getsockopt(int sockfd, int level, int optname,
1524 abi_ulong optval_addr, abi_ulong optlen)
1525 {
1526 abi_long ret;
1527 int len, val;
1528 socklen_t lv;
1529
1530 switch(level) {
1531 case TARGET_SOL_SOCKET:
1532 level = SOL_SOCKET;
1533 switch (optname) {
1534 /* These don't just return a single integer */
1535 case TARGET_SO_LINGER:
1536 case TARGET_SO_RCVTIMEO:
1537 case TARGET_SO_SNDTIMEO:
1538 case TARGET_SO_PEERNAME:
1539 goto unimplemented;
1540 case TARGET_SO_PEERCRED: {
1541 struct ucred cr;
1542 socklen_t crlen;
1543 struct target_ucred *tcr;
1544
1545 if (get_user_u32(len, optlen)) {
1546 return -TARGET_EFAULT;
1547 }
1548 if (len < 0) {
1549 return -TARGET_EINVAL;
1550 }
1551
1552 crlen = sizeof(cr);
1553 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1554 &cr, &crlen));
1555 if (ret < 0) {
1556 return ret;
1557 }
1558 if (len > crlen) {
1559 len = crlen;
1560 }
1561 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1562 return -TARGET_EFAULT;
1563 }
1564 __put_user(cr.pid, &tcr->pid);
1565 __put_user(cr.uid, &tcr->uid);
1566 __put_user(cr.gid, &tcr->gid);
1567 unlock_user_struct(tcr, optval_addr, 1);
1568 if (put_user_u32(len, optlen)) {
1569 return -TARGET_EFAULT;
1570 }
1571 break;
1572 }
1573 /* Options with 'int' argument. */
1574 case TARGET_SO_DEBUG:
1575 optname = SO_DEBUG;
1576 goto int_case;
1577 case TARGET_SO_REUSEADDR:
1578 optname = SO_REUSEADDR;
1579 goto int_case;
1580 case TARGET_SO_TYPE:
1581 optname = SO_TYPE;
1582 goto int_case;
1583 case TARGET_SO_ERROR:
1584 optname = SO_ERROR;
1585 goto int_case;
1586 case TARGET_SO_DONTROUTE:
1587 optname = SO_DONTROUTE;
1588 goto int_case;
1589 case TARGET_SO_BROADCAST:
1590 optname = SO_BROADCAST;
1591 goto int_case;
1592 case TARGET_SO_SNDBUF:
1593 optname = SO_SNDBUF;
1594 goto int_case;
1595 case TARGET_SO_RCVBUF:
1596 optname = SO_RCVBUF;
1597 goto int_case;
1598 case TARGET_SO_KEEPALIVE:
1599 optname = SO_KEEPALIVE;
1600 goto int_case;
1601 case TARGET_SO_OOBINLINE:
1602 optname = SO_OOBINLINE;
1603 goto int_case;
1604 case TARGET_SO_NO_CHECK:
1605 optname = SO_NO_CHECK;
1606 goto int_case;
1607 case TARGET_SO_PRIORITY:
1608 optname = SO_PRIORITY;
1609 goto int_case;
1610 #ifdef SO_BSDCOMPAT
1611 case TARGET_SO_BSDCOMPAT:
1612 optname = SO_BSDCOMPAT;
1613 goto int_case;
1614 #endif
1615 case TARGET_SO_PASSCRED:
1616 optname = SO_PASSCRED;
1617 goto int_case;
1618 case TARGET_SO_TIMESTAMP:
1619 optname = SO_TIMESTAMP;
1620 goto int_case;
1621 case TARGET_SO_RCVLOWAT:
1622 optname = SO_RCVLOWAT;
1623 goto int_case;
1624 default:
1625 goto int_case;
1626 }
1627 break;
1628 case SOL_TCP:
1629 /* TCP options all take an 'int' value. */
1630 int_case:
1631 if (get_user_u32(len, optlen))
1632 return -TARGET_EFAULT;
1633 if (len < 0)
1634 return -TARGET_EINVAL;
1635 lv = sizeof(lv);
1636 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1637 if (ret < 0)
1638 return ret;
1639 if (len > lv)
1640 len = lv;
1641 if (len == 4) {
1642 if (put_user_u32(val, optval_addr))
1643 return -TARGET_EFAULT;
1644 } else {
1645 if (put_user_u8(val, optval_addr))
1646 return -TARGET_EFAULT;
1647 }
1648 if (put_user_u32(len, optlen))
1649 return -TARGET_EFAULT;
1650 break;
1651 case SOL_IP:
1652 switch(optname) {
1653 case IP_TOS:
1654 case IP_TTL:
1655 case IP_HDRINCL:
1656 case IP_ROUTER_ALERT:
1657 case IP_RECVOPTS:
1658 case IP_RETOPTS:
1659 case IP_PKTINFO:
1660 case IP_MTU_DISCOVER:
1661 case IP_RECVERR:
1662 case IP_RECVTOS:
1663 #ifdef IP_FREEBIND
1664 case IP_FREEBIND:
1665 #endif
1666 case IP_MULTICAST_TTL:
1667 case IP_MULTICAST_LOOP:
1668 if (get_user_u32(len, optlen))
1669 return -TARGET_EFAULT;
1670 if (len < 0)
1671 return -TARGET_EINVAL;
1672 lv = sizeof(lv);
1673 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1674 if (ret < 0)
1675 return ret;
1676 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1677 len = 1;
1678 if (put_user_u32(len, optlen)
1679 || put_user_u8(val, optval_addr))
1680 return -TARGET_EFAULT;
1681 } else {
1682 if (len > sizeof(int))
1683 len = sizeof(int);
1684 if (put_user_u32(len, optlen)
1685 || put_user_u32(val, optval_addr))
1686 return -TARGET_EFAULT;
1687 }
1688 break;
1689 default:
1690 ret = -TARGET_ENOPROTOOPT;
1691 break;
1692 }
1693 break;
1694 default:
1695 unimplemented:
1696 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1697 level, optname);
1698 ret = -TARGET_EOPNOTSUPP;
1699 break;
1700 }
1701 return ret;
1702 }
1703
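/* Lock a guest iovec array and build the corresponding host struct
 * iovec array; every referenced buffer is locked individually and the
 * total length is capped to keep it within the maximum transfer size.
 * On error NULL is returned with errno set. */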
1704 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1705 int count, int copy)
1706 {
1707 struct target_iovec *target_vec;
1708 struct iovec *vec;
1709 abi_ulong total_len, max_len;
1710 int i;
1711 int err = 0;
1712
1713 if (count == 0) {
1714 errno = 0;
1715 return NULL;
1716 }
1717 if (count < 0 || count > IOV_MAX) {
1718 errno = EINVAL;
1719 return NULL;
1720 }
1721
1722 vec = calloc(count, sizeof(struct iovec));
1723 if (vec == NULL) {
1724 errno = ENOMEM;
1725 return NULL;
1726 }
1727
1728 target_vec = lock_user(VERIFY_READ, target_addr,
1729 count * sizeof(struct target_iovec), 1);
1730 if (target_vec == NULL) {
1731 err = EFAULT;
1732 goto fail2;
1733 }
1734
1735 /* ??? If host page size > target page size, this will result in a
1736 value larger than what we can actually support. */
1737 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1738 total_len = 0;
1739
1740 for (i = 0; i < count; i++) {
1741 abi_ulong base = tswapal(target_vec[i].iov_base);
1742 abi_long len = tswapal(target_vec[i].iov_len);
1743
1744 if (len < 0) {
1745 err = EINVAL;
1746 goto fail;
1747 } else if (len == 0) {
1748 /* Zero length pointer is ignored. */
1749 vec[i].iov_base = 0;
1750 } else {
1751 vec[i].iov_base = lock_user(type, base, len, copy);
1752 if (!vec[i].iov_base) {
1753 err = EFAULT;
1754 goto fail;
1755 }
1756 if (len > max_len - total_len) {
1757 len = max_len - total_len;
1758 }
1759 }
1760 vec[i].iov_len = len;
1761 total_len += len;
1762 }
1763
1764 unlock_user(target_vec, target_addr, 0);
1765 return vec;
1766
1767 fail:
1768 unlock_user(target_vec, target_addr, 0);
1769 fail2:
1770 free(vec);
1771 errno = err;
1772 return NULL;
1773 }
1774
1775 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1776 int count, int copy)
1777 {
1778 struct target_iovec *target_vec;
1779 int i;
1780
1781 target_vec = lock_user(VERIFY_READ, target_addr,
1782 count * sizeof(struct target_iovec), 1);
1783 if (target_vec) {
1784 for (i = 0; i < count; i++) {
1785 abi_ulong base = tswapal(target_vec[i].iov_base);
1786 abi_long len = tswapal(target_vec[i].iov_len);
1787 if (len < 0) {
1788 break;
1789 }
1790 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1791 }
1792 unlock_user(target_vec, target_addr, 0);
1793 }
1794
1795 free(vec);
1796 }
1797
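/* Map the target socket type and its SOCK_CLOEXEC/SOCK_NONBLOCK flags
 * onto host values; a flag the host cannot express makes the call fail
 * with EINVAL. */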
1798 static inline int target_to_host_sock_type(int *type)
1799 {
1800 int host_type = 0;
1801 int target_type = *type;
1802
1803 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1804 case TARGET_SOCK_DGRAM:
1805 host_type = SOCK_DGRAM;
1806 break;
1807 case TARGET_SOCK_STREAM:
1808 host_type = SOCK_STREAM;
1809 break;
1810 default:
1811 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1812 break;
1813 }
1814 if (target_type & TARGET_SOCK_CLOEXEC) {
1815 #if defined(SOCK_CLOEXEC)
1816 host_type |= SOCK_CLOEXEC;
1817 #else
1818 return -TARGET_EINVAL;
1819 #endif
1820 }
1821 if (target_type & TARGET_SOCK_NONBLOCK) {
1822 #if defined(SOCK_NONBLOCK)
1823 host_type |= SOCK_NONBLOCK;
1824 #elif !defined(O_NONBLOCK)
1825 return -TARGET_EINVAL;
1826 #endif
1827 }
1828 *type = host_type;
1829 return 0;
1830 }
1831
1832 /* Try to emulate socket type flags after socket creation. */
1833 static int sock_flags_fixup(int fd, int target_type)
1834 {
1835 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1836 if (target_type & TARGET_SOCK_NONBLOCK) {
1837 int flags = fcntl(fd, F_GETFL);
1838 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
1839 close(fd);
1840 return -TARGET_EINVAL;
1841 }
1842 }
1843 #endif
1844 return fd;
1845 }
1846
1847 /* do_socket() Must return target values and target errnos. */
1848 static abi_long do_socket(int domain, int type, int protocol)
1849 {
1850 int target_type = type;
1851 int ret;
1852
1853 ret = target_to_host_sock_type(&type);
1854 if (ret) {
1855 return ret;
1856 }
1857
1858 if (domain == PF_NETLINK)
1859 return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
1860 ret = get_errno(socket(domain, type, protocol));
1861 if (ret >= 0) {
1862 ret = sock_flags_fixup(ret, target_type);
1863 }
1864 return ret;
1865 }
1866
1867 /* do_bind() Must return target values and target errnos. */
1868 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1869 socklen_t addrlen)
1870 {
1871 void *addr;
1872 abi_long ret;
1873
1874 if ((int)addrlen < 0) {
1875 return -TARGET_EINVAL;
1876 }
1877
1878 addr = alloca(addrlen+1);
1879
1880 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1881 if (ret)
1882 return ret;
1883
1884 return get_errno(bind(sockfd, addr, addrlen));
1885 }
1886
1887 /* do_connect() Must return target values and target errnos. */
1888 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1889 socklen_t addrlen)
1890 {
1891 void *addr;
1892 abi_long ret;
1893
1894 if ((int)addrlen < 0) {
1895 return -TARGET_EINVAL;
1896 }
1897
1898 addr = alloca(addrlen);
1899
1900 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1901 if (ret)
1902 return ret;
1903
1904 return get_errno(connect(sockfd, addr, addrlen));
1905 }
1906
1907 /* do_sendrecvmsg() Must return target values and target errnos. */
1908 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1909 int flags, int send)
1910 {
1911 abi_long ret, len;
1912 struct target_msghdr *msgp;
1913 struct msghdr msg;
1914 int count;
1915 struct iovec *vec;
1916 abi_ulong target_vec;
1917
1918 /* FIXME */
1919 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1920 msgp,
1921 target_msg,
1922 send ? 1 : 0))
1923 return -TARGET_EFAULT;
1924 if (msgp->msg_name) {
1925 msg.msg_namelen = tswap32(msgp->msg_namelen);
1926 msg.msg_name = alloca(msg.msg_namelen);
1927 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1928 msg.msg_namelen);
1929 if (ret) {
1930 goto out2;
1931 }
1932 } else {
1933 msg.msg_name = NULL;
1934 msg.msg_namelen = 0;
1935 }
1936 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1937 msg.msg_control = alloca(msg.msg_controllen);
1938 msg.msg_flags = tswap32(msgp->msg_flags);
1939
1940 count = tswapal(msgp->msg_iovlen);
1941 target_vec = tswapal(msgp->msg_iov);
1942 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1943 target_vec, count, send);
1944 if (vec == NULL) {
1945 ret = -host_to_target_errno(errno);
1946 goto out2;
1947 }
1948 msg.msg_iovlen = count;
1949 msg.msg_iov = vec;
1950
1951 if (send) {
1952 ret = target_to_host_cmsg(&msg, msgp);
1953 if (ret == 0)
1954 ret = get_errno(sendmsg(fd, &msg, flags));
1955 } else {
1956 ret = get_errno(recvmsg(fd, &msg, flags));
1957 if (!is_error(ret)) {
1958 len = ret;
1959 ret = host_to_target_cmsg(msgp, &msg);
1960 if (!is_error(ret)) {
1961 msgp->msg_namelen = tswap32(msg.msg_namelen);
1962 if (msg.msg_name != NULL) {
1963 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1964 msg.msg_name, msg.msg_namelen);
1965 if (ret) {
1966 goto out;
1967 }
1968 }
1969
1970 ret = len;
1971 }
1972 }
1973 }
1974
1975 out:
1976 unlock_iovec(vec, target_vec, count, !send);
1977 out2:
1978 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1979 return ret;
1980 }
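/* Note: do_sendrecvmsg() performs three conversions in each direction around
 * the host sendmsg()/recvmsg() call: the optional sockaddr
 * (target_to_host_sockaddr / host_to_target_sockaddr), the iovec array
 * (lock_iovec / unlock_iovec), and the control-message buffer
 * (target_to_host_cmsg / host_to_target_cmsg). The msg_controllen value is
 * doubled on the host side, presumably to leave headroom for cmsg header
 * layout differences between target and host.
 */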
1981
1982 /* If we don't have a system accept4() then just call accept.
1983 * The callsites to do_accept4() will ensure that they don't
1984 * pass a non-zero flags argument in this config.
1985 */
1986 #ifndef CONFIG_ACCEPT4
1987 static inline int accept4(int sockfd, struct sockaddr *addr,
1988 socklen_t *addrlen, int flags)
1989 {
1990 assert(flags == 0);
1991 return accept(sockfd, addr, addrlen);
1992 }
1993 #endif
1994
1995 /* do_accept4() Must return target values and target errnos. */
1996 static abi_long do_accept4(int fd, abi_ulong target_addr,
1997 abi_ulong target_addrlen_addr, int flags)
1998 {
1999 socklen_t addrlen;
2000 void *addr;
2001 abi_long ret;
2002
2003 if (target_addr == 0) {
2004 return get_errno(accept4(fd, NULL, NULL, flags));
2005 }
2006
2007 /* linux returns EINVAL if addrlen pointer is invalid */
2008 if (get_user_u32(addrlen, target_addrlen_addr))
2009 return -TARGET_EINVAL;
2010
2011 if ((int)addrlen < 0) {
2012 return -TARGET_EINVAL;
2013 }
2014
2015 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2016 return -TARGET_EINVAL;
2017
2018 addr = alloca(addrlen);
2019
2020 ret = get_errno(accept4(fd, addr, &addrlen, flags));
2021 if (!is_error(ret)) {
2022 host_to_target_sockaddr(target_addr, addr, addrlen);
2023 if (put_user_u32(addrlen, target_addrlen_addr))
2024 ret = -TARGET_EFAULT;
2025 }
2026 return ret;
2027 }
2028
2029 /* do_getpeername() Must return target values and target errnos. */
2030 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2031 abi_ulong target_addrlen_addr)
2032 {
2033 socklen_t addrlen;
2034 void *addr;
2035 abi_long ret;
2036
2037 if (get_user_u32(addrlen, target_addrlen_addr))
2038 return -TARGET_EFAULT;
2039
2040 if ((int)addrlen < 0) {
2041 return -TARGET_EINVAL;
2042 }
2043
2044 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2045 return -TARGET_EFAULT;
2046
2047 addr = alloca(addrlen);
2048
2049 ret = get_errno(getpeername(fd, addr, &addrlen));
2050 if (!is_error(ret)) {
2051 host_to_target_sockaddr(target_addr, addr, addrlen);
2052 if (put_user_u32(addrlen, target_addrlen_addr))
2053 ret = -TARGET_EFAULT;
2054 }
2055 return ret;
2056 }
2057
2058 /* do_getsockname() Must return target values and target errnos. */
2059 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2060 abi_ulong target_addrlen_addr)
2061 {
2062 socklen_t addrlen;
2063 void *addr;
2064 abi_long ret;
2065
2066 if (get_user_u32(addrlen, target_addrlen_addr))
2067 return -TARGET_EFAULT;
2068
2069 if ((int)addrlen < 0) {
2070 return -TARGET_EINVAL;
2071 }
2072
2073 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2074 return -TARGET_EFAULT;
2075
2076 addr = alloca(addrlen);
2077
2078 ret = get_errno(getsockname(fd, addr, &addrlen));
2079 if (!is_error(ret)) {
2080 host_to_target_sockaddr(target_addr, addr, addrlen);
2081 if (put_user_u32(addrlen, target_addrlen_addr))
2082 ret = -TARGET_EFAULT;
2083 }
2084 return ret;
2085 }
2086
2087 /* do_socketpair() Must return target values and target errnos. */
2088 static abi_long do_socketpair(int domain, int type, int protocol,
2089 abi_ulong target_tab_addr)
2090 {
2091 int tab[2];
2092 abi_long ret;
2093
2094 target_to_host_sock_type(&type);
2095
2096 ret = get_errno(socketpair(domain, type, protocol, tab));
2097 if (!is_error(ret)) {
2098 if (put_user_s32(tab[0], target_tab_addr)
2099 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2100 ret = -TARGET_EFAULT;
2101 }
2102 return ret;
2103 }
2104
2105 /* do_sendto() Must return target values and target errnos. */
2106 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2107 abi_ulong target_addr, socklen_t addrlen)
2108 {
2109 void *addr;
2110 void *host_msg;
2111 abi_long ret;
2112
2113 if ((int)addrlen < 0) {
2114 return -TARGET_EINVAL;
2115 }
2116
2117 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2118 if (!host_msg)
2119 return -TARGET_EFAULT;
2120 if (target_addr) {
2121 addr = alloca(addrlen);
2122 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2123 if (ret) {
2124 unlock_user(host_msg, msg, 0);
2125 return ret;
2126 }
2127 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2128 } else {
2129 ret = get_errno(send(fd, host_msg, len, flags));
2130 }
2131 unlock_user(host_msg, msg, 0);
2132 return ret;
2133 }
2134
2135 /* do_recvfrom() Must return target values and target errnos. */
2136 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2137 abi_ulong target_addr,
2138 abi_ulong target_addrlen)
2139 {
2140 socklen_t addrlen;
2141 void *addr;
2142 void *host_msg;
2143 abi_long ret;
2144
2145 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2146 if (!host_msg)
2147 return -TARGET_EFAULT;
2148 if (target_addr) {
2149 if (get_user_u32(addrlen, target_addrlen)) {
2150 ret = -TARGET_EFAULT;
2151 goto fail;
2152 }
2153 if ((int)addrlen < 0) {
2154 ret = -TARGET_EINVAL;
2155 goto fail;
2156 }
2157 addr = alloca(addrlen);
2158 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2159 } else {
2160 addr = NULL; /* To keep compiler quiet. */
2161 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2162 }
2163 if (!is_error(ret)) {
2164 if (target_addr) {
2165 host_to_target_sockaddr(target_addr, addr, addrlen);
2166 if (put_user_u32(addrlen, target_addrlen)) {
2167 ret = -TARGET_EFAULT;
2168 goto fail;
2169 }
2170 }
2171 unlock_user(host_msg, msg, len);
2172 } else {
2173 fail:
2174 unlock_user(host_msg, msg, 0);
2175 }
2176 return ret;
2177 }
2178
2179 #ifdef TARGET_NR_socketcall
2180 /* do_socketcall() Must return target values and target errnos. */
2181 static abi_long do_socketcall(int num, abi_ulong vptr)
2182 {
2183 static const unsigned ac[] = { /* number of arguments per call */
2184 [SOCKOP_socket] = 3, /* domain, type, protocol */
2185 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2186 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2187 [SOCKOP_listen] = 2, /* sockfd, backlog */
2188 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2189 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2190 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2191 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2192 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2193 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2194 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2195 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2196 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2197 [SOCKOP_shutdown] = 2, /* sockfd, how */
2198 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2199 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2200 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2201 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2202 };
2203 abi_long a[6]; /* max 6 args */
2204
2205 /* first, collect the arguments in a[] according to ac[] */
2206 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2207 unsigned i;
2208 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2209 for (i = 0; i < ac[num]; ++i) {
2210 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2211 return -TARGET_EFAULT;
2212 }
2213 }
2214 }
2215
2216 /* now that we have the args, actually handle the call */
2217 switch (num) {
2218 case SOCKOP_socket: /* domain, type, protocol */
2219 return do_socket(a[0], a[1], a[2]);
2220 case SOCKOP_bind: /* sockfd, addr, addrlen */
2221 return do_bind(a[0], a[1], a[2]);
2222 case SOCKOP_connect: /* sockfd, addr, addrlen */
2223 return do_connect(a[0], a[1], a[2]);
2224 case SOCKOP_listen: /* sockfd, backlog */
2225 return get_errno(listen(a[0], a[1]));
2226 case SOCKOP_accept: /* sockfd, addr, addrlen */
2227 return do_accept4(a[0], a[1], a[2], 0);
2228 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2229 return do_accept4(a[0], a[1], a[2], a[3]);
2230 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2231 return do_getsockname(a[0], a[1], a[2]);
2232 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2233 return do_getpeername(a[0], a[1], a[2]);
2234 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2235 return do_socketpair(a[0], a[1], a[2], a[3]);
2236 case SOCKOP_send: /* sockfd, msg, len, flags */
2237 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2238 case SOCKOP_recv: /* sockfd, msg, len, flags */
2239 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2240 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2241 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2242 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2243 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2244 case SOCKOP_shutdown: /* sockfd, how */
2245 return get_errno(shutdown(a[0], a[1]));
2246 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2247 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2248 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2249 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2250 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2251 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2252 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2253 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2254 default:
2255 gemu_log("Unsupported socketcall: %d\n", num);
2256 return -TARGET_ENOSYS;
2257 }
2258 }
2259 #endif
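/* Illustration (not part of the original source): on targets that multiplex
 * socket operations through socketcall, a guest call such as
 *
 *     socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *
 * arrives here as num == SOCKOP_socketpair with vptr pointing at the four
 * abi_long arguments in guest memory; ac[SOCKOP_socketpair] == 4 tells the
 * loop above how many to fetch with get_user_ual() before dispatching to
 * do_socketpair().
 */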
2260
2261 #define N_SHM_REGIONS 32
2262
2263 static struct shm_region {
2264 abi_ulong start;
2265 abi_ulong size;
2266 } shm_regions[N_SHM_REGIONS];
2267
2268 struct target_semid_ds
2269 {
2270 struct target_ipc_perm sem_perm;
2271 abi_ulong sem_otime;
2272 abi_ulong __unused1;
2273 abi_ulong sem_ctime;
2274 abi_ulong __unused2;
2275 abi_ulong sem_nsems;
2276 abi_ulong __unused3;
2277 abi_ulong __unused4;
2278 };
2279
2280 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2281 abi_ulong target_addr)
2282 {
2283 struct target_ipc_perm *target_ip;
2284 struct target_semid_ds *target_sd;
2285
2286 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2287 return -TARGET_EFAULT;
2288 target_ip = &(target_sd->sem_perm);
2289 host_ip->__key = tswap32(target_ip->__key);
2290 host_ip->uid = tswap32(target_ip->uid);
2291 host_ip->gid = tswap32(target_ip->gid);
2292 host_ip->cuid = tswap32(target_ip->cuid);
2293 host_ip->cgid = tswap32(target_ip->cgid);
2294 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2295 host_ip->mode = tswap32(target_ip->mode);
2296 #else
2297 host_ip->mode = tswap16(target_ip->mode);
2298 #endif
2299 #if defined(TARGET_PPC)
2300 host_ip->__seq = tswap32(target_ip->__seq);
2301 #else
2302 host_ip->__seq = tswap16(target_ip->__seq);
2303 #endif
2304 unlock_user_struct(target_sd, target_addr, 0);
2305 return 0;
2306 }
2307
2308 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2309 struct ipc_perm *host_ip)
2310 {
2311 struct target_ipc_perm *target_ip;
2312 struct target_semid_ds *target_sd;
2313
2314 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2315 return -TARGET_EFAULT;
2316 target_ip = &(target_sd->sem_perm);
2317 target_ip->__key = tswap32(host_ip->__key);
2318 target_ip->uid = tswap32(host_ip->uid);
2319 target_ip->gid = tswap32(host_ip->gid);
2320 target_ip->cuid = tswap32(host_ip->cuid);
2321 target_ip->cgid = tswap32(host_ip->cgid);
2322 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2323 target_ip->mode = tswap32(host_ip->mode);
2324 #else
2325 target_ip->mode = tswap16(host_ip->mode);
2326 #endif
2327 #if defined(TARGET_PPC)
2328 target_ip->__seq = tswap32(host_ip->__seq);
2329 #else
2330 target_ip->__seq = tswap16(host_ip->__seq);
2331 #endif
2332 unlock_user_struct(target_sd, target_addr, 1);
2333 return 0;
2334 }
2335
2336 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2337 abi_ulong target_addr)
2338 {
2339 struct target_semid_ds *target_sd;
2340
2341 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2342 return -TARGET_EFAULT;
2343 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2344 return -TARGET_EFAULT;
2345 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2346 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2347 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2348 unlock_user_struct(target_sd, target_addr, 0);
2349 return 0;
2350 }
2351
2352 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2353 struct semid_ds *host_sd)
2354 {
2355 struct target_semid_ds *target_sd;
2356
2357 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2358 return -TARGET_EFAULT;
2359 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2360 return -TARGET_EFAULT;
2361 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2362 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2363 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2364 unlock_user_struct(target_sd, target_addr, 1);
2365 return 0;
2366 }
2367
2368 struct target_seminfo {
2369 int semmap;
2370 int semmni;
2371 int semmns;
2372 int semmnu;
2373 int semmsl;
2374 int semopm;
2375 int semume;
2376 int semusz;
2377 int semvmx;
2378 int semaem;
2379 };
2380
2381 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2382 struct seminfo *host_seminfo)
2383 {
2384 struct target_seminfo *target_seminfo;
2385 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2386 return -TARGET_EFAULT;
2387 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2388 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2389 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2390 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2391 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2392 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2393 __put_user(host_seminfo->semume, &target_seminfo->semume);
2394 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2395 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2396 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2397 unlock_user_struct(target_seminfo, target_addr, 1);
2398 return 0;
2399 }
2400
2401 union semun {
2402 int val;
2403 struct semid_ds *buf;
2404 unsigned short *array;
2405 struct seminfo *__buf;
2406 };
2407
2408 union target_semun {
2409 int val;
2410 abi_ulong buf;
2411 abi_ulong array;
2412 abi_ulong __buf;
2413 };
2414
2415 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2416 abi_ulong target_addr)
2417 {
2418 int nsems;
2419 unsigned short *array;
2420 union semun semun;
2421 struct semid_ds semid_ds;
2422 int i, ret;
2423
2424 semun.buf = &semid_ds;
2425
2426 ret = semctl(semid, 0, IPC_STAT, semun);
2427 if (ret == -1)
2428 return get_errno(ret);
2429
2430 nsems = semid_ds.sem_nsems;
2431
2432 *host_array = malloc(nsems*sizeof(unsigned short));
2433 array = lock_user(VERIFY_READ, target_addr,
2434 nsems*sizeof(unsigned short), 1);
2435 if (!array)
2436 return -TARGET_EFAULT;
2437
2438 for(i=0; i<nsems; i++) {
2439 __get_user((*host_array)[i], &array[i]);
2440 }
2441 unlock_user(array, target_addr, 0);
2442
2443 return 0;
2444 }
2445
2446 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2447 unsigned short **host_array)
2448 {
2449 int nsems;
2450 unsigned short *array;
2451 union semun semun;
2452 struct semid_ds semid_ds;
2453 int i, ret;
2454
2455 semun.buf = &semid_ds;
2456
2457 ret = semctl(semid, 0, IPC_STAT, semun);
2458 if (ret == -1)
2459 return get_errno(ret);
2460
2461 nsems = semid_ds.sem_nsems;
2462
2463 array = lock_user(VERIFY_WRITE, target_addr,
2464 nsems*sizeof(unsigned short), 0);
2465 if (!array)
2466 return -TARGET_EFAULT;
2467
2468 for(i=0; i<nsems; i++) {
2469 __put_user((*host_array)[i], &array[i]);
2470 }
2471 free(*host_array);
2472 unlock_user(array, target_addr, 1);
2473
2474 return 0;
2475 }
2476
2477 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2478 union target_semun target_su)
2479 {
2480 union semun arg;
2481 struct semid_ds dsarg;
2482 unsigned short *array = NULL;
2483 struct seminfo seminfo;
2484 abi_long ret = -TARGET_EINVAL;
2485 abi_long err;
2486 cmd &= 0xff;
2487
2488 switch( cmd ) {
2489 case GETVAL:
2490 case SETVAL:
2491 arg.val = tswap32(target_su.val);
2492 ret = get_errno(semctl(semid, semnum, cmd, arg));
2493 target_su.val = tswap32(arg.val);
2494 break;
2495 case GETALL:
2496 case SETALL:
2497 err = target_to_host_semarray(semid, &array, target_su.array);
2498 if (err)
2499 return err;
2500 arg.array = array;
2501 ret = get_errno(semctl(semid, semnum, cmd, arg));
2502 err = host_to_target_semarray(semid, target_su.array, &array);
2503 if (err)
2504 return err;
2505 break;
2506 case IPC_STAT:
2507 case IPC_SET:
2508 case SEM_STAT:
2509 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2510 if (err)
2511 return err;
2512 arg.buf = &dsarg;
2513 ret = get_errno(semctl(semid, semnum, cmd, arg));
2514 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2515 if (err)
2516 return err;
2517 break;
2518 case IPC_INFO:
2519 case SEM_INFO:
2520 arg.__buf = &seminfo;
2521 ret = get_errno(semctl(semid, semnum, cmd, arg));
2522 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2523 if (err)
2524 return err;
2525 break;
2526 case IPC_RMID:
2527 case GETPID:
2528 case GETNCNT:
2529 case GETZCNT:
2530 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2531 break;
2532 }
2533
2534 return ret;
2535 }
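/* Note: do_semctl() masks cmd with 0xff, which appears to strip ABI version
 * bits (e.g. IPC_64) that some targets set in the upper bits. GETALL/SETALL
 * round-trip the semaphore values through target_to_host_semarray() and
 * host_to_target_semarray(), which determine the array length by first
 * querying the kernel with IPC_STAT for sem_nsems.
 */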
2536
2537 struct target_sembuf {
2538 unsigned short sem_num;
2539 short sem_op;
2540 short sem_flg;
2541 };
2542
2543 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2544 abi_ulong target_addr,
2545 unsigned nsops)
2546 {
2547 struct target_sembuf *target_sembuf;
2548 int i;
2549
2550 target_sembuf = lock_user(VERIFY_READ, target_addr,
2551 nsops*sizeof(struct target_sembuf), 1);
2552 if (!target_sembuf)
2553 return -TARGET_EFAULT;
2554
2555 for(i=0; i<nsops; i++) {
2556 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2557 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2558 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2559 }
2560
2561 unlock_user(target_sembuf, target_addr, 0);
2562
2563 return 0;
2564 }
2565
2566 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2567 {
2568 struct sembuf sops[nsops];
2569
2570 if (target_to_host_sembuf(sops, ptr, nsops))
2571 return -TARGET_EFAULT;
2572
2573 return get_errno(semop(semid, sops, nsops));
2574 }
2575
2576 struct target_msqid_ds
2577 {
2578 struct target_ipc_perm msg_perm;
2579 abi_ulong msg_stime;
2580 #if TARGET_ABI_BITS == 32
2581 abi_ulong __unused1;
2582 #endif
2583 abi_ulong msg_rtime;
2584 #if TARGET_ABI_BITS == 32
2585 abi_ulong __unused2;
2586 #endif
2587 abi_ulong msg_ctime;
2588 #if TARGET_ABI_BITS == 32
2589 abi_ulong __unused3;
2590 #endif
2591 abi_ulong __msg_cbytes;
2592 abi_ulong msg_qnum;
2593 abi_ulong msg_qbytes;
2594 abi_ulong msg_lspid;
2595 abi_ulong msg_lrpid;
2596 abi_ulong __unused4;
2597 abi_ulong __unused5;
2598 };
2599
2600 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2601 abi_ulong target_addr)
2602 {
2603 struct target_msqid_ds *target_md;
2604
2605 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2606 return -TARGET_EFAULT;
2607 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2608 return -TARGET_EFAULT;
2609 host_md->msg_stime = tswapal(target_md->msg_stime);
2610 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2611 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2612 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2613 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2614 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2615 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2616 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2617 unlock_user_struct(target_md, target_addr, 0);
2618 return 0;
2619 }
2620
2621 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2622 struct msqid_ds *host_md)
2623 {
2624 struct target_msqid_ds *target_md;
2625
2626 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2627 return -TARGET_EFAULT;
2628 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2629 return -TARGET_EFAULT;
2630 target_md->msg_stime = tswapal(host_md->msg_stime);
2631 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2632 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2633 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2634 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2635 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2636 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2637 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2638 unlock_user_struct(target_md, target_addr, 1);
2639 return 0;
2640 }
2641
2642 struct target_msginfo {
2643 int msgpool;
2644 int msgmap;
2645 int msgmax;
2646 int msgmnb;
2647 int msgmni;
2648 int msgssz;
2649 int msgtql;
2650 unsigned short int msgseg;
2651 };
2652
2653 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2654 struct msginfo *host_msginfo)
2655 {
2656 struct target_msginfo *target_msginfo;
2657 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2658 return -TARGET_EFAULT;
2659 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2660 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2661 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2662 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2663 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2664 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2665 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2666 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2667 unlock_user_struct(target_msginfo, target_addr, 1);
2668 return 0;
2669 }
2670
2671 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2672 {
2673 struct msqid_ds dsarg;
2674 struct msginfo msginfo;
2675 abi_long ret = -TARGET_EINVAL;
2676
2677 cmd &= 0xff;
2678
2679 switch (cmd) {
2680 case IPC_STAT:
2681 case IPC_SET:
2682 case MSG_STAT:
2683 if (target_to_host_msqid_ds(&dsarg,ptr))
2684 return -TARGET_EFAULT;
2685 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2686 if (host_to_target_msqid_ds(ptr,&dsarg))
2687 return -TARGET_EFAULT;
2688 break;
2689 case IPC_RMID:
2690 ret = get_errno(msgctl(msgid, cmd, NULL));
2691 break;
2692 case IPC_INFO:
2693 case MSG_INFO:
2694 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2695 if (host_to_target_msginfo(ptr, &msginfo))
2696 return -TARGET_EFAULT;
2697 break;
2698 }
2699
2700 return ret;
2701 }
2702
2703 struct target_msgbuf {
2704 abi_long mtype;
2705 char mtext[1];
2706 };
2707
2708 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2709 unsigned int msgsz, int msgflg)
2710 {
2711 struct target_msgbuf *target_mb;
2712 struct msgbuf *host_mb;
2713 abi_long ret = 0;
2714
2715 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2716 return -TARGET_EFAULT;
2717 host_mb = malloc(msgsz+sizeof(long));
2718 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2719 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2720 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2721 free(host_mb);
2722 unlock_user_struct(target_mb, msgp, 0);
2723
2724 return ret;
2725 }
2726
2727 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2728 unsigned int msgsz, abi_long msgtyp,
2729 int msgflg)
2730 {
2731 struct target_msgbuf *target_mb;
2732 char *target_mtext;
2733 struct msgbuf *host_mb;
2734 abi_long ret = 0;
2735
2736 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2737 return -TARGET_EFAULT;
2738
2739 host_mb = g_malloc(msgsz+sizeof(long));
2740 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2741
2742 if (ret > 0) {
2743 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2744 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2745 if (!target_mtext) {
2746 ret = -TARGET_EFAULT;
2747 goto end;
2748 }
2749 memcpy(target_mb->mtext, host_mb->mtext, ret);
2750 unlock_user(target_mtext, target_mtext_addr, ret);
2751 }
2752
2753 target_mb->mtype = tswapal(host_mb->mtype);
2754
2755 end:
2756 if (target_mb)
2757 unlock_user_struct(target_mb, msgp, 1);
2758 g_free(host_mb);
2759 return ret;
2760 }
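/* Note: both the msgsnd and msgrcv paths above bounce the message through a
 * host-side struct msgbuf sized msgsz + sizeof(long); only the mtype field
 * is byte-swapped with tswapal(), while the mtext payload is copied verbatim.
 */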
2761
2762 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2763 abi_ulong target_addr)
2764 {
2765 struct target_shmid_ds *target_sd;
2766
2767 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2768 return -TARGET_EFAULT;
2769 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2770 return -TARGET_EFAULT;
2771 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2772 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2773 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2774 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2775 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2776 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2777 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2778 unlock_user_struct(target_sd, target_addr, 0);
2779 return 0;
2780 }
2781
2782 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2783 struct shmid_ds *host_sd)
2784 {
2785 struct target_shmid_ds *target_sd;
2786
2787 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2788 return -TARGET_EFAULT;
2789 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2790 return -TARGET_EFAULT;
2791 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2792 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2793 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2794 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2795 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2796 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2797 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2798 unlock_user_struct(target_sd, target_addr, 1);
2799 return 0;
2800 }
2801
2802 struct target_shminfo {
2803 abi_ulong shmmax;
2804 abi_ulong shmmin;
2805 abi_ulong shmmni;
2806 abi_ulong shmseg;
2807 abi_ulong shmall;
2808 };
2809
2810 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2811 struct shminfo *host_shminfo)
2812 {
2813 struct target_shminfo *target_shminfo;
2814 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2815 return -TARGET_EFAULT;
2816 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2817 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2818 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2819 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2820 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2821 unlock_user_struct(target_shminfo, target_addr, 1);
2822 return 0;
2823 }
2824
2825 struct target_shm_info {
2826 int used_ids;
2827 abi_ulong shm_tot;
2828 abi_ulong shm_rss;
2829 abi_ulong shm_swp;
2830 abi_ulong swap_attempts;
2831 abi_ulong swap_successes;
2832 };
2833
2834 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2835 struct shm_info *host_shm_info)
2836 {
2837 struct target_shm_info *target_shm_info;
2838 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2839 return -TARGET_EFAULT;
2840 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2841 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2842 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2843 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2844 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2845 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2846 unlock_user_struct(target_shm_info, target_addr, 1);
2847 return 0;
2848 }
2849
2850 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2851 {
2852 struct shmid_ds dsarg;
2853 struct shminfo shminfo;
2854 struct shm_info shm_info;
2855 abi_long ret = -TARGET_EINVAL;
2856
2857 cmd &= 0xff;
2858
2859 switch(cmd) {
2860 case IPC_STAT:
2861 case IPC_SET:
2862 case SHM_STAT:
2863 if (target_to_host_shmid_ds(&dsarg, buf))
2864 return -TARGET_EFAULT;
2865 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2866 if (host_to_target_shmid_ds(buf, &dsarg))
2867 return -TARGET_EFAULT;
2868 break;
2869 case IPC_INFO:
2870 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2871 if (host_to_target_shminfo(buf, &shminfo))
2872 return -TARGET_EFAULT;
2873 break;
2874 case SHM_INFO:
2875 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2876 if (host_to_target_shm_info(buf, &shm_info))
2877 return -TARGET_EFAULT;
2878 break;
2879 case IPC_RMID:
2880 case SHM_LOCK:
2881 case SHM_UNLOCK:
2882 ret = get_errno(shmctl(shmid, cmd, NULL));
2883 break;
2884 }
2885
2886 return ret;
2887 }
2888
2889 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2890 {
2891 abi_long raddr;
2892 void *host_raddr;
2893 struct shmid_ds shm_info;
2894 int i,ret;
2895
2896 /* find out the length of the shared memory segment */
2897 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2898 if (is_error(ret)) {
2899 /* can't get length, bail out */
2900 return ret;
2901 }
2902
2903 mmap_lock();
2904
2905 if (shmaddr)
2906 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2907 else {
2908 abi_ulong mmap_start;
2909
2910 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2911
2912 if (mmap_start == -1) {
2913 errno = ENOMEM;
2914 host_raddr = (void *)-1;
2915 } else
2916 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2917 }
2918
2919 if (host_raddr == (void *)-1) {
2920 mmap_unlock();
2921 return get_errno((long)host_raddr);
2922 }
2923 raddr=h2g((unsigned long)host_raddr);
2924
2925 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2926 PAGE_VALID | PAGE_READ |
2927 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2928
2929 for (i = 0; i < N_SHM_REGIONS; i++) {
2930 if (shm_regions[i].start == 0) {
2931 shm_regions[i].start = raddr;
2932 shm_regions[i].size = shm_info.shm_segsz;
2933 break;
2934 }
2935 }
2936
2937 mmap_unlock();
2938 return raddr;
2939
2940 }
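/* Note: when the guest passes shmaddr == 0, do_shmat() picks a free guest
 * range with mmap_find_vma() and attaches at g2h(mmap_start) with SHM_REMAP
 * added to the flags, so the host mapping lands exactly where the guest
 * expects it. The resulting guest address and segment size are recorded in
 * shm_regions[] so that do_shmdt() below can clear the page flags for the
 * right length, since shmdt() itself only takes an address.
 */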
2941
2942 static inline abi_long do_shmdt(abi_ulong shmaddr)
2943 {
2944 int i;
2945
2946 for (i = 0; i < N_SHM_REGIONS; ++i) {
2947 if (shm_regions[i].start == shmaddr) {
2948 shm_regions[i].start = 0;
2949 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2950 break;
2951 }
2952 }
2953
2954 return get_errno(shmdt(g2h(shmaddr)));
2955 }
2956
2957 #ifdef TARGET_NR_ipc
2958 /* ??? This only works with linear mappings. */
2959 /* do_ipc() must return target values and target errnos. */
2960 static abi_long do_ipc(unsigned int call, int first,
2961 int second, int third,
2962 abi_long ptr, abi_long fifth)
2963 {
2964 int version;
2965 abi_long ret = 0;
2966
2967 version = call >> 16;
2968 call &= 0xffff;
2969
2970 switch (call) {
2971 case IPCOP_semop:
2972 ret = do_semop(first, ptr, second);
2973 break;
2974
2975 case IPCOP_semget:
2976 ret = get_errno(semget(first, second, third));
2977 break;
2978
2979 case IPCOP_semctl:
2980 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2981 break;
2982
2983 case IPCOP_msgget:
2984 ret = get_errno(msgget(first, second));
2985 break;
2986
2987 case IPCOP_msgsnd:
2988 ret = do_msgsnd(first, ptr, second, third);
2989 break;
2990
2991 case IPCOP_msgctl:
2992 ret = do_msgctl(first, second, ptr);
2993 break;
2994
2995 case IPCOP_msgrcv:
2996 switch (version) {
2997 case 0:
2998 {
2999 struct target_ipc_kludge {
3000 abi_long msgp;
3001 abi_long msgtyp;
3002 } *tmp;
3003
3004 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3005 ret = -TARGET_EFAULT;
3006 break;
3007 }
3008
3009 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3010
3011 unlock_user_struct(tmp, ptr, 0);
3012 break;
3013 }
3014 default:
3015 ret = do_msgrcv(first, ptr, second, fifth, third);
3016 }
3017 break;
3018
3019 case IPCOP_shmat:
3020 switch (version) {
3021 default:
3022 {
3023 abi_ulong raddr;
3024 raddr = do_shmat(first, ptr, second);
3025 if (is_error(raddr))
3026 return get_errno(raddr);
3027 if (put_user_ual(raddr, third))
3028 return -TARGET_EFAULT;
3029 break;
3030 }
3031 case 1:
3032 ret = -TARGET_EINVAL;
3033 break;
3034 }
3035 break;
3036 case IPCOP_shmdt:
3037 ret = do_shmdt(ptr);
3038 break;
3039
3040 case IPCOP_shmget:
3041 /* IPC_* flag values are the same on all linux platforms */
3042 ret = get_errno(shmget(first, second, third));
3043 break;
3044
3045 /* IPC_* and SHM_* command values are the same on all linux platforms */
3046 case IPCOP_shmctl:
3047 ret = do_shmctl(first, second, ptr);
3048 break;
3049 default:
3050 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3051 ret = -TARGET_ENOSYS;
3052 break;
3053 }
3054 return ret;
3055 }
3056 #endif
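/* Note: do_ipc() mirrors the kernel's sys_ipc() demultiplexer: the low 16
 * bits of 'call' select the operation and call >> 16 carries a version
 * number. Version 0 of IPCOP_msgrcv passes msgp and msgtyp indirectly
 * through the target_ipc_kludge structure above, matching the historical
 * Linux calling convention.
 */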
3057
3058 /* kernel structure types definitions */
3059
3060 #define STRUCT(name, ...) STRUCT_ ## name,
3061 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3062 enum {
3063 #include "syscall_types.h"
3064 };
3065 #undef STRUCT
3066 #undef STRUCT_SPECIAL
3067
3068 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3069 #define STRUCT_SPECIAL(name)
3070 #include "syscall_types.h"
3071 #undef STRUCT
3072 #undef STRUCT_SPECIAL
3073
3074 typedef struct IOCTLEntry IOCTLEntry;
3075
3076 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3077 int fd, abi_long cmd, abi_long arg);
3078
3079 struct IOCTLEntry {
3080 unsigned int target_cmd;
3081 unsigned int host_cmd;
3082 const char *name;
3083 int access;
3084 do_ioctl_fn *do_ioctl;
3085 const argtype arg_type[5];
3086 };
3087
3088 #define IOC_R 0x0001
3089 #define IOC_W 0x0002
3090 #define IOC_RW (IOC_R | IOC_W)
3091
3092 #define MAX_STRUCT_SIZE 4096
3093
3094 #ifdef CONFIG_FIEMAP
3095 /* So fiemap access checks don't overflow on 32 bit systems.
3096 * This is very slightly smaller than the limit imposed by
3097 * the underlying kernel.
3098 */
3099 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3100 / sizeof(struct fiemap_extent))
3101
3102 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3103 int fd, abi_long cmd, abi_long arg)
3104 {
3105 /* The parameter for this ioctl is a struct fiemap followed
3106 * by an array of struct fiemap_extent whose size is set
3107 * in fiemap->fm_extent_count. The array is filled in by the
3108 * ioctl.
3109 */
3110 int target_size_in, target_size_out;
3111 struct fiemap *fm;
3112 const argtype *arg_type = ie->arg_type;
3113 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3114 void *argptr, *p;
3115 abi_long ret;
3116 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3117 uint32_t outbufsz;
3118 int free_fm = 0;
3119
3120 assert(arg_type[0] == TYPE_PTR);
3121 assert(ie->access == IOC_RW);
3122 arg_type++;
3123 target_size_in = thunk_type_size(arg_type, 0);
3124 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3125 if (!argptr) {
3126 return -TARGET_EFAULT;
3127 }
3128 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3129 unlock_user(argptr, arg, 0);
3130 fm = (struct fiemap *)buf_temp;
3131 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3132 return -TARGET_EINVAL;
3133 }
3134
3135 outbufsz = sizeof (*fm) +
3136 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3137
3138 if (outbufsz > MAX_STRUCT_SIZE) {
3139 /* We can't fit all the extents into the fixed size buffer.
3140 * Allocate one that is large enough and use it instead.
3141 */
3142 fm = malloc(outbufsz);
3143 if (!fm) {
3144 return -TARGET_ENOMEM;
3145 }
3146 memcpy(fm, buf_temp, sizeof(struct fiemap));
3147 free_fm = 1;
3148 }
3149 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3150 if (!is_error(ret)) {
3151 target_size_out = target_size_in;
3152 /* An extent_count of 0 means we were only counting the extents
3153 * so there are no structs to copy
3154 */
3155 if (fm->fm_extent_count != 0) {
3156 target_size_out += fm->fm_mapped_extents * extent_size;
3157 }
3158 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3159 if (!argptr) {
3160 ret = -TARGET_EFAULT;
3161 } else {
3162 /* Convert the struct fiemap */
3163 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3164 if (fm->fm_extent_count != 0) {
3165 p = argptr + target_size_in;
3166 /* ...and then all the struct fiemap_extents */
3167 for (i = 0; i < fm->fm_mapped_extents; i++) {
3168 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3169 THUNK_TARGET);
3170 p += extent_size;
3171 }
3172 }
3173 unlock_user(argptr, arg, target_size_out);
3174 }
3175 }
3176 if (free_fm) {
3177 free(fm);
3178 }
3179 return ret;
3180 }
3181 #endif
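/* Note: the fiemap handler above sizes its output buffer as
 * sizeof(struct fiemap) + fm_extent_count * sizeof(struct fiemap_extent),
 * falling back to a heap allocation when that exceeds MAX_STRUCT_SIZE, and
 * only copies extent records back to the guest when fm_extent_count is
 * non-zero (a zero count means the caller was just counting extents).
 */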
3182
3183 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3184 int fd, abi_long cmd, abi_long arg)
3185 {
3186 const argtype *arg_type = ie->arg_type;
3187 int target_size;
3188 void *argptr;
3189 int ret;
3190 struct ifconf *host_ifconf;
3191 uint32_t outbufsz;
3192 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3193 int target_ifreq_size;
3194 int nb_ifreq;
3195 int free_buf = 0;
3196 int i;
3197 int target_ifc_len;
3198 abi_long target_ifc_buf;
3199 int host_ifc_len;
3200 char *host_ifc_buf;
3201
3202 assert(arg_type[0] == TYPE_PTR);
3203 assert(ie->access == IOC_RW);
3204
3205 arg_type++;
3206 target_size = thunk_type_size(arg_type, 0);
3207
3208 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3209 if (!argptr)
3210 return -TARGET_EFAULT;
3211 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3212 unlock_user(argptr, arg, 0);
3213
3214 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3215 target_ifc_len = host_ifconf->ifc_len;
3216 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3217
3218 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3219 nb_ifreq = target_ifc_len / target_ifreq_size;
3220 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3221
3222 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3223 if (outbufsz > MAX_STRUCT_SIZE) {
3224 /* We can't fit all the ifreq entries into the fixed size buffer.
3225 * Allocate one that is large enough and use it instead.
3226 */
3227 host_ifconf = malloc(outbufsz);
3228 if (!host_ifconf) {
3229 return -TARGET_ENOMEM;
3230 }
3231 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3232 free_buf = 1;
3233 }
3234 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3235
3236 host_ifconf->ifc_len = host_ifc_len;
3237 host_ifconf->ifc_buf = host_ifc_buf;
3238
3239 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3240 if (!is_error(ret)) {
3241 /* convert host ifc_len to target ifc_len */
3242
3243 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3244 target_ifc_len = nb_ifreq * target_ifreq_size;
3245 host_ifconf->ifc_len = target_ifc_len;
3246
3247 /* restore target ifc_buf */
3248
3249 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3250
3251 /* copy struct ifconf to target user */
3252
3253 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3254 if (!argptr)
3255 return -TARGET_EFAULT;
3256 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3257 unlock_user(argptr, arg, target_size);
3258
3259 /* copy ifreq[] to target user */
3260
3261 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3262 for (i = 0; i < nb_ifreq ; i++) {
3263 thunk_convert(argptr + i * target_ifreq_size,
3264 host_ifc_buf + i * sizeof(struct ifreq),
3265 ifreq_arg_type, THUNK_TARGET);
3266 }
3267 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3268 }
3269
3270 if (free_buf) {
3271 free(host_ifconf);
3272 }
3273
3274 return ret;
3275 }
3276
3277 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3278 abi_long cmd, abi_long arg)
3279 {
3280 void *argptr;
3281 struct dm_ioctl *host_dm;
3282 abi_long guest_data;
3283 uint32_t guest_data_size;
3284 int target_size;
3285 const argtype *arg_type = ie->arg_type;
3286 abi_long ret;
3287 void *big_buf = NULL;
3288 char *host_data;
3289
3290 arg_type++;
3291 target_size = thunk_type_size(arg_type, 0);
3292 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3293 if (!argptr) {
3294 ret = -TARGET_EFAULT;
3295 goto out;
3296 }
3297 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3298 unlock_user(argptr, arg, 0);
3299
3300 /* buf_temp is too small, so fetch things into a bigger buffer */
3301 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3302 memcpy(big_buf, buf_temp, target_size);
3303 buf_temp = big_buf;
3304 host_dm = big_buf;
3305
3306 guest_data = arg + host_dm->data_start;
3307 if ((guest_data - arg) < 0) {
3308 ret = -EINVAL;
3309 goto out;
3310 }
3311 guest_data_size = host_dm->data_size - host_dm->data_start;
3312 host_data = (char*)host_dm + host_dm->data_start;
3313
3314 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3315 switch (ie->host_cmd) {
3316 case DM_REMOVE_ALL:
3317 case DM_LIST_DEVICES:
3318 case DM_DEV_CREATE:
3319 case DM_DEV_REMOVE:
3320 case DM_DEV_SUSPEND:
3321 case DM_DEV_STATUS:
3322 case DM_DEV_WAIT:
3323 case DM_TABLE_STATUS:
3324 case DM_TABLE_CLEAR:
3325 case DM_TABLE_DEPS:
3326 case DM_LIST_VERSIONS:
3327 /* no input data */
3328 break;
3329 case DM_DEV_RENAME:
3330 case DM_DEV_SET_GEOMETRY:
3331 /* data contains only strings */
3332 memcpy(host_data, argptr, guest_data_size);
3333 break;
3334 case DM_TARGET_MSG:
3335 memcpy(host_data, argptr, guest_data_size);
3336 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3337 break;
3338 case DM_TABLE_LOAD:
3339 {
3340 void *gspec = argptr;
3341 void *cur_data = host_data;
3342 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3343 int spec_size = thunk_type_size(arg_type, 0);
3344 int i;
3345
3346 for (i = 0; i < host_dm->target_count; i++) {
3347 struct dm_target_spec *spec = cur_data;
3348 uint32_t next;
3349 int slen;
3350
3351 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3352 slen = strlen((char*)gspec + spec_size) + 1;
3353 next = spec->next;
3354 spec->next = sizeof(*spec) + slen;
3355 strcpy((char*)&spec[1], gspec + spec_size);
3356 gspec += next;
3357 cur_data += spec->next;
3358 }
3359 break;
3360 }
3361 default:
3362 ret = -TARGET_EINVAL;
3363 goto out;
3364 }
3365 unlock_user(argptr, guest_data, 0);
3366
3367 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3368 if (!is_error(ret)) {
3369 guest_data = arg + host_dm->data_start;
3370 guest_data_size = host_dm->data_size - host_dm->data_start;
3371 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3372 switch (ie->host_cmd) {
3373 case DM_REMOVE_ALL:
3374 case DM_DEV_CREATE:
3375 case DM_DEV_REMOVE:
3376 case DM_DEV_RENAME:
3377 case DM_DEV_SUSPEND:
3378 case DM_DEV_STATUS:
3379 case DM_TABLE_LOAD:
3380 case DM_TABLE_CLEAR:
3381 case DM_TARGET_MSG:
3382 case DM_DEV_SET_GEOMETRY:
3383 /* no return data */
3384 break;
3385 case DM_LIST_DEVICES:
3386 {
3387 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3388 uint32_t remaining_data = guest_data_size;
3389 void *cur_data = argptr;
3390 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3391 int nl_size = 12; /* can't use thunk_size due to alignment */
3392
3393 while (1) {
3394 uint32_t next = nl->next;
3395 if (next) {
3396 nl->next = nl_size + (strlen(nl->name) + 1);
3397 }
3398 if (remaining_data < nl->next) {
3399 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3400 break;
3401 }
3402 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3403 strcpy(cur_data + nl_size, nl->name);
3404 cur_data += nl->next;
3405 remaining_data -= nl->next;
3406 if (!next) {
3407 break;
3408 }
3409 nl = (void*)nl + next;
3410 }
3411 break;
3412 }
3413 case DM_DEV_WAIT:
3414 case DM_TABLE_STATUS:
3415 {
3416 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3417 void *cur_data = argptr;
3418 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3419 int spec_size = thunk_type_size(arg_type, 0);
3420 int i;
3421
3422 for (i = 0; i < host_dm->target_count; i++) {
3423 uint32_t next = spec->next;
3424 int slen = strlen((char*)&spec[1]) + 1;
3425 spec->next = (cur_data - argptr) + spec_size + slen;
3426 if (guest_data_size < spec->next) {
3427 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3428 break;
3429 }
3430 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3431 strcpy(cur_data + spec_size, (char*)&spec[1]);
3432 cur_data = argptr + spec->next;
3433 spec = (void*)host_dm + host_dm->data_start + next;
3434 }
3435 break;
3436 }
3437 case DM_TABLE_DEPS:
3438 {
3439 void *hdata = (void*)host_dm + host_dm->data_start;
3440 int count = *(uint32_t*)hdata;
3441 uint64_t *hdev = hdata + 8;
3442 uint64_t *gdev = argptr + 8;
3443 int i;
3444
3445 *(uint32_t*)argptr = tswap32(count);
3446 for (i = 0; i < count; i++) {
3447 *gdev = tswap64(*hdev);
3448 gdev++;
3449 hdev++;
3450 }
3451 break;
3452 }
3453 case DM_LIST_VERSIONS:
3454 {
3455 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3456 uint32_t remaining_data = guest_data_size;
3457 void *cur_data = argptr;
3458 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3459 int vers_size = thunk_type_size(arg_type, 0);
3460
3461 while (1) {
3462 uint32_t next = vers->next;
3463 if (next) {
3464 vers->next = vers_size + (strlen(vers->name) + 1);
3465 }
3466 if (remaining_data < vers->next) {
3467 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3468 break;
3469 }
3470 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3471 strcpy(cur_data + vers_size, vers->name);
3472 cur_data += vers->next;
3473 remaining_data -= vers->next;
3474 if (!next) {
3475 break;
3476 }
3477 vers = (void*)vers + next;
3478 }
3479 break;
3480 }
3481 default:
3482 ret = -TARGET_EINVAL;
3483 goto out;
3484 }
3485 unlock_user(argptr, guest_data, guest_data_size);
3486
3487 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3488 if (!argptr) {
3489 ret = -TARGET_EFAULT;
3490 goto out;
3491 }
3492 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3493 unlock_user(argptr, arg, target_size);
3494 }
3495 out:
3496 g_free(big_buf);
3497 return ret;
3498 }
3499
3500 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3501 int fd, abi_long cmd, abi_long arg)
3502 {
3503 const argtype *arg_type = ie->arg_type;
3504 const StructEntry *se;
3505 const argtype *field_types;
3506 const int *dst_offsets, *src_offsets;
3507 int target_size;
3508 void *argptr;
3509 abi_ulong *target_rt_dev_ptr;
3510 unsigned long *host_rt_dev_ptr;
3511 abi_long ret;
3512 int i;
3513
3514 assert(ie->access == IOC_W);
3515 assert(*arg_type == TYPE_PTR);
3516 arg_type++;
3517 assert(*arg_type == TYPE_STRUCT);
3518 target_size = thunk_type_size(arg_type, 0);
3519 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3520 if (!argptr) {
3521 return -TARGET_EFAULT;
3522 }
3523 arg_type++;
3524 assert(*arg_type == (int)STRUCT_rtentry);
3525 se = struct_entries + *arg_type++;
3526 assert(se->convert[0] == NULL);
3527 /* convert struct here to be able to catch rt_dev string */
3528 field_types = se->field_types;
3529 dst_offsets = se->field_offsets[THUNK_HOST];
3530 src_offsets = se->field_offsets[THUNK_TARGET];
3531 for (i = 0; i < se->nb_fields; i++) {
3532 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3533 assert(*field_types == TYPE_PTRVOID);
3534 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3535 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3536 if (*target_rt_dev_ptr != 0) {
3537 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3538 tswapal(*target_rt_dev_ptr));
3539 if (!*host_rt_dev_ptr) {
3540 unlock_user(argptr, arg, 0);
3541 return -TARGET_EFAULT;
3542 }
3543 } else {
3544 *host_rt_dev_ptr = 0;
3545 }
3546 field_types++;
3547 continue;
3548 }
3549 field_types = thunk_convert(buf_temp + dst_offsets[i],
3550 argptr + src_offsets[i],
3551 field_types, THUNK_HOST);
3552 }
3553 unlock_user(argptr, arg, 0);
3554
3555 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3556 if (*host_rt_dev_ptr != 0) {
3557 unlock_user((void *)*host_rt_dev_ptr,
3558 *target_rt_dev_ptr, 0);
3559 }
3560 return ret;
3561 }
3562
3563 static IOCTLEntry ioctl_entries[] = {
3564 #define IOCTL(cmd, access, ...) \
3565 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3566 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3567 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3568 #include "ioctls.h"
3569 { 0, 0, },
3570 };
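/* Hypothetical expansion, for illustration only (EXAMPLE_CMD and
 * STRUCT_example are placeholders, not entries from the real ioctls.h):
 *
 *     IOCTL(EXAMPLE_CMD, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_example)))
 *
 * expands to
 *
 *     { TARGET_EXAMPLE_CMD, EXAMPLE_CMD, "EXAMPLE_CMD", IOC_RW, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_example)) } },
 *
 * i.e. target and host command numbers, a printable name for logging, the
 * access mode, no special handler, and the argument type description used
 * by the generic thunk path in do_ioctl().
 */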
3571
3572 /* ??? Implement proper locking for ioctls. */
3573 /* do_ioctl() Must return target values and target errnos. */
3574 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3575 {
3576 const IOCTLEntry *ie;
3577 const argtype *arg_type;
3578 abi_long ret;
3579 uint8_t buf_temp[MAX_STRUCT_SIZE];
3580 int target_size;
3581 void *argptr;
3582
3583 ie = ioctl_entries;
3584 for(;;) {
3585 if (ie->target_cmd == 0) {
3586 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3587 return -TARGET_ENOSYS;
3588 }
3589 if (ie->target_cmd == cmd)
3590 break;
3591 ie++;
3592 }
3593 arg_type = ie->arg_type;
3594 #if defined(DEBUG)
3595 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3596 #endif
3597 if (ie->do_ioctl) {
3598 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3599 }
3600
3601 switch(arg_type[0]) {
3602 case TYPE_NULL:
3603 /* no argument */
3604 ret = get_errno(ioctl(fd, ie->host_cmd));
3605 break;
3606 case TYPE_PTRVOID:
3607 case TYPE_INT:
3608 /* int argument */
3609 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3610 break;
3611 case TYPE_PTR:
3612 arg_type++;
3613 target_size = thunk_type_size(arg_type, 0);
3614 switch(ie->access) {
3615 case IOC_R:
3616 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3617 if (!is_error(ret)) {
3618 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3619 if (!argptr)
3620 return -TARGET_EFAULT;
3621 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3622 unlock_user(argptr, arg, target_size);
3623 }
3624 break;
3625 case IOC_W:
3626 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3627 if (!argptr)
3628 return -TARGET_EFAULT;
3629 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3630 unlock_user(argptr, arg, 0);
3631 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3632 break;
3633 default:
3634 case IOC_RW:
3635 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3636 if (!argptr)
3637 return -TARGET_EFAULT;
3638 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3639 unlock_user(argptr, arg, 0);
3640 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3641 if (!is_error(ret)) {
3642 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3643 if (!argptr)
3644 return -TARGET_EFAULT;
3645 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3646 unlock_user(argptr, arg, target_size);
3647 }
3648 break;
3649 }
3650 break;
3651 default:
3652 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3653 (long)cmd, arg_type[0]);
3654 ret = -TARGET_ENOSYS;
3655 break;
3656 }
3657 return ret;
3658 }
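/* Note: for table entries without a do_ioctl callback, the generic path
 * above uses ie->access to pick the data flow: IOC_W converts guest memory
 * into buf_temp before the host ioctl(), IOC_R converts buf_temp back to
 * guest memory afterwards, and IOC_RW does both. Since buf_temp is only
 * MAX_STRUCT_SIZE bytes, larger or variable-sized arguments need a special
 * handler such as do_ioctl_fs_ioc_fiemap() or do_ioctl_dm() above.
 */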
3659
3660 static const bitmask_transtbl iflag_tbl[] = {
3661 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3662 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3663 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3664 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3665 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3666 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3667 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3668 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3669 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3670 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3671 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3672 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3673 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3674 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3675 { 0, 0, 0, 0 }
3676 };
3677
3678 static const bitmask_transtbl oflag_tbl[] = {
3679 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3680 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3681 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3682 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3683 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3684 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3685 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3686 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3687 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3688 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3689 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3690 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3691 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3692 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3693 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3694 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3695 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3696 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3697 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3698 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3699 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3700 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3701 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3702 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3703 { 0, 0, 0, 0 }
3704 };
3705
3706 static const bitmask_transtbl cflag_tbl[] = {
3707 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3708 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3709 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3710 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3711 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3712 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3713 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3714 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3715 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3716 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3717 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3718 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3719 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3720 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3721 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3722 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3723 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3724 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3725 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3726 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3727 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3728 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3729 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3730 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3731 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3732 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3733 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3734 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3735 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3736 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3737 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3738 { 0, 0, 0, 0 }
3739 };
3740
3741 static const bitmask_transtbl lflag_tbl[] = {
3742 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3743 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3744 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3745 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3746 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3747 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3748 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3749 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3750 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3751 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3752 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3753 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3754 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3755 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3756 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3757 { 0, 0, 0, 0 }
3758 };
3759
3760 static void target_to_host_termios (void *dst, const void *src)
3761 {
3762 struct host_termios *host = dst;
3763 const struct target_termios *target = src;
3764
3765 host->c_iflag =
3766 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3767 host->c_oflag =
3768 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3769 host->c_cflag =
3770 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3771 host->c_lflag =
3772 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3773 host->c_line = target->c_line;
3774
3775 memset(host->c_cc, 0, sizeof(host->c_cc));
3776 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3777 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3778 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3779 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3780 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3781 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3782 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3783 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3784 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3785 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3786 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3787 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3788 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3789 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3790 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3791 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3792 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3793 }
3794
3795 static void host_to_target_termios (void *dst, const void *src)
3796 {
3797 struct target_termios *target = dst;
3798 const struct host_termios *host = src;
3799
3800 target->c_iflag =
3801 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3802 target->c_oflag =
3803 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3804 target->c_cflag =
3805 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3806 target->c_lflag =
3807 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3808 target->c_line = host->c_line;
3809
3810 memset(target->c_cc, 0, sizeof(target->c_cc));
3811 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3812 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3813 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3814 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3815 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3816 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3817 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3818 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3819 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3820 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3821 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3822 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3823 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3824 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3825 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3826 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3827 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3828 }
3829
3830 static const StructEntry struct_termios_def = {
3831 .convert = { host_to_target_termios, target_to_host_termios },
3832 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3833 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3834 };
3835
3836 static bitmask_transtbl mmap_flags_tbl[] = {
3837 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3838 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3839 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3840 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3841 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3842 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3843 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3844 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3845 { 0, 0, 0, 0 }
3846 };
3847
3848 #if defined(TARGET_I386)
3849
3850 /* NOTE: there is really one LDT for all the threads */
3851 static uint8_t *ldt_table;
3852
3853 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3854 {
3855 int size;
3856 void *p;
3857
3858 if (!ldt_table)
3859 return 0;
3860 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3861 if (size > bytecount)
3862 size = bytecount;
3863 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3864 if (!p)
3865 return -TARGET_EFAULT;
3866 /* ??? Should this be byteswapped? */
3867 memcpy(p, ldt_table, size);
3868 unlock_user(p, ptr, size);
3869 return size;
3870 }
3871
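/*
 * Illustrative note: write_ldt() and do_set_thread_area() below decode
 * the 'flags' word of struct target_modify_ldt_ldt_s the same way the
 * kernel decodes struct user_desc:
 *   bit 0    seg_32bit
 *   bits 1-2 contents
 *   bit 3    read_exec_only
 *   bit 4    limit_in_pages
 *   bit 5    seg_not_present
 *   bit 6    useable
 *   bit 7    lm (64-bit ABI only)
 * The fields are then repacked into the two 32-bit halves of an x86
 * segment descriptor (entry_1/entry_2), mirroring the kernel's own
 * modify_ldt/set_thread_area code.
 */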
3872 /* XXX: add locking support */
3873 static abi_long write_ldt(CPUX86State *env,
3874 abi_ulong ptr, unsigned long bytecount, int oldmode)
3875 {
3876 struct target_modify_ldt_ldt_s ldt_info;
3877 struct target_modify_ldt_ldt_s *target_ldt_info;
3878 int seg_32bit, contents, read_exec_only, limit_in_pages;
3879 int seg_not_present, useable, lm;
3880 uint32_t *lp, entry_1, entry_2;
3881
3882 if (bytecount != sizeof(ldt_info))
3883 return -TARGET_EINVAL;
3884 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3885 return -TARGET_EFAULT;
3886 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3887 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3888 ldt_info.limit = tswap32(target_ldt_info->limit);
3889 ldt_info.flags = tswap32(target_ldt_info->flags);
3890 unlock_user_struct(target_ldt_info, ptr, 0);
3891
3892 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3893 return -TARGET_EINVAL;
3894 seg_32bit = ldt_info.flags & 1;
3895 contents = (ldt_info.flags >> 1) & 3;
3896 read_exec_only = (ldt_info.flags >> 3) & 1;
3897 limit_in_pages = (ldt_info.flags >> 4) & 1;
3898 seg_not_present = (ldt_info.flags >> 5) & 1;
3899 useable = (ldt_info.flags >> 6) & 1;
3900 #ifdef TARGET_ABI32
3901 lm = 0;
3902 #else
3903 lm = (ldt_info.flags >> 7) & 1;
3904 #endif
3905 if (contents == 3) {
3906 if (oldmode)
3907 return -TARGET_EINVAL;
3908 if (seg_not_present == 0)
3909 return -TARGET_EINVAL;
3910 }
3911 /* allocate the LDT */
3912 if (!ldt_table) {
3913 env->ldt.base = target_mmap(0,
3914 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3915 PROT_READ|PROT_WRITE,
3916 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3917 if (env->ldt.base == -1)
3918 return -TARGET_ENOMEM;
3919 memset(g2h(env->ldt.base), 0,
3920 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3921 env->ldt.limit = 0xffff;
3922 ldt_table = g2h(env->ldt.base);
3923 }
3924
3925 /* NOTE: same code as Linux kernel */
3926 /* Allow LDTs to be cleared by the user. */
3927 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3928 if (oldmode ||
3929 (contents == 0 &&
3930 read_exec_only == 1 &&
3931 seg_32bit == 0 &&
3932 limit_in_pages == 0 &&
3933 seg_not_present == 1 &&
3934 useable == 0 )) {
3935 entry_1 = 0;
3936 entry_2 = 0;
3937 goto install;
3938 }
3939 }
3940
3941 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3942 (ldt_info.limit & 0x0ffff);
3943 entry_2 = (ldt_info.base_addr & 0xff000000) |
3944 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3945 (ldt_info.limit & 0xf0000) |
3946 ((read_exec_only ^ 1) << 9) |
3947 (contents << 10) |
3948 ((seg_not_present ^ 1) << 15) |
3949 (seg_32bit << 22) |
3950 (limit_in_pages << 23) |
3951 (lm << 21) |
3952 0x7000;
3953 if (!oldmode)
3954 entry_2 |= (useable << 20);
3955
3956 /* Install the new entry ... */
3957 install:
3958 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3959 lp[0] = tswap32(entry_1);
3960 lp[1] = tswap32(entry_2);
3961 return 0;
3962 }
3963
3964 /* specific and weird i386 syscalls */
3965 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3966 unsigned long bytecount)
3967 {
3968 abi_long ret;
3969
3970 switch (func) {
3971 case 0:
3972 ret = read_ldt(ptr, bytecount);
3973 break;
3974 case 1:
3975 ret = write_ldt(env, ptr, bytecount, 1);
3976 break;
3977 case 0x11:
3978 ret = write_ldt(env, ptr, bytecount, 0);
3979 break;
3980 default:
3981 ret = -TARGET_ENOSYS;
3982 break;
3983 }
3984 return ret;
3985 }
3986
3987 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3988 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3989 {
3990 uint64_t *gdt_table = g2h(env->gdt.base);
3991 struct target_modify_ldt_ldt_s ldt_info;
3992 struct target_modify_ldt_ldt_s *target_ldt_info;
3993 int seg_32bit, contents, read_exec_only, limit_in_pages;
3994 int seg_not_present, useable, lm;
3995 uint32_t *lp, entry_1, entry_2;
3996 int i;
3997
3998 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3999 if (!target_ldt_info)
4000 return -TARGET_EFAULT;
4001 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4002 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4003 ldt_info.limit = tswap32(target_ldt_info->limit);
4004 ldt_info.flags = tswap32(target_ldt_info->flags);
4005 if (ldt_info.entry_number == -1) {
4006 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4007 if (gdt_table[i] == 0) {
4008 ldt_info.entry_number = i;
4009 target_ldt_info->entry_number = tswap32(i);
4010 break;
4011 }
4012 }
4013 }
4014 unlock_user_struct(target_ldt_info, ptr, 1);
4015
4016 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4017 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4018 return -TARGET_EINVAL;
4019 seg_32bit = ldt_info.flags & 1;
4020 contents = (ldt_info.flags >> 1) & 3;
4021 read_exec_only = (ldt_info.flags >> 3) & 1;
4022 limit_in_pages = (ldt_info.flags >> 4) & 1;
4023 seg_not_present = (ldt_info.flags >> 5) & 1;
4024 useable = (ldt_info.flags >> 6) & 1;
4025 #ifdef TARGET_ABI32
4026 lm = 0;
4027 #else
4028 lm = (ldt_info.flags >> 7) & 1;
4029 #endif
4030
4031 if (contents == 3) {
4032 if (seg_not_present == 0)
4033 return -TARGET_EINVAL;
4034 }
4035
4036 /* NOTE: same code as Linux kernel */
4037 /* Allow LDTs to be cleared by the user. */
4038 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4039 if ((contents == 0 &&
4040 read_exec_only == 1 &&
4041 seg_32bit == 0 &&
4042 limit_in_pages == 0 &&
4043 seg_not_present == 1 &&
4044 useable == 0 )) {
4045 entry_1 = 0;
4046 entry_2 = 0;
4047 goto install;
4048 }
4049 }
4050
4051 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4052 (ldt_info.limit & 0x0ffff);
4053 entry_2 = (ldt_info.base_addr & 0xff000000) |
4054 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4055 (ldt_info.limit & 0xf0000) |
4056 ((read_exec_only ^ 1) << 9) |
4057 (contents << 10) |
4058 ((seg_not_present ^ 1) << 15) |
4059 (seg_32bit << 22) |
4060 (limit_in_pages << 23) |
4061 (useable << 20) |
4062 (lm << 21) |
4063 0x7000;
4064
4065 /* Install the new entry ... */
4066 install:
4067 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4068 lp[0] = tswap32(entry_1);
4069 lp[1] = tswap32(entry_2);
4070 return 0;
4071 }
4072
4073 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4074 {
4075 struct target_modify_ldt_ldt_s *target_ldt_info;
4076 uint64_t *gdt_table = g2h(env->gdt.base);
4077 uint32_t base_addr, limit, flags;
4078 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4079 int seg_not_present, useable, lm;
4080 uint32_t *lp, entry_1, entry_2;
4081
4082 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4083 if (!target_ldt_info)
4084 return -TARGET_EFAULT;
4085 idx = tswap32(target_ldt_info->entry_number);
4086 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4087 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4088 unlock_user_struct(target_ldt_info, ptr, 1);
4089 return -TARGET_EINVAL;
4090 }
4091 lp = (uint32_t *)(gdt_table + idx);
4092 entry_1 = tswap32(lp[0]);
4093 entry_2 = tswap32(lp[1]);
4094
4095 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4096 contents = (entry_2 >> 10) & 3;
4097 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4098 seg_32bit = (entry_2 >> 22) & 1;
4099 limit_in_pages = (entry_2 >> 23) & 1;
4100 useable = (entry_2 >> 20) & 1;
4101 #ifdef TARGET_ABI32
4102 lm = 0;
4103 #else
4104 lm = (entry_2 >> 21) & 1;
4105 #endif
4106 flags = (seg_32bit << 0) | (contents << 1) |
4107 (read_exec_only << 3) | (limit_in_pages << 4) |
4108 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4109 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4110 base_addr = (entry_1 >> 16) |
4111 (entry_2 & 0xff000000) |
4112 ((entry_2 & 0xff) << 16);
4113 target_ldt_info->base_addr = tswapal(base_addr);
4114 target_ldt_info->limit = tswap32(limit);
4115 target_ldt_info->flags = tswap32(flags);
4116 unlock_user_struct(target_ldt_info, ptr, 1);
4117 return 0;
4118 }
4119 #endif /* TARGET_I386 && TARGET_ABI32 */
4120
4121 #ifndef TARGET_ABI32
4122 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4123 {
4124 abi_long ret = 0;
4125 abi_ulong val;
4126 int idx;
4127
4128 switch(code) {
4129 case TARGET_ARCH_SET_GS:
4130 case TARGET_ARCH_SET_FS:
4131 if (code == TARGET_ARCH_SET_GS)
4132 idx = R_GS;
4133 else
4134 idx = R_FS;
4135 cpu_x86_load_seg(env, idx, 0);
4136 env->segs[idx].base = addr;
4137 break;
4138 case TARGET_ARCH_GET_GS:
4139 case TARGET_ARCH_GET_FS:
4140 if (code == TARGET_ARCH_GET_GS)
4141 idx = R_GS;
4142 else
4143 idx = R_FS;
4144 val = env->segs[idx].base;
4145 if (put_user(val, addr, abi_ulong))
4146 ret = -TARGET_EFAULT;
4147 break;
4148 default:
4149 ret = -TARGET_EINVAL;
4150 break;
4151 }
4152 return ret;
4153 }
4154 #endif
4155
4156 #endif /* defined(TARGET_I386) */
4157
4158 #define NEW_STACK_SIZE 0x40000
4159
4160
4161 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4162 typedef struct {
4163 CPUArchState *env;
4164 pthread_mutex_t mutex;
4165 pthread_cond_t cond;
4166 pthread_t thread;
4167 uint32_t tid;
4168 abi_ulong child_tidptr;
4169 abi_ulong parent_tidptr;
4170 sigset_t sigmask;
4171 } new_thread_info;
4172
4173 static void *clone_func(void *arg)
4174 {
4175 new_thread_info *info = arg;
4176 CPUArchState *env;
4177 CPUState *cpu;
4178 TaskState *ts;
4179
4180 env = info->env;
4181 cpu = ENV_GET_CPU(env);
4182 thread_cpu = cpu;
4183 ts = (TaskState *)env->opaque;
4184 info->tid = gettid();
4185 cpu->host_tid = info->tid;
4186 task_settid(ts);
4187 if (info->child_tidptr)
4188 put_user_u32(info->tid, info->child_tidptr);
4189 if (info->parent_tidptr)
4190 put_user_u32(info->tid, info->parent_tidptr);
4191 /* Enable signals. */
4192 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4193 /* Signal to the parent that we're ready. */
4194 pthread_mutex_lock(&info->mutex);
4195 pthread_cond_broadcast(&info->cond);
4196 pthread_mutex_unlock(&info->mutex);
4197 /* Wait until the parent has finished initializing the TLS state. */
4198 pthread_mutex_lock(&clone_lock);
4199 pthread_mutex_unlock(&clone_lock);
4200 cpu_loop(env);
4201 /* never exits */
4202 return NULL;
4203 }
4204
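/*
 * For reference (the exact flag set depends on the guest libc, so treat
 * this as illustrative): a guest pthread_create() typically issues
 * clone() with CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
 * CLONE_SYSVSEM together with CLONE_SETTLS, CLONE_PARENT_SETTID and
 * CLONE_CHILD_CLEARTID (covered by CLONE_NPTL_FLAGS2), and is served by
 * the pthread-based CLONE_VM path in do_fork() below; a plain fork()
 * typically passes only SIGCHLD, and vfork() adds CLONE_VFORK|CLONE_VM,
 * which do_fork() strips before taking the host fork() path.
 */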
4205 /* do_fork() must return host values and target errnos (unlike most
4206 do_*() functions). */
4207 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4208 abi_ulong parent_tidptr, target_ulong newtls,
4209 abi_ulong child_tidptr)
4210 {
4211 int ret;
4212 TaskState *ts;
4213 CPUArchState *new_env;
4214 unsigned int nptl_flags;
4215 sigset_t sigmask;
4216
4217 /* Emulate vfork() with fork() */
4218 if (flags & CLONE_VFORK)
4219 flags &= ~(CLONE_VFORK | CLONE_VM);
4220
4221 if (flags & CLONE_VM) {
4222 TaskState *parent_ts = (TaskState *)env->opaque;
4223 new_thread_info info;
4224 pthread_attr_t attr;
4225
4226 ts = g_malloc0(sizeof(TaskState));
4227 init_task_state(ts);
4228 /* we create a new CPU instance. */
4229 new_env = cpu_copy(env);
4230 /* Init regs that differ from the parent. */
4231 cpu_clone_regs(new_env, newsp);
4232 new_env->opaque = ts;
4233 ts->bprm = parent_ts->bprm;
4234 ts->info = parent_ts->info;
4235 nptl_flags = flags;
4236 flags &= ~CLONE_NPTL_FLAGS2;
4237
4238 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4239 ts->child_tidptr = child_tidptr;
4240 }
4241
4242 if (nptl_flags & CLONE_SETTLS)
4243 cpu_set_tls (new_env, newtls);
4244
4245 /* Grab a mutex so that thread setup appears atomic. */
4246 pthread_mutex_lock(&clone_lock);
4247
4248 memset(&info, 0, sizeof(info));
4249 pthread_mutex_init(&info.mutex, NULL);
4250 pthread_mutex_lock(&info.mutex);
4251 pthread_cond_init(&info.cond, NULL);
4252 info.env = new_env;
4253 if (nptl_flags & CLONE_CHILD_SETTID)
4254 info.child_tidptr = child_tidptr;
4255 if (nptl_flags & CLONE_PARENT_SETTID)
4256 info.parent_tidptr = parent_tidptr;
4257
4258 ret = pthread_attr_init(&attr);
4259 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4260 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4261 /* It is not safe to deliver signals until the child has finished
4262 initializing, so temporarily block all signals. */
4263 sigfillset(&sigmask);
4264 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4265
4266 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4267 /* TODO: Free new CPU state if thread creation failed. */
4268
4269 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4270 pthread_attr_destroy(&attr);
4271 if (ret == 0) {
4272 /* Wait for the child to initialize. */
4273 pthread_cond_wait(&info.cond, &info.mutex);
4274 ret = info.tid;
4275 if (flags & CLONE_PARENT_SETTID)
4276 put_user_u32(ret, parent_tidptr);
4277 } else {
4278 ret = -1;
4279 }
4280 pthread_mutex_unlock(&info.mutex);
4281 pthread_cond_destroy(&info.cond);
4282 pthread_mutex_destroy(&info.mutex);
4283 pthread_mutex_unlock(&clone_lock);
4284 } else {
4285 /* if there is no CLONE_VM, we consider it a fork */
4286 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4287 return -EINVAL;
4288 fork_start();
4289 ret = fork();
4290 if (ret == 0) {
4291 /* Child Process. */
4292 cpu_clone_regs(env, newsp);
4293 fork_end(1);
4294 /* There is a race condition here. The parent process could
4295 theoretically read the TID in the child process before the child
4296 tid is set. This would require using either ptrace
4297 (not implemented) or having *_tidptr point at a shared memory
4298 mapping. We can't repeat the spinlock hack used above because
4299 the child process gets its own copy of the lock. */
4300 if (flags & CLONE_CHILD_SETTID)
4301 put_user_u32(gettid(), child_tidptr);
4302 if (flags & CLONE_PARENT_SETTID)
4303 put_user_u32(gettid(), parent_tidptr);
4304 ts = (TaskState *)env->opaque;
4305 if (flags & CLONE_SETTLS)
4306 cpu_set_tls (env, newtls);
4307 if (flags & CLONE_CHILD_CLEARTID)
4308 ts->child_tidptr = child_tidptr;
4309 } else {
4310 fork_end(0);
4311 }
4312 }
4313 return ret;
4314 }
4315
4316 /* warning: doesn't handle Linux-specific flags... */
4317 static int target_to_host_fcntl_cmd(int cmd)
4318 {
4319 switch(cmd) {
4320 case TARGET_F_DUPFD:
4321 case TARGET_F_GETFD:
4322 case TARGET_F_SETFD:
4323 case TARGET_F_GETFL:
4324 case TARGET_F_SETFL:
4325 return cmd;
4326 case TARGET_F_GETLK:
4327 return F_GETLK;
4328 case TARGET_F_SETLK:
4329 return F_SETLK;
4330 case TARGET_F_SETLKW:
4331 return F_SETLKW;
4332 case TARGET_F_GETOWN:
4333 return F_GETOWN;
4334 case TARGET_F_SETOWN:
4335 return F_SETOWN;
4336 case TARGET_F_GETSIG:
4337 return F_GETSIG;
4338 case TARGET_F_SETSIG:
4339 return F_SETSIG;
4340 #if TARGET_ABI_BITS == 32
4341 case TARGET_F_GETLK64:
4342 return F_GETLK64;
4343 case TARGET_F_SETLK64:
4344 return F_SETLK64;
4345 case TARGET_F_SETLKW64:
4346 return F_SETLKW64;
4347 #endif
4348 case TARGET_F_SETLEASE:
4349 return F_SETLEASE;
4350 case TARGET_F_GETLEASE:
4351 return F_GETLEASE;
4352 #ifdef F_DUPFD_CLOEXEC
4353 case TARGET_F_DUPFD_CLOEXEC:
4354 return F_DUPFD_CLOEXEC;
4355 #endif
4356 case TARGET_F_NOTIFY:
4357 return F_NOTIFY;
4358 default:
4359 return -TARGET_EINVAL;
4360 }
4361 return -TARGET_EINVAL;
4362 }
4363
4364 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4365 static const bitmask_transtbl flock_tbl[] = {
4366 TRANSTBL_CONVERT(F_RDLCK),
4367 TRANSTBL_CONVERT(F_WRLCK),
4368 TRANSTBL_CONVERT(F_UNLCK),
4369 TRANSTBL_CONVERT(F_EXLCK),
4370 TRANSTBL_CONVERT(F_SHLCK),
4371 { 0, 0, 0, 0 }
4372 };
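/*
 * Illustrative note: the -1 masks generated by TRANSTBL_CONVERT make the
 * bitmask translation behave as an exact-value lookup here, which is
 * what we want because l_type holds an enumerated value (F_RDLCK,
 * F_WRLCK, ...) rather than a set of independent flag bits.
 */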
4373
4374 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4375 {
4376 struct flock fl;
4377 struct target_flock *target_fl;
4378 struct flock64 fl64;
4379 struct target_flock64 *target_fl64;
4380 abi_long ret;
4381 int host_cmd = target_to_host_fcntl_cmd(cmd);
4382
4383 if (host_cmd == -TARGET_EINVAL)
4384 return host_cmd;
4385
4386 switch(cmd) {
4387 case TARGET_F_GETLK:
4388 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4389 return -TARGET_EFAULT;
4390 fl.l_type =
4391 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4392 fl.l_whence = tswap16(target_fl->l_whence);
4393 fl.l_start = tswapal(target_fl->l_start);
4394 fl.l_len = tswapal(target_fl->l_len);
4395 fl.l_pid = tswap32(target_fl->l_pid);
4396 unlock_user_struct(target_fl, arg, 0);
4397 ret = get_errno(fcntl(fd, host_cmd, &fl));
4398 if (ret == 0) {
4399 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4400 return -TARGET_EFAULT;
4401 target_fl->l_type =
4402 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4403 target_fl->l_whence = tswap16(fl.l_whence);
4404 target_fl->l_start = tswapal(fl.l_start);
4405 target_fl->l_len = tswapal(fl.l_len);
4406 target_fl->l_pid = tswap32(fl.l_pid);
4407 unlock_user_struct(target_fl, arg, 1);
4408 }
4409 break;
4410
4411 case TARGET_F_SETLK:
4412 case TARGET_F_SETLKW:
4413 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4414 return -TARGET_EFAULT;
4415 fl.l_type =
4416 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4417 fl.l_whence = tswap16(target_fl->l_whence);
4418 fl.l_start = tswapal(target_fl->l_start);
4419 fl.l_len = tswapal(target_fl->l_len);
4420 fl.l_pid = tswap32(target_fl->l_pid);
4421 unlock_user_struct(target_fl, arg, 0);
4422 ret = get_errno(fcntl(fd, host_cmd, &fl));
4423 break;
4424
4425 case TARGET_F_GETLK64:
4426 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4427 return -TARGET_EFAULT;
4428 fl64.l_type =
4429 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4430 fl64.l_whence = tswap16(target_fl64->l_whence);
4431 fl64.l_start = tswap64(target_fl64->l_start);
4432 fl64.l_len = tswap64(target_fl64->l_len);
4433 fl64.l_pid = tswap32(target_fl64->l_pid);
4434 unlock_user_struct(target_fl64, arg, 0);
4435 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4436 if (ret == 0) {
4437 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4438 return -TARGET_EFAULT;
4439 target_fl64->l_type =
4440 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4441 target_fl64->l_whence = tswap16(fl64.l_whence);
4442 target_fl64->l_start = tswap64(fl64.l_start);
4443 target_fl64->l_len = tswap64(fl64.l_len);
4444 target_fl64->l_pid = tswap32(fl64.l_pid);
4445 unlock_user_struct(target_fl64, arg, 1);
4446 }
4447 break;
4448 case TARGET_F_SETLK64:
4449 case TARGET_F_SETLKW64:
4450 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4451 return -TARGET_EFAULT;
4452 fl64.l_type =
4453 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4454 fl64.l_whence = tswap16(target_fl64->l_whence);
4455 fl64.l_start = tswap64(target_fl64->l_start);
4456 fl64.l_len = tswap64(target_fl64->l_len);
4457 fl64.l_pid = tswap32(target_fl64->l_pid);
4458 unlock_user_struct(target_fl64, arg, 0);
4459 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4460 break;
4461
4462 case TARGET_F_GETFL:
4463 ret = get_errno(fcntl(fd, host_cmd, arg));
4464 if (ret >= 0) {
4465 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4466 }
4467 break;
4468
4469 case TARGET_F_SETFL:
4470 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4471 break;
4472
4473 case TARGET_F_SETOWN:
4474 case TARGET_F_GETOWN:
4475 case TARGET_F_SETSIG:
4476 case TARGET_F_GETSIG:
4477 case TARGET_F_SETLEASE:
4478 case TARGET_F_GETLEASE:
4479 ret = get_errno(fcntl(fd, host_cmd, arg));
4480 break;
4481
4482 default:
4483 ret = get_errno(fcntl(fd, cmd, arg));
4484 break;
4485 }
4486 return ret;
4487 }
4488
4489 #ifdef USE_UID16
4490
4491 static inline int high2lowuid(int uid)
4492 {
4493 if (uid > 65535)
4494 return 65534;
4495 else
4496 return uid;
4497 }
4498
4499 static inline int high2lowgid(int gid)
4500 {
4501 if (gid > 65535)
4502 return 65534;
4503 else
4504 return gid;
4505 }
4506
4507 static inline int low2highuid(int uid)
4508 {
4509 if ((int16_t)uid == -1)
4510 return -1;
4511 else
4512 return uid;
4513 }
4514
4515 static inline int low2highgid(int gid)
4516 {
4517 if ((int16_t)gid == -1)
4518 return -1;
4519 else
4520 return gid;
4521 }
4522 static inline int tswapid(int id)
4523 {
4524 return tswap16(id);
4525 }
4526 #else /* !USE_UID16 */
4527 static inline int high2lowuid(int uid)
4528 {
4529 return uid;
4530 }
4531 static inline int high2lowgid(int gid)
4532 {
4533 return gid;
4534 }
4535 static inline int low2highuid(int uid)
4536 {
4537 return uid;
4538 }
4539 static inline int low2highgid(int gid)
4540 {
4541 return gid;
4542 }
4543 static inline int tswapid(int id)
4544 {
4545 return tswap32(id);
4546 }
4547 #endif /* USE_UID16 */
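/*
 * Example (illustrative): with USE_UID16 a host uid of 100000 is clamped
 * by high2lowuid() to 65534, the traditional overflow uid for 16-bit
 * interfaces, while low2highuid() preserves the 16-bit -1 sentinel that
 * means "leave unchanged" in calls such as setresuid(-1, x, -1).
 */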
4548
4549 void syscall_init(void)
4550 {
4551 IOCTLEntry *ie;
4552 const argtype *arg_type;
4553 int size;
4554 int i;
4555
4556 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4557 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4558 #include "syscall_types.h"
4559 #undef STRUCT
4560 #undef STRUCT_SPECIAL
4561
4562 /* Build the target_to_host_errno_table[] from
4563 * host_to_target_errno_table[]. */
4564 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4565 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4566 }
4567
4568 /* We patch the ioctl size if necessary. We rely on the fact that
4569 no ioctl has all bits set to '1' in the size field. */
4570 ie = ioctl_entries;
4571 while (ie->target_cmd != 0) {
4572 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4573 TARGET_IOC_SIZEMASK) {
4574 arg_type = ie->arg_type;
4575 if (arg_type[0] != TYPE_PTR) {
4576 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4577 ie->target_cmd);
4578 exit(1);
4579 }
4580 arg_type++;
4581 size = thunk_type_size(arg_type, 0);
4582 ie->target_cmd = (ie->target_cmd &
4583 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4584 (size << TARGET_IOC_SIZESHIFT);
4585 }
4586
4587 /* automatic consistency check if same arch */
4588 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4589 (defined(__x86_64__) && defined(TARGET_X86_64))
4590 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4591 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4592 ie->name, ie->target_cmd, ie->host_cmd);
4593 }
4594 #endif
4595 ie++;
4596 }
4597 }
4598
4599 #if TARGET_ABI_BITS == 32
4600 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4601 {
4602 #ifdef TARGET_WORDS_BIGENDIAN
4603 return ((uint64_t)word0 << 32) | word1;
4604 #else
4605 return ((uint64_t)word1 << 32) | word0;
4606 #endif
4607 }
4608 #else /* TARGET_ABI_BITS == 32 */
4609 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4610 {
4611 return word0;
4612 }
4613 #endif /* TARGET_ABI_BITS != 32 */
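/*
 * Example (illustrative): a 32-bit guest passing the 64-bit offset
 * 0x0000000123456789 splits it into a register pair; target_offset64()
 * above reassembles it, taking word0 as the high half on big-endian
 * targets and word1 as the high half on little-endian ones.  On 64-bit
 * guests the offset arrives whole in word0.
 */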
4614
4615 #ifdef TARGET_NR_truncate64
4616 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4617 abi_long arg2,
4618 abi_long arg3,
4619 abi_long arg4)
4620 {
4621 if (regpairs_aligned(cpu_env)) {
4622 arg2 = arg3;
4623 arg3 = arg4;
4624 }
4625 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4626 }
4627 #endif
4628
4629 #ifdef TARGET_NR_ftruncate64
4630 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4631 abi_long arg2,
4632 abi_long arg3,
4633 abi_long arg4)
4634 {
4635 if (regpairs_aligned(cpu_env)) {
4636 arg2 = arg3;
4637 arg3 = arg4;
4638 }
4639 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4640 }
4641 #endif
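/*
 * Illustrative note: regpairs_aligned() is true for ABIs that pass
 * 64-bit values in aligned (even/odd) register pairs, e.g. 32-bit ARM
 * EABI.  In that case the argument slot after the path/fd is only a
 * pad and the real offset words arrive one argument later, hence the
 * arg2/arg3 reshuffle above.
 */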
4642
4643 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4644 abi_ulong target_addr)
4645 {
4646 struct target_timespec *target_ts;
4647
4648 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4649 return -TARGET_EFAULT;
4650 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4651 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4652 unlock_user_struct(target_ts, target_addr, 0);
4653 return 0;
4654 }
4655
4656 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4657 struct timespec *host_ts)
4658 {
4659 struct target_timespec *target_ts;
4660
4661 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4662 return -TARGET_EFAULT;
4663 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4664 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4665 unlock_user_struct(target_ts, target_addr, 1);
4666 return 0;
4667 }
4668
4669 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4670 abi_ulong target_addr)
4671 {
4672 struct target_itimerspec *target_itspec;
4673
4674 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
4675 return -TARGET_EFAULT;
4676 }
4677
4678 host_itspec->it_interval.tv_sec =
4679 tswapal(target_itspec->it_interval.tv_sec);
4680 host_itspec->it_interval.tv_nsec =
4681 tswapal(target_itspec->it_interval.tv_nsec);
4682 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
4683 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
4684
4685 unlock_user_struct(target_itspec, target_addr, 1);
4686 return 0;
4687 }
4688
4689 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
4690 struct itimerspec *host_its)
4691 {
4692 struct target_itimerspec *target_itspec;
4693
4694 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
4695 return -TARGET_EFAULT;
4696 }
4697
4698 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
4699 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
4700
4701 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
4702 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
4703
4704 unlock_user_struct(target_itspec, target_addr, 0);
4705 return 0;
4706 }
4707
4708 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4709 static inline abi_long host_to_target_stat64(void *cpu_env,
4710 abi_ulong target_addr,
4711 struct stat *host_st)
4712 {
4713 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4714 if (((CPUARMState *)cpu_env)->eabi) {
4715 struct target_eabi_stat64 *target_st;
4716
4717 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4718 return -TARGET_EFAULT;
4719 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4720 __put_user(host_st->st_dev, &target_st->st_dev);
4721 __put_user(host_st->st_ino, &target_st->st_ino);
4722 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4723 __put_user(host_st->st_ino, &target_st->__st_ino);
4724 #endif
4725 __put_user(host_st->st_mode, &target_st->st_mode);
4726 __put_user(host_st->st_nlink, &target_st->st_nlink);
4727 __put_user(host_st->st_uid, &target_st->st_uid);
4728 __put_user(host_st->st_gid, &target_st->st_gid);
4729 __put_user(host_st->st_rdev, &target_st->st_rdev);
4730 __put_user(host_st->st_size, &target_st->st_size);
4731 __put_user(host_st->st_blksize, &target_st->st_blksize);
4732 __put_user(host_st->st_blocks, &target_st->st_blocks);
4733 __put_user(host_st->st_atime, &target_st->target_st_atime);
4734 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4735 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4736 unlock_user_struct(target_st, target_addr, 1);
4737 } else
4738 #endif
4739 {
4740 #if defined(TARGET_HAS_STRUCT_STAT64)
4741 struct target_stat64 *target_st;
4742 #else
4743 struct target_stat *target_st;
4744 #endif
4745
4746 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4747 return -TARGET_EFAULT;
4748 memset(target_st, 0, sizeof(*target_st));
4749 __put_user(host_st->st_dev, &target_st->st_dev);
4750 __put_user(host_st->st_ino, &target_st->st_ino);
4751 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4752 __put_user(host_st->st_ino, &target_st->__st_ino);
4753 #endif
4754 __put_user(host_st->st_mode, &target_st->st_mode);
4755 __put_user(host_st->st_nlink, &target_st->st_nlink);
4756 __put_user(host_st->st_uid, &target_st->st_uid);
4757 __put_user(host_st->st_gid, &target_st->st_gid);
4758 __put_user(host_st->st_rdev, &target_st->st_rdev);
4759 /* XXX: better use of kernel struct */
4760 __put_user(host_st->st_size, &target_st->st_size);
4761 __put_user(host_st->st_blksize, &target_st->st_blksize);
4762 __put_user(host_st->st_blocks, &target_st->st_blocks);
4763 __put_user(host_st->st_atime, &target_st->target_st_atime);
4764 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4765 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4766 unlock_user_struct(target_st, target_addr, 1);
4767 }
4768
4769 return 0;
4770 }
4771 #endif
4772
4773 /* ??? Using host futex calls even when target atomic operations
4774 are not really atomic probably breaks things. However, implementing
4775 futexes locally would make futexes shared between multiple processes
4776 tricky. They are probably useless anyway, because guest atomic
4777 operations won't work either. */
4778 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4779 target_ulong uaddr2, int val3)
4780 {
4781 struct timespec ts, *pts;
4782 int base_op;
4783
4784 /* ??? We assume FUTEX_* constants are the same on both host
4785 and target. */
4786 #ifdef FUTEX_CMD_MASK
4787 base_op = op & FUTEX_CMD_MASK;
4788 #else
4789 base_op = op;
4790 #endif
4791 switch (base_op) {
4792 case FUTEX_WAIT:
4793 case FUTEX_WAIT_BITSET:
4794 if (timeout) {
4795 pts = &ts;
4796 target_to_host_timespec(pts, timeout);
4797 } else {
4798 pts = NULL;
4799 }
4800 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4801 pts, NULL, val3));
4802 case FUTEX_WAKE:
4803 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4804 case FUTEX_FD:
4805 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4806 case FUTEX_REQUEUE:
4807 case FUTEX_CMP_REQUEUE:
4808 case FUTEX_WAKE_OP:
4809 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4810 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4811 But the prototype takes a `struct timespec *'; insert casts
4812 to satisfy the compiler. We do not need to tswap TIMEOUT
4813 since it's not compared to guest memory. */
4814 pts = (struct timespec *)(uintptr_t) timeout;
4815 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4816 g2h(uaddr2),
4817 (base_op == FUTEX_CMP_REQUEUE
4818 ? tswap32(val3)
4819 : val3)));
4820 default:
4821 return -TARGET_ENOSYS;
4822 }
4823 }
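/*
 * Illustrative note: only the FUTEX_WAIT/FUTEX_WAIT_BITSET value is
 * passed through tswap32() above, because the kernel compares it with
 * the futex word stored in guest memory, which is in guest byte order;
 * val3 gets the same treatment for FUTEX_CMP_REQUEUE.  For FUTEX_WAKE
 * and friends 'val' is just a count and needs no conversion.
 */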
4824
4825 /* Map host to target signal numbers for the wait family of syscalls.
4826 Assume all other status bits are the same. */
4827 int host_to_target_waitstatus(int status)
4828 {
4829 if (WIFSIGNALED(status)) {
4830 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4831 }
4832 if (WIFSTOPPED(status)) {
4833 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4834 | (status & 0xff);
4835 }
4836 return status;
4837 }
4838
4839 static int relstr_to_int(const char *s)
4840 {
4841 /* Convert a uname release string like "2.6.18" to an integer
4842 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.)
4843 */
4844 int i, n, tmp;
4845
4846 tmp = 0;
4847 for (i = 0; i < 3; i++) {
4848 n = 0;
4849 while (*s >= '0' && *s <= '9') {
4850 n *= 10;
4851 n += *s - '0';
4852 s++;
4853 }
4854 tmp = (tmp << 8) + n;
4855 if (*s == '.') {
4856 s++;
4857 }
4858 }
4859 return tmp;
4860 }
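/*
 * Examples (illustrative): relstr_to_int("2.6.18") == 0x020612 and
 * relstr_to_int("3.10.0") == 0x030a00; anything after the third numeric
 * component, such as an "-rc1" suffix, is ignored.
 */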
4861
4862 int get_osversion(void)
4863 {
4864 static int osversion;
4865 struct new_utsname buf;
4866 const char *s;
4867
4868 if (osversion)
4869 return osversion;
4870 if (qemu_uname_release && *qemu_uname_release) {
4871 s = qemu_uname_release;
4872 } else {
4873 if (sys_uname(&buf))
4874 return 0;
4875 s = buf.release;
4876 }
4877 osversion = relstr_to_int(s);
4878 return osversion;
4879 }
4880
4881 void init_qemu_uname_release(void)
4882 {
4883 /* Initialize qemu_uname_release for later use.
4884 * If the host kernel is too old and the user hasn't asked for
4885 * a specific fake version number, we might want to fake a minimum
4886 * target kernel version.
4887 */
4888 #ifdef UNAME_MINIMUM_RELEASE
4889 struct new_utsname buf;
4890
4891 if (qemu_uname_release && *qemu_uname_release) {
4892 return;
4893 }
4894
4895 if (sys_uname(&buf)) {
4896 return;
4897 }
4898
4899 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) {
4900 qemu_uname_release = UNAME_MINIMUM_RELEASE;
4901 }
4902 #endif
4903 }
4904
4905 static int open_self_maps(void *cpu_env, int fd)
4906 {
4907 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4908 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4909 #endif
4910 FILE *fp;
4911 char *line = NULL;
4912 size_t len = 0;
4913 ssize_t read;
4914
4915 fp = fopen("/proc/self/maps", "r");
4916 if (fp == NULL) {
4917 return -EACCES;
4918 }
4919
4920 while ((read = getline(&line, &len, fp)) != -1) {
4921 int fields, dev_maj, dev_min, inode;
4922 uint64_t min, max, offset;
4923 char flag_r, flag_w, flag_x, flag_p;
4924 char path[512] = "";
4925 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4926 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4927 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4928
4929 if ((fields < 10) || (fields > 11)) {
4930 continue;
4931 }
4932 if (!strncmp(path, "[stack]", 7)) {
4933 continue;
4934 }
4935 if (h2g_valid(min) && h2g_valid(max)) {
4936 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4937 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
4938 h2g(min), h2g(max), flag_r, flag_w,
4939 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4940 path[0] ? " " : "", path);
4941 }
4942 }
4943
4944 free(line);
4945 fclose(fp);
4946
4947 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4948 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4949 (unsigned long long)ts->info->stack_limit,
4950 (unsigned long long)(ts->info->start_stack +
4951 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
4952 (unsigned long long)0);
4953 #endif
4954
4955 return 0;
4956 }
4957
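/*
 * Synthesize /proc/self/stat for the guest.  Only the fields a guest is
 * likely to inspect are filled in: field 0 (pid), field 1 (comm) and
 * field 27 (startstack, as a guest address); the remaining fields are
 * reported as 0.  (Field numbers are 0-based; see proc(5).)
 */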
4958 static int open_self_stat(void *cpu_env, int fd)
4959 {
4960 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4961 abi_ulong start_stack = ts->info->start_stack;
4962 int i;
4963
4964 for (i = 0; i < 44; i++) {
4965 char buf[128];
4966 int len;
4967 uint64_t val = 0;
4968
4969 if (i == 0) {
4970 /* pid */
4971 val = getpid();
4972 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4973 } else if (i == 1) {
4974 /* app name */
4975 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4976 } else if (i == 27) {
4977 /* stack bottom */
4978 val = start_stack;
4979 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4980 } else {
4981 /* for the rest, there is MasterCard */
4982 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
4983 }
4984
4985 len = strlen(buf);
4986 if (write(fd, buf, len) != len) {
4987 return -1;
4988 }
4989 }
4990
4991 return 0;
4992 }
4993
4994 static int open_self_auxv(void *cpu_env, int fd)
4995 {
4996 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4997 abi_ulong auxv = ts->info->saved_auxv;
4998 abi_ulong len = ts->info->auxv_len;
4999 char *ptr;
5000
5001 /*
5002 * The auxiliary vector is stored on the target process's stack.
5003 * Read the whole auxv vector and copy it to the file.
5004 */
5005 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5006 if (ptr != NULL) {
5007 while (len > 0) {
5008 ssize_t r;
5009 r = write(fd, ptr, len);
5010 if (r <= 0) {
5011 break;
5012 }
5013 len -= r;
5014 ptr += r;
5015 }
5016 lseek(fd, 0, SEEK_SET);
5017 unlock_user(ptr, auxv, len);
5018 }
5019
5020 return 0;
5021 }
5022
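/*
 * Examples (illustrative): is_proc_myself("/proc/self/maps", "maps") and
 * is_proc_myself("/proc/<our pid>/maps", "maps") both return 1, while
 * paths naming another pid, a different entry, or anything outside
 * /proc return 0.
 */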
5023 static int is_proc_myself(const char *filename, const char *entry)
5024 {
5025 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5026 filename += strlen("/proc/");
5027 if (!strncmp(filename, "self/", strlen("self/"))) {
5028 filename += strlen("self/");
5029 } else if (*filename >= '1' && *filename <= '9') {
5030 char myself[80];
5031 snprintf(myself, sizeof(myself), "%d/", getpid());
5032 if (!strncmp(filename, myself, strlen(myself))) {
5033 filename += strlen(myself);
5034 } else {
5035 return 0;
5036 }
5037 } else {
5038 return 0;
5039 }
5040 if (!strcmp(filename, entry)) {
5041 return 1;
5042 }
5043 }
5044 return 0;
5045 }
5046
5047 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5048 static int is_proc(const char *filename, const char *entry)
5049 {
5050 return strcmp(filename, entry) == 0;
5051 }
5052
5053 static int open_net_route(void *cpu_env, int fd)
5054 {
5055 FILE *fp;
5056 char *line = NULL;
5057 size_t len = 0;
5058 ssize_t read;
5059
5060 fp = fopen("/proc/net/route", "r");
5061 if (fp == NULL) {
5062 return -EACCES;
5063 }
5064
5065 /* read header */
5066
5067 read = getline(&line, &len, fp);
5068 dprintf(fd, "%s", line);
5069
5070 /* read routes */
5071
5072 while ((read = getline(&line, &len, fp)) != -1) {
5073 char iface[16];
5074 uint32_t dest, gw, mask;
5075 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5076 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5077 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5078 &mask, &mtu, &window, &irtt);
5079 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5080 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5081 metric, tswap32(mask), mtu, window, irtt);
5082 }
5083
5084 free(line);
5085 fclose(fp);
5086
5087 return 0;
5088 }
5089 #endif
5090
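/*
 * Note: for the faked /proc entries below, do_open() returns a real host
 * fd referring to an anonymous temporary file (mkstemp() followed by
 * unlink()) that has been pre-filled by the matching fill() callback, so
 * subsequent read()/lseek()/close() calls need no special casing.
 */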
5091 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5092 {
5093 struct fake_open {
5094 const char *filename;
5095 int (*fill)(void *cpu_env, int fd);
5096 int (*cmp)(const char *s1, const char *s2);
5097 };
5098 const struct fake_open *fake_open;
5099 static const struct fake_open fakes[] = {
5100 { "maps", open_self_maps, is_proc_myself },
5101 { "stat", open_self_stat, is_proc_myself },
5102 { "auxv", open_self_auxv, is_proc_myself },
5103 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5104 { "/proc/net/route", open_net_route, is_proc },
5105 #endif
5106 { NULL, NULL, NULL }
5107 };
5108
5109 for (fake_open = fakes; fake_open->filename; fake_open++) {
5110 if (fake_open->cmp(pathname, fake_open->filename)) {
5111 break;
5112 }
5113 }
5114
5115 if (fake_open->filename) {
5116 const char *tmpdir;
5117 char filename[PATH_MAX];
5118 int fd, r;
5119
5120 /* create a temporary file to hold the faked contents */
5121 tmpdir = getenv("TMPDIR");
5122 if (!tmpdir)
5123 tmpdir = "/tmp";
5124 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5125 fd = mkstemp(filename);
5126 if (fd < 0) {
5127 return fd;
5128 }
5129 unlink(filename);
5130
5131 if ((r = fake_open->fill(cpu_env, fd))) {
5132 close(fd);
5133 return r;
5134 }
5135 lseek(fd, 0, SEEK_SET);
5136
5137 return fd;
5138 }
5139
5140 return get_errno(open(path(pathname), flags, mode));
5141 }
5142
5143 /* do_syscall() should always have a single exit point at the end so
5144 that actions, such as logging of syscall results, can be performed.
5145 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5146 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5147 abi_long arg2, abi_long arg3, abi_long arg4,
5148 abi_long arg5, abi_long arg6, abi_long arg7,
5149 abi_long arg8)
5150 {
5151 CPUState *cpu = ENV_GET_CPU(cpu_env);
5152 abi_long ret;
5153 struct stat st;
5154 struct statfs stfs;
5155 void *p;
5156
5157 #ifdef DEBUG
5158 gemu_log("syscall %d", num);
5159 #endif
5160 if(do_strace)
5161 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5162
5163 switch(num) {
5164 case TARGET_NR_exit:
5165 /* In old applications this may be used to implement _exit(2).
5166 However, in threaded applications it is used for thread termination,
5167 and _exit_group is used for application termination.
5168 Do thread termination if we have more than one thread. */
5169 /* FIXME: This probably breaks if a signal arrives. We should probably
5170 be disabling signals. */
5171 if (CPU_NEXT(first_cpu)) {
5172 TaskState *ts;
5173
5174 cpu_list_lock();
5175 /* Remove the CPU from the list. */
5176 QTAILQ_REMOVE(&cpus, cpu, node);
5177 cpu_list_unlock();
5178 ts = ((CPUArchState *)cpu_env)->opaque;
5179 if (ts->child_tidptr) {
5180 put_user_u32(0, ts->child_tidptr);
5181 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5182 NULL, NULL, 0);
5183 }
5184 thread_cpu = NULL;
5185 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5186 g_free(ts);
5187 pthread_exit(NULL);
5188 }
5189 #ifdef TARGET_GPROF
5190 _mcleanup();
5191 #endif
5192 gdb_exit(cpu_env, arg1);
5193 _exit(arg1);
5194 ret = 0; /* avoid warning */
5195 break;
5196 case TARGET_NR_read:
5197 if (arg3 == 0)
5198 ret = 0;
5199 else {
5200 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5201 goto efault;
5202 ret = get_errno(read(arg1, p, arg3));
5203 unlock_user(p, arg2, ret);
5204 }
5205 break;
5206 case TARGET_NR_write:
5207 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5208 goto efault;
5209 ret = get_errno(write(arg1, p, arg3));
5210 unlock_user(p, arg2, 0);
5211 break;
5212 case TARGET_NR_open:
5213 if (!(p = lock_user_string(arg1)))
5214 goto efault;
5215 ret = get_errno(do_open(cpu_env, p,
5216 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5217 arg3));
5218 unlock_user(p, arg1, 0);
5219 break;
5220 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5221 case TARGET_NR_openat:
5222 if (!(p = lock_user_string(arg2)))
5223 goto efault;
5224 ret = get_errno(sys_openat(arg1,
5225 path(p),
5226 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5227 arg4));
5228 unlock_user(p, arg2, 0);
5229 break;
5230 #endif
5231 case TARGET_NR_close:
5232 ret = get_errno(close(arg1));
5233 break;
5234 case TARGET_NR_brk:
5235 ret = do_brk(arg1);
5236 break;
5237 case TARGET_NR_fork:
5238 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5239 break;
5240 #ifdef TARGET_NR_waitpid
5241 case TARGET_NR_waitpid:
5242 {
5243 int status;
5244 ret = get_errno(waitpid(arg1, &status, arg3));
5245 if (!is_error(ret) && arg2 && ret
5246 && put_user_s32(host_to_target_waitstatus(status), arg2))
5247 goto efault;
5248 }
5249 break;
5250 #endif
5251 #ifdef TARGET_NR_waitid
5252 case TARGET_NR_waitid:
5253 {
5254 siginfo_t info;
5255 info.si_pid = 0;
5256 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5257 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5258 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5259 goto efault;
5260 host_to_target_siginfo(p, &info);
5261 unlock_user(p, arg3, sizeof(target_siginfo_t));
5262 }
5263 }
5264 break;
5265 #endif
5266 #ifdef TARGET_NR_creat /* not on alpha */
5267 case TARGET_NR_creat:
5268 if (!(p = lock_user_string(arg1)))
5269 goto efault;
5270 ret = get_errno(creat(p, arg2));
5271 unlock_user(p, arg1, 0);
5272 break;
5273 #endif
5274 case TARGET_NR_link:
5275 {
5276 void * p2;
5277 p = lock_user_string(arg1);
5278 p2 = lock_user_string(arg2);
5279 if (!p || !p2)
5280 ret = -TARGET_EFAULT;
5281 else
5282 ret = get_errno(link(p, p2));
5283 unlock_user(p2, arg2, 0);
5284 unlock_user(p, arg1, 0);
5285 }
5286 break;
5287 #if defined(TARGET_NR_linkat)
5288 case TARGET_NR_linkat:
5289 {
5290 void * p2 = NULL;
5291 if (!arg2 || !arg4)
5292 goto efault;
5293 p = lock_user_string(arg2);
5294 p2 = lock_user_string(arg4);
5295 if (!p || !p2)
5296 ret = -TARGET_EFAULT;
5297 else
5298 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5299 unlock_user(p, arg2, 0);
5300 unlock_user(p2, arg4, 0);
5301 }
5302 break;
5303 #endif
5304 case TARGET_NR_unlink:
5305 if (!(p = lock_user_string(arg1)))
5306 goto efault;
5307 ret = get_errno(unlink(p));
5308 unlock_user(p, arg1, 0);
5309 break;
5310 #if defined(TARGET_NR_unlinkat)
5311 case TARGET_NR_unlinkat:
5312 if (!(p = lock_user_string(arg2)))
5313 goto efault;
5314 ret = get_errno(unlinkat(arg1, p, arg3));
5315 unlock_user(p, arg2, 0);
5316 break;
5317 #endif
5318 case TARGET_NR_execve:
5319 {
5320 char **argp, **envp;
5321 int argc, envc;
5322 abi_ulong gp;
5323 abi_ulong guest_argp;
5324 abi_ulong guest_envp;
5325 abi_ulong addr;
5326 char **q;
5327 int total_size = 0;
5328
5329 argc = 0;
5330 guest_argp = arg2;
5331 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5332 if (get_user_ual(addr, gp))
5333 goto efault;
5334 if (!addr)
5335 break;
5336 argc++;
5337 }
5338 envc = 0;
5339 guest_envp = arg3;
5340 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5341 if (get_user_ual(addr, gp))
5342 goto efault;
5343 if (!addr)
5344 break;
5345 envc++;
5346 }
5347
5348 argp = alloca((argc + 1) * sizeof(void *));
5349 envp = alloca((envc + 1) * sizeof(void *));
5350
5351 for (gp = guest_argp, q = argp; gp;
5352 gp += sizeof(abi_ulong), q++) {
5353 if (get_user_ual(addr, gp))
5354 goto execve_efault;
5355 if (!addr)
5356 break;
5357 if (!(*q = lock_user_string(addr)))
5358 goto execve_efault;
5359 total_size += strlen(*q) + 1;
5360 }
5361 *q = NULL;
5362
5363 for (gp = guest_envp, q = envp; gp;
5364 gp += sizeof(abi_ulong), q++) {
5365 if (get_user_ual(addr, gp))
5366 goto execve_efault;
5367 if (!addr)
5368 break;
5369 if (!(*q = lock_user_string(addr)))
5370 goto execve_efault;
5371 total_size += strlen(*q) + 1;
5372 }
5373 *q = NULL;
5374
5375 /* This case will not be caught by the host's execve() if its
5376 page size is bigger than the target's. */
5377 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5378 ret = -TARGET_E2BIG;
5379 goto execve_end;
5380 }
5381 if (!(p = lock_user_string(arg1)))
5382 goto execve_efault;
5383 ret = get_errno(execve(p, argp, envp));
5384 unlock_user(p, arg1, 0);
5385
5386 goto execve_end;
5387
5388 execve_efault:
5389 ret = -TARGET_EFAULT;
5390
5391 execve_end:
5392 for (gp = guest_argp, q = argp; *q;
5393 gp += sizeof(abi_ulong), q++) {
5394 if (get_user_ual(addr, gp)
5395 || !addr)
5396 break;
5397 unlock_user(*q, addr, 0);
5398 }
5399 for (gp = guest_envp, q = envp; *q;
5400 gp += sizeof(abi_ulong), q++) {
5401 if (get_user_ual(addr, gp)
5402 || !addr)
5403 break;
5404 unlock_user(*q, addr, 0);
5405 }
5406 }
5407 break;
5408 case TARGET_NR_chdir:
5409 if (!(p = lock_user_string(arg1)))
5410 goto efault;
5411 ret = get_errno(chdir(p));
5412 unlock_user(p, arg1, 0);
5413 break;
5414 #ifdef TARGET_NR_time
5415 case TARGET_NR_time:
5416 {
5417 time_t host_time;
5418 ret = get_errno(time(&host_time));
5419 if (!is_error(ret)
5420 && arg1
5421 && put_user_sal(host_time, arg1))
5422 goto efault;
5423 }
5424 break;
5425 #endif
5426 case TARGET_NR_mknod:
5427 if (!(p = lock_user_string(arg1)))
5428 goto efault;
5429 ret = get_errno(mknod(p, arg2, arg3));
5430 unlock_user(p, arg1, 0);
5431 break;
5432 #if defined(TARGET_NR_mknodat)
5433 case TARGET_NR_mknodat:
5434 if (!(p = lock_user_string(arg2)))
5435 goto efault;
5436 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5437 unlock_user(p, arg2, 0);
5438 break;
5439 #endif
5440 case TARGET_NR_chmod:
5441 if (!(p = lock_user_string(arg1)))
5442 goto efault;
5443 ret = get_errno(chmod(p, arg2));
5444 unlock_user(p, arg1, 0);
5445 break;
5446 #ifdef TARGET_NR_break
5447 case TARGET_NR_break:
5448 goto unimplemented;
5449 #endif
5450 #ifdef TARGET_NR_oldstat
5451 case TARGET_NR_oldstat:
5452 goto unimplemented;
5453 #endif
5454 case TARGET_NR_lseek:
5455 ret = get_errno(lseek(arg1, arg2, arg3));
5456 break;
5457 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5458 /* Alpha specific */
5459 case TARGET_NR_getxpid:
5460 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5461 ret = get_errno(getpid());
5462 break;
5463 #endif
5464 #ifdef TARGET_NR_getpid
5465 case TARGET_NR_getpid:
5466 ret = get_errno(getpid());
5467 break;
5468 #endif
5469 case TARGET_NR_mount:
5470 {
5471 /* need to look at the data field */
5472 void *p2, *p3;
5473 p = lock_user_string(arg1);
5474 p2 = lock_user_string(arg2);
5475 p3 = lock_user_string(arg3);
5476 if (!p || !p2 || !p3)
5477 ret = -TARGET_EFAULT;
5478 else {
5479 /* FIXME - arg5 should be locked, but it isn't clear how to
5480 * do that since it's not guaranteed to be a NULL-terminated
5481 * string.
5482 */
5483 if ( ! arg5 )
5484 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5485 else
5486 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5487 }
5488 unlock_user(p, arg1, 0);
5489 unlock_user(p2, arg2, 0);
5490 unlock_user(p3, arg3, 0);
5491 break;
5492 }
5493 #ifdef TARGET_NR_umount
5494 case TARGET_NR_umount:
5495 if (!(p = lock_user_string(arg1)))
5496 goto efault;
5497 ret = get_errno(umount(p));
5498 unlock_user(p, arg1, 0);
5499 break;
5500 #endif
5501 #ifdef TARGET_NR_stime /* not on alpha */
5502 case TARGET_NR_stime:
5503 {
5504 time_t host_time;
5505 if (get_user_sal(host_time, arg1))
5506 goto efault;
5507 ret = get_errno(stime(&host_time));
5508 }
5509 break;
5510 #endif
5511 case TARGET_NR_ptrace:
5512 goto unimplemented;
5513 #ifdef TARGET_NR_alarm /* not on alpha */
5514 case TARGET_NR_alarm:
5515 ret = alarm(arg1);
5516 break;
5517 #endif
5518 #ifdef TARGET_NR_oldfstat
5519 case TARGET_NR_oldfstat:
5520 goto unimplemented;
5521 #endif
5522 #ifdef TARGET_NR_pause /* not on alpha */
5523 case TARGET_NR_pause:
5524 ret = get_errno(pause());
5525 break;
5526 #endif
5527 #ifdef TARGET_NR_utime
5528 case TARGET_NR_utime:
5529 {
5530 struct utimbuf tbuf, *host_tbuf;
5531 struct target_utimbuf *target_tbuf;
5532 if (arg2) {
5533 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5534 goto efault;
5535 tbuf.actime = tswapal(target_tbuf->actime);
5536 tbuf.modtime = tswapal(target_tbuf->modtime);
5537 unlock_user_struct(target_tbuf, arg2, 0);
5538 host_tbuf = &tbuf;
5539 } else {
5540 host_tbuf = NULL;
5541 }
5542 if (!(p = lock_user_string(arg1)))
5543 goto efault;
5544 ret = get_errno(utime(p, host_tbuf));
5545 unlock_user(p, arg1, 0);
5546 }
5547 break;
5548 #endif
5549 case TARGET_NR_utimes:
5550 {
5551 struct timeval *tvp, tv[2];
5552 if (arg2) {
5553 if (copy_from_user_timeval(&tv[0], arg2)
5554 || copy_from_user_timeval(&tv[1],
5555 arg2 + sizeof(struct target_timeval)))
5556 goto efault;
5557 tvp = tv;
5558 } else {
5559 tvp = NULL;
5560 }
5561 if (!(p = lock_user_string(arg1)))
5562 goto efault;
5563 ret = get_errno(utimes(p, tvp));
5564 unlock_user(p, arg1, 0);
5565 }
5566 break;
5567 #if defined(TARGET_NR_futimesat)
5568 case TARGET_NR_futimesat:
5569 {
5570 struct timeval *tvp, tv[2];
5571 if (arg3) {
5572 if (copy_from_user_timeval(&tv[0], arg3)
5573 || copy_from_user_timeval(&tv[1],
5574 arg3 + sizeof(struct target_timeval)))
5575 goto efault;
5576 tvp = tv;
5577 } else {
5578 tvp = NULL;
5579 }
5580 if (!(p = lock_user_string(arg2)))
5581 goto efault;
5582 ret = get_errno(futimesat(arg1, path(p), tvp));
5583 unlock_user(p, arg2, 0);
5584 }
5585 break;
5586 #endif
5587 #ifdef TARGET_NR_stty
5588 case TARGET_NR_stty:
5589 goto unimplemented;
5590 #endif
5591 #ifdef TARGET_NR_gtty
5592 case TARGET_NR_gtty:
5593 goto unimplemented;
5594 #endif
5595 case TARGET_NR_access:
5596 if (!(p = lock_user_string(arg1)))
5597 goto efault;
5598 ret = get_errno(access(path(p), arg2));
5599 unlock_user(p, arg1, 0);
5600 break;
5601 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5602 case TARGET_NR_faccessat:
5603 if (!(p = lock_user_string(arg2)))
5604 goto efault;
5605 ret = get_errno(faccessat(arg1, p, arg3, 0));
5606 unlock_user(p, arg2, 0);
5607 break;
5608 #endif
5609 #ifdef TARGET_NR_nice /* not on alpha */
5610 case TARGET_NR_nice:
5611 ret = get_errno(nice(arg1));
5612 break;
5613 #endif
5614 #ifdef TARGET_NR_ftime
5615 case TARGET_NR_ftime:
5616 goto unimplemented;
5617 #endif
5618 case TARGET_NR_sync:
5619 sync();
5620 ret = 0;
5621 break;
5622 case TARGET_NR_kill:
5623 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5624 break;
5625 case TARGET_NR_rename:
5626 {
5627 void *p2;
5628 p = lock_user_string(arg1);
5629 p2 = lock_user_string(arg2);
5630 if (!p || !p2)
5631 ret = -TARGET_EFAULT;
5632 else
5633 ret = get_errno(rename(p, p2));
5634 unlock_user(p2, arg2, 0);
5635 unlock_user(p, arg1, 0);
5636 }
5637 break;
5638 #if defined(TARGET_NR_renameat)
5639 case TARGET_NR_renameat:
5640 {
5641 void *p2;
5642 p = lock_user_string(arg2);
5643 p2 = lock_user_string(arg4);
5644 if (!p || !p2)
5645 ret = -TARGET_EFAULT;
5646 else
5647 ret = get_errno(renameat(arg1, p, arg3, p2));
5648 unlock_user(p2, arg4, 0);
5649 unlock_user(p, arg2, 0);
5650 }
5651 break;
5652 #endif
5653 case TARGET_NR_mkdir:
5654 if (!(p = lock_user_string(arg1)))
5655 goto efault;
5656 ret = get_errno(mkdir(p, arg2));
5657 unlock_user(p, arg1, 0);
5658 break;
5659 #if defined(TARGET_NR_mkdirat)
5660 case TARGET_NR_mkdirat:
5661 if (!(p = lock_user_string(arg2)))
5662 goto efault;
5663 ret = get_errno(mkdirat(arg1, p, arg3));
5664 unlock_user(p, arg2, 0);
5665 break;
5666 #endif
5667 case TARGET_NR_rmdir:
5668 if (!(p = lock_user_string(arg1)))
5669 goto efault;
5670 ret = get_errno(rmdir(p));
5671 unlock_user(p, arg1, 0);
5672 break;
5673 case TARGET_NR_dup:
5674 ret = get_errno(dup(arg1));
5675 break;
5676 case TARGET_NR_pipe:
5677 ret = do_pipe(cpu_env, arg1, 0, 0);
5678 break;
5679 #ifdef TARGET_NR_pipe2
5680 case TARGET_NR_pipe2:
5681 ret = do_pipe(cpu_env, arg1,
5682 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5683 break;
5684 #endif
5685 case TARGET_NR_times:
5686 {
5687 struct target_tms *tmsp;
5688 struct tms tms;
5689 ret = get_errno(times(&tms));
5690 if (arg1) {
5691 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5692 if (!tmsp)
5693 goto efault;
5694 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5695 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5696 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5697 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5698 }
5699 if (!is_error(ret))
5700 ret = host_to_target_clock_t(ret);
5701 }
5702 break;
5703 #ifdef TARGET_NR_prof
5704 case TARGET_NR_prof:
5705 goto unimplemented;
5706 #endif
5707 #ifdef TARGET_NR_signal
5708 case TARGET_NR_signal:
5709 goto unimplemented;
5710 #endif
5711 case TARGET_NR_acct:
5712 if (arg1 == 0) {
5713 ret = get_errno(acct(NULL));
5714 } else {
5715 if (!(p = lock_user_string(arg1)))
5716 goto efault;
5717 ret = get_errno(acct(path(p)));
5718 unlock_user(p, arg1, 0);
5719 }
5720 break;
5721 #ifdef TARGET_NR_umount2
5722 case TARGET_NR_umount2:
5723 if (!(p = lock_user_string(arg1)))
5724 goto efault;
5725 ret = get_errno(umount2(p, arg2));
5726 unlock_user(p, arg1, 0);
5727 break;
5728 #endif
5729 #ifdef TARGET_NR_lock
5730 case TARGET_NR_lock:
5731 goto unimplemented;
5732 #endif
5733 case TARGET_NR_ioctl:
5734 ret = do_ioctl(arg1, arg2, arg3);
5735 break;
5736 case TARGET_NR_fcntl:
5737 ret = do_fcntl(arg1, arg2, arg3);
5738 break;
5739 #ifdef TARGET_NR_mpx
5740 case TARGET_NR_mpx:
5741 goto unimplemented;
5742 #endif
5743 case TARGET_NR_setpgid:
5744 ret = get_errno(setpgid(arg1, arg2));
5745 break;
5746 #ifdef TARGET_NR_ulimit
5747 case TARGET_NR_ulimit:
5748 goto unimplemented;
5749 #endif
5750 #ifdef TARGET_NR_oldolduname
5751 case TARGET_NR_oldolduname:
5752 goto unimplemented;
5753 #endif
5754 case TARGET_NR_umask:
5755 ret = get_errno(umask(arg1));
5756 break;
5757 case TARGET_NR_chroot:
5758 if (!(p = lock_user_string(arg1)))
5759 goto efault;
5760 ret = get_errno(chroot(p));
5761 unlock_user(p, arg1, 0);
5762 break;
5763 case TARGET_NR_ustat:
5764 goto unimplemented;
5765 case TARGET_NR_dup2:
5766 ret = get_errno(dup2(arg1, arg2));
5767 break;
5768 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5769 case TARGET_NR_dup3:
5770 ret = get_errno(dup3(arg1, arg2, arg3));
5771 break;
5772 #endif
5773 #ifdef TARGET_NR_getppid /* not on alpha */
5774 case TARGET_NR_getppid:
5775 ret = get_errno(getppid());
5776 break;
5777 #endif
5778 case TARGET_NR_getpgrp:
5779 ret = get_errno(getpgrp());
5780 break;
5781 case TARGET_NR_setsid:
5782 ret = get_errno(setsid());
5783 break;
5784 #ifdef TARGET_NR_sigaction
5785 case TARGET_NR_sigaction:
5786 {
5787 #if defined(TARGET_ALPHA)
5788 struct target_sigaction act, oact, *pact = 0;
5789 struct target_old_sigaction *old_act;
5790 if (arg2) {
5791 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5792 goto efault;
5793 act._sa_handler = old_act->_sa_handler;
5794 target_siginitset(&act.sa_mask, old_act->sa_mask);
5795 act.sa_flags = old_act->sa_flags;
5796 act.sa_restorer = 0;
5797 unlock_user_struct(old_act, arg2, 0);
5798 pact = &act;
5799 }
5800 ret = get_errno(do_sigaction(arg1, pact, &oact));
5801 if (!is_error(ret) && arg3) {
5802 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5803 goto efault;
5804 old_act->_sa_handler = oact._sa_handler;
5805 old_act->sa_mask = oact.sa_mask.sig[0];
5806 old_act->sa_flags = oact.sa_flags;
5807 unlock_user_struct(old_act, arg3, 1);
5808 }
5809 #elif defined(TARGET_MIPS)
5810 struct target_sigaction act, oact, *pact, *old_act;
5811
5812 if (arg2) {
5813 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5814 goto efault;
5815 act._sa_handler = old_act->_sa_handler;
5816 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5817 act.sa_flags = old_act->sa_flags;
5818 unlock_user_struct(old_act, arg2, 0);
5819 pact = &act;
5820 } else {
5821 pact = NULL;
5822 }
5823
5824 ret = get_errno(do_sigaction(arg1, pact, &oact));
5825
5826 if (!is_error(ret) && arg3) {
5827 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5828 goto efault;
5829 old_act->_sa_handler = oact._sa_handler;
5830 old_act->sa_flags = oact.sa_flags;
5831 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5832 old_act->sa_mask.sig[1] = 0;
5833 old_act->sa_mask.sig[2] = 0;
5834 old_act->sa_mask.sig[3] = 0;
5835 unlock_user_struct(old_act, arg3, 1);
5836 }
5837 #else
5838 struct target_old_sigaction *old_act;
5839 struct target_sigaction act, oact, *pact;
5840 if (arg2) {
5841 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5842 goto efault;
5843 act._sa_handler = old_act->_sa_handler;
5844 target_siginitset(&act.sa_mask, old_act->sa_mask);
5845 act.sa_flags = old_act->sa_flags;
5846 act.sa_restorer = old_act->sa_restorer;
5847 unlock_user_struct(old_act, arg2, 0);
5848 pact = &act;
5849 } else {
5850 pact = NULL;
5851 }
5852 ret = get_errno(do_sigaction(arg1, pact, &oact));
5853 if (!is_error(ret) && arg3) {
5854 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5855 goto efault;
5856 old_act->_sa_handler = oact._sa_handler;
5857 old_act->sa_mask = oact.sa_mask.sig[0];
5858 old_act->sa_flags = oact.sa_flags;
5859 old_act->sa_restorer = oact.sa_restorer;
5860 unlock_user_struct(old_act, arg3, 1);
5861 }
5862 #endif
5863 }
5864 break;
5865 #endif
5866 case TARGET_NR_rt_sigaction:
5867 {
5868 #if defined(TARGET_ALPHA)
5869 struct target_sigaction act, oact, *pact = 0;
5870 struct target_rt_sigaction *rt_act;
5871 /* ??? arg4 == sizeof(sigset_t). */
5872 if (arg2) {
5873 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5874 goto efault;
5875 act._sa_handler = rt_act->_sa_handler;
5876 act.sa_mask = rt_act->sa_mask;
5877 act.sa_flags = rt_act->sa_flags;
5878 act.sa_restorer = arg5;
5879 unlock_user_struct(rt_act, arg2, 0);
5880 pact = &act;
5881 }
5882 ret = get_errno(do_sigaction(arg1, pact, &oact));
5883 if (!is_error(ret) && arg3) {
5884 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5885 goto efault;
5886 rt_act->_sa_handler = oact._sa_handler;
5887 rt_act->sa_mask = oact.sa_mask;
5888 rt_act->sa_flags = oact.sa_flags;
5889 unlock_user_struct(rt_act, arg3, 1);
5890 }
5891 #else
5892 struct target_sigaction *act;
5893 struct target_sigaction *oact;
5894
5895 if (arg2) {
5896 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5897 goto efault;
5898 } else
5899 act = NULL;
5900 if (arg3) {
5901 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5902 ret = -TARGET_EFAULT;
5903 goto rt_sigaction_fail;
5904 }
5905 } else
5906 oact = NULL;
5907 ret = get_errno(do_sigaction(arg1, act, oact));
5908 rt_sigaction_fail:
5909 if (act)
5910 unlock_user_struct(act, arg2, 0);
5911 if (oact)
5912 unlock_user_struct(oact, arg3, 1);
5913 #endif
5914 }
5915 break;
5916 #ifdef TARGET_NR_sgetmask /* not on alpha */
5917 case TARGET_NR_sgetmask:
5918 {
5919 sigset_t cur_set;
5920 abi_ulong target_set;
5921 sigprocmask(0, NULL, &cur_set);
5922 host_to_target_old_sigset(&target_set, &cur_set);
5923 ret = target_set;
5924 }
5925 break;
5926 #endif
5927 #ifdef TARGET_NR_ssetmask /* not on alpha */
5928 case TARGET_NR_ssetmask:
5929 {
5930 sigset_t set, oset, cur_set;
5931 abi_ulong target_set = arg1;
5932 sigprocmask(0, NULL, &cur_set);
5933 target_to_host_old_sigset(&set, &target_set);
5934 sigorset(&set, &set, &cur_set);
5935 sigprocmask(SIG_SETMASK, &set, &oset);
5936 host_to_target_old_sigset(&target_set, &oset);
5937 ret = target_set;
5938 }
5939 break;
5940 #endif
5941 #ifdef TARGET_NR_sigprocmask
5942 case TARGET_NR_sigprocmask:
5943 {
5944 #if defined(TARGET_ALPHA)
5945 sigset_t set, oldset;
5946 abi_ulong mask;
5947 int how;
5948
5949 switch (arg1) {
5950 case TARGET_SIG_BLOCK:
5951 how = SIG_BLOCK;
5952 break;
5953 case TARGET_SIG_UNBLOCK:
5954 how = SIG_UNBLOCK;
5955 break;
5956 case TARGET_SIG_SETMASK:
5957 how = SIG_SETMASK;
5958 break;
5959 default:
5960 ret = -TARGET_EINVAL;
5961 goto fail;
5962 }
5963 mask = arg2;
5964 target_to_host_old_sigset(&set, &mask);
5965
5966 ret = get_errno(sigprocmask(how, &set, &oldset));
5967 if (!is_error(ret)) {
5968 host_to_target_old_sigset(&mask, &oldset);
5969 ret = mask;
5970 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5971 }
5972 #else
5973 sigset_t set, oldset, *set_ptr;
5974 int how;
5975
5976 if (arg2) {
5977 switch (arg1) {
5978 case TARGET_SIG_BLOCK:
5979 how = SIG_BLOCK;
5980 break;
5981 case TARGET_SIG_UNBLOCK:
5982 how = SIG_UNBLOCK;
5983 break;
5984 case TARGET_SIG_SETMASK:
5985 how = SIG_SETMASK;
5986 break;
5987 default:
5988 ret = -TARGET_EINVAL;
5989 goto fail;
5990 }
5991 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5992 goto efault;
5993 target_to_host_old_sigset(&set, p);
5994 unlock_user(p, arg2, 0);
5995 set_ptr = &set;
5996 } else {
5997 how = 0;
5998 set_ptr = NULL;
5999 }
6000 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6001 if (!is_error(ret) && arg3) {
6002 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6003 goto efault;
6004 host_to_target_old_sigset(p, &oldset);
6005 unlock_user(p, arg3, sizeof(target_sigset_t));
6006 }
6007 #endif
6008 }
6009 break;
6010 #endif
6011 case TARGET_NR_rt_sigprocmask:
6012 {
6013 int how = arg1;
6014 sigset_t set, oldset, *set_ptr;
6015
6016 if (arg2) {
6017 switch(how) {
6018 case TARGET_SIG_BLOCK:
6019 how = SIG_BLOCK;
6020 break;
6021 case TARGET_SIG_UNBLOCK:
6022 how = SIG_UNBLOCK;
6023 break;
6024 case TARGET_SIG_SETMASK:
6025 how = SIG_SETMASK;
6026 break;
6027 default:
6028 ret = -TARGET_EINVAL;
6029 goto fail;
6030 }
6031 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6032 goto efault;
6033 target_to_host_sigset(&set, p);
6034 unlock_user(p, arg2, 0);
6035 set_ptr = &set;
6036 } else {
6037 how = 0;
6038 set_ptr = NULL;
6039 }
6040 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6041 if (!is_error(ret) && arg3) {
6042 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6043 goto efault;
6044 host_to_target_sigset(p, &oldset);
6045 unlock_user(p, arg3, sizeof(target_sigset_t));
6046 }
6047 }
6048 break;
6049 #ifdef TARGET_NR_sigpending
6050 case TARGET_NR_sigpending:
6051 {
6052 sigset_t set;
6053 ret = get_errno(sigpending(&set));
6054 if (!is_error(ret)) {
6055 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6056 goto efault;
6057 host_to_target_old_sigset(p, &set);
6058 unlock_user(p, arg1, sizeof(target_sigset_t));
6059 }
6060 }
6061 break;
6062 #endif
6063 case TARGET_NR_rt_sigpending:
6064 {
6065 sigset_t set;
6066 ret = get_errno(sigpending(&set));
6067 if (!is_error(ret)) {
6068 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6069 goto efault;
6070 host_to_target_sigset(p, &set);
6071 unlock_user(p, arg1, sizeof(target_sigset_t));
6072 }
6073 }
6074 break;
6075 #ifdef TARGET_NR_sigsuspend
6076 case TARGET_NR_sigsuspend:
6077 {
6078 sigset_t set;
6079 #if defined(TARGET_ALPHA)
6080 abi_ulong mask = arg1;
6081 target_to_host_old_sigset(&set, &mask);
6082 #else
6083 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6084 goto efault;
6085 target_to_host_old_sigset(&set, p);
6086 unlock_user(p, arg1, 0);
6087 #endif
6088 ret = get_errno(sigsuspend(&set));
6089 }
6090 break;
6091 #endif
6092 case TARGET_NR_rt_sigsuspend:
6093 {
6094 sigset_t set;
6095 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6096 goto efault;
6097 target_to_host_sigset(&set, p);
6098 unlock_user(p, arg1, 0);
6099 ret = get_errno(sigsuspend(&set));
6100 }
6101 break;
6102 case TARGET_NR_rt_sigtimedwait:
6103 {
6104 sigset_t set;
6105 struct timespec uts, *puts;
6106 siginfo_t uinfo;
6107
6108 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6109 goto efault;
6110 target_to_host_sigset(&set, p);
6111 unlock_user(p, arg1, 0);
6112 if (arg3) {
6113 puts = &uts;
6114 target_to_host_timespec(puts, arg3);
6115 } else {
6116 puts = NULL;
6117 }
6118 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6119 if (!is_error(ret) && arg2) {
6120 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6121 goto efault;
6122 host_to_target_siginfo(p, &uinfo);
6123 unlock_user(p, arg2, sizeof(target_siginfo_t));
6124 }
6125 }
6126 break;
6127 case TARGET_NR_rt_sigqueueinfo:
6128 {
6129 siginfo_t uinfo;
6130 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6131 goto efault;
6132 target_to_host_siginfo(&uinfo, p);
6133 unlock_user(p, arg3, 0);
6134 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6135 }
6136 break;
6137 #ifdef TARGET_NR_sigreturn
6138 case TARGET_NR_sigreturn:
6139 /* NOTE: ret is eax, so no transcoding needs to be done */
6140 ret = do_sigreturn(cpu_env);
6141 break;
6142 #endif
6143 case TARGET_NR_rt_sigreturn:
6144 /* NOTE: ret is eax, so no transcoding needs to be done */
6145 ret = do_rt_sigreturn(cpu_env);
6146 break;
6147 case TARGET_NR_sethostname:
6148 if (!(p = lock_user_string(arg1)))
6149 goto efault;
6150 ret = get_errno(sethostname(p, arg2));
6151 unlock_user(p, arg1, 0);
6152 break;
6153 case TARGET_NR_setrlimit:
6154 {
6155 int resource = target_to_host_resource(arg1);
6156 struct target_rlimit *target_rlim;
6157 struct rlimit rlim;
6158 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6159 goto efault;
6160 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6161 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6162 unlock_user_struct(target_rlim, arg2, 0);
6163 ret = get_errno(setrlimit(resource, &rlim));
6164 }
6165 break;
6166 case TARGET_NR_getrlimit:
6167 {
6168 int resource = target_to_host_resource(arg1);
6169 struct target_rlimit *target_rlim;
6170 struct rlimit rlim;
6171
6172 ret = get_errno(getrlimit(resource, &rlim));
6173 if (!is_error(ret)) {
6174 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6175 goto efault;
6176 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6177 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6178 unlock_user_struct(target_rlim, arg2, 1);
6179 }
6180 }
6181 break;
6182 case TARGET_NR_getrusage:
6183 {
6184 struct rusage rusage;
6185 ret = get_errno(getrusage(arg1, &rusage));
6186 if (!is_error(ret)) {
6187 host_to_target_rusage(arg2, &rusage);
6188 }
6189 }
6190 break;
6191 case TARGET_NR_gettimeofday:
6192 {
6193 struct timeval tv;
6194 ret = get_errno(gettimeofday(&tv, NULL));
6195 if (!is_error(ret)) {
6196 if (copy_to_user_timeval(arg1, &tv))
6197 goto efault;
6198 }
6199 }
6200 break;
6201 case TARGET_NR_settimeofday:
6202 {
6203 struct timeval tv;
6204 if (copy_from_user_timeval(&tv, arg1))
6205 goto efault;
6206 ret = get_errno(settimeofday(&tv, NULL));
6207 }
6208 break;
6209 #if defined(TARGET_NR_select)
6210 case TARGET_NR_select:
6211 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6212 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6213 #else
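/* Descriptive note: on these targets the old-style select ABI passes a single
 * guest pointer to a block of five values (n, inp, outp, exp, tvp) rather than
 * five separate syscall arguments, hence the unpacking below. */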
6214 {
6215 struct target_sel_arg_struct *sel;
6216 abi_ulong inp, outp, exp, tvp;
6217 long nsel;
6218
6219 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6220 goto efault;
6221 nsel = tswapal(sel->n);
6222 inp = tswapal(sel->inp);
6223 outp = tswapal(sel->outp);
6224 exp = tswapal(sel->exp);
6225 tvp = tswapal(sel->tvp);
6226 unlock_user_struct(sel, arg1, 0);
6227 ret = do_select(nsel, inp, outp, exp, tvp);
6228 }
6229 #endif
6230 break;
6231 #endif
6232 #ifdef TARGET_NR_pselect6
6233 case TARGET_NR_pselect6:
6234 {
6235 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6236 fd_set rfds, wfds, efds;
6237 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6238 struct timespec ts, *ts_ptr;
6239
6240 /*
6241 * The 6th arg is actually two args smashed together,
6242 * so we cannot use the C library.
6243 */
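/* Roughly, arg6 points at a guest structure of the form
 *     struct { abi_ulong sigset_addr; abi_ulong sigset_size; }
 * which is why two abi_ulongs are read from it below
 * (field names here are illustrative only). */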
6244 sigset_t set;
6245 struct {
6246 sigset_t *set;
6247 size_t size;
6248 } sig, *sig_ptr;
6249
6250 abi_ulong arg_sigset, arg_sigsize, *arg7;
6251 target_sigset_t *target_sigset;
6252
6253 n = arg1;
6254 rfd_addr = arg2;
6255 wfd_addr = arg3;
6256 efd_addr = arg4;
6257 ts_addr = arg5;
6258
6259 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6260 if (ret) {
6261 goto fail;
6262 }
6263 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6264 if (ret) {
6265 goto fail;
6266 }
6267 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6268 if (ret) {
6269 goto fail;
6270 }
6271
6272 /*
6273 * This takes a timespec, and not a timeval, so we cannot
6274 * use the do_select() helper ...
6275 */
6276 if (ts_addr) {
6277 if (target_to_host_timespec(&ts, ts_addr)) {
6278 goto efault;
6279 }
6280 ts_ptr = &ts;
6281 } else {
6282 ts_ptr = NULL;
6283 }
6284
6285 /* Extract the two packed args for the sigset */
6286 if (arg6) {
6287 sig_ptr = &sig;
6288 sig.size = _NSIG / 8;
6289
6290 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6291 if (!arg7) {
6292 goto efault;
6293 }
6294 arg_sigset = tswapal(arg7[0]);
6295 arg_sigsize = tswapal(arg7[1]);
6296 unlock_user(arg7, arg6, 0);
6297
6298 if (arg_sigset) {
6299 sig.set = &set;
6300 if (arg_sigsize != sizeof(*target_sigset)) {
6301 /* Like the kernel, we enforce correct size sigsets */
6302 ret = -TARGET_EINVAL;
6303 goto fail;
6304 }
6305 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6306 sizeof(*target_sigset), 1);
6307 if (!target_sigset) {
6308 goto efault;
6309 }
6310 target_to_host_sigset(&set, target_sigset);
6311 unlock_user(target_sigset, arg_sigset, 0);
6312 } else {
6313 sig.set = NULL;
6314 }
6315 } else {
6316 sig_ptr = NULL;
6317 }
6318
6319 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6320 ts_ptr, sig_ptr));
6321
6322 if (!is_error(ret)) {
6323 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6324 goto efault;
6325 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6326 goto efault;
6327 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6328 goto efault;
6329
6330 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6331 goto efault;
6332 }
6333 }
6334 break;
6335 #endif
6336 case TARGET_NR_symlink:
6337 {
6338 void *p2;
6339 p = lock_user_string(arg1);
6340 p2 = lock_user_string(arg2);
6341 if (!p || !p2)
6342 ret = -TARGET_EFAULT;
6343 else
6344 ret = get_errno(symlink(p, p2));
6345 unlock_user(p2, arg2, 0);
6346 unlock_user(p, arg1, 0);
6347 }
6348 break;
6349 #if defined(TARGET_NR_symlinkat)
6350 case TARGET_NR_symlinkat:
6351 {
6352 void *p2;
6353 p = lock_user_string(arg1);
6354 p2 = lock_user_string(arg3);
6355 if (!p || !p2)
6356 ret = -TARGET_EFAULT;
6357 else
6358 ret = get_errno(symlinkat(p, arg2, p2));
6359 unlock_user(p2, arg3, 0);
6360 unlock_user(p, arg1, 0);
6361 }
6362 break;
6363 #endif
6364 #ifdef TARGET_NR_oldlstat
6365 case TARGET_NR_oldlstat:
6366 goto unimplemented;
6367 #endif
6368 case TARGET_NR_readlink:
6369 {
6370 void *p2;
6371 p = lock_user_string(arg1);
6372 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6373 if (!p || !p2) {
6374 ret = -TARGET_EFAULT;
6375 } else if (is_proc_myself((const char *)p, "exe")) {
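/* readlink on /proc/self/exe is intercepted so that the guest sees
 * the path of the emulated binary (exec_path) instead of the path
 * of the QEMU executable itself. */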
6376 char real[PATH_MAX], *temp;
6377 temp = realpath(exec_path, real);
6378 ret = temp == NULL ? get_errno(-1) : strlen(real);
6379 snprintf((char *)p2, arg3, "%s", real);
6380 } else {
6381 ret = get_errno(readlink(path(p), p2, arg3));
6382 }
6383 unlock_user(p2, arg2, ret);
6384 unlock_user(p, arg1, 0);
6385 }
6386 break;
6387 #if defined(TARGET_NR_readlinkat)
6388 case TARGET_NR_readlinkat:
6389 {
6390 void *p2;
6391 p = lock_user_string(arg2);
6392 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6393 if (!p || !p2) {
6394 ret = -TARGET_EFAULT;
6395 } else if (is_proc_myself((const char *)p, "exe")) {
6396 char real[PATH_MAX], *temp;
6397 temp = realpath(exec_path, real);
6398 ret = temp == NULL ? get_errno(-1) : strlen(real);
6399 snprintf((char *)p2, arg4, "%s", real);
6400 } else {
6401 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6402 }
6403 unlock_user(p2, arg3, ret);
6404 unlock_user(p, arg2, 0);
6405 }
6406 break;
6407 #endif
6408 #ifdef TARGET_NR_uselib
6409 case TARGET_NR_uselib:
6410 goto unimplemented;
6411 #endif
6412 #ifdef TARGET_NR_swapon
6413 case TARGET_NR_swapon:
6414 if (!(p = lock_user_string(arg1)))
6415 goto efault;
6416 ret = get_errno(swapon(p, arg2));
6417 unlock_user(p, arg1, 0);
6418 break;
6419 #endif
6420 case TARGET_NR_reboot:
6421 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6422 /* arg4 is only used with RESTART2; it must be ignored in all other cases */
6423 p = lock_user_string(arg4);
6424 if (!p) {
6425 goto efault;
6426 }
6427 ret = get_errno(reboot(arg1, arg2, arg3, p));
6428 unlock_user(p, arg4, 0);
6429 } else {
6430 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6431 }
6432 break;
6433 #ifdef TARGET_NR_readdir
6434 case TARGET_NR_readdir:
6435 goto unimplemented;
6436 #endif
6437 #ifdef TARGET_NR_mmap
6438 case TARGET_NR_mmap:
6439 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6440 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6441 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6442 || defined(TARGET_S390X)
6443 {
6444 abi_ulong *v;
6445 abi_ulong v1, v2, v3, v4, v5, v6;
6446 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6447 goto efault;
6448 v1 = tswapal(v[0]);
6449 v2 = tswapal(v[1]);
6450 v3 = tswapal(v[2]);
6451 v4 = tswapal(v[3]);
6452 v5 = tswapal(v[4]);
6453 v6 = tswapal(v[5]);
6454 unlock_user(v, arg1, 0);
6455 ret = get_errno(target_mmap(v1, v2, v3,
6456 target_to_host_bitmask(v4, mmap_flags_tbl),
6457 v5, v6));
6458 }
6459 #else
6460 ret = get_errno(target_mmap(arg1, arg2, arg3,
6461 target_to_host_bitmask(arg4, mmap_flags_tbl),
6462 arg5,
6463 arg6));
6464 #endif
6465 break;
6466 #endif
6467 #ifdef TARGET_NR_mmap2
6468 case TARGET_NR_mmap2:
6469 #ifndef MMAP_SHIFT
6470 #define MMAP_SHIFT 12
6471 #endif
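/* The mmap2 offset argument is given in units of (1 << MMAP_SHIFT)
 * bytes (normally 4096), so it is converted to a byte offset here. */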
6472 ret = get_errno(target_mmap(arg1, arg2, arg3,
6473 target_to_host_bitmask(arg4, mmap_flags_tbl),
6474 arg5,
6475 arg6 << MMAP_SHIFT));
6476 break;
6477 #endif
6478 case TARGET_NR_munmap:
6479 ret = get_errno(target_munmap(arg1, arg2));
6480 break;
6481 case TARGET_NR_mprotect:
6482 {
6483 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6484 /* Special hack to detect libc making the stack executable. */
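/* (glibc typically issues mprotect(addr, len, prot | PROT_GROWSDOWN) on the
 * stack and expects the kernel to apply the change down to the start of the
 * stack mapping; since the guest's stack bounds are known here, the range is
 * widened down to the recorded stack limit and the flag is dropped.) */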
6485 if ((arg3 & PROT_GROWSDOWN)
6486 && arg1 >= ts->info->stack_limit
6487 && arg1 <= ts->info->start_stack) {
6488 arg3 &= ~PROT_GROWSDOWN;
6489 arg2 = arg2 + arg1 - ts->info->stack_limit;
6490 arg1 = ts->info->stack_limit;
6491 }
6492 }
6493 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6494 break;
6495 #ifdef TARGET_NR_mremap
6496 case TARGET_NR_mremap:
6497 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6498 break;
6499 #endif
6500 /* ??? msync/mlock/munlock are broken for softmmu. */
6501 #ifdef TARGET_NR_msync
6502 case TARGET_NR_msync:
6503 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6504 break;
6505 #endif
6506 #ifdef TARGET_NR_mlock
6507 case TARGET_NR_mlock:
6508 ret = get_errno(mlock(g2h(arg1), arg2));
6509 break;
6510 #endif
6511 #ifdef TARGET_NR_munlock
6512 case TARGET_NR_munlock:
6513 ret = get_errno(munlock(g2h(arg1), arg2));
6514 break;
6515 #endif
6516 #ifdef TARGET_NR_mlockall
6517 case TARGET_NR_mlockall:
6518 ret = get_errno(mlockall(arg1));
6519 break;
6520 #endif
6521 #ifdef TARGET_NR_munlockall
6522 case TARGET_NR_munlockall:
6523 ret = get_errno(munlockall());
6524 break;
6525 #endif
6526 case TARGET_NR_truncate:
6527 if (!(p = lock_user_string(arg1)))
6528 goto efault;
6529 ret = get_errno(truncate(p, arg2));
6530 unlock_user(p, arg1, 0);
6531 break;
6532 case TARGET_NR_ftruncate:
6533 ret = get_errno(ftruncate(arg1, arg2));
6534 break;
6535 case TARGET_NR_fchmod:
6536 ret = get_errno(fchmod(arg1, arg2));
6537 break;
6538 #if defined(TARGET_NR_fchmodat)
6539 case TARGET_NR_fchmodat:
6540 if (!(p = lock_user_string(arg2)))
6541 goto efault;
6542 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6543 unlock_user(p, arg2, 0);
6544 break;
6545 #endif
6546 case TARGET_NR_getpriority:
6547 /* Note that negative values are valid for getpriority, so we must
6548 differentiate based on errno settings. */
6549 errno = 0;
6550 ret = getpriority(arg1, arg2);
6551 if (ret == -1 && errno != 0) {
6552 ret = -host_to_target_errno(errno);
6553 break;
6554 }
6555 #ifdef TARGET_ALPHA
6556 /* Return value is the unbiased priority. Signal no error. */
6557 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6558 #else
6559 /* Return value is a biased priority to avoid negative numbers. */
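/* (The raw kernel syscall returns 20 - nice, i.e. a value in 1..40, while
 * the host libc wrapper returns the unbiased nice value, so the bias is
 * re-applied here.) */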
6560 ret = 20 - ret;
6561 #endif
6562 break;
6563 case TARGET_NR_setpriority:
6564 ret = get_errno(setpriority(arg1, arg2, arg3));
6565 break;
6566 #ifdef TARGET_NR_profil
6567 case TARGET_NR_profil:
6568 goto unimplemented;
6569 #endif
6570 case TARGET_NR_statfs:
6571 if (!(p = lock_user_string(arg1)))
6572 goto efault;
6573 ret = get_errno(statfs(path(p), &stfs));
6574 unlock_user(p, arg1, 0);
6575 convert_statfs:
6576 if (!is_error(ret)) {
6577 struct target_statfs *target_stfs;
6578
6579 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6580 goto efault;
6581 __put_user(stfs.f_type, &target_stfs->f_type);
6582 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6583 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6584 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6585 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6586 __put_user(stfs.f_files, &target_stfs->f_files);
6587 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6588 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6589 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6590 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6591 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6592 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6593 unlock_user_struct(target_stfs, arg2, 1);
6594 }
6595 break;
6596 case TARGET_NR_fstatfs:
6597 ret = get_errno(fstatfs(arg1, &stfs));
6598 goto convert_statfs;
6599 #ifdef TARGET_NR_statfs64
6600 case TARGET_NR_statfs64:
6601 if (!(p = lock_user_string(arg1)))
6602 goto efault;
6603 ret = get_errno(statfs(path(p), &stfs));
6604 unlock_user(p, arg1, 0);
6605 convert_statfs64:
6606 if (!is_error(ret)) {
6607 struct target_statfs64 *target_stfs;
6608
6609 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6610 goto efault;
6611 __put_user(stfs.f_type, &target_stfs->f_type);
6612 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6613 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6614 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6615 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6616 __put_user(stfs.f_files, &target_stfs->f_files);
6617 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6618 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6619 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6620 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6621 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6622 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6623 unlock_user_struct(target_stfs, arg3, 1);
6624 }
6625 break;
6626 case TARGET_NR_fstatfs64:
6627 ret = get_errno(fstatfs(arg1, &stfs));
6628 goto convert_statfs64;
6629 #endif
6630 #ifdef TARGET_NR_ioperm
6631 case TARGET_NR_ioperm:
6632 goto unimplemented;
6633 #endif
6634 #ifdef TARGET_NR_socketcall
6635 case TARGET_NR_socketcall:
6636 ret = do_socketcall(arg1, arg2);
6637 break;
6638 #endif
6639 #ifdef TARGET_NR_accept
6640 case TARGET_NR_accept:
6641 ret = do_accept4(arg1, arg2, arg3, 0);
6642 break;
6643 #endif
6644 #ifdef TARGET_NR_accept4
6645 case TARGET_NR_accept4:
6646 #ifdef CONFIG_ACCEPT4
6647 ret = do_accept4(arg1, arg2, arg3, arg4);
6648 #else
6649 goto unimplemented;
6650 #endif
6651 break;
6652 #endif
6653 #ifdef TARGET_NR_bind
6654 case TARGET_NR_bind:
6655 ret = do_bind(arg1, arg2, arg3);
6656 break;
6657 #endif
6658 #ifdef TARGET_NR_connect
6659 case TARGET_NR_connect:
6660 ret = do_connect(arg1, arg2, arg3);
6661 break;
6662 #endif
6663 #ifdef TARGET_NR_getpeername
6664 case TARGET_NR_getpeername:
6665 ret = do_getpeername(arg1, arg2, arg3);
6666 break;
6667 #endif
6668 #ifdef TARGET_NR_getsockname
6669 case TARGET_NR_getsockname:
6670 ret = do_getsockname(arg1, arg2, arg3);
6671 break;
6672 #endif
6673 #ifdef TARGET_NR_getsockopt
6674 case TARGET_NR_getsockopt:
6675 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6676 break;
6677 #endif
6678 #ifdef TARGET_NR_listen
6679 case TARGET_NR_listen:
6680 ret = get_errno(listen(arg1, arg2));
6681 break;
6682 #endif
6683 #ifdef TARGET_NR_recv
6684 case TARGET_NR_recv:
6685 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6686 break;
6687 #endif
6688 #ifdef TARGET_NR_recvfrom
6689 case TARGET_NR_recvfrom:
6690 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6691 break;
6692 #endif
6693 #ifdef TARGET_NR_recvmsg
6694 case TARGET_NR_recvmsg:
6695 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6696 break;
6697 #endif
6698 #ifdef TARGET_NR_send
6699 case TARGET_NR_send:
6700 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6701 break;
6702 #endif
6703 #ifdef TARGET_NR_sendmsg
6704 case TARGET_NR_sendmsg:
6705 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6706 break;
6707 #endif
6708 #ifdef TARGET_NR_sendto
6709 case TARGET_NR_sendto:
6710 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6711 break;
6712 #endif
6713 #ifdef TARGET_NR_shutdown
6714 case TARGET_NR_shutdown:
6715 ret = get_errno(shutdown(arg1, arg2));
6716 break;
6717 #endif
6718 #ifdef TARGET_NR_socket
6719 case TARGET_NR_socket:
6720 ret = do_socket(arg1, arg2, arg3);
6721 break;
6722 #endif
6723 #ifdef TARGET_NR_socketpair
6724 case TARGET_NR_socketpair:
6725 ret = do_socketpair(arg1, arg2, arg3, arg4);
6726 break;
6727 #endif
6728 #ifdef TARGET_NR_setsockopt
6729 case TARGET_NR_setsockopt:
6730 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6731 break;
6732 #endif
6733
6734 case TARGET_NR_syslog:
6735 if (!(p = lock_user_string(arg2)))
6736 goto efault;
6737 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6738 unlock_user(p, arg2, 0);
6739 break;
6740
6741 case TARGET_NR_setitimer:
6742 {
6743 struct itimerval value, ovalue, *pvalue;
6744
6745 if (arg2) {
6746 pvalue = &value;
6747 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6748 || copy_from_user_timeval(&pvalue->it_value,
6749 arg2 + sizeof(struct target_timeval)))
6750 goto efault;
6751 } else {
6752 pvalue = NULL;
6753 }
6754 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6755 if (!is_error(ret) && arg3) {
6756 if (copy_to_user_timeval(arg3,
6757 &ovalue.it_interval)
6758 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6759 &ovalue.it_value))
6760 goto efault;
6761 }
6762 }
6763 break;
6764 case TARGET_NR_getitimer:
6765 {
6766 struct itimerval value;
6767
6768 ret = get_errno(getitimer(arg1, &value));
6769 if (!is_error(ret) && arg2) {
6770 if (copy_to_user_timeval(arg2,
6771 &value.it_interval)
6772 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6773 &value.it_value))
6774 goto efault;
6775 }
6776 }
6777 break;
6778 case TARGET_NR_stat:
6779 if (!(p = lock_user_string(arg1)))
6780 goto efault;
6781 ret = get_errno(stat(path(p), &st));
6782 unlock_user(p, arg1, 0);
6783 goto do_stat;
6784 case TARGET_NR_lstat:
6785 if (!(p = lock_user_string(arg1)))
6786 goto efault;
6787 ret = get_errno(lstat(path(p), &st));
6788 unlock_user(p, arg1, 0);
6789 goto do_stat;
6790 case TARGET_NR_fstat:
6791 {
6792 ret = get_errno(fstat(arg1, &st));
6793 do_stat:
6794 if (!is_error(ret)) {
6795 struct target_stat *target_st;
6796
6797 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6798 goto efault;
6799 memset(target_st, 0, sizeof(*target_st));
6800 __put_user(st.st_dev, &target_st->st_dev);
6801 __put_user(st.st_ino, &target_st->st_ino);
6802 __put_user(st.st_mode, &target_st->st_mode);
6803 __put_user(st.st_uid, &target_st->st_uid);
6804 __put_user(st.st_gid, &target_st->st_gid);
6805 __put_user(st.st_nlink, &target_st->st_nlink);
6806 __put_user(st.st_rdev, &target_st->st_rdev);
6807 __put_user(st.st_size, &target_st->st_size);
6808 __put_user(st.st_blksize, &target_st->st_blksize);
6809 __put_user(st.st_blocks, &target_st->st_blocks);
6810 __put_user(st.st_atime, &target_st->target_st_atime);
6811 __put_user(st.st_mtime, &target_st->target_st_mtime);
6812 __put_user(st.st_ctime, &target_st->target_st_ctime);
6813 unlock_user_struct(target_st, arg2, 1);
6814 }
6815 }
6816 break;
6817 #ifdef TARGET_NR_olduname
6818 case TARGET_NR_olduname:
6819 goto unimplemented;
6820 #endif
6821 #ifdef TARGET_NR_iopl
6822 case TARGET_NR_iopl:
6823 goto unimplemented;
6824 #endif
6825 case TARGET_NR_vhangup:
6826 ret = get_errno(vhangup());
6827 break;
6828 #ifdef TARGET_NR_idle
6829 case TARGET_NR_idle:
6830 goto unimplemented;
6831 #endif
6832 #ifdef TARGET_NR_syscall
6833 case TARGET_NR_syscall:
6834 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6835 arg6, arg7, arg8, 0);
6836 break;
6837 #endif
6838 case TARGET_NR_wait4:
6839 {
6840 int status;
6841 abi_long status_ptr = arg2;
6842 struct rusage rusage, *rusage_ptr;
6843 abi_ulong target_rusage = arg4;
6844 if (target_rusage)
6845 rusage_ptr = &rusage;
6846 else
6847 rusage_ptr = NULL;
6848 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6849 if (!is_error(ret)) {
6850 if (status_ptr && ret) {
6851 status = host_to_target_waitstatus(status);
6852 if (put_user_s32(status, status_ptr))
6853 goto efault;
6854 }
6855 if (target_rusage)
6856 host_to_target_rusage(target_rusage, &rusage);
6857 }
6858 }
6859 break;
6860 #ifdef TARGET_NR_swapoff
6861 case TARGET_NR_swapoff:
6862 if (!(p = lock_user_string(arg1)))
6863 goto efault;
6864 ret = get_errno(swapoff(p));
6865 unlock_user(p, arg1, 0);
6866 break;
6867 #endif
6868 case TARGET_NR_sysinfo:
6869 {
6870 struct target_sysinfo *target_value;
6871 struct sysinfo value;
6872 ret = get_errno(sysinfo(&value));
6873 if (!is_error(ret) && arg1)
6874 {
6875 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6876 goto efault;
6877 __put_user(value.uptime, &target_value->uptime);
6878 __put_user(value.loads[0], &target_value->loads[0]);
6879 __put_user(value.loads[1], &target_value->loads[1]);
6880 __put_user(value.loads[2], &target_value->loads[2]);
6881 __put_user(value.totalram, &target_value->totalram);
6882 __put_user(value.freeram, &target_value->freeram);
6883 __put_user(value.sharedram, &target_value->sharedram);
6884 __put_user(value.bufferram, &target_value->bufferram);
6885 __put_user(value.totalswap, &target_value->totalswap);
6886 __put_user(value.freeswap, &target_value->freeswap);
6887 __put_user(value.procs, &target_value->procs);
6888 __put_user(value.totalhigh, &target_value->totalhigh);
6889 __put_user(value.freehigh, &target_value->freehigh);
6890 __put_user(value.mem_unit, &target_value->mem_unit);
6891 unlock_user_struct(target_value, arg1, 1);
6892 }
6893 }
6894 break;
6895 #ifdef TARGET_NR_ipc
6896 case TARGET_NR_ipc:
6897 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6898 break;
6899 #endif
6900 #ifdef TARGET_NR_semget
6901 case TARGET_NR_semget:
6902 ret = get_errno(semget(arg1, arg2, arg3));
6903 break;
6904 #endif
6905 #ifdef TARGET_NR_semop
6906 case TARGET_NR_semop:
6907 ret = do_semop(arg1, arg2, arg3);
6908 break;
6909 #endif
6910 #ifdef TARGET_NR_semctl
6911 case TARGET_NR_semctl:
6912 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6913 break;
6914 #endif
6915 #ifdef TARGET_NR_msgctl
6916 case TARGET_NR_msgctl:
6917 ret = do_msgctl(arg1, arg2, arg3);
6918 break;
6919 #endif
6920 #ifdef TARGET_NR_msgget
6921 case TARGET_NR_msgget:
6922 ret = get_errno(msgget(arg1, arg2));
6923 break;
6924 #endif
6925 #ifdef TARGET_NR_msgrcv
6926 case TARGET_NR_msgrcv:
6927 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6928 break;
6929 #endif
6930 #ifdef TARGET_NR_msgsnd
6931 case TARGET_NR_msgsnd:
6932 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6933 break;
6934 #endif
6935 #ifdef TARGET_NR_shmget
6936 case TARGET_NR_shmget:
6937 ret = get_errno(shmget(arg1, arg2, arg3));
6938 break;
6939 #endif
6940 #ifdef TARGET_NR_shmctl
6941 case TARGET_NR_shmctl:
6942 ret = do_shmctl(arg1, arg2, arg3);
6943 break;
6944 #endif
6945 #ifdef TARGET_NR_shmat
6946 case TARGET_NR_shmat:
6947 ret = do_shmat(arg1, arg2, arg3);
6948 break;
6949 #endif
6950 #ifdef TARGET_NR_shmdt
6951 case TARGET_NR_shmdt:
6952 ret = do_shmdt(arg1);
6953 break;
6954 #endif
6955 case TARGET_NR_fsync:
6956 ret = get_errno(fsync(arg1));
6957 break;
6958 case TARGET_NR_clone:
6959 /* Linux manages to have three different orderings for its
6960 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
6961 * match the kernel's CONFIG_CLONE_* settings.
6962 * Microblaze is further special in that it uses a sixth
6963 * implicit argument to clone for the TLS pointer.
6964 */
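/* For reference, the kernel argument orderings are roughly:
 *   default:          clone(flags, newsp, parent_tidptr, child_tidptr, tls)
 *   CLONE_BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
 *   CLONE_BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
 */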
6965 #if defined(TARGET_MICROBLAZE)
6966 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6967 #elif defined(TARGET_CLONE_BACKWARDS)
6968 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6969 #elif defined(TARGET_CLONE_BACKWARDS2)
6970 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6971 #else
6972 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6973 #endif
6974 break;
6975 #ifdef __NR_exit_group
6976 /* new thread calls */
6977 case TARGET_NR_exit_group:
6978 #ifdef TARGET_GPROF
6979 _mcleanup();
6980 #endif
6981 gdb_exit(cpu_env, arg1);
6982 ret = get_errno(exit_group(arg1));
6983 break;
6984 #endif
6985 case TARGET_NR_setdomainname:
6986 if (!(p = lock_user_string(arg1)))
6987 goto efault;
6988 ret = get_errno(setdomainname(p, arg2));
6989 unlock_user(p, arg1, 0);
6990 break;
6991 case TARGET_NR_uname:
6992 /* no need to transcode because we use the linux syscall */
6993 {
6994 struct new_utsname * buf;
6995
6996 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6997 goto efault;
6998 ret = get_errno(sys_uname(buf));
6999 if (!is_error(ret)) {
7000 /* Overwrite the native machine name with whatever is being
7001 emulated. */
7002 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7003 /* Allow the user to override the reported release. */
7004 if (qemu_uname_release && *qemu_uname_release)
7005 strcpy (buf->release, qemu_uname_release);
7006 }
7007 unlock_user_struct(buf, arg1, 1);
7008 }
7009 break;
7010 #ifdef TARGET_I386
7011 case TARGET_NR_modify_ldt:
7012 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7013 break;
7014 #if !defined(TARGET_X86_64)
7015 case TARGET_NR_vm86old:
7016 goto unimplemented;
7017 case TARGET_NR_vm86:
7018 ret = do_vm86(cpu_env, arg1, arg2);
7019 break;
7020 #endif
7021 #endif
7022 case TARGET_NR_adjtimex:
7023 goto unimplemented;
7024 #ifdef TARGET_NR_create_module
7025 case TARGET_NR_create_module:
7026 #endif
7027 case TARGET_NR_init_module:
7028 case TARGET_NR_delete_module:
7029 #ifdef TARGET_NR_get_kernel_syms
7030 case TARGET_NR_get_kernel_syms:
7031 #endif
7032 goto unimplemented;
7033 case TARGET_NR_quotactl:
7034 goto unimplemented;
7035 case TARGET_NR_getpgid:
7036 ret = get_errno(getpgid(arg1));
7037 break;
7038 case TARGET_NR_fchdir:
7039 ret = get_errno(fchdir(arg1));
7040 break;
7041 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7042 case TARGET_NR_bdflush:
7043 goto unimplemented;
7044 #endif
7045 #ifdef TARGET_NR_sysfs
7046 case TARGET_NR_sysfs:
7047 goto unimplemented;
7048 #endif
7049 case TARGET_NR_personality:
7050 ret = get_errno(personality(arg1));
7051 break;
7052 #ifdef TARGET_NR_afs_syscall
7053 case TARGET_NR_afs_syscall:
7054 goto unimplemented;
7055 #endif
7056 #ifdef TARGET_NR__llseek /* Not on alpha */
7057 case TARGET_NR__llseek:
7058 {
7059 int64_t res;
7060 #if !defined(__NR_llseek)
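/* No host llseek: arg2 and arg3 are the high and low 32-bit halves of
 * the 64-bit offset, so reassemble them and fall back to plain lseek. */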
7061 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7062 if (res == -1) {
7063 ret = get_errno(res);
7064 } else {
7065 ret = 0;
7066 }
7067 #else
7068 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7069 #endif
7070 if ((ret == 0) && put_user_s64(res, arg4)) {
7071 goto efault;
7072 }
7073 }
7074 break;
7075 #endif
7076 case TARGET_NR_getdents:
7077 #ifdef __NR_getdents
7078 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
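/* struct linux_dirent begins with two 'long' fields (d_ino, d_off), so its
 * layout differs between a 32-bit target and a 64-bit host; each record is
 * therefore repacked rather than just byte-swapped in place. */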
7079 {
7080 struct target_dirent *target_dirp;
7081 struct linux_dirent *dirp;
7082 abi_long count = arg3;
7083
7084 dirp = malloc(count);
7085 if (!dirp) {
7086 ret = -TARGET_ENOMEM;
7087 goto fail;
7088 }
7089
7090 ret = get_errno(sys_getdents(arg1, dirp, count));
7091 if (!is_error(ret)) {
7092 struct linux_dirent *de;
7093 struct target_dirent *tde;
7094 int len = ret;
7095 int reclen, treclen;
7096 int count1, tnamelen;
7097
7098 count1 = 0;
7099 de = dirp;
7100 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7101 goto efault;
7102 tde = target_dirp;
7103 while (len > 0) {
7104 reclen = de->d_reclen;
7105 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7106 assert(tnamelen >= 0);
7107 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7108 assert(count1 + treclen <= count);
7109 tde->d_reclen = tswap16(treclen);
7110 tde->d_ino = tswapal(de->d_ino);
7111 tde->d_off = tswapal(de->d_off);
7112 memcpy(tde->d_name, de->d_name, tnamelen);
7113 de = (struct linux_dirent *)((char *)de + reclen);
7114 len -= reclen;
7115 tde = (struct target_dirent *)((char *)tde + treclen);
7116 count1 += treclen;
7117 }
7118 ret = count1;
7119 unlock_user(target_dirp, arg2, ret);
7120 }
7121 free(dirp);
7122 }
7123 #else
7124 {
7125 struct linux_dirent *dirp;
7126 abi_long count = arg3;
7127
7128 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7129 goto efault;
7130 ret = get_errno(sys_getdents(arg1, dirp, count));
7131 if (!is_error(ret)) {
7132 struct linux_dirent *de;
7133 int len = ret;
7134 int reclen;
7135 de = dirp;
7136 while (len > 0) {
7137 reclen = de->d_reclen;
7138 if (reclen > len)
7139 break;
7140 de->d_reclen = tswap16(reclen);
7141 tswapls(&de->d_ino);
7142 tswapls(&de->d_off);
7143 de = (struct linux_dirent *)((char *)de + reclen);
7144 len -= reclen;
7145 }
7146 }
7147 unlock_user(dirp, arg2, ret);
7148 }
7149 #endif
7150 #else
7151 /* Implement getdents in terms of getdents64 */
7152 {
7153 struct linux_dirent64 *dirp;
7154 abi_long count = arg3;
7155
7156 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7157 if (!dirp) {
7158 goto efault;
7159 }
7160 ret = get_errno(sys_getdents64(arg1, dirp, count));
7161 if (!is_error(ret)) {
7162 /* Convert the dirent64 structs to target dirent. We do this
7163 * in-place, since we can guarantee that a target_dirent is no
7164 * larger than a dirent64; however this means we have to be
7165 * careful to read everything before writing in the new format.
7166 */
7167 struct linux_dirent64 *de;
7168 struct target_dirent *tde;
7169 int len = ret;
7170 int tlen = 0;
7171
7172 de = dirp;
7173 tde = (struct target_dirent *)dirp;
7174 while (len > 0) {
7175 int namelen, treclen;
7176 int reclen = de->d_reclen;
7177 uint64_t ino = de->d_ino;
7178 int64_t off = de->d_off;
7179 uint8_t type = de->d_type;
7180
7181 namelen = strlen(de->d_name);
7182 treclen = offsetof(struct target_dirent, d_name)
7183 + namelen + 2;
7184 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7185
7186 memmove(tde->d_name, de->d_name, namelen + 1);
7187 tde->d_ino = tswapal(ino);
7188 tde->d_off = tswapal(off);
7189 tde->d_reclen = tswap16(treclen);
7190 /* The target_dirent type is in what was formerly a padding
7191 * byte at the end of the structure:
7192 */
7193 *(((char *)tde) + treclen - 1) = type;
7194
7195 de = (struct linux_dirent64 *)((char *)de + reclen);
7196 tde = (struct target_dirent *)((char *)tde + treclen);
7197 len -= reclen;
7198 tlen += treclen;
7199 }
7200 ret = tlen;
7201 }
7202 unlock_user(dirp, arg2, ret);
7203 }
7204 #endif
7205 break;
7206 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7207 case TARGET_NR_getdents64:
7208 {
7209 struct linux_dirent64 *dirp;
7210 abi_long count = arg3;
7211 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7212 goto efault;
7213 ret = get_errno(sys_getdents64(arg1, dirp, count));
7214 if (!is_error(ret)) {
7215 struct linux_dirent64 *de;
7216 int len = ret;
7217 int reclen;
7218 de = dirp;
7219 while (len > 0) {
7220 reclen = de->d_reclen;
7221 if (reclen > len)
7222 break;
7223 de->d_reclen = tswap16(reclen);
7224 tswap64s((uint64_t *)&de->d_ino);
7225 tswap64s((uint64_t *)&de->d_off);
7226 de = (struct linux_dirent64 *)((char *)de + reclen);
7227 len -= reclen;
7228 }
7229 }
7230 unlock_user(dirp, arg2, ret);
7231 }
7232 break;
7233 #endif /* TARGET_NR_getdents64 */
7234 #if defined(TARGET_NR__newselect)
7235 case TARGET_NR__newselect:
7236 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7237 break;
7238 #endif
7239 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7240 # ifdef TARGET_NR_poll
7241 case TARGET_NR_poll:
7242 # endif
7243 # ifdef TARGET_NR_ppoll
7244 case TARGET_NR_ppoll:
7245 # endif
7246 {
7247 struct target_pollfd *target_pfd;
7248 unsigned int nfds = arg2;
7249 int timeout = arg3;
7250 struct pollfd *pfd;
7251 unsigned int i;
7252
7253 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7254 if (!target_pfd)
7255 goto efault;
7256
7257 pfd = alloca(sizeof(struct pollfd) * nfds);
7258 for(i = 0; i < nfds; i++) {
7259 pfd[i].fd = tswap32(target_pfd[i].fd);
7260 pfd[i].events = tswap16(target_pfd[i].events);
7261 }
7262
7263 # ifdef TARGET_NR_ppoll
7264 if (num == TARGET_NR_ppoll) {
7265 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7266 target_sigset_t *target_set;
7267 sigset_t _set, *set = &_set;
7268
7269 if (arg3) {
7270 if (target_to_host_timespec(timeout_ts, arg3)) {
7271 unlock_user(target_pfd, arg1, 0);
7272 goto efault;
7273 }
7274 } else {
7275 timeout_ts = NULL;
7276 }
7277
7278 if (arg4) {
7279 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7280 if (!target_set) {
7281 unlock_user(target_pfd, arg1, 0);
7282 goto efault;
7283 }
7284 target_to_host_sigset(set, target_set);
7285 } else {
7286 set = NULL;
7287 }
7288
7289 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7290
7291 if (!is_error(ret) && arg3) {
7292 host_to_target_timespec(arg3, timeout_ts);
7293 }
7294 if (arg4) {
7295 unlock_user(target_set, arg4, 0);
7296 }
7297 } else
7298 # endif
7299 ret = get_errno(poll(pfd, nfds, timeout));
7300
7301 if (!is_error(ret)) {
7302 for(i = 0; i < nfds; i++) {
7303 target_pfd[i].revents = tswap16(pfd[i].revents);
7304 }
7305 }
7306 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7307 }
7308 break;
7309 #endif
7310 case TARGET_NR_flock:
7311 /* NOTE: the flock constant seems to be the same for every
7312 Linux platform */
7313 ret = get_errno(flock(arg1, arg2));
7314 break;
7315 case TARGET_NR_readv:
7316 {
7317 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7318 if (vec != NULL) {
7319 ret = get_errno(readv(arg1, vec, arg3));
7320 unlock_iovec(vec, arg2, arg3, 1);
7321 } else {
7322 ret = -host_to_target_errno(errno);
7323 }
7324 }
7325 break;
7326 case TARGET_NR_writev:
7327 {
7328 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7329 if (vec != NULL) {
7330 ret = get_errno(writev(arg1, vec, arg3));
7331 unlock_iovec(vec, arg2, arg3, 0);
7332 } else {
7333 ret = -host_to_target_errno(errno);
7334 }
7335 }
7336 break;
7337 case TARGET_NR_getsid:
7338 ret = get_errno(getsid(arg1));
7339 break;
7340 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7341 case TARGET_NR_fdatasync:
7342 ret = get_errno(fdatasync(arg1));
7343 break;
7344 #endif
7345 case TARGET_NR__sysctl:
7346 /* We don't implement this, but ENOTDIR is always a safe
7347 return value. */
7348 ret = -TARGET_ENOTDIR;
7349 break;
7350 case TARGET_NR_sched_getaffinity:
7351 {
7352 unsigned int mask_size;
7353 unsigned long *mask;
7354
7355 /*
7356 * sched_getaffinity needs multiples of ulong, so we need to take
7357 * care of mismatches between target ulong and host ulong sizes.
7358 */
7359 if (arg2 & (sizeof(abi_ulong) - 1)) {
7360 ret = -TARGET_EINVAL;
7361 break;
7362 }
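/* Round the requested length up to a whole number of host longs. */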
7363 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7364
7365 mask = alloca(mask_size);
7366 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7367
7368 if (!is_error(ret)) {
7369 if (copy_to_user(arg3, mask, ret)) {
7370 goto efault;
7371 }
7372 }
7373 }
7374 break;
7375 case TARGET_NR_sched_setaffinity:
7376 {
7377 unsigned int mask_size;
7378 unsigned long *mask;
7379
7380 /*
7381 * sched_setaffinity needs multiples of ulong, so we need to take
7382 * care of mismatches between target ulong and host ulong sizes.
7383 */
7384 if (arg2 & (sizeof(abi_ulong) - 1)) {
7385 ret = -TARGET_EINVAL;
7386 break;
7387 }
7388 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7389
7390 mask = alloca(mask_size);
7391 if (!(p = lock_user(VERIFY_READ, arg3, arg2, 1))) {
7392 goto efault;
7393 }
7394 memcpy(mask, p, arg2);
7395 unlock_user(p, arg3, 0);
7396
7397 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7398 }
7399 break;
7400 case TARGET_NR_sched_setparam:
7401 {
7402 struct sched_param *target_schp;
7403 struct sched_param schp;
7404
7405 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7406 goto efault;
7407 schp.sched_priority = tswap32(target_schp->sched_priority);
7408 unlock_user_struct(target_schp, arg2, 0);
7409 ret = get_errno(sched_setparam(arg1, &schp));
7410 }
7411 break;
7412 case TARGET_NR_sched_getparam:
7413 {
7414 struct sched_param *target_schp;
7415 struct sched_param schp;
7416 ret = get_errno(sched_getparam(arg1, &schp));
7417 if (!is_error(ret)) {
7418 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7419 goto efault;
7420 target_schp->sched_priority = tswap32(schp.sched_priority);
7421 unlock_user_struct(target_schp, arg2, 1);
7422 }
7423 }
7424 break;
7425 case TARGET_NR_sched_setscheduler:
7426 {
7427 struct sched_param *target_schp;
7428 struct sched_param schp;
7429 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7430 goto efault;
7431 schp.sched_priority = tswap32(target_schp->sched_priority);
7432 unlock_user_struct(target_schp, arg3, 0);
7433 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7434 }
7435 break;
7436 case TARGET_NR_sched_getscheduler:
7437 ret = get_errno(sched_getscheduler(arg1));
7438 break;
7439 case TARGET_NR_sched_yield:
7440 ret = get_errno(sched_yield());
7441 break;
7442 case TARGET_NR_sched_get_priority_max:
7443 ret = get_errno(sched_get_priority_max(arg1));
7444 break;
7445 case TARGET_NR_sched_get_priority_min:
7446 ret = get_errno(sched_get_priority_min(arg1));
7447 break;
7448 case TARGET_NR_sched_rr_get_interval:
7449 {
7450 struct timespec ts;
7451 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7452 if (!is_error(ret)) {
7453 host_to_target_timespec(arg2, &ts);
7454 }
7455 }
7456 break;
7457 case TARGET_NR_nanosleep:
7458 {
7459 struct timespec req, rem;
7460 target_to_host_timespec(&req, arg1);
7461 ret = get_errno(nanosleep(&req, &rem));
7462 if (is_error(ret) && arg2) {
7463 host_to_target_timespec(arg2, &rem);
7464 }
7465 }
7466 break;
7467 #ifdef TARGET_NR_query_module
7468 case TARGET_NR_query_module:
7469 goto unimplemented;
7470 #endif
7471 #ifdef TARGET_NR_nfsservctl
7472 case TARGET_NR_nfsservctl:
7473 goto unimplemented;
7474 #endif
7475 case TARGET_NR_prctl:
7476 switch (arg1) {
7477 case PR_GET_PDEATHSIG:
7478 {
7479 int deathsig;
7480 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7481 if (!is_error(ret) && arg2
7482 && put_user_ual(deathsig, arg2)) {
7483 goto efault;
7484 }
7485 break;
7486 }
7487 #ifdef PR_GET_NAME
7488 case PR_GET_NAME:
7489 {
7490 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7491 if (!name) {
7492 goto efault;
7493 }
7494 ret = get_errno(prctl(arg1, (unsigned long)name,
7495 arg3, arg4, arg5));
7496 unlock_user(name, arg2, 16);
7497 break;
7498 }
7499 case PR_SET_NAME:
7500 {
7501 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7502 if (!name) {
7503 goto efault;
7504 }
7505 ret = get_errno(prctl(arg1, (unsigned long)name,
7506 arg3, arg4, arg5));
7507 unlock_user(name, arg2, 0);
7508 break;
7509 }
7510 #endif
7511 default:
7512 /* Most prctl options have no pointer arguments */
7513 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7514 break;
7515 }
7516 break;
7517 #ifdef TARGET_NR_arch_prctl
7518 case TARGET_NR_arch_prctl:
7519 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7520 ret = do_arch_prctl(cpu_env, arg1, arg2);
7521 break;
7522 #else
7523 goto unimplemented;
7524 #endif
7525 #endif
7526 #ifdef TARGET_NR_pread64
7527 case TARGET_NR_pread64:
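/* On ABIs where 64-bit syscall arguments must sit in an aligned register
 * pair, a padding argument pushes the offset halves up into arg5/arg6. */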
7528 if (regpairs_aligned(cpu_env)) {
7529 arg4 = arg5;
7530 arg5 = arg6;
7531 }
7532 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7533 goto efault;
7534 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7535 unlock_user(p, arg2, ret);
7536 break;
7537 case TARGET_NR_pwrite64:
7538 if (regpairs_aligned(cpu_env)) {
7539 arg4 = arg5;
7540 arg5 = arg6;
7541 }
7542 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7543 goto efault;
7544 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7545 unlock_user(p, arg2, 0);
7546 break;
7547 #endif
7548 case TARGET_NR_getcwd:
7549 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7550 goto efault;
7551 ret = get_errno(sys_getcwd1(p, arg2));
7552 unlock_user(p, arg1, ret);
7553 break;
7554 case TARGET_NR_capget:
7555 goto unimplemented;
7556 case TARGET_NR_capset:
7557 goto unimplemented;
7558 case TARGET_NR_sigaltstack:
7559 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7560 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7561 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7562 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7563 break;
7564 #else
7565 goto unimplemented;
7566 #endif
7567
7568 #ifdef CONFIG_SENDFILE
7569 case TARGET_NR_sendfile:
7570 {
7571 off_t *offp = NULL;
7572 off_t off;
7573 if (arg3) {
7574 ret = get_user_sal(off, arg3);
7575 if (is_error(ret)) {
7576 break;
7577 }
7578 offp = &off;
7579 }
7580 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7581 if (!is_error(ret) && arg3) {
7582 abi_long ret2 = put_user_sal(off, arg3);
7583 if (is_error(ret2)) {
7584 ret = ret2;
7585 }
7586 }
7587 break;
7588 }
7589 #ifdef TARGET_NR_sendfile64
7590 case TARGET_NR_sendfile64:
7591 {
7592 off_t *offp = NULL;
7593 off_t off;
7594 if (arg3) {
7595 ret = get_user_s64(off, arg3);
7596 if (is_error(ret)) {
7597 break;
7598 }
7599 offp = &off;
7600 }
7601 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7602 if (!is_error(ret) && arg3) {
7603 abi_long ret2 = put_user_s64(off, arg3);
7604 if (is_error(ret2)) {
7605 ret = ret2;
7606 }
7607 }
7608 break;
7609 }
7610 #endif
7611 #else
7612 case TARGET_NR_sendfile:
7613 #ifdef TARGET_NR_sendfile64
7614 case TARGET_NR_sendfile64:
7615 #endif
7616 goto unimplemented;
7617 #endif
7618
7619 #ifdef TARGET_NR_getpmsg
7620 case TARGET_NR_getpmsg:
7621 goto unimplemented;
7622 #endif
7623 #ifdef TARGET_NR_putpmsg
7624 case TARGET_NR_putpmsg:
7625 goto unimplemented;
7626 #endif
7627 #ifdef TARGET_NR_vfork
7628 case TARGET_NR_vfork:
7629 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7630 0, 0, 0, 0));
7631 break;
7632 #endif
7633 #ifdef TARGET_NR_ugetrlimit
7634 case TARGET_NR_ugetrlimit:
7635 {
7636 struct rlimit rlim;
7637 int resource = target_to_host_resource(arg1);
7638 ret = get_errno(getrlimit(resource, &rlim));
7639 if (!is_error(ret)) {
7640 struct target_rlimit *target_rlim;
7641 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7642 goto efault;
7643 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7644 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7645 unlock_user_struct(target_rlim, arg2, 1);
7646 }
7647 break;
7648 }
7649 #endif
7650 #ifdef TARGET_NR_truncate64
7651 case TARGET_NR_truncate64:
7652 if (!(p = lock_user_string(arg1)))
7653 goto efault;
7654 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7655 unlock_user(p, arg1, 0);
7656 break;
7657 #endif
7658 #ifdef TARGET_NR_ftruncate64
7659 case TARGET_NR_ftruncate64:
7660 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7661 break;
7662 #endif
7663 #ifdef TARGET_NR_stat64
7664 case TARGET_NR_stat64:
7665 if (!(p = lock_user_string(arg1)))
7666 goto efault;
7667 ret = get_errno(stat(path(p), &st));
7668 unlock_user(p, arg1, 0);
7669 if (!is_error(ret))
7670 ret = host_to_target_stat64(cpu_env, arg2, &st);
7671 break;
7672 #endif
7673 #ifdef TARGET_NR_lstat64
7674 case TARGET_NR_lstat64:
7675 if (!(p = lock_user_string(arg1)))
7676 goto efault;
7677 ret = get_errno(lstat(path(p), &st));
7678 unlock_user(p, arg1, 0);
7679 if (!is_error(ret))
7680 ret = host_to_target_stat64(cpu_env, arg2, &st);
7681 break;
7682 #endif
7683 #ifdef TARGET_NR_fstat64
7684 case TARGET_NR_fstat64:
7685 ret = get_errno(fstat(arg1, &st));
7686 if (!is_error(ret))
7687 ret = host_to_target_stat64(cpu_env, arg2, &st);
7688 break;
7689 #endif
7690 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7691 #ifdef TARGET_NR_fstatat64
7692 case TARGET_NR_fstatat64:
7693 #endif
7694 #ifdef TARGET_NR_newfstatat
7695 case TARGET_NR_newfstatat:
7696 #endif
7697 if (!(p = lock_user_string(arg2)))
7698 goto efault;
7699 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
unlock_user(p, arg2, 0);
7700 if (!is_error(ret))
7701 ret = host_to_target_stat64(cpu_env, arg3, &st);
7702 break;
7703 #endif
7704 case TARGET_NR_lchown:
7705 if (!(p = lock_user_string(arg1)))
7706 goto efault;
7707 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7708 unlock_user(p, arg1, 0);
7709 break;
7710 #ifdef TARGET_NR_getuid
7711 case TARGET_NR_getuid:
7712 ret = get_errno(high2lowuid(getuid()));
7713 break;
7714 #endif
7715 #ifdef TARGET_NR_getgid
7716 case TARGET_NR_getgid:
7717 ret = get_errno(high2lowgid(getgid()));
7718 break;
7719 #endif
7720 #ifdef TARGET_NR_geteuid
7721 case TARGET_NR_geteuid:
7722 ret = get_errno(high2lowuid(geteuid()));
7723 break;
7724 #endif
7725 #ifdef TARGET_NR_getegid
7726 case TARGET_NR_getegid:
7727 ret = get_errno(high2lowgid(getegid()));
7728 break;
7729 #endif
7730 case TARGET_NR_setreuid:
7731 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7732 break;
7733 case TARGET_NR_setregid:
7734 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7735 break;
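/* getgroups/setgroups and the other non-"32" ID syscalls below operate on the
 * target's narrow legacy IDs (16-bit on most targets): entries are byte-swapped
 * with tswapid() and converted with high2lowgid()/low2highgid(), while the
 * *32 variants further down pass full 32-bit IDs straight through. */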
7736 case TARGET_NR_getgroups:
7737 {
7738 int gidsetsize = arg1;
7739 target_id *target_grouplist;
7740 gid_t *grouplist;
7741 int i;
7742
7743 grouplist = alloca(gidsetsize * sizeof(gid_t));
7744 ret = get_errno(getgroups(gidsetsize, grouplist));
7745 if (gidsetsize == 0)
7746 break;
7747 if (!is_error(ret)) {
7748 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7749 if (!target_grouplist)
7750 goto efault;
7751 for(i = 0;i < ret; i++)
7752 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7753 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7754 }
7755 }
7756 break;
7757 case TARGET_NR_setgroups:
7758 {
7759 int gidsetsize = arg1;
7760 target_id *target_grouplist;
7761 gid_t *grouplist = NULL;
7762 int i;
7763 if (gidsetsize) {
7764 grouplist = alloca(gidsetsize * sizeof(gid_t));
7765 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7766 if (!target_grouplist) {
7767 ret = -TARGET_EFAULT;
7768 goto fail;
7769 }
7770 for (i = 0; i < gidsetsize; i++) {
7771 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7772 }
7773 unlock_user(target_grouplist, arg2, 0);
7774 }
7775 ret = get_errno(setgroups(gidsetsize, grouplist));
7776 }
7777 break;
7778 case TARGET_NR_fchown:
7779 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7780 break;
7781 #if defined(TARGET_NR_fchownat)
7782 case TARGET_NR_fchownat:
7783 if (!(p = lock_user_string(arg2)))
7784 goto efault;
7785 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7786 low2highgid(arg4), arg5));
7787 unlock_user(p, arg2, 0);
7788 break;
7789 #endif
7790 #ifdef TARGET_NR_setresuid
7791 case TARGET_NR_setresuid:
7792 ret = get_errno(setresuid(low2highuid(arg1),
7793 low2highuid(arg2),
7794 low2highuid(arg3)));
7795 break;
7796 #endif
7797 #ifdef TARGET_NR_getresuid
7798 case TARGET_NR_getresuid:
7799 {
7800 uid_t ruid, euid, suid;
7801 ret = get_errno(getresuid(&ruid, &euid, &suid));
7802 if (!is_error(ret)) {
7803 if (put_user_u16(high2lowuid(ruid), arg1)
7804 || put_user_u16(high2lowuid(euid), arg2)
7805 || put_user_u16(high2lowuid(suid), arg3))
7806 goto efault;
7807 }
7808 }
7809 break;
7810 #endif
7811 #ifdef TARGET_NR_setresgid
7812 case TARGET_NR_setresgid:
7813 ret = get_errno(setresgid(low2highgid(arg1),
7814 low2highgid(arg2),
7815 low2highgid(arg3)));
7816 break;
7817 #endif
7818 #ifdef TARGET_NR_getresgid
7819 case TARGET_NR_getresgid:
7820 {
7821 gid_t rgid, egid, sgid;
7822 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7823 if (!is_error(ret)) {
7824 if (put_user_u16(high2lowgid(rgid), arg1)
7825 || put_user_u16(high2lowgid(egid), arg2)
7826 || put_user_u16(high2lowgid(sgid), arg3))
7827 goto efault;
7828 }
7829 }
7830 break;
7831 #endif
7832 case TARGET_NR_chown:
7833 if (!(p = lock_user_string(arg1)))
7834 goto efault;
7835 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7836 unlock_user(p, arg1, 0);
7837 break;
7838 case TARGET_NR_setuid:
7839 ret = get_errno(setuid(low2highuid(arg1)));
7840 break;
7841 case TARGET_NR_setgid:
7842 ret = get_errno(setgid(low2highgid(arg1)));
7843 break;
7844 case TARGET_NR_setfsuid:
7845 ret = get_errno(setfsuid(arg1));
7846 break;
7847 case TARGET_NR_setfsgid:
7848 ret = get_errno(setfsgid(arg1));
7849 break;
7850
7851 #ifdef TARGET_NR_lchown32
7852 case TARGET_NR_lchown32:
7853 if (!(p = lock_user_string(arg1)))
7854 goto efault;
7855 ret = get_errno(lchown(p, arg2, arg3));
7856 unlock_user(p, arg1, 0);
7857 break;
7858 #endif
7859 #ifdef TARGET_NR_getuid32
7860 case TARGET_NR_getuid32:
7861 ret = get_errno(getuid());
7862 break;
7863 #endif
7864
7865 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7866 /* Alpha specific */
7867 case TARGET_NR_getxuid:
7868 {
7869 uid_t euid;
7870 euid=geteuid();
7871 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7872 }
7873 ret = get_errno(getuid());
7874 break;
7875 #endif
7876 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7877 /* Alpha specific */
7878 case TARGET_NR_getxgid:
7879 {
7880 gid_t egid;
7881 egid=getegid();
7882 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7883 }
7884 ret = get_errno(getgid());
7885 break;
7886 #endif
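/* OSF/1 getsysinfo/setsysinfo: only the IEEE floating-point control requests
 * are handled.  The guest-visible software control word (SWCR) is derived
 * from, or folded back into, the emulated Alpha FPCR using the same bit
 * layout as the kernel's ieee_fpcr_to_swcr()/ieee_swcr_to_fpcr() helpers. */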
7887 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7888 /* Alpha specific */
7889 case TARGET_NR_osf_getsysinfo:
7890 ret = -TARGET_EOPNOTSUPP;
7891 switch (arg1) {
7892 case TARGET_GSI_IEEE_FP_CONTROL:
7893 {
7894 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7895
7896 /* Copied from linux ieee_fpcr_to_swcr. */
7897 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7898 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7899 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7900 | SWCR_TRAP_ENABLE_DZE
7901 | SWCR_TRAP_ENABLE_OVF);
7902 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7903 | SWCR_TRAP_ENABLE_INE);
7904 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7905 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7906
7907 if (put_user_u64 (swcr, arg2))
7908 goto efault;
7909 ret = 0;
7910 }
7911 break;
7912
7913 /* case GSI_IEEE_STATE_AT_SIGNAL:
7914 -- Not implemented in linux kernel.
7915 case GSI_UACPROC:
7916 -- Retrieves current unaligned access state; not much used.
7917 case GSI_PROC_TYPE:
7918 -- Retrieves implver information; surely not used.
7919 case GSI_GET_HWRPB:
7920 -- Grabs a copy of the HWRPB; surely not used.
7921 */
7922 }
7923 break;
7924 #endif
7925 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7926 /* Alpha specific */
7927 case TARGET_NR_osf_setsysinfo:
7928 ret = -TARGET_EOPNOTSUPP;
7929 switch (arg1) {
7930 case TARGET_SSI_IEEE_FP_CONTROL:
7931 {
7932 uint64_t swcr, fpcr, orig_fpcr;
7933
7934 if (get_user_u64 (swcr, arg2)) {
7935 goto efault;
7936 }
7937 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7938 fpcr = orig_fpcr & FPCR_DYN_MASK;
7939
7940 /* Copied from linux ieee_swcr_to_fpcr. */
7941 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7942 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7943 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7944 | SWCR_TRAP_ENABLE_DZE
7945 | SWCR_TRAP_ENABLE_OVF)) << 48;
7946 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7947 | SWCR_TRAP_ENABLE_INE)) << 57;
7948 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7949 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7950
7951 cpu_alpha_store_fpcr(cpu_env, fpcr);
7952 ret = 0;
7953 }
7954 break;
7955
7956 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7957 {
7958 uint64_t exc, fpcr, orig_fpcr;
7959 int si_code;
7960
7961 if (get_user_u64(exc, arg2)) {
7962 goto efault;
7963 }
7964
7965 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7966
7967 /* We only add to the exception status here. */
7968 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7969
7970 cpu_alpha_store_fpcr(cpu_env, fpcr);
7971 ret = 0;
7972
7973 /* Old exceptions are not signaled. */
7974 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7975
7976 /* If any exceptions were set by this call
7977 and are unmasked, send a signal. */
7978 si_code = 0;
7979 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7980 si_code = TARGET_FPE_FLTRES;
7981 }
7982 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7983 si_code = TARGET_FPE_FLTUND;
7984 }
7985 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7986 si_code = TARGET_FPE_FLTOVF;
7987 }
7988 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7989 si_code = TARGET_FPE_FLTDIV;
7990 }
7991 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7992 si_code = TARGET_FPE_FLTINV;
7993 }
7994 if (si_code != 0) {
7995 target_siginfo_t info;
7996 info.si_signo = SIGFPE;
7997 info.si_errno = 0;
7998 info.si_code = si_code;
7999 info._sifields._sigfault._addr
8000 = ((CPUArchState *)cpu_env)->pc;
8001 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8002 }
8003 }
8004 break;
8005
8006 /* case SSI_NVPAIRS:
8007 -- Used with SSIN_UACPROC to enable unaligned accesses.
8008 case SSI_IEEE_STATE_AT_SIGNAL:
8009 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8010 -- Not implemented in linux kernel
8011 */
8012 }
8013 break;
8014 #endif
8015 #ifdef TARGET_NR_osf_sigprocmask
8016 /* Alpha specific. */
8017 case TARGET_NR_osf_sigprocmask:
8018 {
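/* Old-style OSF/1 sigprocmask: arg2 is the mask value itself rather than a
 * pointer, and the previous mask is handed back as the syscall's return
 * value instead of through memory. */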
8019 abi_ulong mask;
8020 int how;
8021 sigset_t set, oldset;
8022
8023 switch(arg1) {
8024 case TARGET_SIG_BLOCK:
8025 how = SIG_BLOCK;
8026 break;
8027 case TARGET_SIG_UNBLOCK:
8028 how = SIG_UNBLOCK;
8029 break;
8030 case TARGET_SIG_SETMASK:
8031 how = SIG_SETMASK;
8032 break;
8033 default:
8034 ret = -TARGET_EINVAL;
8035 goto fail;
8036 }
8037 mask = arg2;
8038 target_to_host_old_sigset(&set, &mask);
8039 sigprocmask(how, &set, &oldset);
8040 host_to_target_old_sigset(&mask, &oldset);
8041 ret = mask;
8042 }
8043 break;
8044 #endif
8045
8046 #ifdef TARGET_NR_getgid32
8047 case TARGET_NR_getgid32:
8048 ret = get_errno(getgid());
8049 break;
8050 #endif
8051 #ifdef TARGET_NR_geteuid32
8052 case TARGET_NR_geteuid32:
8053 ret = get_errno(geteuid());
8054 break;
8055 #endif
8056 #ifdef TARGET_NR_getegid32
8057 case TARGET_NR_getegid32:
8058 ret = get_errno(getegid());
8059 break;
8060 #endif
8061 #ifdef TARGET_NR_setreuid32
8062 case TARGET_NR_setreuid32:
8063 ret = get_errno(setreuid(arg1, arg2));
8064 break;
8065 #endif
8066 #ifdef TARGET_NR_setregid32
8067 case TARGET_NR_setregid32:
8068 ret = get_errno(setregid(arg1, arg2));
8069 break;
8070 #endif
8071 #ifdef TARGET_NR_getgroups32
8072 case TARGET_NR_getgroups32:
8073 {
8074 int gidsetsize = arg1;
8075 uint32_t *target_grouplist;
8076 gid_t *grouplist;
8077 int i;
8078
8079 grouplist = alloca(gidsetsize * sizeof(gid_t));
8080 ret = get_errno(getgroups(gidsetsize, grouplist));
8081 if (gidsetsize == 0)
8082 break;
8083 if (!is_error(ret)) {
8084 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8085 if (!target_grouplist) {
8086 ret = -TARGET_EFAULT;
8087 goto fail;
8088 }
8089 for(i = 0;i < ret; i++)
8090 target_grouplist[i] = tswap32(grouplist[i]);
8091 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8092 }
8093 }
8094 break;
8095 #endif
8096 #ifdef TARGET_NR_setgroups32
8097 case TARGET_NR_setgroups32:
8098 {
8099 int gidsetsize = arg1;
8100 uint32_t *target_grouplist;
8101 gid_t *grouplist;
8102 int i;
8103
8104 grouplist = alloca(gidsetsize * sizeof(gid_t));
8105 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8106 if (!target_grouplist) {
8107 ret = -TARGET_EFAULT;
8108 goto fail;
8109 }
8110 for(i = 0;i < gidsetsize; i++)
8111 grouplist[i] = tswap32(target_grouplist[i]);
8112 unlock_user(target_grouplist, arg2, 0);
8113 ret = get_errno(setgroups(gidsetsize, grouplist));
8114 }
8115 break;
8116 #endif
8117 #ifdef TARGET_NR_fchown32
8118 case TARGET_NR_fchown32:
8119 ret = get_errno(fchown(arg1, arg2, arg3));
8120 break;
8121 #endif
8122 #ifdef TARGET_NR_setresuid32
8123 case TARGET_NR_setresuid32:
8124 ret = get_errno(setresuid(arg1, arg2, arg3));
8125 break;
8126 #endif
8127 #ifdef TARGET_NR_getresuid32
8128 case TARGET_NR_getresuid32:
8129 {
8130 uid_t ruid, euid, suid;
8131 ret = get_errno(getresuid(&ruid, &euid, &suid));
8132 if (!is_error(ret)) {
8133 if (put_user_u32(ruid, arg1)
8134 || put_user_u32(euid, arg2)
8135 || put_user_u32(suid, arg3))
8136 goto efault;
8137 }
8138 }
8139 break;
8140 #endif
8141 #ifdef TARGET_NR_setresgid32
8142 case TARGET_NR_setresgid32:
8143 ret = get_errno(setresgid(arg1, arg2, arg3));
8144 break;
8145 #endif
8146 #ifdef TARGET_NR_getresgid32
8147 case TARGET_NR_getresgid32:
8148 {
8149 gid_t rgid, egid, sgid;
8150 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8151 if (!is_error(ret)) {
8152 if (put_user_u32(rgid, arg1)
8153 || put_user_u32(egid, arg2)
8154 || put_user_u32(sgid, arg3))
8155 goto efault;
8156 }
8157 }
8158 break;
8159 #endif
8160 #ifdef TARGET_NR_chown32
8161 case TARGET_NR_chown32:
8162 if (!(p = lock_user_string(arg1)))
8163 goto efault;
8164 ret = get_errno(chown(p, arg2, arg3));
8165 unlock_user(p, arg1, 0);
8166 break;
8167 #endif
8168 #ifdef TARGET_NR_setuid32
8169 case TARGET_NR_setuid32:
8170 ret = get_errno(setuid(arg1));
8171 break;
8172 #endif
8173 #ifdef TARGET_NR_setgid32
8174 case TARGET_NR_setgid32:
8175 ret = get_errno(setgid(arg1));
8176 break;
8177 #endif
8178 #ifdef TARGET_NR_setfsuid32
8179 case TARGET_NR_setfsuid32:
8180 ret = get_errno(setfsuid(arg1));
8181 break;
8182 #endif
8183 #ifdef TARGET_NR_setfsgid32
8184 case TARGET_NR_setfsgid32:
8185 ret = get_errno(setfsgid(arg1));
8186 break;
8187 #endif
8188
8189 case TARGET_NR_pivot_root:
8190 goto unimplemented;
8191 #ifdef TARGET_NR_mincore
8192 case TARGET_NR_mincore:
8193 {
8194 void *a;
8195 ret = -TARGET_EFAULT;
8196 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8197 goto efault;
8198 if (!(p = lock_user(VERIFY_WRITE, arg3, DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE), 0)))
8199 goto mincore_fail;
8200 ret = get_errno(mincore(a, arg2, p));
8201 unlock_user(p, arg3, DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE));
8202 mincore_fail:
8203 unlock_user(a, arg1, 0);
8204 }
8205 break;
8206 #endif
8207 #ifdef TARGET_NR_arm_fadvise64_64
8208 case TARGET_NR_arm_fadvise64_64:
8209 {
8210 /*
8211 * arm_fadvise64_64 looks like fadvise64_64 but
8212 * with different argument order
8213 */
8214 abi_long temp;
8215 temp = arg3;
8216 arg3 = arg4;
8217 arg4 = temp;
8218 }
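/* fall through to the generic fadvise handling below */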
8219 #endif
8220 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8221 #ifdef TARGET_NR_fadvise64_64
8222 case TARGET_NR_fadvise64_64:
8223 #endif
8224 #ifdef TARGET_NR_fadvise64
8225 case TARGET_NR_fadvise64:
8226 #endif
8227 #ifdef TARGET_S390X
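/* 64-bit s390 defines POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather than the
 * generic 4/5, so remap the guest's advice value and make the generic
 * numbers fail as invalid. */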
8228 switch (arg4) {
8229 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8230 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8231 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8232 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8233 default: break;
8234 }
8235 #endif
8236 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8237 break;
8238 #endif
8239 #ifdef TARGET_NR_madvise
8240 case TARGET_NR_madvise:
8241 /* A straight passthrough may not be safe because qemu sometimes
8242 turns private file-backed mappings into anonymous mappings.
8243 This will break MADV_DONTNEED.
8244 This is a hint, so ignoring and returning success is ok. */
8245 ret = get_errno(0);
8246 break;
8247 #endif
8248 #if TARGET_ABI_BITS == 32
8249 case TARGET_NR_fcntl64:
8250 {
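/* fcntl64 only exists on 32-bit ABIs: the 64-bit flock structure is
 * converted field by field between the target layout (with a separate
 * variant for ARM EABI alignment) and the host's struct flock64, in both
 * directions for F_GETLK64. */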
8251 int cmd;
8252 struct flock64 fl;
8253 struct target_flock64 *target_fl;
8254 #ifdef TARGET_ARM
8255 struct target_eabi_flock64 *target_efl;
8256 #endif
8257
8258 cmd = target_to_host_fcntl_cmd(arg2);
8259 if (cmd == -TARGET_EINVAL) {
8260 ret = cmd;
8261 break;
8262 }
8263
8264 switch(arg2) {
8265 case TARGET_F_GETLK64:
8266 #ifdef TARGET_ARM
8267 if (((CPUARMState *)cpu_env)->eabi) {
8268 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8269 goto efault;
8270 fl.l_type = tswap16(target_efl->l_type);
8271 fl.l_whence = tswap16(target_efl->l_whence);
8272 fl.l_start = tswap64(target_efl->l_start);
8273 fl.l_len = tswap64(target_efl->l_len);
8274 fl.l_pid = tswap32(target_efl->l_pid);
8275 unlock_user_struct(target_efl, arg3, 0);
8276 } else
8277 #endif
8278 {
8279 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8280 goto efault;
8281 fl.l_type = tswap16(target_fl->l_type);
8282 fl.l_whence = tswap16(target_fl->l_whence);
8283 fl.l_start = tswap64(target_fl->l_start);
8284 fl.l_len = tswap64(target_fl->l_len);
8285 fl.l_pid = tswap32(target_fl->l_pid);
8286 unlock_user_struct(target_fl, arg3, 0);
8287 }
8288 ret = get_errno(fcntl(arg1, cmd, &fl));
8289 if (ret == 0) {
8290 #ifdef TARGET_ARM
8291 if (((CPUARMState *)cpu_env)->eabi) {
8292 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8293 goto efault;
8294 target_efl->l_type = tswap16(fl.l_type);
8295 target_efl->l_whence = tswap16(fl.l_whence);
8296 target_efl->l_start = tswap64(fl.l_start);
8297 target_efl->l_len = tswap64(fl.l_len);
8298 target_efl->l_pid = tswap32(fl.l_pid);
8299 unlock_user_struct(target_efl, arg3, 1);
8300 } else
8301 #endif
8302 {
8303 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8304 goto efault;
8305 target_fl->l_type = tswap16(fl.l_type);
8306 target_fl->l_whence = tswap16(fl.l_whence);
8307 target_fl->l_start = tswap64(fl.l_start);
8308 target_fl->l_len = tswap64(fl.l_len);
8309 target_fl->l_pid = tswap32(fl.l_pid);
8310 unlock_user_struct(target_fl, arg3, 1);
8311 }
8312 }
8313 break;
8314
8315 case TARGET_F_SETLK64:
8316 case TARGET_F_SETLKW64:
8317 #ifdef TARGET_ARM
8318 if (((CPUARMState *)cpu_env)->eabi) {
8319 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8320 goto efault;
8321 fl.l_type = tswap16(target_efl->l_type);
8322 fl.l_whence = tswap16(target_efl->l_whence);
8323 fl.l_start = tswap64(target_efl->l_start);
8324 fl.l_len = tswap64(target_efl->l_len);
8325 fl.l_pid = tswap32(target_efl->l_pid);
8326 unlock_user_struct(target_efl, arg3, 0);
8327 } else
8328 #endif
8329 {
8330 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8331 goto efault;
8332 fl.l_type = tswap16(target_fl->l_type);
8333 fl.l_whence = tswap16(target_fl->l_whence);
8334 fl.l_start = tswap64(target_fl->l_start);
8335 fl.l_len = tswap64(target_fl->l_len);
8336 fl.l_pid = tswap32(target_fl->l_pid);
8337 unlock_user_struct(target_fl, arg3, 0);
8338 }
8339 ret = get_errno(fcntl(arg1, cmd, &fl));
8340 break;
8341 default:
8342 ret = do_fcntl(arg1, arg2, arg3);
8343 break;
8344 }
8345 break;
8346 }
8347 #endif
8348 #ifdef TARGET_NR_cacheflush
8349 case TARGET_NR_cacheflush:
8350 /* self-modifying code is handled automatically, so nothing needed */
8351 ret = 0;
8352 break;
8353 #endif
8354 #ifdef TARGET_NR_security
8355 case TARGET_NR_security:
8356 goto unimplemented;
8357 #endif
8358 #ifdef TARGET_NR_getpagesize
8359 case TARGET_NR_getpagesize:
8360 ret = TARGET_PAGE_SIZE;
8361 break;
8362 #endif
8363 case TARGET_NR_gettid:
8364 ret = get_errno(gettid());
8365 break;
8366 #ifdef TARGET_NR_readahead
8367 case TARGET_NR_readahead:
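/* On 32-bit ABIs the 64-bit offset is reassembled from a register pair;
 * regpairs_aligned() reports whether the ABI inserted a padding register
 * first, in which case the arguments shift up by one. */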
8368 #if TARGET_ABI_BITS == 32
8369 if (regpairs_aligned(cpu_env)) {
8370 arg2 = arg3;
8371 arg3 = arg4;
8372 arg4 = arg5;
8373 }
8374 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8375 #else
8376 ret = get_errno(readahead(arg1, arg2, arg3));
8377 #endif
8378 break;
8379 #endif
8380 #ifdef CONFIG_ATTR
8381 #ifdef TARGET_NR_setxattr
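/* Extended attribute syscalls: attribute names are NUL-terminated guest
 * strings and values are opaque byte buffers, so each call just locks the
 * relevant guest ranges for read or write and passes them through to the
 * host. */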
8382 case TARGET_NR_listxattr:
8383 case TARGET_NR_llistxattr:
8384 {
8385 void *p, *b = 0;
8386 if (arg2) {
8387 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8388 if (!b) {
8389 ret = -TARGET_EFAULT;
8390 break;
8391 }
8392 }
8393 p = lock_user_string(arg1);
8394 if (p) {
8395 if (num == TARGET_NR_listxattr) {
8396 ret = get_errno(listxattr(p, b, arg3));
8397 } else {
8398 ret = get_errno(llistxattr(p, b, arg3));
8399 }
8400 } else {
8401 ret = -TARGET_EFAULT;
8402 }
8403 unlock_user(p, arg1, 0);
8404 unlock_user(b, arg2, arg3);
8405 break;
8406 }
8407 case TARGET_NR_flistxattr:
8408 {
8409 void *b = 0;
8410 if (arg2) {
8411 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8412 if (!b) {
8413 ret = -TARGET_EFAULT;
8414 break;
8415 }
8416 }
8417 ret = get_errno(flistxattr(arg1, b, arg3));
8418 unlock_user(b, arg2, arg3);
8419 break;
8420 }
8421 case TARGET_NR_setxattr:
8422 case TARGET_NR_lsetxattr:
8423 {
8424 void *p, *n, *v = 0;
8425 if (arg3) {
8426 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8427 if (!v) {
8428 ret = -TARGET_EFAULT;
8429 break;
8430 }
8431 }
8432 p = lock_user_string(arg1);
8433 n = lock_user_string(arg2);
8434 if (p && n) {
8435 if (num == TARGET_NR_setxattr) {
8436 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8437 } else {
8438 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8439 }
8440 } else {
8441 ret = -TARGET_EFAULT;
8442 }
8443 unlock_user(p, arg1, 0);
8444 unlock_user(n, arg2, 0);
8445 unlock_user(v, arg3, 0);
8446 }
8447 break;
8448 case TARGET_NR_fsetxattr:
8449 {
8450 void *n, *v = 0;
8451 if (arg3) {
8452 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8453 if (!v) {
8454 ret = -TARGET_EFAULT;
8455 break;
8456 }
8457 }
8458 n = lock_user_string(arg2);
8459 if (n) {
8460 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8461 } else {
8462 ret = -TARGET_EFAULT;
8463 }
8464 unlock_user(n, arg2, 0);
8465 unlock_user(v, arg3, 0);
8466 }
8467 break;
8468 case TARGET_NR_getxattr:
8469 case TARGET_NR_lgetxattr:
8470 {
8471 void *p, *n, *v = 0;
8472 if (arg3) {
8473 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8474 if (!v) {
8475 ret = -TARGET_EFAULT;
8476 break;
8477 }
8478 }
8479 p = lock_user_string(arg1);
8480 n = lock_user_string(arg2);
8481 if (p && n) {
8482 if (num == TARGET_NR_getxattr) {
8483 ret = get_errno(getxattr(p, n, v, arg4));
8484 } else {
8485 ret = get_errno(lgetxattr(p, n, v, arg4));
8486 }
8487 } else {
8488 ret = -TARGET_EFAULT;
8489 }
8490 unlock_user(p, arg1, 0);
8491 unlock_user(n, arg2, 0);
8492 unlock_user(v, arg3, arg4);
8493 }
8494 break;
8495 case TARGET_NR_fgetxattr:
8496 {
8497 void *n, *v = 0;
8498 if (arg3) {
8499 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8500 if (!v) {
8501 ret = -TARGET_EFAULT;
8502 break;
8503 }
8504 }
8505 n = lock_user_string(arg2);
8506 if (n) {
8507 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8508 } else {
8509 ret = -TARGET_EFAULT;
8510 }
8511 unlock_user(n, arg2, 0);
8512 unlock_user(v, arg3, arg4);
8513 }
8514 break;
8515 case TARGET_NR_removexattr:
8516 case TARGET_NR_lremovexattr:
8517 {
8518 void *p, *n;
8519 p = lock_user_string(arg1);
8520 n = lock_user_string(arg2);
8521 if (p && n) {
8522 if (num == TARGET_NR_removexattr) {
8523 ret = get_errno(removexattr(p, n));
8524 } else {
8525 ret = get_errno(lremovexattr(p, n));
8526 }
8527 } else {
8528 ret = -TARGET_EFAULT;
8529 }
8530 unlock_user(p, arg1, 0);
8531 unlock_user(n, arg2, 0);
8532 }
8533 break;
8534 case TARGET_NR_fremovexattr:
8535 {
8536 void *n;
8537 n = lock_user_string(arg2);
8538 if (n) {
8539 ret = get_errno(fremovexattr(arg1, n));
8540 } else {
8541 ret = -TARGET_EFAULT;
8542 }
8543 unlock_user(n, arg2, 0);
8544 }
8545 break;
8546 #endif
8547 #endif /* CONFIG_ATTR */
8548 #ifdef TARGET_NR_set_thread_area
8549 case TARGET_NR_set_thread_area:
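/* TLS storage is per-architecture: MIPS and CRIS keep the value in CPU
 * state, i386 installs a GDT entry via do_set_thread_area(), and M68K
 * records it in the TaskState. */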
8550 #if defined(TARGET_MIPS)
8551 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8552 ret = 0;
8553 break;
8554 #elif defined(TARGET_CRIS)
8555 if (arg1 & 0xff)
8556 ret = -TARGET_EINVAL;
8557 else {
8558 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8559 ret = 0;
8560 }
8561 break;
8562 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8563 ret = do_set_thread_area(cpu_env, arg1);
8564 break;
8565 #elif defined(TARGET_M68K)
8566 {
8567 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8568 ts->tp_value = arg1;
8569 ret = 0;
8570 break;
8571 }
8572 #else
8573 goto unimplemented_nowarn;
8574 #endif
8575 #endif
8576 #ifdef TARGET_NR_get_thread_area
8577 case TARGET_NR_get_thread_area:
8578 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8579 ret = do_get_thread_area(cpu_env, arg1);
8580 break;
8581 #elif defined(TARGET_M68K)
8582 {
8583 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8584 ret = ts->tp_value;
8585 break;
8586 }
8587 #else
8588 goto unimplemented_nowarn;
8589 #endif
8590 #endif
8591 #ifdef TARGET_NR_getdomainname
8592 case TARGET_NR_getdomainname:
8593 goto unimplemented_nowarn;
8594 #endif
8595
8596 #ifdef TARGET_NR_clock_gettime
8597 case TARGET_NR_clock_gettime:
8598 {
8599 struct timespec ts;
8600 ret = get_errno(clock_gettime(arg1, &ts));
8601 if (!is_error(ret)) {
8602 host_to_target_timespec(arg2, &ts);
8603 }
8604 break;
8605 }
8606 #endif
8607 #ifdef TARGET_NR_clock_getres
8608 case TARGET_NR_clock_getres:
8609 {
8610 struct timespec ts;
8611 ret = get_errno(clock_getres(arg1, &ts));
8612 if (!is_error(ret)) {
8613 host_to_target_timespec(arg2, &ts);
8614 }
8615 break;
8616 }
8617 #endif
8618 #ifdef TARGET_NR_clock_nanosleep
8619 case TARGET_NR_clock_nanosleep:
8620 {
8621 struct timespec ts;
8622 target_to_host_timespec(&ts, arg3);
8623 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8624 if (arg4)
8625 host_to_target_timespec(arg4, &ts);
8626 break;
8627 }
8628 #endif
8629
8630 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8631 case TARGET_NR_set_tid_address:
8632 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8633 break;
8634 #endif
8635
8636 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8637 case TARGET_NR_tkill:
8638 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8639 break;
8640 #endif
8641
8642 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8643 case TARGET_NR_tgkill:
8644 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8645 target_to_host_signal(arg3)));
8646 break;
8647 #endif
8648
8649 #ifdef TARGET_NR_set_robust_list
8650 case TARGET_NR_set_robust_list:
8651 case TARGET_NR_get_robust_list:
8652 /* The ABI for supporting robust futexes has userspace pass
8653 * the kernel a pointer to a linked list which is updated by
8654 * userspace after the syscall; the list is walked by the kernel
8655 * when the thread exits. Since the linked list in QEMU guest
8656 * memory isn't a valid linked list for the host and we have
8657 * no way to reliably intercept the thread-death event, we can't
8658 * support these. Silently return ENOSYS so that guest userspace
8659 * falls back to a non-robust futex implementation (which should
8660 * be OK except in the corner case of the guest crashing while
8661 * holding a mutex that is shared with another process via
8662 * shared memory).
8663 */
8664 goto unimplemented_nowarn;
8665 #endif
8666
8667 #if defined(TARGET_NR_utimensat)
8668 case TARGET_NR_utimensat:
8669 {
8670 struct timespec *tsp, ts[2];
8671 if (!arg3) {
8672 tsp = NULL;
8673 } else {
8674 target_to_host_timespec(ts, arg3);
8675 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8676 tsp = ts;
8677 }
8678 if (!arg2)
8679 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8680 else {
8681 if (!(p = lock_user_string(arg2))) {
8682 ret = -TARGET_EFAULT;
8683 goto fail;
8684 }
8685 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8686 unlock_user(p, arg2, 0);
8687 }
8688 }
8689 break;
8690 #endif
8691 case TARGET_NR_futex:
8692 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8693 break;
8694 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8695 case TARGET_NR_inotify_init:
8696 ret = get_errno(sys_inotify_init());
8697 break;
8698 #endif
8699 #ifdef CONFIG_INOTIFY1
8700 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8701 case TARGET_NR_inotify_init1:
8702 ret = get_errno(sys_inotify_init1(arg1));
8703 break;
8704 #endif
8705 #endif
8706 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8707 case TARGET_NR_inotify_add_watch:
8708 p = lock_user_string(arg2);
8709 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8710 unlock_user(p, arg2, 0);
8711 break;
8712 #endif
8713 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8714 case TARGET_NR_inotify_rm_watch:
8715 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8716 break;
8717 #endif
8718
8719 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8720 case TARGET_NR_mq_open:
8721 {
8722 struct mq_attr posix_mq_attr;
8723
8724 p = lock_user_string(arg1 - 1);
8725 if (arg4 != 0)
8726 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8727 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8728 unlock_user (p, arg1, 0);
8729 }
8730 break;
8731
8732 case TARGET_NR_mq_unlink:
8733 p = lock_user_string(arg1 - 1);
8734 ret = get_errno(mq_unlink(p));
8735 unlock_user (p, arg1, 0);
8736 break;
8737
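/* mq_timedsend/mq_timedreceive: when a timeout is supplied it is converted
 * to a host timespec (and converted back afterwards); without one the plain
 * mq_send/mq_receive calls are used. */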
8738 case TARGET_NR_mq_timedsend:
8739 {
8740 struct timespec ts;
8741
8742 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8743 if (arg5 != 0) {
8744 target_to_host_timespec(&ts, arg5);
8745 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8746 host_to_target_timespec(arg5, &ts);
8747 }
8748 else
8749 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8750 unlock_user (p, arg2, arg3);
8751 }
8752 break;
8753
8754 case TARGET_NR_mq_timedreceive:
8755 {
8756 struct timespec ts;
8757 unsigned int prio;
8758
8759 p = lock_user (VERIFY_WRITE, arg2, arg3, 0);
8760 if (arg5 != 0) {
8761 target_to_host_timespec(&ts, arg5);
8762 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8763 host_to_target_timespec(arg5, &ts);
8764 }
8765 else
8766 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8767 unlock_user (p, arg2, arg3);
8768 if (arg4 != 0)
8769 put_user_u32(prio, arg4);
8770 }
8771 break;
8772
8773 /* Not implemented for now... */
8774 /* case TARGET_NR_mq_notify: */
8775 /* break; */
8776
8777 case TARGET_NR_mq_getsetattr:
8778 {
8779 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8780 ret = 0;
8781 if (arg3 != 0) {
8782 ret = mq_getattr(arg1, &posix_mq_attr_out);
8783 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8784 }
8785 if (arg2 != 0) {
8786 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8787 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8788 }
8789
8790 }
8791 break;
8792 #endif
8793
8794 #ifdef CONFIG_SPLICE
8795 #ifdef TARGET_NR_tee
8796 case TARGET_NR_tee:
8797 {
8798 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8799 }
8800 break;
8801 #endif
8802 #ifdef TARGET_NR_splice
8803 case TARGET_NR_splice:
8804 {
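/* Optional 64-bit in/out offsets are read from guest memory and passed by
 * pointer, matching splice(2)'s NULL-versus-pointer convention. */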
8805 loff_t loff_in, loff_out;
8806 loff_t *ploff_in = NULL, *ploff_out = NULL;
8807 if(arg2) {
8808 get_user_u64(loff_in, arg2);
8809 ploff_in = &loff_in;
8810 }
8811 if(arg4) {
8812 get_user_u64(loff_out, arg4);
8813 ploff_out = &loff_out;
8814 }
8815 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8816 }
8817 break;
8818 #endif
8819 #ifdef TARGET_NR_vmsplice
8820 case TARGET_NR_vmsplice:
8821 {
8822 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8823 if (vec != NULL) {
8824 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8825 unlock_iovec(vec, arg2, arg3, 0);
8826 } else {
8827 ret = -host_to_target_errno(errno);
8828 }
8829 }
8830 break;
8831 #endif
8832 #endif /* CONFIG_SPLICE */
8833 #ifdef CONFIG_EVENTFD
8834 #if defined(TARGET_NR_eventfd)
8835 case TARGET_NR_eventfd:
8836 ret = get_errno(eventfd(arg1, 0));
8837 break;
8838 #endif
8839 #if defined(TARGET_NR_eventfd2)
8840 case TARGET_NR_eventfd2:
8841 {
8842 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8843 if (arg2 & TARGET_O_NONBLOCK) {
8844 host_flags |= O_NONBLOCK;
8845 }
8846 if (arg2 & TARGET_O_CLOEXEC) {
8847 host_flags |= O_CLOEXEC;
8848 }
8849 ret = get_errno(eventfd(arg1, host_flags));
8850 break;
8851 }
8852 #endif
8853 #endif /* CONFIG_EVENTFD */
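/* fallocate and sync_file_range take 64-bit offsets; on 32-bit ABIs these
 * arrive split across two registers and are reassembled with
 * target_offset64() (conceptually high<<32 | low, with the half order
 * depending on the target ABI).  MIPS and sync_file_range2 use a different
 * argument order, handled below. */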
8854 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8855 case TARGET_NR_fallocate:
8856 #if TARGET_ABI_BITS == 32
8857 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8858 target_offset64(arg5, arg6)));
8859 #else
8860 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8861 #endif
8862 break;
8863 #endif
8864 #if defined(CONFIG_SYNC_FILE_RANGE)
8865 #if defined(TARGET_NR_sync_file_range)
8866 case TARGET_NR_sync_file_range:
8867 #if TARGET_ABI_BITS == 32
8868 #if defined(TARGET_MIPS)
8869 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8870 target_offset64(arg5, arg6), arg7));
8871 #else
8872 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8873 target_offset64(arg4, arg5), arg6));
8874 #endif /* !TARGET_MIPS */
8875 #else
8876 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8877 #endif
8878 break;
8879 #endif
8880 #if defined(TARGET_NR_sync_file_range2)
8881 case TARGET_NR_sync_file_range2:
8882 /* This is like sync_file_range but the arguments are reordered */
8883 #if TARGET_ABI_BITS == 32
8884 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8885 target_offset64(arg5, arg6), arg2));
8886 #else
8887 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8888 #endif
8889 break;
8890 #endif
8891 #endif
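/* epoll: event masks are byte-swapped individually while the epoll_data_t
 * payload is treated as an opaque 64-bit value.  epoll_wait and epoll_pwait
 * share one implementation; pwait additionally converts the guest signal
 * mask. */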
8892 #if defined(CONFIG_EPOLL)
8893 #if defined(TARGET_NR_epoll_create)
8894 case TARGET_NR_epoll_create:
8895 ret = get_errno(epoll_create(arg1));
8896 break;
8897 #endif
8898 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8899 case TARGET_NR_epoll_create1:
8900 ret = get_errno(epoll_create1(arg1));
8901 break;
8902 #endif
8903 #if defined(TARGET_NR_epoll_ctl)
8904 case TARGET_NR_epoll_ctl:
8905 {
8906 struct epoll_event ep;
8907 struct epoll_event *epp = 0;
8908 if (arg4) {
8909 struct target_epoll_event *target_ep;
8910 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8911 goto efault;
8912 }
8913 ep.events = tswap32(target_ep->events);
8914 /* The epoll_data_t union is just opaque data to the kernel,
8915 * so we transfer all 64 bits across and need not worry what
8916 * actual data type it is.
8917 */
8918 ep.data.u64 = tswap64(target_ep->data.u64);
8919 unlock_user_struct(target_ep, arg4, 0);
8920 epp = &ep;
8921 }
8922 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8923 break;
8924 }
8925 #endif
8926
8927 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8928 #define IMPLEMENT_EPOLL_PWAIT
8929 #endif
8930 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8931 #if defined(TARGET_NR_epoll_wait)
8932 case TARGET_NR_epoll_wait:
8933 #endif
8934 #if defined(IMPLEMENT_EPOLL_PWAIT)
8935 case TARGET_NR_epoll_pwait:
8936 #endif
8937 {
8938 struct target_epoll_event *target_ep;
8939 struct epoll_event *ep;
8940 int epfd = arg1;
8941 int maxevents = arg3;
8942 int timeout = arg4;
8943
8944 target_ep = lock_user(VERIFY_WRITE, arg2,
8945 maxevents * sizeof(struct target_epoll_event), 1);
8946 if (!target_ep) {
8947 goto efault;
8948 }
8949
8950 ep = alloca(maxevents * sizeof(struct epoll_event));
8951
8952 switch (num) {
8953 #if defined(IMPLEMENT_EPOLL_PWAIT)
8954 case TARGET_NR_epoll_pwait:
8955 {
8956 target_sigset_t *target_set;
8957 sigset_t _set, *set = &_set;
8958
8959 if (arg5) {
8960 target_set = lock_user(VERIFY_READ, arg5,
8961 sizeof(target_sigset_t), 1);
8962 if (!target_set) {
8963 unlock_user(target_ep, arg2, 0);
8964 goto efault;
8965 }
8966 target_to_host_sigset(set, target_set);
8967 unlock_user(target_set, arg5, 0);
8968 } else {
8969 set = NULL;
8970 }
8971
8972 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8973 break;
8974 }
8975 #endif
8976 #if defined(TARGET_NR_epoll_wait)
8977 case TARGET_NR_epoll_wait:
8978 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8979 break;
8980 #endif
8981 default:
8982 ret = -TARGET_ENOSYS;
8983 }
8984 if (!is_error(ret)) {
8985 int i;
8986 for (i = 0; i < ret; i++) {
8987 target_ep[i].events = tswap32(ep[i].events);
8988 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8989 }
8990 }
8991 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8992 break;
8993 }
8994 #endif
8995 #endif
8996 #ifdef TARGET_NR_prlimit64
8997 case TARGET_NR_prlimit64:
8998 {
8999 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9000 struct target_rlimit64 *target_rnew, *target_rold;
9001 struct host_rlimit64 rnew, rold, *rnewp = 0;
9002 if (arg3) {
9003 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9004 goto efault;
9005 }
9006 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9007 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9008 unlock_user_struct(target_rnew, arg3, 0);
9009 rnewp = &rnew;
9010 }
9011
9012 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9013 if (!is_error(ret) && arg4) {
9014 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9015 goto efault;
9016 }
9017 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9018 target_rold->rlim_max = tswap64(rold.rlim_max);
9019 unlock_user_struct(target_rold, arg4, 1);
9020 }
9021 break;
9022 }
9023 #endif
9024 #ifdef TARGET_NR_gethostname
9025 case TARGET_NR_gethostname:
9026 {
9027 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9028 if (name) {
9029 ret = get_errno(gethostname(name, arg2));
9030 unlock_user(name, arg1, arg2);
9031 } else {
9032 ret = -TARGET_EFAULT;
9033 }
9034 break;
9035 }
9036 #endif
9037 #ifdef TARGET_NR_atomic_cmpxchg_32
9038 case TARGET_NR_atomic_cmpxchg_32:
9039 {
9040 /* should use start_exclusive from main.c */
9041 abi_ulong mem_value;
9042 if (get_user_u32(mem_value, arg6)) {
9043 target_siginfo_t info;
9044 info.si_signo = SIGSEGV;
9045 info.si_errno = 0;
9046 info.si_code = TARGET_SEGV_MAPERR;
9047 info._sifields._sigfault._addr = arg6;
9048 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9049 ret = 0xdeadbeef;
9050 break;
9051 }
9052 if (mem_value == arg2)
9053 put_user_u32(arg1, arg6);
9054 ret = mem_value;
9055 break;
9056 }
9057 #endif
9058 #ifdef TARGET_NR_atomic_barrier
9059 case TARGET_NR_atomic_barrier:
9060 {
9061 /* Like the kernel implementation and the qemu arm barrier, this is a no-op. */
9062 break;
9063 }
9064 #endif
9065
9066 #ifdef TARGET_NR_timer_create
9067 case TARGET_NR_timer_create:
9068 {
9069 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9070
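/* Host timers live in the g_posix_timers[] table; the guest gets back a
 * cookie of the form 0xcafe0000 | index, and the other timer_* syscalls
 * mask with 0xffff to recover the table index. */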
9071 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9072 struct target_sigevent *ptarget_sevp;
9073 struct target_timer_t *ptarget_timer;
9074
9075 int clkid = arg1;
9076 int timer_index = next_free_host_timer();
9077
9078 if (timer_index < 0) {
9079 ret = -TARGET_EAGAIN;
9080 } else {
9081 timer_t *phtimer = g_posix_timers + timer_index;
9082
9083 if (arg2) {
9084 if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
9085 goto efault;
9086 }
9087
9088 host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
9089 host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);
9090
9091 phost_sevp = &host_sevp;
9092 }
9093
9094 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9095 if (ret) {
9096 phtimer = NULL;
9097 } else {
9098 if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
9099 goto efault;
9100 }
9101 ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
9102 unlock_user_struct(ptarget_timer, arg3, 1);
9103 }
9104 }
9105 break;
9106 }
9107 #endif
9108
9109 #ifdef TARGET_NR_timer_settime
9110 case TARGET_NR_timer_settime:
9111 {
9112 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9113 * struct itimerspec * old_value */
9114 arg1 &= 0xffff;
9115 if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9116 ret = -TARGET_EINVAL;
9117 } else {
9118 timer_t htimer = g_posix_timers[arg1];
9119 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9120
9121 target_to_host_itimerspec(&hspec_new, arg3);
9122 ret = get_errno(
9123 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9124 host_to_target_itimerspec(arg4, &hspec_old);
9125 }
9126 break;
9127 }
9128 #endif
9129
9130 #ifdef TARGET_NR_timer_gettime
9131 case TARGET_NR_timer_gettime:
9132 {
9133 /* args: timer_t timerid, struct itimerspec *curr_value */
9134 arg1 &= 0xffff;
9135 if (!arg2) {
9136 return -TARGET_EFAULT;
9137 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9138 ret = -TARGET_EINVAL;
9139 } else {
9140 timer_t htimer = g_posix_timers[arg1];
9141 struct itimerspec hspec;
9142 ret = get_errno(timer_gettime(htimer, &hspec));
9143
9144 if (host_to_target_itimerspec(arg2, &hspec)) {
9145 ret = -TARGET_EFAULT;
9146 }
9147 }
9148 break;
9149 }
9150 #endif
9151
9152 #ifdef TARGET_NR_timer_getoverrun
9153 case TARGET_NR_timer_getoverrun:
9154 {
9155 /* args: timer_t timerid */
9156 arg1 &= 0xffff;
9157 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9158 ret = -TARGET_EINVAL;
9159 } else {
9160 timer_t htimer = g_posix_timers[arg1];
9161 ret = get_errno(timer_getoverrun(htimer));
9162 }
9163 break;
9164 }
9165 #endif
9166
9167 #ifdef TARGET_NR_timer_delete
9168 case TARGET_NR_timer_delete:
9169 {
9170 /* args: timer_t timerid */
9171 arg1 &= 0xffff;
9172 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9173 ret = -TARGET_EINVAL;
9174 } else {
9175 timer_t htimer = g_posix_timers[arg1];
9176 ret = get_errno(timer_delete(htimer));
9177 g_posix_timers[arg1] = 0;
9178 }
9179 break;
9180 }
9181 #endif
9182
9183 default:
9184 unimplemented:
9185 gemu_log("qemu: Unsupported syscall: %d\n", num);
9186 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9187 unimplemented_nowarn:
9188 #endif
9189 ret = -TARGET_ENOSYS;
9190 break;
9191 }
9192 fail:
9193 #ifdef DEBUG
9194 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9195 #endif
9196 if(do_strace)
9197 print_syscall_ret(num, ret);
9198 return ret;
9199 efault:
9200 ret = -TARGET_EFAULT;
9201 goto fail;
9202 }