/*
 * linux-user/syscall.c (from qemu.git, snapshot at commit
 * "linux-user: Fix pipe syscall return for SPARC")
 */
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include "linux_loop.h"
110 #include "cpu-uname.h"
111
112 #include "qemu.h"
113
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
116
117 //#define DEBUG
118
119 //#include <linux/msdos_fs.h>
120 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
121 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
122
123
124 #undef _syscall0
125 #undef _syscall1
126 #undef _syscall2
127 #undef _syscall3
128 #undef _syscall4
129 #undef _syscall5
130 #undef _syscall6
131
132 #define _syscall0(type,name) \
133 static type name (void) \
134 { \
135 return syscall(__NR_##name); \
136 }
137
138 #define _syscall1(type,name,type1,arg1) \
139 static type name (type1 arg1) \
140 { \
141 return syscall(__NR_##name, arg1); \
142 }
143
144 #define _syscall2(type,name,type1,arg1,type2,arg2) \
145 static type name (type1 arg1,type2 arg2) \
146 { \
147 return syscall(__NR_##name, arg1, arg2); \
148 }
149
150 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
151 static type name (type1 arg1,type2 arg2,type3 arg3) \
152 { \
153 return syscall(__NR_##name, arg1, arg2, arg3); \
154 }
155
156 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
158 { \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
160 }
161
162 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
165 { \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
167 }
168
169
170 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5,type6,arg6) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
173 type6 arg6) \
174 { \
175 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
176 }
177
178
179 #define __NR_sys_uname __NR_uname
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
185 #define __NR_sys_syslog __NR_syslog
186 #define __NR_sys_tgkill __NR_tgkill
187 #define __NR_sys_tkill __NR_tkill
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
192
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 defined(__s390x__)
195 #define __NR__llseek __NR_lseek
196 #endif
197
198 #ifdef __NR_gettid
199 _syscall0(int, gettid)
200 #else
201 /* This is a replacement for the host gettid() and must return a host
202 errno. */
203 static int gettid(void) {
204 return -ENOSYS;
205 }
206 #endif
207 #ifdef __NR_getdents
208 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
209 #endif
210 #if !defined(__NR_getdents) || \
211 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
212 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
213 #endif
214 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
215 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
216 loff_t *, res, uint, wh);
217 #endif
218 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
219 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
220 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
221 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
222 #endif
223 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
224 _syscall2(int,sys_tkill,int,tid,int,sig)
225 #endif
226 #ifdef __NR_exit_group
227 _syscall1(int,exit_group,int,error_code)
228 #endif
229 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
230 _syscall1(int,set_tid_address,int *,tidptr)
231 #endif
232 #if defined(TARGET_NR_futex) && defined(__NR_futex)
233 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
234 const struct timespec *,timeout,int *,uaddr2,int,val3)
235 #endif
236 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
237 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
238 unsigned long *, user_mask_ptr);
239 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
240 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
241 unsigned long *, user_mask_ptr);
242 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
243 void *, arg);
244
/* Translation table for open(2)/fcntl(2) flag bits between target and host.
 * Each row is { target_mask, target_bits, host_mask, host_bits }; a value
 * matching target_bits under target_mask maps to host_bits, and vice versa. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
277
/* Copy one utsname field, clamping to the kernel's fixed field width and
 * guaranteeing NUL termination. */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* Fill 'buf' (kernel struct new_utsname layout) from the host's uname(2).
 * Returns 0 on success, or -1 with errno set by uname(). */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* domainname is a GNU extension to struct utsname */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);

/* #undef after the final return keeps the helper macro private to this
 * function. */
#undef COPY_UTSNAME_FIELD
}
311
/* getcwd(2)-like helper: fill 'buf' with the current directory and return
 * the path length including the trailing NUL, or -1 with errno set. */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) != NULL) {
        return strlen(buf) + 1;
    }
    /* getcwd() has already set errno for us */
    return (-1);
}
320
321 #ifdef TARGET_NR_openat
/* Forward to the host openat(2).  The variadic 'mode' argument is only
 * consumed by the kernel when O_CREAT is present, so pass it along only
 * in that case. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    if (flags & O_CREAT) {
        return openat(dirfd, pathname, flags, mode);
    }
    return openat(dirfd, pathname, flags);
}
333 #endif
334
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc provides utimensat(): a NULL pathname means "operate on dirfd
 * itself", which is futimens() semantics on the host side. */
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper, but the host kernel has the syscall: invoke it raw. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor host kernel support: always fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
358
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers around the host inotify API, compiled in only when both the
 * target and the host support the corresponding syscall. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
/* Undefining the TARGET_NR_* macros makes do_syscall() report ENOSYS. */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
395
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
/* Host kernel lacks ppoll: -1 makes the raw syscall fail with ENOSYS. */
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
/* Same ENOSYS fallback trick as for ppoll above. */
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
429
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
/* On ARM only the EABI calling convention aligns 64-bit arguments. */
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
445
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 * A zero slot means "no translation needed": the value is passed through
 * unchanged by host_to_target_errno().
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
};
564
565 static inline int host_to_target_errno(int err)
566 {
567 if(host_to_target_errno_table[err])
568 return host_to_target_errno_table[err];
569 return err;
570 }
571
572 static inline int target_to_host_errno(int err)
573 {
574 if (target_to_host_errno_table[err])
575 return target_to_host_errno_table[err];
576 return err;
577 }
578
579 static inline abi_long get_errno(abi_long ret)
580 {
581 if (ret == -1)
582 return -host_to_target_errno(errno);
583 else
584 return ret;
585 }
586
587 static inline int is_error(abi_long ret)
588 {
589 return (abi_ulong)ret >= (abi_ulong)(-4096);
590 }
591
592 char *target_strerror(int err)
593 {
594 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
595 return NULL;
596 }
597 return strerror(target_to_host_errno(err));
598 }
599
/* Emulated program-break state: 'target_brk' is the current break,
 * 'target_original_brk' the initial break (a hard lower bound), and
 * 'brk_page' the host-page-aligned top of memory already reserved for
 * the guest heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the initial break (called by the loader once the image is set up). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
609
610 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
611 #define DEBUGF_BRK(message, args...)
612
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the traditional "query current break" form. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break; report the old break,
     * which is how the kernel signals brk() failure. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
	/* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
	return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}
690
691 static inline abi_long copy_from_user_fdset(fd_set *fds,
692 abi_ulong target_fds_addr,
693 int n)
694 {
695 int i, nw, j, k;
696 abi_ulong b, *target_fds;
697
698 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
699 if (!(target_fds = lock_user(VERIFY_READ,
700 target_fds_addr,
701 sizeof(abi_ulong) * nw,
702 1)))
703 return -TARGET_EFAULT;
704
705 FD_ZERO(fds);
706 k = 0;
707 for (i = 0; i < nw; i++) {
708 /* grab the abi_ulong */
709 __get_user(b, &target_fds[i]);
710 for (j = 0; j < TARGET_ABI_BITS; j++) {
711 /* check the bit inside the abi_ulong */
712 if ((b >> j) & 1)
713 FD_SET(k, fds);
714 k++;
715 }
716 }
717
718 unlock_user(target_fds, target_fds_addr, 0);
719
720 return 0;
721 }
722
723 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
724 abi_ulong target_fds_addr,
725 int n)
726 {
727 if (target_fds_addr) {
728 if (copy_from_user_fdset(fds, target_fds_addr, n))
729 return -TARGET_EFAULT;
730 *fds_ptr = fds;
731 } else {
732 *fds_ptr = NULL;
733 }
734 return 0;
735 }
736
737 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
738 const fd_set *fds,
739 int n)
740 {
741 int i, nw, j, k;
742 abi_long v;
743 abi_ulong *target_fds;
744
745 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
746 if (!(target_fds = lock_user(VERIFY_WRITE,
747 target_fds_addr,
748 sizeof(abi_ulong) * nw,
749 0)))
750 return -TARGET_EFAULT;
751
752 k = 0;
753 for (i = 0; i < nw; i++) {
754 v = 0;
755 for (j = 0; j < TARGET_ABI_BITS; j++) {
756 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
757 k++;
758 }
759 __put_user(v, &target_fds[i]);
760 }
761
762 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
763
764 return 0;
765 }
766
/* Host scheduler tick rate (USER_HZ); Alpha kernels use 1024, everything
 * else in practice uses 100. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from host HZ to target HZ.
 * The intermediate int64_t avoids overflow during the multiply. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
781
/* Copy a host struct rusage out to guest memory, byte-swapping each field
 * to the target's endianness.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
811
812 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
813 {
814 abi_ulong target_rlim_swap;
815 rlim_t result;
816
817 target_rlim_swap = tswapal(target_rlim);
818 if (target_rlim_swap == TARGET_RLIM_INFINITY)
819 return RLIM_INFINITY;
820
821 result = target_rlim_swap;
822 if (target_rlim_swap != (rlim_t)result)
823 return RLIM_INFINITY;
824
825 return result;
826 }
827
828 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
829 {
830 abi_ulong target_rlim_swap;
831 abi_ulong result;
832
833 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
834 target_rlim_swap = TARGET_RLIM_INFINITY;
835 else
836 target_rlim_swap = rlim;
837 result = tswapal(target_rlim_swap);
838
839 return result;
840 }
841
842 static inline int target_to_host_resource(int code)
843 {
844 switch (code) {
845 case TARGET_RLIMIT_AS:
846 return RLIMIT_AS;
847 case TARGET_RLIMIT_CORE:
848 return RLIMIT_CORE;
849 case TARGET_RLIMIT_CPU:
850 return RLIMIT_CPU;
851 case TARGET_RLIMIT_DATA:
852 return RLIMIT_DATA;
853 case TARGET_RLIMIT_FSIZE:
854 return RLIMIT_FSIZE;
855 case TARGET_RLIMIT_LOCKS:
856 return RLIMIT_LOCKS;
857 case TARGET_RLIMIT_MEMLOCK:
858 return RLIMIT_MEMLOCK;
859 case TARGET_RLIMIT_MSGQUEUE:
860 return RLIMIT_MSGQUEUE;
861 case TARGET_RLIMIT_NICE:
862 return RLIMIT_NICE;
863 case TARGET_RLIMIT_NOFILE:
864 return RLIMIT_NOFILE;
865 case TARGET_RLIMIT_NPROC:
866 return RLIMIT_NPROC;
867 case TARGET_RLIMIT_RSS:
868 return RLIMIT_RSS;
869 case TARGET_RLIMIT_RTPRIO:
870 return RLIMIT_RTPRIO;
871 case TARGET_RLIMIT_SIGPENDING:
872 return RLIMIT_SIGPENDING;
873 case TARGET_RLIMIT_STACK:
874 return RLIMIT_STACK;
875 default:
876 return code;
877 }
878 }
879
/* Read a struct timeval from guest memory into 'tv', handling endianness
 * via __get_user.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
895
/* Write 'tv' out to a struct timeval in guest memory, handling endianness
 * via __put_user.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
911
912 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
913 #include <mqueue.h>
914
/* Read a POSIX message-queue attribute block from guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
933
/* Write a POSIX message-queue attribute block out to guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
952 #endif
953
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    /* Each of the three fd sets is optional (guest address 0 -> NULL). */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* A 0 timeout address means "block indefinitely". */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    /* On success, copy the (modified) sets and the remaining timeout
     * back to guest memory. */
    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
1003
/* Host pipe2(2) wrapper: returns the raw host result (0, or -1 with errno
 * set), or -ENOSYS when QEMU was built without pipe2 support.
 * NOTE(review): that -ENOSYS is returned directly rather than via errno,
 * so do_pipe()'s get_errno() will pass it through untranslated -- confirm
 * this is acceptable for targets whose ENOSYS differs from the host's. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1012
/* Emulate pipe()/pipe2().  'pipedes' is the guest address of the two-entry
 * fd array; 'is_pipe2' distinguishes the original pipe syscall (which has
 * per-target return conventions) from pipe2. */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* These targets return the first fd and stash the second in a
         * CPU register rather than writing both through 'pipedes'. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Everyone else gets both fds written to guest memory. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1046
1047 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1048 abi_ulong target_addr,
1049 socklen_t len)
1050 {
1051 struct target_ip_mreqn *target_smreqn;
1052
1053 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1054 if (!target_smreqn)
1055 return -TARGET_EFAULT;
1056 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1057 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1058 if (len == sizeof(struct target_ip_mreqn))
1059 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1060 unlock_user(target_smreqn, target_addr, 0);
1061
1062 return 0;
1063 }
1064
/* Copy a guest sockaddr at target_addr (len bytes) into the host buffer
 * addr, swapping sa_family to host byte order.  For AF_UNIX the length
 * may be adjusted (see below), so callers must size addr with one spare
 * byte.  Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* If the last counted byte is non-NUL but the next byte is
             * NUL, extend len by one so the copied sun_path stays
             * NUL-terminated.
             * NOTE(review): cp[len] reads one byte beyond the 'len'
             * bytes locked above -- confirm lock_user makes that byte
             * accessible.  */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* overwrite with host-order value */
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1104
1105 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1106 struct sockaddr *addr,
1107 socklen_t len)
1108 {
1109 struct target_sockaddr *target_saddr;
1110
1111 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1112 if (!target_saddr)
1113 return -TARGET_EFAULT;
1114 memcpy(target_saddr, addr, len);
1115 target_saddr->sa_family = tswap16(addr->sa_family);
1116 unlock_user(target_saddr, target_addr, len);
1117
1118 return 0;
1119 }
1120
/* Convert the ancillary-data (control message) block of a guest msghdr
 * into the host msghdr's control buffer, walking both cmsg chains in
 * lockstep.  Only SCM_RIGHTS payloads are converted (fd arrays get
 * byte-swapped per element); anything else is copied verbatim with a
 * warning.  On return msgh->msg_controllen holds the host space used.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Too small to hold even one header: treat as "no control data".  */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = target cmsg_len minus the target header.  */
        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        /* Stop (dropping the rest) if the host buffer would overflow.  */
        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            /* SCM_RIGHTS: the payload is an int[] of file descriptors.  */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1176
/* Mirror of target_to_host_cmsg(): convert the host msghdr's control
 * messages back into the guest msghdr's control buffer.  SCM_RIGHTS fd
 * arrays and SO_TIMESTAMP timevals are converted field-by-field; other
 * message types are copied verbatim with a warning.  On return the
 * guest msg_controllen holds the target space used (byte-swapped).
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Guest buffer can't hold even one header: report zero space.  */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = host cmsg_len minus the host header.  */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        /* Stop (dropping the rest) if the guest buffer would overflow.  */
        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
                                (cmsg->cmsg_type == SCM_RIGHTS)) {
            /* SCM_RIGHTS: byte-swap each fd in the int[] payload.  */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
                                (cmsg->cmsg_type == SO_TIMESTAMP) &&
                                (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                                    (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                                        cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1243
1244 /* do_setsockopt() Must return target values and target errnos. */
1245 static abi_long do_setsockopt(int sockfd, int level, int optname,
1246 abi_ulong optval_addr, socklen_t optlen)
1247 {
1248 abi_long ret;
1249 int val;
1250 struct ip_mreqn *ip_mreq;
1251 struct ip_mreq_source *ip_mreq_source;
1252
1253 switch(level) {
1254 case SOL_TCP:
1255 /* TCP options all take an 'int' value. */
1256 if (optlen < sizeof(uint32_t))
1257 return -TARGET_EINVAL;
1258
1259 if (get_user_u32(val, optval_addr))
1260 return -TARGET_EFAULT;
1261 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1262 break;
1263 case SOL_IP:
1264 switch(optname) {
1265 case IP_TOS:
1266 case IP_TTL:
1267 case IP_HDRINCL:
1268 case IP_ROUTER_ALERT:
1269 case IP_RECVOPTS:
1270 case IP_RETOPTS:
1271 case IP_PKTINFO:
1272 case IP_MTU_DISCOVER:
1273 case IP_RECVERR:
1274 case IP_RECVTOS:
1275 #ifdef IP_FREEBIND
1276 case IP_FREEBIND:
1277 #endif
1278 case IP_MULTICAST_TTL:
1279 case IP_MULTICAST_LOOP:
1280 val = 0;
1281 if (optlen >= sizeof(uint32_t)) {
1282 if (get_user_u32(val, optval_addr))
1283 return -TARGET_EFAULT;
1284 } else if (optlen >= 1) {
1285 if (get_user_u8(val, optval_addr))
1286 return -TARGET_EFAULT;
1287 }
1288 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1289 break;
1290 case IP_ADD_MEMBERSHIP:
1291 case IP_DROP_MEMBERSHIP:
1292 if (optlen < sizeof (struct target_ip_mreq) ||
1293 optlen > sizeof (struct target_ip_mreqn))
1294 return -TARGET_EINVAL;
1295
1296 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1297 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1298 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1299 break;
1300
1301 case IP_BLOCK_SOURCE:
1302 case IP_UNBLOCK_SOURCE:
1303 case IP_ADD_SOURCE_MEMBERSHIP:
1304 case IP_DROP_SOURCE_MEMBERSHIP:
1305 if (optlen != sizeof (struct target_ip_mreq_source))
1306 return -TARGET_EINVAL;
1307
1308 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1309 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1310 unlock_user (ip_mreq_source, optval_addr, 0);
1311 break;
1312
1313 default:
1314 goto unimplemented;
1315 }
1316 break;
1317 case SOL_RAW:
1318 switch (optname) {
1319 case ICMP_FILTER:
1320 /* struct icmp_filter takes an u32 value */
1321 if (optlen < sizeof(uint32_t)) {
1322 return -TARGET_EINVAL;
1323 }
1324
1325 if (get_user_u32(val, optval_addr)) {
1326 return -TARGET_EFAULT;
1327 }
1328 ret = get_errno(setsockopt(sockfd, level, optname,
1329 &val, sizeof(val)));
1330 break;
1331
1332 default:
1333 goto unimplemented;
1334 }
1335 break;
1336 case TARGET_SOL_SOCKET:
1337 switch (optname) {
1338 case TARGET_SO_RCVTIMEO:
1339 {
1340 struct timeval tv;
1341
1342 optname = SO_RCVTIMEO;
1343
1344 set_timeout:
1345 if (optlen != sizeof(struct target_timeval)) {
1346 return -TARGET_EINVAL;
1347 }
1348
1349 if (copy_from_user_timeval(&tv, optval_addr)) {
1350 return -TARGET_EFAULT;
1351 }
1352
1353 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1354 &tv, sizeof(tv)));
1355 return ret;
1356 }
1357 case TARGET_SO_SNDTIMEO:
1358 optname = SO_SNDTIMEO;
1359 goto set_timeout;
1360 /* Options with 'int' argument. */
1361 case TARGET_SO_DEBUG:
1362 optname = SO_DEBUG;
1363 break;
1364 case TARGET_SO_REUSEADDR:
1365 optname = SO_REUSEADDR;
1366 break;
1367 case TARGET_SO_TYPE:
1368 optname = SO_TYPE;
1369 break;
1370 case TARGET_SO_ERROR:
1371 optname = SO_ERROR;
1372 break;
1373 case TARGET_SO_DONTROUTE:
1374 optname = SO_DONTROUTE;
1375 break;
1376 case TARGET_SO_BROADCAST:
1377 optname = SO_BROADCAST;
1378 break;
1379 case TARGET_SO_SNDBUF:
1380 optname = SO_SNDBUF;
1381 break;
1382 case TARGET_SO_RCVBUF:
1383 optname = SO_RCVBUF;
1384 break;
1385 case TARGET_SO_KEEPALIVE:
1386 optname = SO_KEEPALIVE;
1387 break;
1388 case TARGET_SO_OOBINLINE:
1389 optname = SO_OOBINLINE;
1390 break;
1391 case TARGET_SO_NO_CHECK:
1392 optname = SO_NO_CHECK;
1393 break;
1394 case TARGET_SO_PRIORITY:
1395 optname = SO_PRIORITY;
1396 break;
1397 #ifdef SO_BSDCOMPAT
1398 case TARGET_SO_BSDCOMPAT:
1399 optname = SO_BSDCOMPAT;
1400 break;
1401 #endif
1402 case TARGET_SO_PASSCRED:
1403 optname = SO_PASSCRED;
1404 break;
1405 case TARGET_SO_TIMESTAMP:
1406 optname = SO_TIMESTAMP;
1407 break;
1408 case TARGET_SO_RCVLOWAT:
1409 optname = SO_RCVLOWAT;
1410 break;
1411 break;
1412 default:
1413 goto unimplemented;
1414 }
1415 if (optlen < sizeof(uint32_t))
1416 return -TARGET_EINVAL;
1417
1418 if (get_user_u32(val, optval_addr))
1419 return -TARGET_EFAULT;
1420 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1421 break;
1422 default:
1423 unimplemented:
1424 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1425 ret = -TARGET_ENOPROTOOPT;
1426 }
1427 return ret;
1428 }
1429
1430 /* do_getsockopt() Must return target values and target errnos. */
1431 static abi_long do_getsockopt(int sockfd, int level, int optname,
1432 abi_ulong optval_addr, abi_ulong optlen)
1433 {
1434 abi_long ret;
1435 int len, val;
1436 socklen_t lv;
1437
1438 switch(level) {
1439 case TARGET_SOL_SOCKET:
1440 level = SOL_SOCKET;
1441 switch (optname) {
1442 /* These don't just return a single integer */
1443 case TARGET_SO_LINGER:
1444 case TARGET_SO_RCVTIMEO:
1445 case TARGET_SO_SNDTIMEO:
1446 case TARGET_SO_PEERNAME:
1447 goto unimplemented;
1448 case TARGET_SO_PEERCRED: {
1449 struct ucred cr;
1450 socklen_t crlen;
1451 struct target_ucred *tcr;
1452
1453 if (get_user_u32(len, optlen)) {
1454 return -TARGET_EFAULT;
1455 }
1456 if (len < 0) {
1457 return -TARGET_EINVAL;
1458 }
1459
1460 crlen = sizeof(cr);
1461 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1462 &cr, &crlen));
1463 if (ret < 0) {
1464 return ret;
1465 }
1466 if (len > crlen) {
1467 len = crlen;
1468 }
1469 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1470 return -TARGET_EFAULT;
1471 }
1472 __put_user(cr.pid, &tcr->pid);
1473 __put_user(cr.uid, &tcr->uid);
1474 __put_user(cr.gid, &tcr->gid);
1475 unlock_user_struct(tcr, optval_addr, 1);
1476 if (put_user_u32(len, optlen)) {
1477 return -TARGET_EFAULT;
1478 }
1479 break;
1480 }
1481 /* Options with 'int' argument. */
1482 case TARGET_SO_DEBUG:
1483 optname = SO_DEBUG;
1484 goto int_case;
1485 case TARGET_SO_REUSEADDR:
1486 optname = SO_REUSEADDR;
1487 goto int_case;
1488 case TARGET_SO_TYPE:
1489 optname = SO_TYPE;
1490 goto int_case;
1491 case TARGET_SO_ERROR:
1492 optname = SO_ERROR;
1493 goto int_case;
1494 case TARGET_SO_DONTROUTE:
1495 optname = SO_DONTROUTE;
1496 goto int_case;
1497 case TARGET_SO_BROADCAST:
1498 optname = SO_BROADCAST;
1499 goto int_case;
1500 case TARGET_SO_SNDBUF:
1501 optname = SO_SNDBUF;
1502 goto int_case;
1503 case TARGET_SO_RCVBUF:
1504 optname = SO_RCVBUF;
1505 goto int_case;
1506 case TARGET_SO_KEEPALIVE:
1507 optname = SO_KEEPALIVE;
1508 goto int_case;
1509 case TARGET_SO_OOBINLINE:
1510 optname = SO_OOBINLINE;
1511 goto int_case;
1512 case TARGET_SO_NO_CHECK:
1513 optname = SO_NO_CHECK;
1514 goto int_case;
1515 case TARGET_SO_PRIORITY:
1516 optname = SO_PRIORITY;
1517 goto int_case;
1518 #ifdef SO_BSDCOMPAT
1519 case TARGET_SO_BSDCOMPAT:
1520 optname = SO_BSDCOMPAT;
1521 goto int_case;
1522 #endif
1523 case TARGET_SO_PASSCRED:
1524 optname = SO_PASSCRED;
1525 goto int_case;
1526 case TARGET_SO_TIMESTAMP:
1527 optname = SO_TIMESTAMP;
1528 goto int_case;
1529 case TARGET_SO_RCVLOWAT:
1530 optname = SO_RCVLOWAT;
1531 goto int_case;
1532 default:
1533 goto int_case;
1534 }
1535 break;
1536 case SOL_TCP:
1537 /* TCP options all take an 'int' value. */
1538 int_case:
1539 if (get_user_u32(len, optlen))
1540 return -TARGET_EFAULT;
1541 if (len < 0)
1542 return -TARGET_EINVAL;
1543 lv = sizeof(lv);
1544 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1545 if (ret < 0)
1546 return ret;
1547 if (len > lv)
1548 len = lv;
1549 if (len == 4) {
1550 if (put_user_u32(val, optval_addr))
1551 return -TARGET_EFAULT;
1552 } else {
1553 if (put_user_u8(val, optval_addr))
1554 return -TARGET_EFAULT;
1555 }
1556 if (put_user_u32(len, optlen))
1557 return -TARGET_EFAULT;
1558 break;
1559 case SOL_IP:
1560 switch(optname) {
1561 case IP_TOS:
1562 case IP_TTL:
1563 case IP_HDRINCL:
1564 case IP_ROUTER_ALERT:
1565 case IP_RECVOPTS:
1566 case IP_RETOPTS:
1567 case IP_PKTINFO:
1568 case IP_MTU_DISCOVER:
1569 case IP_RECVERR:
1570 case IP_RECVTOS:
1571 #ifdef IP_FREEBIND
1572 case IP_FREEBIND:
1573 #endif
1574 case IP_MULTICAST_TTL:
1575 case IP_MULTICAST_LOOP:
1576 if (get_user_u32(len, optlen))
1577 return -TARGET_EFAULT;
1578 if (len < 0)
1579 return -TARGET_EINVAL;
1580 lv = sizeof(lv);
1581 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1582 if (ret < 0)
1583 return ret;
1584 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1585 len = 1;
1586 if (put_user_u32(len, optlen)
1587 || put_user_u8(val, optval_addr))
1588 return -TARGET_EFAULT;
1589 } else {
1590 if (len > sizeof(int))
1591 len = sizeof(int);
1592 if (put_user_u32(len, optlen)
1593 || put_user_u32(val, optval_addr))
1594 return -TARGET_EFAULT;
1595 }
1596 break;
1597 default:
1598 ret = -TARGET_ENOPROTOOPT;
1599 break;
1600 }
1601 break;
1602 default:
1603 unimplemented:
1604 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1605 level, optname);
1606 ret = -TARGET_EOPNOTSUPP;
1607 break;
1608 }
1609 return ret;
1610 }
1611
/* Build a host iovec array from a guest iovec array at target_addr,
 * locking each referenced guest buffer for the given access type.
 * Unlike most helpers here this reports failure by returning NULL with
 * host errno set (EINVAL/ENOMEM/EFAULT); a zero count returns NULL with
 * errno == 0.  The result must be released with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        errno = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            errno = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                errno = EFAULT;
                goto fail;
            }
            /* Clamp so the running total never exceeds max_len.
               NOTE(review): the clamp happens after lock_user() already
               locked the full original len -- confirm that is harmless.  */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Note: buffers locked in earlier iterations are released later by
       unlock_iovec() paths; here we free only the host array.  */
    free(vec);
 fail2:
    unlock_user(target_vec, target_addr, 0);
    return NULL;
}
1680
1681 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1682 int count, int copy)
1683 {
1684 struct target_iovec *target_vec;
1685 int i;
1686
1687 target_vec = lock_user(VERIFY_READ, target_addr,
1688 count * sizeof(struct target_iovec), 1);
1689 if (target_vec) {
1690 for (i = 0; i < count; i++) {
1691 abi_ulong base = tswapal(target_vec[i].iov_base);
1692 abi_long len = tswapal(target_vec[i].iov_base);
1693 if (len < 0) {
1694 break;
1695 }
1696 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1697 }
1698 unlock_user(target_vec, target_addr, 0);
1699 }
1700
1701 free(vec);
1702 }
1703
1704 static inline void target_to_host_sock_type(int *type)
1705 {
1706 int host_type = 0;
1707 int target_type = *type;
1708
1709 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1710 case TARGET_SOCK_DGRAM:
1711 host_type = SOCK_DGRAM;
1712 break;
1713 case TARGET_SOCK_STREAM:
1714 host_type = SOCK_STREAM;
1715 break;
1716 default:
1717 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1718 break;
1719 }
1720 if (target_type & TARGET_SOCK_CLOEXEC) {
1721 host_type |= SOCK_CLOEXEC;
1722 }
1723 if (target_type & TARGET_SOCK_NONBLOCK) {
1724 host_type |= SOCK_NONBLOCK;
1725 }
1726 *type = host_type;
1727 }
1728
1729 /* do_socket() Must return target values and target errnos. */
1730 static abi_long do_socket(int domain, int type, int protocol)
1731 {
1732 target_to_host_sock_type(&type);
1733
1734 if (domain == PF_NETLINK)
1735 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1736 return get_errno(socket(domain, type, protocol));
1737 }
1738
1739 /* do_bind() Must return target values and target errnos. */
1740 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1741 socklen_t addrlen)
1742 {
1743 void *addr;
1744 abi_long ret;
1745
1746 if ((int)addrlen < 0) {
1747 return -TARGET_EINVAL;
1748 }
1749
1750 addr = alloca(addrlen+1);
1751
1752 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1753 if (ret)
1754 return ret;
1755
1756 return get_errno(bind(sockfd, addr, addrlen));
1757 }
1758
1759 /* do_connect() Must return target values and target errnos. */
1760 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1761 socklen_t addrlen)
1762 {
1763 void *addr;
1764 abi_long ret;
1765
1766 if ((int)addrlen < 0) {
1767 return -TARGET_EINVAL;
1768 }
1769
1770 addr = alloca(addrlen);
1771
1772 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1773 if (ret)
1774 return ret;
1775
1776 return get_errno(connect(sockfd, addr, addrlen));
1777 }
1778
/* do_sendrecvmsg() Must return target values and target errnos.
 *
 * Common implementation of sendmsg (send != 0) and recvmsg (send == 0):
 * converts the guest msghdr at target_msg (name, iovec array, control
 * messages) into a host msghdr, performs the host call, and for recvmsg
 * converts the results back to guest form.
 */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* FIXME */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host control buffer is sized at twice the guest's control length,
       presumably to absorb larger host cmsg header alignment -- see
       host_to_target_cmsg(); confirm the factor is sufficient.  */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    /* For send we read guest buffers; for recv we write them.  */
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        /* lock_iovec() reports failure via host errno.  */
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count before the conversions below
               overwrite ret, then restore it on success.  */
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
1853
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Enforce the caller contract described above.  */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
1866
1867 /* do_accept4() Must return target values and target errnos. */
1868 static abi_long do_accept4(int fd, abi_ulong target_addr,
1869 abi_ulong target_addrlen_addr, int flags)
1870 {
1871 socklen_t addrlen;
1872 void *addr;
1873 abi_long ret;
1874
1875 if (target_addr == 0) {
1876 return get_errno(accept4(fd, NULL, NULL, flags));
1877 }
1878
1879 /* linux returns EINVAL if addrlen pointer is invalid */
1880 if (get_user_u32(addrlen, target_addrlen_addr))
1881 return -TARGET_EINVAL;
1882
1883 if ((int)addrlen < 0) {
1884 return -TARGET_EINVAL;
1885 }
1886
1887 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1888 return -TARGET_EINVAL;
1889
1890 addr = alloca(addrlen);
1891
1892 ret = get_errno(accept4(fd, addr, &addrlen, flags));
1893 if (!is_error(ret)) {
1894 host_to_target_sockaddr(target_addr, addr, addrlen);
1895 if (put_user_u32(addrlen, target_addrlen_addr))
1896 ret = -TARGET_EFAULT;
1897 }
1898 return ret;
1899 }
1900
1901 /* do_getpeername() Must return target values and target errnos. */
1902 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1903 abi_ulong target_addrlen_addr)
1904 {
1905 socklen_t addrlen;
1906 void *addr;
1907 abi_long ret;
1908
1909 if (get_user_u32(addrlen, target_addrlen_addr))
1910 return -TARGET_EFAULT;
1911
1912 if ((int)addrlen < 0) {
1913 return -TARGET_EINVAL;
1914 }
1915
1916 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1917 return -TARGET_EFAULT;
1918
1919 addr = alloca(addrlen);
1920
1921 ret = get_errno(getpeername(fd, addr, &addrlen));
1922 if (!is_error(ret)) {
1923 host_to_target_sockaddr(target_addr, addr, addrlen);
1924 if (put_user_u32(addrlen, target_addrlen_addr))
1925 ret = -TARGET_EFAULT;
1926 }
1927 return ret;
1928 }
1929
1930 /* do_getsockname() Must return target values and target errnos. */
1931 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1932 abi_ulong target_addrlen_addr)
1933 {
1934 socklen_t addrlen;
1935 void *addr;
1936 abi_long ret;
1937
1938 if (get_user_u32(addrlen, target_addrlen_addr))
1939 return -TARGET_EFAULT;
1940
1941 if ((int)addrlen < 0) {
1942 return -TARGET_EINVAL;
1943 }
1944
1945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1946 return -TARGET_EFAULT;
1947
1948 addr = alloca(addrlen);
1949
1950 ret = get_errno(getsockname(fd, addr, &addrlen));
1951 if (!is_error(ret)) {
1952 host_to_target_sockaddr(target_addr, addr, addrlen);
1953 if (put_user_u32(addrlen, target_addrlen_addr))
1954 ret = -TARGET_EFAULT;
1955 }
1956 return ret;
1957 }
1958
1959 /* do_socketpair() Must return target values and target errnos. */
1960 static abi_long do_socketpair(int domain, int type, int protocol,
1961 abi_ulong target_tab_addr)
1962 {
1963 int tab[2];
1964 abi_long ret;
1965
1966 target_to_host_sock_type(&type);
1967
1968 ret = get_errno(socketpair(domain, type, protocol, tab));
1969 if (!is_error(ret)) {
1970 if (put_user_s32(tab[0], target_tab_addr)
1971 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1972 ret = -TARGET_EFAULT;
1973 }
1974 return ret;
1975 }
1976
1977 /* do_sendto() Must return target values and target errnos. */
1978 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1979 abi_ulong target_addr, socklen_t addrlen)
1980 {
1981 void *addr;
1982 void *host_msg;
1983 abi_long ret;
1984
1985 if ((int)addrlen < 0) {
1986 return -TARGET_EINVAL;
1987 }
1988
1989 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1990 if (!host_msg)
1991 return -TARGET_EFAULT;
1992 if (target_addr) {
1993 addr = alloca(addrlen);
1994 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1995 if (ret) {
1996 unlock_user(host_msg, msg, 0);
1997 return ret;
1998 }
1999 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2000 } else {
2001 ret = get_errno(send(fd, host_msg, len, flags));
2002 }
2003 unlock_user(host_msg, msg, 0);
2004 return ret;
2005 }
2006
/* do_recvfrom() Must return target values and target errnos.
 *
 * Implements both recvfrom (target_addr != 0) and recv (target_addr ==
 * 0).  On success the received bytes are copied back to guest memory at
 * msg, and for recvfrom the sender's address and its length are written
 * to target_addr/target_addrlen.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            /* NOTE(review): the kernel may have updated addrlen beyond
               the guest's original buffer size before this write-back
               -- confirm host_to_target_sockaddr() can't overrun the
               guest buffer here.  */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received bytes back to the guest.  */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error: release the guest buffer without copying back.  */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
2050
2051 #ifdef TARGET_NR_socketcall
2052 /* do_socketcall() Must return target values and target errnos. */
2053 static abi_long do_socketcall(int num, abi_ulong vptr)
2054 {
2055 abi_long ret;
2056 const int n = sizeof(abi_ulong);
2057
2058 switch(num) {
2059 case SOCKOP_socket:
2060 {
2061 abi_ulong domain, type, protocol;
2062
2063 if (get_user_ual(domain, vptr)
2064 || get_user_ual(type, vptr + n)
2065 || get_user_ual(protocol, vptr + 2 * n))
2066 return -TARGET_EFAULT;
2067
2068 ret = do_socket(domain, type, protocol);
2069 }
2070 break;
2071 case SOCKOP_bind:
2072 {
2073 abi_ulong sockfd;
2074 abi_ulong target_addr;
2075 socklen_t addrlen;
2076
2077 if (get_user_ual(sockfd, vptr)
2078 || get_user_ual(target_addr, vptr + n)
2079 || get_user_ual(addrlen, vptr + 2 * n))
2080 return -TARGET_EFAULT;
2081
2082 ret = do_bind(sockfd, target_addr, addrlen);
2083 }
2084 break;
2085 case SOCKOP_connect:
2086 {
2087 abi_ulong sockfd;
2088 abi_ulong target_addr;
2089 socklen_t addrlen;
2090
2091 if (get_user_ual(sockfd, vptr)
2092 || get_user_ual(target_addr, vptr + n)
2093 || get_user_ual(addrlen, vptr + 2 * n))
2094 return -TARGET_EFAULT;
2095
2096 ret = do_connect(sockfd, target_addr, addrlen);
2097 }
2098 break;
2099 case SOCKOP_listen:
2100 {
2101 abi_ulong sockfd, backlog;
2102
2103 if (get_user_ual(sockfd, vptr)
2104 || get_user_ual(backlog, vptr + n))
2105 return -TARGET_EFAULT;
2106
2107 ret = get_errno(listen(sockfd, backlog));
2108 }
2109 break;
2110 case SOCKOP_accept:
2111 {
2112 abi_ulong sockfd;
2113 abi_ulong target_addr, target_addrlen;
2114
2115 if (get_user_ual(sockfd, vptr)
2116 || get_user_ual(target_addr, vptr + n)
2117 || get_user_ual(target_addrlen, vptr + 2 * n))
2118 return -TARGET_EFAULT;
2119
2120 ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2121 }
2122 break;
2123 case SOCKOP_getsockname:
2124 {
2125 abi_ulong sockfd;
2126 abi_ulong target_addr, target_addrlen;
2127
2128 if (get_user_ual(sockfd, vptr)
2129 || get_user_ual(target_addr, vptr + n)
2130 || get_user_ual(target_addrlen, vptr + 2 * n))
2131 return -TARGET_EFAULT;
2132
2133 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2134 }
2135 break;
2136 case SOCKOP_getpeername:
2137 {
2138 abi_ulong sockfd;
2139 abi_ulong target_addr, target_addrlen;
2140
2141 if (get_user_ual(sockfd, vptr)
2142 || get_user_ual(target_addr, vptr + n)
2143 || get_user_ual(target_addrlen, vptr + 2 * n))
2144 return -TARGET_EFAULT;
2145
2146 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2147 }
2148 break;
2149 case SOCKOP_socketpair:
2150 {
2151 abi_ulong domain, type, protocol;
2152 abi_ulong tab;
2153
2154 if (get_user_ual(domain, vptr)
2155 || get_user_ual(type, vptr + n)
2156 || get_user_ual(protocol, vptr + 2 * n)
2157 || get_user_ual(tab, vptr + 3 * n))
2158 return -TARGET_EFAULT;
2159
2160 ret = do_socketpair(domain, type, protocol, tab);
2161 }
2162 break;
2163 case SOCKOP_send:
2164 {
2165 abi_ulong sockfd;
2166 abi_ulong msg;
2167 size_t len;
2168 abi_ulong flags;
2169
2170 if (get_user_ual(sockfd, vptr)
2171 || get_user_ual(msg, vptr + n)
2172 || get_user_ual(len, vptr + 2 * n)
2173 || get_user_ual(flags, vptr + 3 * n))
2174 return -TARGET_EFAULT;
2175
2176 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2177 }
2178 break;
2179 case SOCKOP_recv:
2180 {
2181 abi_ulong sockfd;
2182 abi_ulong msg;
2183 size_t len;
2184 abi_ulong flags;
2185
2186 if (get_user_ual(sockfd, vptr)
2187 || get_user_ual(msg, vptr + n)
2188 || get_user_ual(len, vptr + 2 * n)
2189 || get_user_ual(flags, vptr + 3 * n))
2190 return -TARGET_EFAULT;
2191
2192 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2193 }
2194 break;
2195 case SOCKOP_sendto:
2196 {
2197 abi_ulong sockfd;
2198 abi_ulong msg;
2199 size_t len;
2200 abi_ulong flags;
2201 abi_ulong addr;
2202 socklen_t addrlen;
2203
2204 if (get_user_ual(sockfd, vptr)
2205 || get_user_ual(msg, vptr + n)
2206 || get_user_ual(len, vptr + 2 * n)
2207 || get_user_ual(flags, vptr + 3 * n)
2208 || get_user_ual(addr, vptr + 4 * n)
2209 || get_user_ual(addrlen, vptr + 5 * n))
2210 return -TARGET_EFAULT;
2211
2212 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2213 }
2214 break;
2215 case SOCKOP_recvfrom:
2216 {
2217 abi_ulong sockfd;
2218 abi_ulong msg;
2219 size_t len;
2220 abi_ulong flags;
2221 abi_ulong addr;
2222 socklen_t addrlen;
2223
2224 if (get_user_ual(sockfd, vptr)
2225 || get_user_ual(msg, vptr + n)
2226 || get_user_ual(len, vptr + 2 * n)
2227 || get_user_ual(flags, vptr + 3 * n)
2228 || get_user_ual(addr, vptr + 4 * n)
2229 || get_user_ual(addrlen, vptr + 5 * n))
2230 return -TARGET_EFAULT;
2231
2232 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2233 }
2234 break;
2235 case SOCKOP_shutdown:
2236 {
2237 abi_ulong sockfd, how;
2238
2239 if (get_user_ual(sockfd, vptr)
2240 || get_user_ual(how, vptr + n))
2241 return -TARGET_EFAULT;
2242
2243 ret = get_errno(shutdown(sockfd, how));
2244 }
2245 break;
2246 case SOCKOP_sendmsg:
2247 case SOCKOP_recvmsg:
2248 {
2249 abi_ulong fd;
2250 abi_ulong target_msg;
2251 abi_ulong flags;
2252
2253 if (get_user_ual(fd, vptr)
2254 || get_user_ual(target_msg, vptr + n)
2255 || get_user_ual(flags, vptr + 2 * n))
2256 return -TARGET_EFAULT;
2257
2258 ret = do_sendrecvmsg(fd, target_msg, flags,
2259 (num == SOCKOP_sendmsg));
2260 }
2261 break;
2262 case SOCKOP_setsockopt:
2263 {
2264 abi_ulong sockfd;
2265 abi_ulong level;
2266 abi_ulong optname;
2267 abi_ulong optval;
2268 socklen_t optlen;
2269
2270 if (get_user_ual(sockfd, vptr)
2271 || get_user_ual(level, vptr + n)
2272 || get_user_ual(optname, vptr + 2 * n)
2273 || get_user_ual(optval, vptr + 3 * n)
2274 || get_user_ual(optlen, vptr + 4 * n))
2275 return -TARGET_EFAULT;
2276
2277 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2278 }
2279 break;
2280 case SOCKOP_getsockopt:
2281 {
2282 abi_ulong sockfd;
2283 abi_ulong level;
2284 abi_ulong optname;
2285 abi_ulong optval;
2286 socklen_t optlen;
2287
2288 if (get_user_ual(sockfd, vptr)
2289 || get_user_ual(level, vptr + n)
2290 || get_user_ual(optname, vptr + 2 * n)
2291 || get_user_ual(optval, vptr + 3 * n)
2292 || get_user_ual(optlen, vptr + 4 * n))
2293 return -TARGET_EFAULT;
2294
2295 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2296 }
2297 break;
2298 default:
2299 gemu_log("Unsupported socketcall: %d\n", num);
2300 ret = -TARGET_ENOSYS;
2301 break;
2302 }
2303 return ret;
2304 }
2305 #endif
2306
/* Maximum number of guest shmat() attaches tracked at once. */
#define N_SHM_REGIONS 32

/* Bookkeeping of guest shared-memory attaches, so do_shmdt() can clear
   the page flags set by do_shmat().  A slot with start == 0 is free. */
static struct shm_region {
    abi_ulong start;   /* guest address the segment is attached at */
    abi_ulong size;    /* segment size in bytes */
} shm_regions[N_SHM_REGIONS];
2313
/* Guest-layout struct ipc_perm; all fields are in guest byte order
   and guest word size (abi_long/abi_ulong). */
struct target_ipc_perm
{
    abi_long __key;              /* key passed to the *get() call */
    abi_ulong uid;               /* owner's user id */
    abi_ulong gid;               /* owner's group id */
    abi_ulong cuid;              /* creator's user id */
    abi_ulong cgid;              /* creator's group id */
    unsigned short int mode;     /* read/write permission bits */
    unsigned short int __pad1;
    unsigned short int __seq;    /* sequence number */
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};
2328
/* Guest-layout struct semid_ds; mirrors the target kernel ABI layout
   (unused slots pad time fields for a possible 64-bit time split). */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;  /* ownership and permissions */
    abi_ulong sem_otime;              /* last semop() time */
    abi_ulong __unused1;
    abi_ulong sem_ctime;              /* last change time */
    abi_ulong __unused2;
    abi_ulong sem_nsems;              /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
2340
2341 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2342 abi_ulong target_addr)
2343 {
2344 struct target_ipc_perm *target_ip;
2345 struct target_semid_ds *target_sd;
2346
2347 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2348 return -TARGET_EFAULT;
2349 target_ip = &(target_sd->sem_perm);
2350 host_ip->__key = tswapal(target_ip->__key);
2351 host_ip->uid = tswapal(target_ip->uid);
2352 host_ip->gid = tswapal(target_ip->gid);
2353 host_ip->cuid = tswapal(target_ip->cuid);
2354 host_ip->cgid = tswapal(target_ip->cgid);
2355 host_ip->mode = tswap16(target_ip->mode);
2356 unlock_user_struct(target_sd, target_addr, 0);
2357 return 0;
2358 }
2359
2360 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2361 struct ipc_perm *host_ip)
2362 {
2363 struct target_ipc_perm *target_ip;
2364 struct target_semid_ds *target_sd;
2365
2366 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2367 return -TARGET_EFAULT;
2368 target_ip = &(target_sd->sem_perm);
2369 target_ip->__key = tswapal(host_ip->__key);
2370 target_ip->uid = tswapal(host_ip->uid);
2371 target_ip->gid = tswapal(host_ip->gid);
2372 target_ip->cuid = tswapal(host_ip->cuid);
2373 target_ip->cgid = tswapal(host_ip->cgid);
2374 target_ip->mode = tswap16(host_ip->mode);
2375 unlock_user_struct(target_sd, target_addr, 1);
2376 return 0;
2377 }
2378
2379 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2380 abi_ulong target_addr)
2381 {
2382 struct target_semid_ds *target_sd;
2383
2384 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2385 return -TARGET_EFAULT;
2386 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2387 return -TARGET_EFAULT;
2388 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2389 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2390 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2391 unlock_user_struct(target_sd, target_addr, 0);
2392 return 0;
2393 }
2394
2395 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2396 struct semid_ds *host_sd)
2397 {
2398 struct target_semid_ds *target_sd;
2399
2400 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2401 return -TARGET_EFAULT;
2402 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2403 return -TARGET_EFAULT;
2404 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2405 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2406 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2407 unlock_user_struct(target_sd, target_addr, 1);
2408 return 0;
2409 }
2410
/* Guest-layout struct seminfo, returned by semctl(IPC_INFO/SEM_INFO);
   all fields are plain ints, converted member-by-member. */
struct target_seminfo {
    int semmap;   /* # of entries in semaphore map */
    int semmni;   /* max # of semaphore identifiers */
    int semmns;   /* max # of semaphores in system */
    int semmnu;   /* # of undo structures system-wide */
    int semmsl;   /* max # of semaphores per id */
    int semopm;   /* max # of operations per semop call */
    int semume;   /* max # of undo entries per process */
    int semusz;   /* size of struct sem_undo */
    int semvmx;   /* semaphore maximum value */
    int semaem;   /* adjust-on-exit max value */
};
2423
2424 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2425 struct seminfo *host_seminfo)
2426 {
2427 struct target_seminfo *target_seminfo;
2428 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2429 return -TARGET_EFAULT;
2430 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2431 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2432 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2433 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2434 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2435 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2436 __put_user(host_seminfo->semume, &target_seminfo->semume);
2437 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2438 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2439 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2440 unlock_user_struct(target_seminfo, target_addr, 1);
2441 return 0;
2442 }
2443
/* Host semctl() argument union (glibc no longer defines it). */
union semun {
    int val;                  /* value for SETVAL */
    struct semid_ds *buf;     /* buffer for IPC_STAT / IPC_SET */
    unsigned short *array;    /* array for GETALL / SETALL */
    struct seminfo *__buf;    /* buffer for IPC_INFO / SEM_INFO */
};

/* Guest view of the same union: pointer members are guest addresses. */
union target_semun {
    int val;                  /* value for SETVAL */
    abi_ulong buf;            /* guest address of semid_ds */
    abi_ulong array;          /* guest address of unsigned short array */
    abi_ulong __buf;          /* guest address of seminfo */
};
2457
2458 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2459 abi_ulong target_addr)
2460 {
2461 int nsems;
2462 unsigned short *array;
2463 union semun semun;
2464 struct semid_ds semid_ds;
2465 int i, ret;
2466
2467 semun.buf = &semid_ds;
2468
2469 ret = semctl(semid, 0, IPC_STAT, semun);
2470 if (ret == -1)
2471 return get_errno(ret);
2472
2473 nsems = semid_ds.sem_nsems;
2474
2475 *host_array = malloc(nsems*sizeof(unsigned short));
2476 array = lock_user(VERIFY_READ, target_addr,
2477 nsems*sizeof(unsigned short), 1);
2478 if (!array)
2479 return -TARGET_EFAULT;
2480
2481 for(i=0; i<nsems; i++) {
2482 __get_user((*host_array)[i], &array[i]);
2483 }
2484 unlock_user(array, target_addr, 0);
2485
2486 return 0;
2487 }
2488
2489 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2490 unsigned short **host_array)
2491 {
2492 int nsems;
2493 unsigned short *array;
2494 union semun semun;
2495 struct semid_ds semid_ds;
2496 int i, ret;
2497
2498 semun.buf = &semid_ds;
2499
2500 ret = semctl(semid, 0, IPC_STAT, semun);
2501 if (ret == -1)
2502 return get_errno(ret);
2503
2504 nsems = semid_ds.sem_nsems;
2505
2506 array = lock_user(VERIFY_WRITE, target_addr,
2507 nsems*sizeof(unsigned short), 0);
2508 if (!array)
2509 return -TARGET_EFAULT;
2510
2511 for(i=0; i<nsems; i++) {
2512 __put_user((*host_array)[i], &array[i]);
2513 }
2514 free(*host_array);
2515 unlock_user(array, target_addr, 1);
2516
2517 return 0;
2518 }
2519
/* Emulate semctl(2) for the guest: convert the guest's semun argument
   (target_su) to the host form appropriate for cmd, run the host
   semctl(), and convert results back.  Returns the host result or a
   negative target errno. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;   /* unknown cmds fall through to this */
    abi_long err;
    /* Strip IPC_64 and friends from the high bits of the command. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* The union member is a plain int, passed by value. */
        arg.val = tswap32(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* NOTE(review): this writes only the local copy of target_su,
           which is passed by value — looks like a dead store; confirm
           whether the result was meant to be written back to the guest. */
        target_su.val = tswap32(arg.val);
        break;
    case GETALL:
    case SETALL:
        /* array is malloc'd by the helper and freed by the second one. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Round-trip the semid_ds through a host-side copy. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the fourth argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
2579
/* Guest-layout struct sembuf, one semop(2) operation. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation (add/wait/zero-test) */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
2585
2586 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2587 abi_ulong target_addr,
2588 unsigned nsops)
2589 {
2590 struct target_sembuf *target_sembuf;
2591 int i;
2592
2593 target_sembuf = lock_user(VERIFY_READ, target_addr,
2594 nsops*sizeof(struct target_sembuf), 1);
2595 if (!target_sembuf)
2596 return -TARGET_EFAULT;
2597
2598 for(i=0; i<nsops; i++) {
2599 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2600 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2601 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2602 }
2603
2604 unlock_user(target_sembuf, target_addr, 0);
2605
2606 return 0;
2607 }
2608
2609 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2610 {
2611 struct sembuf sops[nsops];
2612
2613 if (target_to_host_sembuf(sops, ptr, nsops))
2614 return -TARGET_EFAULT;
2615
2616 return get_errno(semop(semid, sops, nsops));
2617 }
2618
/* Guest-layout struct msqid_ds.  On 32-bit targets each time field is
   followed by a padding word, matching the target kernel ABI. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership and permissions */
    abi_ulong msg_stime;              /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;              /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;           /* current bytes on queue */
    abi_ulong msg_qnum;               /* messages currently on queue */
    abi_ulong msg_qbytes;             /* max bytes allowed on queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd() */
    abi_ulong msg_lrpid;              /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
2642
2643 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2644 abi_ulong target_addr)
2645 {
2646 struct target_msqid_ds *target_md;
2647
2648 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2649 return -TARGET_EFAULT;
2650 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2651 return -TARGET_EFAULT;
2652 host_md->msg_stime = tswapal(target_md->msg_stime);
2653 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2654 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2655 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2656 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2657 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2658 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2659 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2660 unlock_user_struct(target_md, target_addr, 0);
2661 return 0;
2662 }
2663
2664 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2665 struct msqid_ds *host_md)
2666 {
2667 struct target_msqid_ds *target_md;
2668
2669 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2670 return -TARGET_EFAULT;
2671 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2672 return -TARGET_EFAULT;
2673 target_md->msg_stime = tswapal(host_md->msg_stime);
2674 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2675 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2676 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2677 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2678 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2679 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2680 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2681 unlock_user_struct(target_md, target_addr, 1);
2682 return 0;
2683 }
2684
/* Guest-layout struct msginfo, returned by msgctl(IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;              /* size of message pool (kbytes) */
    int msgmap;               /* # of entries in message map */
    int msgmax;               /* max size of a single message */
    int msgmnb;               /* default max bytes per queue */
    int msgmni;               /* max # of message queue ids */
    int msgssz;               /* message segment size */
    int msgtql;               /* max # of messages system-wide */
    unsigned short int msgseg;/* max # of segments */
};
2695
2696 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2697 struct msginfo *host_msginfo)
2698 {
2699 struct target_msginfo *target_msginfo;
2700 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2701 return -TARGET_EFAULT;
2702 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2703 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2704 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2705 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2706 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2707 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2708 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2709 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2710 unlock_user_struct(target_msginfo, target_addr, 1);
2711 return 0;
2712 }
2713
/* Emulate msgctl(2): convert the guest buffer at ptr as required by
   cmd, run the host msgctl(), and convert the result back.
   Returns the host result or a negative target errno. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;   /* unknown cmds fall through to this */

    /* Strip IPC_64 and friends from the high bits of the command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        /* NOTE(review): dsarg is converted back even when msgctl()
           failed, writing an unmodified/garbage struct to the guest —
           confirm whether this should be guarded by !is_error(ret). */
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        /* Removal ignores the buffer argument. */
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel writes a struct msginfo through the msqid_ds
           pointer for these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
2745
/* Guest-layout struct msgbuf: the type word followed by the message
   payload (declared [1], used as a variable-length trailer). */
struct target_msgbuf {
    abi_long mtype;   /* message type, must be > 0 */
    char mtext[1];    /* message payload (variable length) */
};
2750
2751 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2752 unsigned int msgsz, int msgflg)
2753 {
2754 struct target_msgbuf *target_mb;
2755 struct msgbuf *host_mb;
2756 abi_long ret = 0;
2757
2758 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2759 return -TARGET_EFAULT;
2760 host_mb = malloc(msgsz+sizeof(long));
2761 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2762 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2763 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2764 free(host_mb);
2765 unlock_user_struct(target_mb, msgp, 0);
2766
2767 return ret;
2768 }
2769
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the payload
   and (swapped) mtype back into the guest msgbuf at msgp.
   Returns the byte count received or a negative target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: a native long mtype followed by up to msgsz bytes. */
    host_mb = g_malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext follows mtype in the guest struct.  NOTE(review): the
           offset uses sizeof(abi_ulong) while mtype is abi_long —
           presumably identical sizes on all targets; confirm. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        /* Lock the payload region separately so writes are committed
           for exactly 'ret' bytes. */
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* Written back even on msgrcv() failure; the guest buffer was
       locked for writing in any case. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (the NULL case returned above);
       the guard is redundant but harmless. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
2804
/* Guest-layout struct shmid_ds.  On 32-bit targets each time field is
   followed by a padding word, matching the target kernel ABI. */
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;  /* ownership and permissions */
    abi_ulong shm_segsz;              /* segment size in bytes */
    abi_ulong shm_atime;              /* last shmat() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;              /* last shmdt() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    int shm_cpid;                     /* pid of creator */
    int shm_lpid;                     /* pid of last shmat/shmdt */
    abi_ulong shm_nattch;             /* number of current attaches */
    unsigned long int __unused4;
    unsigned long int __unused5;
};
2827
2828 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2829 abi_ulong target_addr)
2830 {
2831 struct target_shmid_ds *target_sd;
2832
2833 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2834 return -TARGET_EFAULT;
2835 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2836 return -TARGET_EFAULT;
2837 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2838 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2839 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2840 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2841 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2842 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2843 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2844 unlock_user_struct(target_sd, target_addr, 0);
2845 return 0;
2846 }
2847
2848 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2849 struct shmid_ds *host_sd)
2850 {
2851 struct target_shmid_ds *target_sd;
2852
2853 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2854 return -TARGET_EFAULT;
2855 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2856 return -TARGET_EFAULT;
2857 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2858 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2859 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2860 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2861 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2862 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2863 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2864 unlock_user_struct(target_sd, target_addr, 1);
2865 return 0;
2866 }
2867
/* Guest-layout struct shminfo, returned by shmctl(IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max # of segment ids */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
2875
2876 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2877 struct shminfo *host_shminfo)
2878 {
2879 struct target_shminfo *target_shminfo;
2880 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2881 return -TARGET_EFAULT;
2882 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2883 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2884 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2885 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2886 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2887 unlock_user_struct(target_shminfo, target_addr, 1);
2888 return 0;
2889 }
2890
/* Guest-layout struct shm_info, returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* # of currently existing segments */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;    /* historical, unused by modern kernels */
    abi_ulong swap_successes;   /* historical, unused by modern kernels */
};
2899
2900 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2901 struct shm_info *host_shm_info)
2902 {
2903 struct target_shm_info *target_shm_info;
2904 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2905 return -TARGET_EFAULT;
2906 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2907 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2908 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2909 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2910 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2911 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2912 unlock_user_struct(target_shm_info, target_addr, 1);
2913 return 0;
2914 }
2915
/* Emulate shmctl(2): convert the guest buffer at buf as required by
   cmd, run the host shmctl(), and convert the result back.
   Returns the host result or a negative target errno. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;   /* unknown cmds fall through to this */

    /* Strip IPC_64 and friends from the high bits of the command. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        /* NOTE(review): dsarg is converted back even when shmctl()
           failed — confirm whether this should be guarded by
           !is_error(ret). */
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel writes a struct shminfo through the shmid_ds
           pointer for this command, hence the cast. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, a struct shm_info for SHM_INFO. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These commands ignore the buffer argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
2954
/* Emulate shmat(2): attach the segment into the guest address space,
   either at the guest-requested address or at a free region found via
   mmap_find_vma(), then mark the guest pages valid and record the
   attach for do_shmdt().  Runs under mmap_lock to keep the guest
   address-space bookkeeping consistent.
   Returns the guest attach address or a negative target errno. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* Pick a free guest VA range large enough for the segment. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the reserved range may already be mapped. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Make the pages visible to the guest; writable unless RDONLY. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the attach in the first free slot so do_shmdt() can undo
       the page flags.  If the table is full the attach still succeeds
       but is simply not tracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
3007
3008 static inline abi_long do_shmdt(abi_ulong shmaddr)
3009 {
3010 int i;
3011
3012 for (i = 0; i < N_SHM_REGIONS; ++i) {
3013 if (shm_regions[i].start == shmaddr) {
3014 shm_regions[i].start = 0;
3015 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3016 break;
3017 }
3018 }
3019
3020 return get_errno(shmdt(g2h(shmaddr)));
3021 }
3022
3023 #ifdef TARGET_NR_ipc
3024 /* ??? This only works with linear mappings. */
3025 /* do_ipc() must return target values and target errnos. */
/* Demultiplex the legacy ipc(2) syscall onto the individual SysV IPC
   helpers.  The low 16 bits of 'call' select the operation; the high
   16 bits carry an interface version used by msgrcv/shmat.
   Returns target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        /* ptr holds the guest semun value/address; reinterpret it. */
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old interface: ptr points at a kludge struct that
                   bundles the message pointer and type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            /* New interface: msgtyp is passed directly in 'fifth'. */
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* Store the attach address through the guest pointer in
               'third'. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
3122 #endif
3123
3124 /* kernel structure types definitions */
3125
/* First expansion of syscall_types.h: generate a STRUCT_<name> enum
   constant for every structure description (including "special" ones
   whose definitions are hand-written elsewhere). */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: generate a struct_<name>_def[] argtype table for
   every regular structure description, terminated by TYPE_NULL. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler signature for ioctls that need more than the generic
   thunk-based argument conversion. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

/* One entry in the ioctl translation table. */
struct IOCTLEntry {
    unsigned int target_cmd;     /* ioctl number as seen by the guest */
    unsigned int host_cmd;       /* corresponding host ioctl number */
    const char *name;            /* for logging/diagnostics */
    int access;                  /* IOC_R / IOC_W / IOC_RW direction */
    do_ioctl_fn *do_ioctl;       /* custom handler, or NULL for generic */
    const argtype arg_type[5];   /* thunk description of the argument */
};

/* Argument direction flags for IOCTLEntry.access. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed on-stack conversion buffer for ioctl arguments. */
#define MAX_STRUCT_SIZE 4096
3159
3160 #ifdef CONFIG_FIEMAP
3161 /* So fiemap access checks don't overflow on 32 bit systems.
3162 * This is very slightly smaller than the limit imposed by
3163 * the underlying kernel.
3164 */
3165 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3166 / sizeof(struct fiemap_extent))
3167
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    /* Skip the TYPE_PTR marker; arg_type now describes the pointee. */
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    /* Convert the guest header into buf_temp in host layout. */
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject counts that would overflow the size calculation below. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                /* The extents follow the header in guest memory. */
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
3247 #endif
3248
3249 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3250 int fd, abi_long cmd, abi_long arg)
3251 {
3252 const argtype *arg_type = ie->arg_type;
3253 int target_size;
3254 void *argptr;
3255 int ret;
3256 struct ifconf *host_ifconf;
3257 uint32_t outbufsz;
3258 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3259 int target_ifreq_size;
3260 int nb_ifreq;
3261 int free_buf = 0;
3262 int i;
3263 int target_ifc_len;
3264 abi_long target_ifc_buf;
3265 int host_ifc_len;
3266 char *host_ifc_buf;
3267
3268 assert(arg_type[0] == TYPE_PTR);
3269 assert(ie->access == IOC_RW);
3270
3271 arg_type++;
3272 target_size = thunk_type_size(arg_type, 0);
3273
3274 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3275 if (!argptr)
3276 return -TARGET_EFAULT;
3277 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3278 unlock_user(argptr, arg, 0);
3279
3280 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3281 target_ifc_len = host_ifconf->ifc_len;
3282 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3283
3284 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3285 nb_ifreq = target_ifc_len / target_ifreq_size;
3286 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3287
3288 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3289 if (outbufsz > MAX_STRUCT_SIZE) {
3290 /* We can't fit all the extents into the fixed size buffer.
3291 * Allocate one that is large enough and use it instead.
3292 */
3293 host_ifconf = malloc(outbufsz);
3294 if (!host_ifconf) {
3295 return -TARGET_ENOMEM;
3296 }
3297 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3298 free_buf = 1;
3299 }
3300 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3301
3302 host_ifconf->ifc_len = host_ifc_len;
3303 host_ifconf->ifc_buf = host_ifc_buf;
3304
3305 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3306 if (!is_error(ret)) {
3307 /* convert host ifc_len to target ifc_len */
3308
3309 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3310 target_ifc_len = nb_ifreq * target_ifreq_size;
3311 host_ifconf->ifc_len = target_ifc_len;
3312
3313 /* restore target ifc_buf */
3314
3315 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3316
3317 /* copy struct ifconf to target user */
3318
3319 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3320 if (!argptr)
3321 return -TARGET_EFAULT;
3322 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3323 unlock_user(argptr, arg, target_size);
3324
3325 /* copy ifreq[] to target user */
3326
3327 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3328 for (i = 0; i < nb_ifreq ; i++) {
3329 thunk_convert(argptr + i * target_ifreq_size,
3330 host_ifc_buf + i * sizeof(struct ifreq),
3331 ifreq_arg_type, THUNK_TARGET);
3332 }
3333 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3334 }
3335
3336 if (free_buf) {
3337 free(host_ifconf);
3338 }
3339
3340 return ret;
3341 }
3342
/* Convert and execute a device-mapper (DM_*) ioctl.
 *
 * A struct dm_ioctl carries a variable-sized payload described by its
 * data_start/data_size fields, and the payload layout differs per command,
 * so the generic thunk machinery cannot handle it.  The fixed header is
 * thunk-converted; the payload is converted by hand per command, both on
 * the way in (guest -> host) and on the way out (host -> guest).
 * Returns the host ioctl result, or a target errno on conversion failure.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            abi_long cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed-size dm_ioctl header from the guest. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    /* NOTE(review): data_size is guest-controlled; *2 gives slack for host
     * structures being larger than the target's — confirm it is sufficient. */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    /* Payload in guest memory starts data_start bytes past the header. */
    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        /* NOTE(review): plain -EINVAL, not -TARGET_EINVAL — verify intended. */
        ret = -EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* NOTE(review): lock_user result is not NULL-checked before the memcpy
     * calls below; a bad guest pointer would be dereferenced — TODO confirm. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    /* Convert the inbound payload according to the command. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* Leading uint64_t sector number needs byte-swapping. */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is target_count dm_target_spec structs, each followed by
         * a NUL-terminated parameter string, chained via the 'next' offset. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            /* Repack: host 'next' offsets reflect host struct sizes. */
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* Convert the outbound payload back to target format.  The kernel
         * may have updated data_start/data_size. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): lock_user result unchecked here as well — TODO. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of dm_name_list entries; recompute 'next' offsets for
             * the target layout and stop when the guest buffer is full. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* target_count dm_target_spec entries, each with a trailing
             * status string; 'next' is an offset from the payload start. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* uint32 count (+4 bytes padding), then count uint64 dev_ts. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions entries, same walking scheme as
             * DM_LIST_DEVICES above. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Write the (possibly flag-updated) header back to the guest. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
3565
3566 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3567 int fd, abi_long cmd, abi_long arg)
3568 {
3569 const argtype *arg_type = ie->arg_type;
3570 const StructEntry *se;
3571 const argtype *field_types;
3572 const int *dst_offsets, *src_offsets;
3573 int target_size;
3574 void *argptr;
3575 abi_ulong *target_rt_dev_ptr;
3576 unsigned long *host_rt_dev_ptr;
3577 abi_long ret;
3578 int i;
3579
3580 assert(ie->access == IOC_W);
3581 assert(*arg_type == TYPE_PTR);
3582 arg_type++;
3583 assert(*arg_type == TYPE_STRUCT);
3584 target_size = thunk_type_size(arg_type, 0);
3585 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3586 if (!argptr) {
3587 return -TARGET_EFAULT;
3588 }
3589 arg_type++;
3590 assert(*arg_type == (int)STRUCT_rtentry);
3591 se = struct_entries + *arg_type++;
3592 assert(se->convert[0] == NULL);
3593 /* convert struct here to be able to catch rt_dev string */
3594 field_types = se->field_types;
3595 dst_offsets = se->field_offsets[THUNK_HOST];
3596 src_offsets = se->field_offsets[THUNK_TARGET];
3597 for (i = 0; i < se->nb_fields; i++) {
3598 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3599 assert(*field_types == TYPE_PTRVOID);
3600 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3601 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3602 if (*target_rt_dev_ptr != 0) {
3603 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3604 tswapal(*target_rt_dev_ptr));
3605 if (!*host_rt_dev_ptr) {
3606 unlock_user(argptr, arg, 0);
3607 return -TARGET_EFAULT;
3608 }
3609 } else {
3610 *host_rt_dev_ptr = 0;
3611 }
3612 field_types++;
3613 continue;
3614 }
3615 field_types = thunk_convert(buf_temp + dst_offsets[i],
3616 argptr + src_offsets[i],
3617 field_types, THUNK_HOST);
3618 }
3619 unlock_user(argptr, arg, 0);
3620
3621 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3622 if (*host_rt_dev_ptr != 0) {
3623 unlock_user((void *)*host_rt_dev_ptr,
3624 *target_rt_dev_ptr, 0);
3625 }
3626 return ret;
3627 }
3628
/* Table of all translatable ioctls, expanded from ioctls.h.  Plain IOCTL()
 * entries are converted generically by do_ioctl() from their thunk argument
 * descriptors; IOCTL_SPECIAL() entries supply a custom conversion function
 * in the do_ioctl field.  The all-zero entry terminates the table. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
3637
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos.
 *
 * Generic ioctl dispatcher: look up cmd in ioctl_entries, delegate to a
 * command-specific handler if one is registered, otherwise convert the
 * argument according to its thunk type descriptor:
 *   TYPE_NULL          - no argument
 *   TYPE_PTRVOID/INT   - argument passed through unchanged
 *   TYPE_PTR + IOC_R   - host fills buf_temp, converted out to the guest
 *   TYPE_PTR + IOC_W   - guest struct converted into buf_temp, then ioctl
 *   TYPE_PTR + IOC_RW  - both directions
 */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the table; terminated by the zero entry. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Command-specific converter registered via IOCTL_SPECIAL(). */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Read-only for the guest: run first, then copy out. */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Write-only for the guest: copy in, then run. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Both directions: copy in, run, copy back out on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
3725
/* Bitmask translation tables for the four termios flag words
 * (c_iflag/c_oflag/c_cflag/c_lflag).  Each row maps a (mask, value) pair
 * in the target's encoding to the equivalent (mask, value) pair in the
 * host's; used by target_to_host_bitmask()/host_to_target_bitmask().
 * The all-zero row terminates each table. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
3825
3826 static void target_to_host_termios (void *dst, const void *src)
3827 {
3828 struct host_termios *host = dst;
3829 const struct target_termios *target = src;
3830
3831 host->c_iflag =
3832 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3833 host->c_oflag =
3834 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3835 host->c_cflag =
3836 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3837 host->c_lflag =
3838 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3839 host->c_line = target->c_line;
3840
3841 memset(host->c_cc, 0, sizeof(host->c_cc));
3842 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3843 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3844 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3845 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3846 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3847 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3848 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3849 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3850 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3851 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3852 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3853 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3854 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3855 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3856 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3857 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3858 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3859 }
3860
3861 static void host_to_target_termios (void *dst, const void *src)
3862 {
3863 struct target_termios *target = dst;
3864 const struct host_termios *host = src;
3865
3866 target->c_iflag =
3867 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3868 target->c_oflag =
3869 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3870 target->c_cflag =
3871 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3872 target->c_lflag =
3873 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3874 target->c_line = host->c_line;
3875
3876 memset(target->c_cc, 0, sizeof(target->c_cc));
3877 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3878 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3879 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3880 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3881 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3882 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3883 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3884 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3885 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3886 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3887 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3888 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3889 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3890 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3891 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3892 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3893 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3894 }
3895
/* Thunk descriptor for struct termios: converts via the two functions
 * above rather than a field-by-field type description, since the flag
 * words and c_cc layout need semantic translation. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
3901
/* Translation table for mmap() flag bits (target encoding -> host
 * encoding); terminated by the all-zero row. */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ 0, 0, 0, 0 }
};
3913
3914 #if defined(TARGET_I386)
3915
3916 /* NOTE: there is really one LDT for all the threads */
3917 static uint8_t *ldt_table;
3918
3919 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3920 {
3921 int size;
3922 void *p;
3923
3924 if (!ldt_table)
3925 return 0;
3926 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3927 if (size > bytecount)
3928 size = bytecount;
3929 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3930 if (!p)
3931 return -TARGET_EFAULT;
3932 /* ??? Should this by byteswapped? */
3933 memcpy(p, ldt_table, size);
3934 unlock_user(p, ptr, size);
3935 return size;
3936 }
3937
/* XXX: add locking support */
/* Install or clear one entry in the emulated LDT (modify_ldt func 1/0x11).
 *
 * Reads a target_modify_ldt_ldt_s descriptor from guest memory at ptr,
 * unpacks its flag bits, and packs them into the two 32-bit halves of an
 * x86 segment descriptor, mirroring the Linux kernel's write_ldt().
 * oldmode selects the legacy func==1 semantics (no 'useable' bit, no
 * read-only-segment clearing shortcut).  Allocates the LDT backing store
 * on first use.  Returns 0 or a target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 would be a conforming code segment; reject as the
     * kernel does. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two descriptor words (see the Intel
     * SDM segment descriptor layout). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  Entries are 8 bytes each. */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
4029
4030 /* specific and weird i386 syscalls */
4031 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4032 unsigned long bytecount)
4033 {
4034 abi_long ret;
4035
4036 switch (func) {
4037 case 0:
4038 ret = read_ldt(ptr, bytecount);
4039 break;
4040 case 1:
4041 ret = write_ldt(env, ptr, bytecount, 1);
4042 break;
4043 case 0x11:
4044 ret = write_ldt(env, ptr, bytecount, 0);
4045 break;
4046 default:
4047 ret = -TARGET_ENOSYS;
4048 break;
4049 }
4050 return ret;
4051 }
4052
4053 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area: install a TLS descriptor in the emulated GDT.
 *
 * Reads a target_modify_ldt_ldt_s from guest memory at ptr.  If
 * entry_number is -1, picks the first free TLS slot and writes the chosen
 * index back to the guest struct.  Packs base/limit/flags into a segment
 * descriptor exactly like write_ldt() above.  Returns 0 or a target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS slot for me". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two descriptor words. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
4138
/* get_thread_area: read a TLS descriptor back out of the emulated GDT.
 *
 * The guest supplies the entry_number in the struct at ptr; the descriptor
 * words are unpacked into base_addr/limit/flags (inverse of the packing in
 * do_set_thread_area) and written back to the same struct.
 * Returns 0, -TARGET_EFAULT, or -TARGET_EINVAL for an out-of-range slot.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor bits (see do_set_thread_area for the layout). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
4185 #endif /* TARGET_I386 && TARGET_ABI32 */
4186
4187 #ifndef TARGET_ABI32
4188 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4189 {
4190 abi_long ret = 0;
4191 abi_ulong val;
4192 int idx;
4193
4194 switch(code) {
4195 case TARGET_ARCH_SET_GS:
4196 case TARGET_ARCH_SET_FS:
4197 if (code == TARGET_ARCH_SET_GS)
4198 idx = R_GS;
4199 else
4200 idx = R_FS;
4201 cpu_x86_load_seg(env, idx, 0);
4202 env->segs[idx].base = addr;
4203 break;
4204 case TARGET_ARCH_GET_GS:
4205 case TARGET_ARCH_GET_FS:
4206 if (code == TARGET_ARCH_GET_GS)
4207 idx = R_GS;
4208 else
4209 idx = R_FS;
4210 val = env->segs[idx].base;
4211 if (put_user(val, addr, abi_ulong))
4212 ret = -TARGET_EFAULT;
4213 break;
4214 default:
4215 ret = -TARGET_EINVAL;
4216 break;
4217 }
4218 return ret;
4219 }
4220 #endif
4221
4222 #endif /* defined(TARGET_I386) */
4223
4224 #define NEW_STACK_SIZE 0x40000
4225
4226
/* Serializes thread creation so TLS setup in do_fork() appears atomic
 * to the child (clone_func blocks on it until the parent is done). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;          /* child's CPU state, set up by the parent */
    pthread_mutex_t mutex;      /* protects the ready handshake below */
    pthread_cond_t cond;        /* child signals this once it has its tid */
    pthread_t thread;
    uint32_t tid;               /* child's host tid, filled in by the child */
    abi_ulong child_tidptr;     /* CLONE_CHILD_SETTID target, or 0 */
    abi_ulong parent_tidptr;    /* CLONE_PARENT_SETTID target, or 0 */
    sigset_t sigmask;           /* parent's signal mask, restored in child */
} new_thread_info;
4238
/* Entry point of a CLONE_VM child thread created by do_fork().
 *
 * Publishes the new thread's tid (to the info struct and, per the clone
 * flags, to guest memory), restores the signal mask the parent saved,
 * signals the parent that setup is complete, waits for the parent to
 * finish TLS initialization (via clone_lock), then enters cpu_loop(),
 * which never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)env->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* CLONE_CHILD_SETTID / CLONE_PARENT_SETTID semantics. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
4270
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Emulates clone()/fork()/vfork(): with CLONE_VM a host pthread is
   created sharing this process's address space; without it a real
   fork() is performed.  Returns the new tid/pid, or -1 / -EINVAL on
   failure. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    int ret;
    TaskState *ts;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        cpu_reset(ENV_GET_CPU(new_env));
#endif
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        /* Remember the NPTL flags, but strip them before any validation
           that treats them as unsupported. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the attr-setup return codes below are overwritten;
           only pthread_create's result is actually checked. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed into cpu_loop(). */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            return -EINVAL;
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
4384
4385 /* warning : doesn't handle linux specific flags... */
4386 static int target_to_host_fcntl_cmd(int cmd)
4387 {
4388 switch(cmd) {
4389 case TARGET_F_DUPFD:
4390 case TARGET_F_GETFD:
4391 case TARGET_F_SETFD:
4392 case TARGET_F_GETFL:
4393 case TARGET_F_SETFL:
4394 return cmd;
4395 case TARGET_F_GETLK:
4396 return F_GETLK;
4397 case TARGET_F_SETLK:
4398 return F_SETLK;
4399 case TARGET_F_SETLKW:
4400 return F_SETLKW;
4401 case TARGET_F_GETOWN:
4402 return F_GETOWN;
4403 case TARGET_F_SETOWN:
4404 return F_SETOWN;
4405 case TARGET_F_GETSIG:
4406 return F_GETSIG;
4407 case TARGET_F_SETSIG:
4408 return F_SETSIG;
4409 #if TARGET_ABI_BITS == 32
4410 case TARGET_F_GETLK64:
4411 return F_GETLK64;
4412 case TARGET_F_SETLK64:
4413 return F_SETLK64;
4414 case TARGET_F_SETLKW64:
4415 return F_SETLKW64;
4416 #endif
4417 case TARGET_F_SETLEASE:
4418 return F_SETLEASE;
4419 case TARGET_F_GETLEASE:
4420 return F_GETLEASE;
4421 #ifdef F_DUPFD_CLOEXEC
4422 case TARGET_F_DUPFD_CLOEXEC:
4423 return F_DUPFD_CLOEXEC;
4424 #endif
4425 case TARGET_F_NOTIFY:
4426 return F_NOTIFY;
4427 default:
4428 return -TARGET_EINVAL;
4429 }
4430 return -TARGET_EINVAL;
4431 }
4432
/* Build one bitmask_transtbl entry mapping TARGET_x <-> x.  The mask
   fields are -1 so the whole value participates in the match (l_type
   values are enumerations, not independent bit flags). */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* Translation table for struct flock / flock64 l_type values. */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }      /* table terminator */
};
4442
/* Emulate fcntl(2): translate the command and, for the lock commands,
   marshal struct flock / flock64 between target and host layouts
   (byte-swapping each field).  Returns the host result or a negative
   target errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy in the guest's query lock, run the host fcntl, and copy
           the (possibly updated) description back out on success. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        /* Input-only: the kernel does not modify the flock on SETLK. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        /* NOTE(review): the >> 1 compensates for a one-bit shift in the
           target flock64 l_type encoding relative to flock — confirm
           against the target_flock64 definition. */
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Returned O_* flags must be translated back to target values. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Plain integer argument; pass through unchanged. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: forward verbatim and let the host decide. */
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
4557
4558 #ifdef USE_UID16
4559
/* Clamp a 32-bit uid into 16-bit range; values that do not fit map to
   the conventional "overflow" uid 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
4567
/* Clamp a 32-bit gid into 16-bit range; values that do not fit map to
   the conventional "overflow" gid 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
4575
/* Widen a 16-bit uid; the 16-bit sentinel 0xffff (-1) stays the
   all-ones "no change" value in 32 bits. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
4583
/* Widen a 16-bit gid; the 16-bit sentinel 0xffff (-1) stays the
   all-ones "no change" value in 32 bits. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between host and target endianness. */
static inline int tswapid(int id)
{
    return tswap16(id);
}
4595 #else /* !USE_UID16 */
/* Without USE_UID16 target uids are full width; no conversion needed. */
static inline int high2lowuid(int uid)
{
    return uid;
}
/* Without USE_UID16 target gids are full width; no conversion needed. */
static inline int high2lowgid(int gid)
{
    return gid;
}
/* Identity: 32-bit target uids need no widening. */
static inline int low2highuid(int uid)
{
    return uid;
}
/* Identity: 32-bit target gids need no widening. */
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between host and target endianness. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
4616 #endif /* USE_UID16 */
4617
/* One-time initialization of syscall emulation: register thunk struct
   layouts, build the errno reverse-translation table, and patch ioctl
   command numbers whose size field depends on the target ABI. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

/* Expand syscall_types.h so each STRUCT(...) line registers its layout
   with the thunk marshalling machinery. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field
       (an all-ones size marks "fill in from the thunk type at runtime"). */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
4667
4668 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two 32-bit registers a
   32-bit guest ABI splits it into; which register carries the high
   half depends on the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t hi, lo;

#ifdef TARGET_WORDS_BIGENDIAN
    hi = word0;
    lo = word1;
#else
    hi = word1;
    lo = word0;
#endif
    return (hi << 32) | lo;
}
4677 #else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset arrives whole in one register; word1 is a
   dummy kept only so both variants share a call signature. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
4682 #endif /* TARGET_ABI_BITS != 32 */
4683
4684 #ifdef TARGET_NR_truncate64
/* Emulate truncate64.  Some ABIs (see regpairs_aligned) pass 64-bit
   syscall arguments in aligned register pairs, inserting a pad
   register, so the offset halves live in arg3/arg4 instead of
   arg2/arg3 — shift them down before reassembling the offset. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
4696 #endif
4697
4698 #ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64; same register-pair alignment handling as
   target_truncate64 above, with arg1 holding the fd. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
4710 #endif
4711
/* Copy a target struct timespec at guest address target_addr into
   *host_ts, byte-swapping the fields.  Returns 0 on success or
   -TARGET_EFAULT if the guest memory is unreadable. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
4724
/* Copy *host_ts out to the target struct timespec at guest address
   target_addr, byte-swapping the fields.  Returns 0 on success or
   -TARGET_EFAULT if the guest memory is unwritable. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
4737
4738 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat out to the guest's stat64 layout at
   target_addr, field by field with byte-swapping (__put_user).
   On ARM the EABI variant uses its own padded layout.  Returns 0 or
   -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Layouts with a broken st_ino also expose the real inode in
           __st_ino; fill both. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
4801 #endif
4802
4803 /* ??? Using host futex calls even when target atomic operations
4804 are not really atomic probably breaks things. However implementing
4805 futexes locally would make futexes shared between multiple processes
4806 tricky. However they're probably useless because guest atomic
4807 operations won't work either. */
4808 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4809 target_ulong uaddr2, int val3)
4810 {
4811 struct timespec ts, *pts;
4812 int base_op;
4813
4814 /* ??? We assume FUTEX_* constants are the same on both host
4815 and target. */
4816 #ifdef FUTEX_CMD_MASK
4817 base_op = op & FUTEX_CMD_MASK;
4818 #else
4819 base_op = op;
4820 #endif
4821 switch (base_op) {
4822 case FUTEX_WAIT:
4823 case FUTEX_WAIT_BITSET:
4824 if (timeout) {
4825 pts = &ts;
4826 target_to_host_timespec(pts, timeout);
4827 } else {
4828 pts = NULL;
4829 }
4830 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4831 pts, NULL, val3));
4832 case FUTEX_WAKE:
4833 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4834 case FUTEX_FD:
4835 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4836 case FUTEX_REQUEUE:
4837 case FUTEX_CMP_REQUEUE:
4838 case FUTEX_WAKE_OP:
4839 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4840 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4841 But the prototype takes a `struct timespec *'; insert casts
4842 to satisfy the compiler. We do not need to tswap TIMEOUT
4843 since it's not compared to guest memory. */
4844 pts = (struct timespec *)(uintptr_t) timeout;
4845 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4846 g2h(uaddr2),
4847 (base_op == FUTEX_CMP_REQUEUE
4848 ? tswap32(val3)
4849 : val3)));
4850 default:
4851 return -TARGET_ENOSYS;
4852 }
4853 }
4854
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int target_sig = host_to_target_signal(WTERMSIG(status));
        return target_sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int target_sig = host_to_target_signal(WSTOPSIG(status));
        return (target_sig << 8) | (status & 0xff);
    }
    /* Exited normally (or other): no signal number embedded. */
    return status;
}
4868
4869 int get_osversion(void)
4870 {
4871 static int osversion;
4872 struct new_utsname buf;
4873 const char *s;
4874 int i, n, tmp;
4875 if (osversion)
4876 return osversion;
4877 if (qemu_uname_release && *qemu_uname_release) {
4878 s = qemu_uname_release;
4879 } else {
4880 if (sys_uname(&buf))
4881 return 0;
4882 s = buf.release;
4883 }
4884 tmp = 0;
4885 for (i = 0; i < 3; i++) {
4886 n = 0;
4887 while (*s >= '0' && *s <= '9') {
4888 n *= 10;
4889 n += *s - '0';
4890 s++;
4891 }
4892 tmp = (tmp << 8) + n;
4893 if (*s == '.')
4894 s++;
4895 }
4896 osversion = tmp;
4897 return osversion;
4898 }
4899
4900
4901 static int open_self_maps(void *cpu_env, int fd)
4902 {
4903 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4904 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4905 #endif
4906 FILE *fp;
4907 char *line = NULL;
4908 size_t len = 0;
4909 ssize_t read;
4910
4911 fp = fopen("/proc/self/maps", "r");
4912 if (fp == NULL) {
4913 return -EACCES;
4914 }
4915
4916 while ((read = getline(&line, &len, fp)) != -1) {
4917 int fields, dev_maj, dev_min, inode;
4918 uint64_t min, max, offset;
4919 char flag_r, flag_w, flag_x, flag_p;
4920 char path[512] = "";
4921 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4922 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4923 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4924
4925 if ((fields < 10) || (fields > 11)) {
4926 continue;
4927 }
4928 if (!strncmp(path, "[stack]", 7)) {
4929 continue;
4930 }
4931 if (h2g_valid(min) && h2g_valid(max)) {
4932 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4933 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
4934 h2g(min), h2g(max), flag_r, flag_w,
4935 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4936 path[0] ? " " : "", path);
4937 }
4938 }
4939
4940 free(line);
4941 fclose(fp);
4942
4943 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4944 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4945 (unsigned long long)ts->info->stack_limit,
4946 (unsigned long long)(ts->info->start_stack +
4947 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
4948 (unsigned long long)0);
4949 #endif
4950
4951 return 0;
4952 }
4953
4954 static int open_self_stat(void *cpu_env, int fd)
4955 {
4956 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4957 abi_ulong start_stack = ts->info->start_stack;
4958 int i;
4959
4960 for (i = 0; i < 44; i++) {
4961 char buf[128];
4962 int len;
4963 uint64_t val = 0;
4964
4965 if (i == 0) {
4966 /* pid */
4967 val = getpid();
4968 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4969 } else if (i == 1) {
4970 /* app name */
4971 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4972 } else if (i == 27) {
4973 /* stack bottom */
4974 val = start_stack;
4975 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4976 } else {
4977 /* for the rest, there is MasterCard */
4978 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
4979 }
4980
4981 len = strlen(buf);
4982 if (write(fd, buf, len) != len) {
4983 return -1;
4984 }
4985 }
4986
4987 return 0;
4988 }
4989
4990 static int open_self_auxv(void *cpu_env, int fd)
4991 {
4992 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4993 abi_ulong auxv = ts->info->saved_auxv;
4994 abi_ulong len = ts->info->auxv_len;
4995 char *ptr;
4996
4997 /*
4998 * Auxiliary vector is stored in target process stack.
4999 * read in whole auxv vector and copy it to file
5000 */
5001 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5002 if (ptr != NULL) {
5003 while (len > 0) {
5004 ssize_t r;
5005 r = write(fd, ptr, len);
5006 if (r <= 0) {
5007 break;
5008 }
5009 len -= r;
5010 ptr += r;
5011 }
5012 lseek(fd, 0, SEEK_SET);
5013 unlock_user(ptr, auxv, len);
5014 }
5015
5016 return 0;
5017 }
5018
/* Return 1 if filename names the given entry of this process's /proc
   directory — either via "/proc/self/<entry>" or "/proc/<mypid>/<entry>"
   — and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric pid: only our own pid counts as "myself". */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
5042
5043 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5044 {
5045 struct fake_open {
5046 const char *filename;
5047 int (*fill)(void *cpu_env, int fd);
5048 };
5049 const struct fake_open *fake_open;
5050 static const struct fake_open fakes[] = {
5051 { "maps", open_self_maps },
5052 { "stat", open_self_stat },
5053 { "auxv", open_self_auxv },
5054 { NULL, NULL }
5055 };
5056
5057 for (fake_open = fakes; fake_open->filename; fake_open++) {
5058 if (is_proc_myself(pathname, fake_open->filename)) {
5059 break;
5060 }
5061 }
5062
5063 if (fake_open->filename) {
5064 const char *tmpdir;
5065 char filename[PATH_MAX];
5066 int fd, r;
5067
5068 /* create temporary file to map stat to */
5069 tmpdir = getenv("TMPDIR");
5070 if (!tmpdir)
5071 tmpdir = "/tmp";
5072 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5073 fd = mkstemp(filename);
5074 if (fd < 0) {
5075 return fd;
5076 }
5077 unlink(filename);
5078
5079 if ((r = fake_open->fill(cpu_env, fd))) {
5080 close(fd);
5081 return r;
5082 }
5083 lseek(fd, 0, SEEK_SET);
5084
5085 return fd;
5086 }
5087
5088 return get_errno(open(path(pathname), flags, mode));
5089 }
5090
5091 /* do_syscall() should always have a single exit point at the end so
5092 that actions, such as logging of syscall results, can be performed.
5093 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5094 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5095 abi_long arg2, abi_long arg3, abi_long arg4,
5096 abi_long arg5, abi_long arg6, abi_long arg7,
5097 abi_long arg8)
5098 {
5099 CPUState *cpu = ENV_GET_CPU(cpu_env);
5100 abi_long ret;
5101 struct stat st;
5102 struct statfs stfs;
5103 void *p;
5104
5105 #ifdef DEBUG
5106 gemu_log("syscall %d", num);
5107 #endif
5108 if(do_strace)
5109 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5110
5111 switch(num) {
5112 case TARGET_NR_exit:
5113 /* In old applications this may be used to implement _exit(2).
5114 However in threaded applictions it is used for thread termination,
5115 and _exit_group is used for application termination.
5116 Do thread termination if we have more then one thread. */
5117 /* FIXME: This probably breaks if a signal arrives. We should probably
5118 be disabling signals. */
5119 if (first_cpu->next_cpu) {
5120 TaskState *ts;
5121 CPUState **lastp;
5122 CPUState *p;
5123
5124 cpu_list_lock();
5125 lastp = &first_cpu;
5126 p = first_cpu;
5127 while (p && p != cpu) {
5128 lastp = &p->next_cpu;
5129 p = p->next_cpu;
5130 }
5131 /* If we didn't find the CPU for this thread then something is
5132 horribly wrong. */
5133 if (!p) {
5134 abort();
5135 }
5136 /* Remove the CPU from the list. */
5137 *lastp = p->next_cpu;
5138 cpu_list_unlock();
5139 ts = ((CPUArchState *)cpu_env)->opaque;
5140 if (ts->child_tidptr) {
5141 put_user_u32(0, ts->child_tidptr);
5142 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5143 NULL, NULL, 0);
5144 }
5145 thread_cpu = NULL;
5146 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5147 g_free(ts);
5148 pthread_exit(NULL);
5149 }
5150 #ifdef TARGET_GPROF
5151 _mcleanup();
5152 #endif
5153 gdb_exit(cpu_env, arg1);
5154 _exit(arg1);
5155 ret = 0; /* avoid warning */
5156 break;
5157 case TARGET_NR_read:
5158 if (arg3 == 0)
5159 ret = 0;
5160 else {
5161 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5162 goto efault;
5163 ret = get_errno(read(arg1, p, arg3));
5164 unlock_user(p, arg2, ret);
5165 }
5166 break;
5167 case TARGET_NR_write:
5168 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5169 goto efault;
5170 ret = get_errno(write(arg1, p, arg3));
5171 unlock_user(p, arg2, 0);
5172 break;
5173 case TARGET_NR_open:
5174 if (!(p = lock_user_string(arg1)))
5175 goto efault;
5176 ret = get_errno(do_open(cpu_env, p,
5177 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5178 arg3));
5179 unlock_user(p, arg1, 0);
5180 break;
5181 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5182 case TARGET_NR_openat:
5183 if (!(p = lock_user_string(arg2)))
5184 goto efault;
5185 ret = get_errno(sys_openat(arg1,
5186 path(p),
5187 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5188 arg4));
5189 unlock_user(p, arg2, 0);
5190 break;
5191 #endif
5192 case TARGET_NR_close:
5193 ret = get_errno(close(arg1));
5194 break;
5195 case TARGET_NR_brk:
5196 ret = do_brk(arg1);
5197 break;
5198 case TARGET_NR_fork:
5199 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5200 break;
5201 #ifdef TARGET_NR_waitpid
5202 case TARGET_NR_waitpid:
5203 {
5204 int status;
5205 ret = get_errno(waitpid(arg1, &status, arg3));
5206 if (!is_error(ret) && arg2 && ret
5207 && put_user_s32(host_to_target_waitstatus(status), arg2))
5208 goto efault;
5209 }
5210 break;
5211 #endif
5212 #ifdef TARGET_NR_waitid
5213 case TARGET_NR_waitid:
5214 {
5215 siginfo_t info;
5216 info.si_pid = 0;
5217 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5218 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5219 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5220 goto efault;
5221 host_to_target_siginfo(p, &info);
5222 unlock_user(p, arg3, sizeof(target_siginfo_t));
5223 }
5224 }
5225 break;
5226 #endif
5227 #ifdef TARGET_NR_creat /* not on alpha */
5228 case TARGET_NR_creat:
5229 if (!(p = lock_user_string(arg1)))
5230 goto efault;
5231 ret = get_errno(creat(p, arg2));
5232 unlock_user(p, arg1, 0);
5233 break;
5234 #endif
5235 case TARGET_NR_link:
5236 {
5237 void * p2;
5238 p = lock_user_string(arg1);
5239 p2 = lock_user_string(arg2);
5240 if (!p || !p2)
5241 ret = -TARGET_EFAULT;
5242 else
5243 ret = get_errno(link(p, p2));
5244 unlock_user(p2, arg2, 0);
5245 unlock_user(p, arg1, 0);
5246 }
5247 break;
5248 #if defined(TARGET_NR_linkat)
5249 case TARGET_NR_linkat:
5250 {
5251 void * p2 = NULL;
5252 if (!arg2 || !arg4)
5253 goto efault;
5254 p = lock_user_string(arg2);
5255 p2 = lock_user_string(arg4);
5256 if (!p || !p2)
5257 ret = -TARGET_EFAULT;
5258 else
5259 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5260 unlock_user(p, arg2, 0);
5261 unlock_user(p2, arg4, 0);
5262 }
5263 break;
5264 #endif
5265 case TARGET_NR_unlink:
5266 if (!(p = lock_user_string(arg1)))
5267 goto efault;
5268 ret = get_errno(unlink(p));
5269 unlock_user(p, arg1, 0);
5270 break;
5271 #if defined(TARGET_NR_unlinkat)
5272 case TARGET_NR_unlinkat:
5273 if (!(p = lock_user_string(arg2)))
5274 goto efault;
5275 ret = get_errno(unlinkat(arg1, p, arg3));
5276 unlock_user(p, arg2, 0);
5277 break;
5278 #endif
5279 case TARGET_NR_execve:
5280 {
5281 char **argp, **envp;
5282 int argc, envc;
5283 abi_ulong gp;
5284 abi_ulong guest_argp;
5285 abi_ulong guest_envp;
5286 abi_ulong addr;
5287 char **q;
5288 int total_size = 0;
5289
5290 argc = 0;
5291 guest_argp = arg2;
5292 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5293 if (get_user_ual(addr, gp))
5294 goto efault;
5295 if (!addr)
5296 break;
5297 argc++;
5298 }
5299 envc = 0;
5300 guest_envp = arg3;
5301 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5302 if (get_user_ual(addr, gp))
5303 goto efault;
5304 if (!addr)
5305 break;
5306 envc++;
5307 }
5308
5309 argp = alloca((argc + 1) * sizeof(void *));
5310 envp = alloca((envc + 1) * sizeof(void *));
5311
5312 for (gp = guest_argp, q = argp; gp;
5313 gp += sizeof(abi_ulong), q++) {
5314 if (get_user_ual(addr, gp))
5315 goto execve_efault;
5316 if (!addr)
5317 break;
5318 if (!(*q = lock_user_string(addr)))
5319 goto execve_efault;
5320 total_size += strlen(*q) + 1;
5321 }
5322 *q = NULL;
5323
5324 for (gp = guest_envp, q = envp; gp;
5325 gp += sizeof(abi_ulong), q++) {
5326 if (get_user_ual(addr, gp))
5327 goto execve_efault;
5328 if (!addr)
5329 break;
5330 if (!(*q = lock_user_string(addr)))
5331 goto execve_efault;
5332 total_size += strlen(*q) + 1;
5333 }
5334 *q = NULL;
5335
5336 /* This case will not be caught by the host's execve() if its
5337 page size is bigger than the target's. */
5338 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5339 ret = -TARGET_E2BIG;
5340 goto execve_end;
5341 }
5342 if (!(p = lock_user_string(arg1)))
5343 goto execve_efault;
5344 ret = get_errno(execve(p, argp, envp));
5345 unlock_user(p, arg1, 0);
5346
5347 goto execve_end;
5348
5349 execve_efault:
5350 ret = -TARGET_EFAULT;
5351
5352 execve_end:
5353 for (gp = guest_argp, q = argp; *q;
5354 gp += sizeof(abi_ulong), q++) {
5355 if (get_user_ual(addr, gp)
5356 || !addr)
5357 break;
5358 unlock_user(*q, addr, 0);
5359 }
5360 for (gp = guest_envp, q = envp; *q;
5361 gp += sizeof(abi_ulong), q++) {
5362 if (get_user_ual(addr, gp)
5363 || !addr)
5364 break;
5365 unlock_user(*q, addr, 0);
5366 }
5367 }
5368 break;
5369 case TARGET_NR_chdir:
5370 if (!(p = lock_user_string(arg1)))
5371 goto efault;
5372 ret = get_errno(chdir(p));
5373 unlock_user(p, arg1, 0);
5374 break;
5375 #ifdef TARGET_NR_time
5376 case TARGET_NR_time:
5377 {
5378 time_t host_time;
5379 ret = get_errno(time(&host_time));
5380 if (!is_error(ret)
5381 && arg1
5382 && put_user_sal(host_time, arg1))
5383 goto efault;
5384 }
5385 break;
5386 #endif
5387 case TARGET_NR_mknod:
5388 if (!(p = lock_user_string(arg1)))
5389 goto efault;
5390 ret = get_errno(mknod(p, arg2, arg3));
5391 unlock_user(p, arg1, 0);
5392 break;
5393 #if defined(TARGET_NR_mknodat)
5394 case TARGET_NR_mknodat:
5395 if (!(p = lock_user_string(arg2)))
5396 goto efault;
5397 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5398 unlock_user(p, arg2, 0);
5399 break;
5400 #endif
5401 case TARGET_NR_chmod:
5402 if (!(p = lock_user_string(arg1)))
5403 goto efault;
5404 ret = get_errno(chmod(p, arg2));
5405 unlock_user(p, arg1, 0);
5406 break;
5407 #ifdef TARGET_NR_break
5408 case TARGET_NR_break:
5409 goto unimplemented;
5410 #endif
5411 #ifdef TARGET_NR_oldstat
5412 case TARGET_NR_oldstat:
5413 goto unimplemented;
5414 #endif
5415 case TARGET_NR_lseek:
5416 ret = get_errno(lseek(arg1, arg2, arg3));
5417 break;
5418 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5419 /* Alpha specific */
5420 case TARGET_NR_getxpid:
5421 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5422 ret = get_errno(getpid());
5423 break;
5424 #endif
5425 #ifdef TARGET_NR_getpid
5426 case TARGET_NR_getpid:
5427 ret = get_errno(getpid());
5428 break;
5429 #endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;
            p = lock_user_string(arg1);   /* source */
            p2 = lock_user_string(arg2);  /* target directory */
            p3 = lock_user_string(arg3);  /* filesystem type */
            /* NOTE(review): the kernel accepts NULL source/type for some
             * flag combinations (e.g. remount); rejecting any NULL here may
             * be stricter than native behaviour -- confirm against users. */
            if (!p || !p2 || !p3)
                ret = -TARGET_EFAULT;
            else {
                /* FIXME - arg5 should be locked, but it isn't clear how to
                 * do that since it's not guaranteed to be a NULL-terminated
                 * string.
                 */
                if ( ! arg5 )
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
                else
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
            }
            unlock_user(p, arg1, 0);
            unlock_user(p2, arg2, 0);
            unlock_user(p3, arg3, 0);
            break;
        }
5454 #ifdef TARGET_NR_umount
5455 case TARGET_NR_umount:
5456 if (!(p = lock_user_string(arg1)))
5457 goto efault;
5458 ret = get_errno(umount(p));
5459 unlock_user(p, arg1, 0);
5460 break;
5461 #endif
5462 #ifdef TARGET_NR_stime /* not on alpha */
5463 case TARGET_NR_stime:
5464 {
5465 time_t host_time;
5466 if (get_user_sal(host_time, arg1))
5467 goto efault;
5468 ret = get_errno(stime(&host_time));
5469 }
5470 break;
5471 #endif
5472 case TARGET_NR_ptrace:
5473 goto unimplemented;
5474 #ifdef TARGET_NR_alarm /* not on alpha */
5475 case TARGET_NR_alarm:
5476 ret = alarm(arg1);
5477 break;
5478 #endif
5479 #ifdef TARGET_NR_oldfstat
5480 case TARGET_NR_oldfstat:
5481 goto unimplemented;
5482 #endif
5483 #ifdef TARGET_NR_pause /* not on alpha */
5484 case TARGET_NR_pause:
5485 ret = get_errno(pause());
5486 break;
5487 #endif
5488 #ifdef TARGET_NR_utime
5489 case TARGET_NR_utime:
5490 {
5491 struct utimbuf tbuf, *host_tbuf;
5492 struct target_utimbuf *target_tbuf;
5493 if (arg2) {
5494 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5495 goto efault;
5496 tbuf.actime = tswapal(target_tbuf->actime);
5497 tbuf.modtime = tswapal(target_tbuf->modtime);
5498 unlock_user_struct(target_tbuf, arg2, 0);
5499 host_tbuf = &tbuf;
5500 } else {
5501 host_tbuf = NULL;
5502 }
5503 if (!(p = lock_user_string(arg1)))
5504 goto efault;
5505 ret = get_errno(utime(p, host_tbuf));
5506 unlock_user(p, arg1, 0);
5507 }
5508 break;
5509 #endif
5510 case TARGET_NR_utimes:
5511 {
5512 struct timeval *tvp, tv[2];
5513 if (arg2) {
5514 if (copy_from_user_timeval(&tv[0], arg2)
5515 || copy_from_user_timeval(&tv[1],
5516 arg2 + sizeof(struct target_timeval)))
5517 goto efault;
5518 tvp = tv;
5519 } else {
5520 tvp = NULL;
5521 }
5522 if (!(p = lock_user_string(arg1)))
5523 goto efault;
5524 ret = get_errno(utimes(p, tvp));
5525 unlock_user(p, arg1, 0);
5526 }
5527 break;
5528 #if defined(TARGET_NR_futimesat)
5529 case TARGET_NR_futimesat:
5530 {
5531 struct timeval *tvp, tv[2];
5532 if (arg3) {
5533 if (copy_from_user_timeval(&tv[0], arg3)
5534 || copy_from_user_timeval(&tv[1],
5535 arg3 + sizeof(struct target_timeval)))
5536 goto efault;
5537 tvp = tv;
5538 } else {
5539 tvp = NULL;
5540 }
5541 if (!(p = lock_user_string(arg2)))
5542 goto efault;
5543 ret = get_errno(futimesat(arg1, path(p), tvp));
5544 unlock_user(p, arg2, 0);
5545 }
5546 break;
5547 #endif
5548 #ifdef TARGET_NR_stty
5549 case TARGET_NR_stty:
5550 goto unimplemented;
5551 #endif
5552 #ifdef TARGET_NR_gtty
5553 case TARGET_NR_gtty:
5554 goto unimplemented;
5555 #endif
5556 case TARGET_NR_access:
5557 if (!(p = lock_user_string(arg1)))
5558 goto efault;
5559 ret = get_errno(access(path(p), arg2));
5560 unlock_user(p, arg1, 0);
5561 break;
5562 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5563 case TARGET_NR_faccessat:
5564 if (!(p = lock_user_string(arg2)))
5565 goto efault;
5566 ret = get_errno(faccessat(arg1, p, arg3, 0));
5567 unlock_user(p, arg2, 0);
5568 break;
5569 #endif
5570 #ifdef TARGET_NR_nice /* not on alpha */
5571 case TARGET_NR_nice:
5572 ret = get_errno(nice(arg1));
5573 break;
5574 #endif
5575 #ifdef TARGET_NR_ftime
5576 case TARGET_NR_ftime:
5577 goto unimplemented;
5578 #endif
5579 case TARGET_NR_sync:
5580 sync();
5581 ret = 0;
5582 break;
5583 case TARGET_NR_kill:
5584 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5585 break;
5586 case TARGET_NR_rename:
5587 {
5588 void *p2;
5589 p = lock_user_string(arg1);
5590 p2 = lock_user_string(arg2);
5591 if (!p || !p2)
5592 ret = -TARGET_EFAULT;
5593 else
5594 ret = get_errno(rename(p, p2));
5595 unlock_user(p2, arg2, 0);
5596 unlock_user(p, arg1, 0);
5597 }
5598 break;
5599 #if defined(TARGET_NR_renameat)
5600 case TARGET_NR_renameat:
5601 {
5602 void *p2;
5603 p = lock_user_string(arg2);
5604 p2 = lock_user_string(arg4);
5605 if (!p || !p2)
5606 ret = -TARGET_EFAULT;
5607 else
5608 ret = get_errno(renameat(arg1, p, arg3, p2));
5609 unlock_user(p2, arg4, 0);
5610 unlock_user(p, arg2, 0);
5611 }
5612 break;
5613 #endif
5614 case TARGET_NR_mkdir:
5615 if (!(p = lock_user_string(arg1)))
5616 goto efault;
5617 ret = get_errno(mkdir(p, arg2));
5618 unlock_user(p, arg1, 0);
5619 break;
5620 #if defined(TARGET_NR_mkdirat)
5621 case TARGET_NR_mkdirat:
5622 if (!(p = lock_user_string(arg2)))
5623 goto efault;
5624 ret = get_errno(mkdirat(arg1, p, arg3));
5625 unlock_user(p, arg2, 0);
5626 break;
5627 #endif
5628 case TARGET_NR_rmdir:
5629 if (!(p = lock_user_string(arg1)))
5630 goto efault;
5631 ret = get_errno(rmdir(p));
5632 unlock_user(p, arg1, 0);
5633 break;
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        break;
    case TARGET_NR_pipe:
        /* do_pipe handles targets that return both fds in registers
         * rather than storing them through arg1. */
        ret = do_pipe(cpu_env, arg1, 0, 0);
        break;
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* Flags must be translated from guest to host fcntl encoding
         * before reaching the host pipe2(). */
        ret = do_pipe(cpu_env, arg1,
                      target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
        break;
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    goto efault;
                /* Each clock_t field is converted to guest clock-tick units
                 * and byte order before being stored in guest memory. */
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            /* times() itself returns elapsed ticks; convert those too. */
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        break;
5664 #ifdef TARGET_NR_prof
5665 case TARGET_NR_prof:
5666 goto unimplemented;
5667 #endif
5668 #ifdef TARGET_NR_signal
5669 case TARGET_NR_signal:
5670 goto unimplemented;
5671 #endif
5672 case TARGET_NR_acct:
5673 if (arg1 == 0) {
5674 ret = get_errno(acct(NULL));
5675 } else {
5676 if (!(p = lock_user_string(arg1)))
5677 goto efault;
5678 ret = get_errno(acct(path(p)));
5679 unlock_user(p, arg1, 0);
5680 }
5681 break;
5682 #ifdef TARGET_NR_umount2 /* not on alpha */
5683 case TARGET_NR_umount2:
5684 if (!(p = lock_user_string(arg1)))
5685 goto efault;
5686 ret = get_errno(umount2(p, arg2));
5687 unlock_user(p, arg1, 0);
5688 break;
5689 #endif
5690 #ifdef TARGET_NR_lock
5691 case TARGET_NR_lock:
5692 goto unimplemented;
5693 #endif
5694 case TARGET_NR_ioctl:
5695 ret = do_ioctl(arg1, arg2, arg3);
5696 break;
5697 case TARGET_NR_fcntl:
5698 ret = do_fcntl(arg1, arg2, arg3);
5699 break;
5700 #ifdef TARGET_NR_mpx
5701 case TARGET_NR_mpx:
5702 goto unimplemented;
5703 #endif
5704 case TARGET_NR_setpgid:
5705 ret = get_errno(setpgid(arg1, arg2));
5706 break;
5707 #ifdef TARGET_NR_ulimit
5708 case TARGET_NR_ulimit:
5709 goto unimplemented;
5710 #endif
5711 #ifdef TARGET_NR_oldolduname
5712 case TARGET_NR_oldolduname:
5713 goto unimplemented;
5714 #endif
5715 case TARGET_NR_umask:
5716 ret = get_errno(umask(arg1));
5717 break;
5718 case TARGET_NR_chroot:
5719 if (!(p = lock_user_string(arg1)))
5720 goto efault;
5721 ret = get_errno(chroot(p));
5722 unlock_user(p, arg1, 0);
5723 break;
5724 case TARGET_NR_ustat:
5725 goto unimplemented;
5726 case TARGET_NR_dup2:
5727 ret = get_errno(dup2(arg1, arg2));
5728 break;
5729 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5730 case TARGET_NR_dup3:
5731 ret = get_errno(dup3(arg1, arg2, arg3));
5732 break;
5733 #endif
5734 #ifdef TARGET_NR_getppid /* not on alpha */
5735 case TARGET_NR_getppid:
5736 ret = get_errno(getppid());
5737 break;
5738 #endif
5739 case TARGET_NR_getpgrp:
5740 ret = get_errno(getpgrp());
5741 break;
5742 case TARGET_NR_setsid:
5743 ret = get_errno(setsid());
5744 break;
5745 #ifdef TARGET_NR_sigaction
5746 case TARGET_NR_sigaction:
5747 {
5748 #if defined(TARGET_ALPHA)
5749 struct target_sigaction act, oact, *pact = 0;
5750 struct target_old_sigaction *old_act;
5751 if (arg2) {
5752 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5753 goto efault;
5754 act._sa_handler = old_act->_sa_handler;
5755 target_siginitset(&act.sa_mask, old_act->sa_mask);
5756 act.sa_flags = old_act->sa_flags;
5757 act.sa_restorer = 0;
5758 unlock_user_struct(old_act, arg2, 0);
5759 pact = &act;
5760 }
5761 ret = get_errno(do_sigaction(arg1, pact, &oact));
5762 if (!is_error(ret) && arg3) {
5763 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5764 goto efault;
5765 old_act->_sa_handler = oact._sa_handler;
5766 old_act->sa_mask = oact.sa_mask.sig[0];
5767 old_act->sa_flags = oact.sa_flags;
5768 unlock_user_struct(old_act, arg3, 1);
5769 }
5770 #elif defined(TARGET_MIPS)
5771 struct target_sigaction act, oact, *pact, *old_act;
5772
5773 if (arg2) {
5774 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5775 goto efault;
5776 act._sa_handler = old_act->_sa_handler;
5777 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5778 act.sa_flags = old_act->sa_flags;
5779 unlock_user_struct(old_act, arg2, 0);
5780 pact = &act;
5781 } else {
5782 pact = NULL;
5783 }
5784
5785 ret = get_errno(do_sigaction(arg1, pact, &oact));
5786
5787 if (!is_error(ret) && arg3) {
5788 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5789 goto efault;
5790 old_act->_sa_handler = oact._sa_handler;
5791 old_act->sa_flags = oact.sa_flags;
5792 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5793 old_act->sa_mask.sig[1] = 0;
5794 old_act->sa_mask.sig[2] = 0;
5795 old_act->sa_mask.sig[3] = 0;
5796 unlock_user_struct(old_act, arg3, 1);
5797 }
5798 #else
5799 struct target_old_sigaction *old_act;
5800 struct target_sigaction act, oact, *pact;
5801 if (arg2) {
5802 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5803 goto efault;
5804 act._sa_handler = old_act->_sa_handler;
5805 target_siginitset(&act.sa_mask, old_act->sa_mask);
5806 act.sa_flags = old_act->sa_flags;
5807 act.sa_restorer = old_act->sa_restorer;
5808 unlock_user_struct(old_act, arg2, 0);
5809 pact = &act;
5810 } else {
5811 pact = NULL;
5812 }
5813 ret = get_errno(do_sigaction(arg1, pact, &oact));
5814 if (!is_error(ret) && arg3) {
5815 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5816 goto efault;
5817 old_act->_sa_handler = oact._sa_handler;
5818 old_act->sa_mask = oact.sa_mask.sig[0];
5819 old_act->sa_flags = oact.sa_flags;
5820 old_act->sa_restorer = oact.sa_restorer;
5821 unlock_user_struct(old_act, arg3, 1);
5822 }
5823 #endif
5824 }
5825 break;
5826 #endif
5827 case TARGET_NR_rt_sigaction:
5828 {
5829 #if defined(TARGET_ALPHA)
5830 struct target_sigaction act, oact, *pact = 0;
5831 struct target_rt_sigaction *rt_act;
5832 /* ??? arg4 == sizeof(sigset_t). */
5833 if (arg2) {
5834 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5835 goto efault;
5836 act._sa_handler = rt_act->_sa_handler;
5837 act.sa_mask = rt_act->sa_mask;
5838 act.sa_flags = rt_act->sa_flags;
5839 act.sa_restorer = arg5;
5840 unlock_user_struct(rt_act, arg2, 0);
5841 pact = &act;
5842 }
5843 ret = get_errno(do_sigaction(arg1, pact, &oact));
5844 if (!is_error(ret) && arg3) {
5845 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5846 goto efault;
5847 rt_act->_sa_handler = oact._sa_handler;
5848 rt_act->sa_mask = oact.sa_mask;
5849 rt_act->sa_flags = oact.sa_flags;
5850 unlock_user_struct(rt_act, arg3, 1);
5851 }
5852 #else
5853 struct target_sigaction *act;
5854 struct target_sigaction *oact;
5855
5856 if (arg2) {
5857 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5858 goto efault;
5859 } else
5860 act = NULL;
5861 if (arg3) {
5862 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5863 ret = -TARGET_EFAULT;
5864 goto rt_sigaction_fail;
5865 }
5866 } else
5867 oact = NULL;
5868 ret = get_errno(do_sigaction(arg1, act, oact));
5869 rt_sigaction_fail:
5870 if (act)
5871 unlock_user_struct(act, arg2, 0);
5872 if (oact)
5873 unlock_user_struct(oact, arg3, 1);
5874 #endif
5875 }
5876 break;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            /* Query (how=0 modifies nothing) the current host mask, then
             * convert it to the guest's old single-word sigset encoding. */
            sigprocmask(0, NULL, &cur_set);
            host_to_target_old_sigset(&target_set, &cur_set);
            ret = target_set;
        }
        break;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset, cur_set;
            abi_ulong target_set = arg1;
            sigprocmask(0, NULL, &cur_set);
            target_to_host_old_sigset(&set, &target_set);
            /* NOTE(review): the new mask is OR-ed into the current one
             * before being installed -- confirm this matches the kernel's
             * ssetmask semantics, which replace the mask outright. */
            sigorset(&set, &set, &cur_set);
            sigprocmask(SIG_SETMASK, &set, &oset);
            /* ssetmask returns the previous mask. */
            host_to_target_old_sigset(&target_set, &oset);
            ret = target_set;
        }
        break;
#endif
5902 #ifdef TARGET_NR_sigprocmask
5903 case TARGET_NR_sigprocmask:
5904 {
5905 #if defined(TARGET_ALPHA)
5906 sigset_t set, oldset;
5907 abi_ulong mask;
5908 int how;
5909
5910 switch (arg1) {
5911 case TARGET_SIG_BLOCK:
5912 how = SIG_BLOCK;
5913 break;
5914 case TARGET_SIG_UNBLOCK:
5915 how = SIG_UNBLOCK;
5916 break;
5917 case TARGET_SIG_SETMASK:
5918 how = SIG_SETMASK;
5919 break;
5920 default:
5921 ret = -TARGET_EINVAL;
5922 goto fail;
5923 }
5924 mask = arg2;
5925 target_to_host_old_sigset(&set, &mask);
5926
5927 ret = get_errno(sigprocmask(how, &set, &oldset));
5928 if (!is_error(ret)) {
5929 host_to_target_old_sigset(&mask, &oldset);
5930 ret = mask;
5931 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5932 }
5933 #else
5934 sigset_t set, oldset, *set_ptr;
5935 int how;
5936
5937 if (arg2) {
5938 switch (arg1) {
5939 case TARGET_SIG_BLOCK:
5940 how = SIG_BLOCK;
5941 break;
5942 case TARGET_SIG_UNBLOCK:
5943 how = SIG_UNBLOCK;
5944 break;
5945 case TARGET_SIG_SETMASK:
5946 how = SIG_SETMASK;
5947 break;
5948 default:
5949 ret = -TARGET_EINVAL;
5950 goto fail;
5951 }
5952 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5953 goto efault;
5954 target_to_host_old_sigset(&set, p);
5955 unlock_user(p, arg2, 0);
5956 set_ptr = &set;
5957 } else {
5958 how = 0;
5959 set_ptr = NULL;
5960 }
5961 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5962 if (!is_error(ret) && arg3) {
5963 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5964 goto efault;
5965 host_to_target_old_sigset(p, &oldset);
5966 unlock_user(p, arg3, sizeof(target_sigset_t));
5967 }
5968 #endif
5969 }
5970 break;
5971 #endif
5972 case TARGET_NR_rt_sigprocmask:
5973 {
5974 int how = arg1;
5975 sigset_t set, oldset, *set_ptr;
5976
5977 if (arg2) {
5978 switch(how) {
5979 case TARGET_SIG_BLOCK:
5980 how = SIG_BLOCK;
5981 break;
5982 case TARGET_SIG_UNBLOCK:
5983 how = SIG_UNBLOCK;
5984 break;
5985 case TARGET_SIG_SETMASK:
5986 how = SIG_SETMASK;
5987 break;
5988 default:
5989 ret = -TARGET_EINVAL;
5990 goto fail;
5991 }
5992 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5993 goto efault;
5994 target_to_host_sigset(&set, p);
5995 unlock_user(p, arg2, 0);
5996 set_ptr = &set;
5997 } else {
5998 how = 0;
5999 set_ptr = NULL;
6000 }
6001 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6002 if (!is_error(ret) && arg3) {
6003 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6004 goto efault;
6005 host_to_target_sigset(p, &oldset);
6006 unlock_user(p, arg3, sizeof(target_sigset_t));
6007 }
6008 }
6009 break;
6010 #ifdef TARGET_NR_sigpending
6011 case TARGET_NR_sigpending:
6012 {
6013 sigset_t set;
6014 ret = get_errno(sigpending(&set));
6015 if (!is_error(ret)) {
6016 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6017 goto efault;
6018 host_to_target_old_sigset(p, &set);
6019 unlock_user(p, arg1, sizeof(target_sigset_t));
6020 }
6021 }
6022 break;
6023 #endif
6024 case TARGET_NR_rt_sigpending:
6025 {
6026 sigset_t set;
6027 ret = get_errno(sigpending(&set));
6028 if (!is_error(ret)) {
6029 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6030 goto efault;
6031 host_to_target_sigset(p, &set);
6032 unlock_user(p, arg1, sizeof(target_sigset_t));
6033 }
6034 }
6035 break;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t set;
#if defined(TARGET_ALPHA)
            /* Alpha passes the old-style mask by value in arg1. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&set, &mask);
#else
            /* Other targets pass a pointer to a guest sigset. */
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_old_sigset(&set, p);
            unlock_user(p, arg1, 0);
#endif
            /* Blocks with the temporary mask until a signal arrives. */
            ret = get_errno(sigsuspend(&set));
        }
        break;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t set;
            /* Same as sigsuspend but with the full (rt) sigset layout. */
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sigsuspend(&set));
        }
        break;
6063 case TARGET_NR_rt_sigtimedwait:
6064 {
6065 sigset_t set;
6066 struct timespec uts, *puts;
6067 siginfo_t uinfo;
6068
6069 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6070 goto efault;
6071 target_to_host_sigset(&set, p);
6072 unlock_user(p, arg1, 0);
6073 if (arg3) {
6074 puts = &uts;
6075 target_to_host_timespec(puts, arg3);
6076 } else {
6077 puts = NULL;
6078 }
6079 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6080 if (!is_error(ret) && arg2) {
6081 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6082 goto efault;
6083 host_to_target_siginfo(p, &uinfo);
6084 unlock_user(p, arg2, sizeof(target_siginfo_t));
6085 }
6086 }
6087 break;
6088 case TARGET_NR_rt_sigqueueinfo:
6089 {
6090 siginfo_t uinfo;
6091 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6092 goto efault;
6093 target_to_host_siginfo(&uinfo, p);
6094 unlock_user(p, arg1, 0);
6095 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6096 }
6097 break;
6098 #ifdef TARGET_NR_sigreturn
6099 case TARGET_NR_sigreturn:
6100 /* NOTE: ret is eax, so not transcoding must be done */
6101 ret = do_sigreturn(cpu_env);
6102 break;
6103 #endif
6104 case TARGET_NR_rt_sigreturn:
6105 /* NOTE: ret is eax, so not transcoding must be done */
6106 ret = do_rt_sigreturn(cpu_env);
6107 break;
6108 case TARGET_NR_sethostname:
6109 if (!(p = lock_user_string(arg1)))
6110 goto efault;
6111 ret = get_errno(sethostname(p, arg2));
6112 unlock_user(p, arg1, 0);
6113 break;
6114 case TARGET_NR_setrlimit:
6115 {
6116 int resource = target_to_host_resource(arg1);
6117 struct target_rlimit *target_rlim;
6118 struct rlimit rlim;
6119 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6120 goto efault;
6121 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6122 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6123 unlock_user_struct(target_rlim, arg2, 0);
6124 ret = get_errno(setrlimit(resource, &rlim));
6125 }
6126 break;
6127 case TARGET_NR_getrlimit:
6128 {
6129 int resource = target_to_host_resource(arg1);
6130 struct target_rlimit *target_rlim;
6131 struct rlimit rlim;
6132
6133 ret = get_errno(getrlimit(resource, &rlim));
6134 if (!is_error(ret)) {
6135 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6136 goto efault;
6137 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6138 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6139 unlock_user_struct(target_rlim, arg2, 1);
6140 }
6141 }
6142 break;
6143 case TARGET_NR_getrusage:
6144 {
6145 struct rusage rusage;
6146 ret = get_errno(getrusage(arg1, &rusage));
6147 if (!is_error(ret)) {
6148 host_to_target_rusage(arg2, &rusage);
6149 }
6150 }
6151 break;
6152 case TARGET_NR_gettimeofday:
6153 {
6154 struct timeval tv;
6155 ret = get_errno(gettimeofday(&tv, NULL));
6156 if (!is_error(ret)) {
6157 if (copy_to_user_timeval(arg1, &tv))
6158 goto efault;
6159 }
6160 }
6161 break;
6162 case TARGET_NR_settimeofday:
6163 {
6164 struct timeval tv;
6165 if (copy_from_user_timeval(&tv, arg1))
6166 goto efault;
6167 ret = get_errno(settimeofday(&tv, NULL));
6168 }
6169 break;
6170 #if defined(TARGET_NR_select)
6171 case TARGET_NR_select:
6172 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6173 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6174 #else
6175 {
6176 struct target_sel_arg_struct *sel;
6177 abi_ulong inp, outp, exp, tvp;
6178 long nsel;
6179
6180 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6181 goto efault;
6182 nsel = tswapal(sel->n);
6183 inp = tswapal(sel->inp);
6184 outp = tswapal(sel->outp);
6185 exp = tswapal(sel->exp);
6186 tvp = tswapal(sel->tvp);
6187 unlock_user_struct(sel, arg1, 0);
6188 ret = do_select(nsel, inp, outp, exp, tvp);
6189 }
6190 #endif
6191 break;
6192 #endif
6193 #ifdef TARGET_NR_pselect6
6194 case TARGET_NR_pselect6:
6195 {
6196 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6197 fd_set rfds, wfds, efds;
6198 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6199 struct timespec ts, *ts_ptr;
6200
6201 /*
6202 * The 6th arg is actually two args smashed together,
6203 * so we cannot use the C library.
6204 */
6205 sigset_t set;
6206 struct {
6207 sigset_t *set;
6208 size_t size;
6209 } sig, *sig_ptr;
6210
6211 abi_ulong arg_sigset, arg_sigsize, *arg7;
6212 target_sigset_t *target_sigset;
6213
6214 n = arg1;
6215 rfd_addr = arg2;
6216 wfd_addr = arg3;
6217 efd_addr = arg4;
6218 ts_addr = arg5;
6219
6220 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6221 if (ret) {
6222 goto fail;
6223 }
6224 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6225 if (ret) {
6226 goto fail;
6227 }
6228 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6229 if (ret) {
6230 goto fail;
6231 }
6232
6233 /*
6234 * This takes a timespec, and not a timeval, so we cannot
6235 * use the do_select() helper ...
6236 */
6237 if (ts_addr) {
6238 if (target_to_host_timespec(&ts, ts_addr)) {
6239 goto efault;
6240 }
6241 ts_ptr = &ts;
6242 } else {
6243 ts_ptr = NULL;
6244 }
6245
6246 /* Extract the two packed args for the sigset */
6247 if (arg6) {
6248 sig_ptr = &sig;
6249 sig.size = _NSIG / 8;
6250
6251 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6252 if (!arg7) {
6253 goto efault;
6254 }
6255 arg_sigset = tswapal(arg7[0]);
6256 arg_sigsize = tswapal(arg7[1]);
6257 unlock_user(arg7, arg6, 0);
6258
6259 if (arg_sigset) {
6260 sig.set = &set;
6261 if (arg_sigsize != sizeof(*target_sigset)) {
6262 /* Like the kernel, we enforce correct size sigsets */
6263 ret = -TARGET_EINVAL;
6264 goto fail;
6265 }
6266 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6267 sizeof(*target_sigset), 1);
6268 if (!target_sigset) {
6269 goto efault;
6270 }
6271 target_to_host_sigset(&set, target_sigset);
6272 unlock_user(target_sigset, arg_sigset, 0);
6273 } else {
6274 sig.set = NULL;
6275 }
6276 } else {
6277 sig_ptr = NULL;
6278 }
6279
6280 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6281 ts_ptr, sig_ptr));
6282
6283 if (!is_error(ret)) {
6284 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6285 goto efault;
6286 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6287 goto efault;
6288 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6289 goto efault;
6290
6291 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6292 goto efault;
6293 }
6294 }
6295 break;
6296 #endif
6297 case TARGET_NR_symlink:
6298 {
6299 void *p2;
6300 p = lock_user_string(arg1);
6301 p2 = lock_user_string(arg2);
6302 if (!p || !p2)
6303 ret = -TARGET_EFAULT;
6304 else
6305 ret = get_errno(symlink(p, p2));
6306 unlock_user(p2, arg2, 0);
6307 unlock_user(p, arg1, 0);
6308 }
6309 break;
6310 #if defined(TARGET_NR_symlinkat)
6311 case TARGET_NR_symlinkat:
6312 {
6313 void *p2;
6314 p = lock_user_string(arg1);
6315 p2 = lock_user_string(arg3);
6316 if (!p || !p2)
6317 ret = -TARGET_EFAULT;
6318 else
6319 ret = get_errno(symlinkat(p, arg2, p2));
6320 unlock_user(p2, arg3, 0);
6321 unlock_user(p, arg1, 0);
6322 }
6323 break;
6324 #endif
6325 #ifdef TARGET_NR_oldlstat
6326 case TARGET_NR_oldlstat:
6327 goto unimplemented;
6328 #endif
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);                  /* link path */
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); /* output buffer */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* /proc/self/exe must name the guest binary, not the QEMU
                 * host binary, so synthesize the answer from exec_path. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* NOTE(review): ret is the full strlen(real) even when it
                 * exceeds arg3, while snprintf truncates the copy --
                 * confirm callers tolerate ret > buffer size. */
                ret = temp == NULL ? get_errno(-1) : strlen(real) ;
                snprintf((char *)p2, arg3, "%s", real);
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        break;
6348 #if defined(TARGET_NR_readlinkat)
6349 case TARGET_NR_readlinkat:
6350 {
6351 void *p2;
6352 p = lock_user_string(arg2);
6353 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6354 if (!p || !p2) {
6355 ret = -TARGET_EFAULT;
6356 } else if (is_proc_myself((const char *)p, "exe")) {
6357 char real[PATH_MAX], *temp;
6358 temp = realpath(exec_path, real);
6359 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6360 snprintf((char *)p2, arg4, "%s", real);
6361 } else {
6362 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6363 }
6364 unlock_user(p2, arg3, ret);
6365 unlock_user(p, arg2, 0);
6366 }
6367 break;
6368 #endif
6369 #ifdef TARGET_NR_uselib
6370 case TARGET_NR_uselib:
6371 goto unimplemented;
6372 #endif
6373 #ifdef TARGET_NR_swapon
6374 case TARGET_NR_swapon:
6375 if (!(p = lock_user_string(arg1)))
6376 goto efault;
6377 ret = get_errno(swapon(p, arg2));
6378 unlock_user(p, arg1, 0);
6379 break;
6380 #endif
6381 case TARGET_NR_reboot:
6382 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6383 /* arg4 must be ignored in all other cases */
6384 p = lock_user_string(arg4);
6385 if (!p) {
6386 goto efault;
6387 }
6388 ret = get_errno(reboot(arg1, arg2, arg3, p));
6389 unlock_user(p, arg4, 0);
6390 } else {
6391 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6392 }
6393 break;
6394 #ifdef TARGET_NR_readdir
6395 case TARGET_NR_readdir:
6396 goto unimplemented;
6397 #endif
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* These targets use the old mmap calling convention: arg1 is a
             * guest pointer to a block of six abi_ulong arguments. */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                goto efault;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            /* Map flags need guest-to-host bit translation. */
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* Modern convention: all six arguments arrive in registers. */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        break;
#endif
6427 #ifdef TARGET_NR_mmap2
6428 case TARGET_NR_mmap2:
6429 #ifndef MMAP_SHIFT
6430 #define MMAP_SHIFT 12
6431 #endif
6432 ret = get_errno(target_mmap(arg1, arg2, arg3,
6433 target_to_host_bitmask(arg4, mmap_flags_tbl),
6434 arg5,
6435 arg6 << MMAP_SHIFT));
6436 break;
6437 #endif
6438 case TARGET_NR_munmap:
6439 ret = get_errno(target_munmap(arg1, arg2));
6440 break;
    case TARGET_NR_mprotect:
        {
            TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Strip PROT_GROWSDOWN and widen the range down to the
                 * stack limit ourselves, since the emulated mapping does
                 * not grow the way the flag assumes. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
6455 #ifdef TARGET_NR_mremap
6456 case TARGET_NR_mremap:
6457 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6458 break;
6459 #endif
6460 /* ??? msync/mlock/munlock are broken for softmmu. */
6461 #ifdef TARGET_NR_msync
6462 case TARGET_NR_msync:
6463 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6464 break;
6465 #endif
6466 #ifdef TARGET_NR_mlock
6467 case TARGET_NR_mlock:
6468 ret = get_errno(mlock(g2h(arg1), arg2));
6469 break;
6470 #endif
6471 #ifdef TARGET_NR_munlock
6472 case TARGET_NR_munlock:
6473 ret = get_errno(munlock(g2h(arg1), arg2));
6474 break;
6475 #endif
6476 #ifdef TARGET_NR_mlockall
6477 case TARGET_NR_mlockall:
6478 ret = get_errno(mlockall(arg1));
6479 break;
6480 #endif
6481 #ifdef TARGET_NR_munlockall
6482 case TARGET_NR_munlockall:
6483 ret = get_errno(munlockall());
6484 break;
6485 #endif
6486 case TARGET_NR_truncate:
6487 if (!(p = lock_user_string(arg1)))
6488 goto efault;
6489 ret = get_errno(truncate(p, arg2));
6490 unlock_user(p, arg1, 0);
6491 break;
6492 case TARGET_NR_ftruncate:
6493 ret = get_errno(ftruncate(arg1, arg2));
6494 break;
6495 case TARGET_NR_fchmod:
6496 ret = get_errno(fchmod(arg1, arg2));
6497 break;
6498 #if defined(TARGET_NR_fchmodat)
6499 case TARGET_NR_fchmodat:
6500 if (!(p = lock_user_string(arg2)))
6501 goto efault;
6502 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6503 unlock_user(p, arg2, 0);
6504 break;
6505 #endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
            break;
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority. Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
6526 #ifdef TARGET_NR_profil
6527 case TARGET_NR_profil:
6528 goto unimplemented;
6529 #endif
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Shared tail: copy the host statfs result in stfs into the guest
         * target_statfs layout at arg2.  Also reached via goto from
         * TARGET_NR_fstatfs below. */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        break;
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
6559 #ifdef TARGET_NR_statfs64
6560 case TARGET_NR_statfs64:
6561 if (!(p = lock_user_string(arg1)))
6562 goto efault;
6563 ret = get_errno(statfs(path(p), &stfs));
6564 unlock_user(p, arg1, 0);
6565 convert_statfs64:
6566 if (!is_error(ret)) {
6567 struct target_statfs64 *target_stfs;
6568
6569 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6570 goto efault;
6571 __put_user(stfs.f_type, &target_stfs->f_type);
6572 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6573 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6574 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6575 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6576 __put_user(stfs.f_files, &target_stfs->f_files);
6577 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6578 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6579 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6580 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6581 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6582 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6583 unlock_user_struct(target_stfs, arg3, 1);
6584 }
6585 break;
6586 case TARGET_NR_fstatfs64:
6587 ret = get_errno(fstatfs(arg1, &stfs));
6588 goto convert_statfs64;
6589 #endif
6590 #ifdef TARGET_NR_ioperm
6591 case TARGET_NR_ioperm:
6592 goto unimplemented;
6593 #endif
6594 #ifdef TARGET_NR_socketcall
6595 case TARGET_NR_socketcall:
6596 ret = do_socketcall(arg1, arg2);
6597 break;
6598 #endif
6599 #ifdef TARGET_NR_accept
6600 case TARGET_NR_accept:
6601 ret = do_accept4(arg1, arg2, arg3, 0);
6602 break;
6603 #endif
6604 #ifdef TARGET_NR_accept4
6605 case TARGET_NR_accept4:
6606 #ifdef CONFIG_ACCEPT4
6607 ret = do_accept4(arg1, arg2, arg3, arg4);
6608 #else
6609 goto unimplemented;
6610 #endif
6611 break;
6612 #endif
6613 #ifdef TARGET_NR_bind
6614 case TARGET_NR_bind:
6615 ret = do_bind(arg1, arg2, arg3);
6616 break;
6617 #endif
6618 #ifdef TARGET_NR_connect
6619 case TARGET_NR_connect:
6620 ret = do_connect(arg1, arg2, arg3);
6621 break;
6622 #endif
6623 #ifdef TARGET_NR_getpeername
6624 case TARGET_NR_getpeername:
6625 ret = do_getpeername(arg1, arg2, arg3);
6626 break;
6627 #endif
6628 #ifdef TARGET_NR_getsockname
6629 case TARGET_NR_getsockname:
6630 ret = do_getsockname(arg1, arg2, arg3);
6631 break;
6632 #endif
6633 #ifdef TARGET_NR_getsockopt
6634 case TARGET_NR_getsockopt:
6635 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6636 break;
6637 #endif
6638 #ifdef TARGET_NR_listen
6639 case TARGET_NR_listen:
6640 ret = get_errno(listen(arg1, arg2));
6641 break;
6642 #endif
6643 #ifdef TARGET_NR_recv
6644 case TARGET_NR_recv:
6645 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6646 break;
6647 #endif
6648 #ifdef TARGET_NR_recvfrom
6649 case TARGET_NR_recvfrom:
6650 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6651 break;
6652 #endif
6653 #ifdef TARGET_NR_recvmsg
6654 case TARGET_NR_recvmsg:
6655 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6656 break;
6657 #endif
6658 #ifdef TARGET_NR_send
6659 case TARGET_NR_send:
6660 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6661 break;
6662 #endif
6663 #ifdef TARGET_NR_sendmsg
6664 case TARGET_NR_sendmsg:
6665 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6666 break;
6667 #endif
6668 #ifdef TARGET_NR_sendto
6669 case TARGET_NR_sendto:
6670 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6671 break;
6672 #endif
6673 #ifdef TARGET_NR_shutdown
6674 case TARGET_NR_shutdown:
6675 ret = get_errno(shutdown(arg1, arg2));
6676 break;
6677 #endif
6678 #ifdef TARGET_NR_socket
6679 case TARGET_NR_socket:
6680 ret = do_socket(arg1, arg2, arg3);
6681 break;
6682 #endif
6683 #ifdef TARGET_NR_socketpair
6684 case TARGET_NR_socketpair:
6685 ret = do_socketpair(arg1, arg2, arg3, arg4);
6686 break;
6687 #endif
6688 #ifdef TARGET_NR_setsockopt
6689 case TARGET_NR_setsockopt:
6690 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6691 break;
6692 #endif
6693
6694 case TARGET_NR_syslog:
6695 if (!(p = lock_user_string(arg2)))
6696 goto efault;
6697 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6698 unlock_user(p, arg2, 0);
6699 break;
6700
6701 case TARGET_NR_setitimer:
6702 {
6703 struct itimerval value, ovalue, *pvalue;
6704
6705 if (arg2) {
6706 pvalue = &value;
6707 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6708 || copy_from_user_timeval(&pvalue->it_value,
6709 arg2 + sizeof(struct target_timeval)))
6710 goto efault;
6711 } else {
6712 pvalue = NULL;
6713 }
6714 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6715 if (!is_error(ret) && arg3) {
6716 if (copy_to_user_timeval(arg3,
6717 &ovalue.it_interval)
6718 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6719 &ovalue.it_value))
6720 goto efault;
6721 }
6722 }
6723 break;
6724 case TARGET_NR_getitimer:
6725 {
6726 struct itimerval value;
6727
6728 ret = get_errno(getitimer(arg1, &value));
6729 if (!is_error(ret) && arg2) {
6730 if (copy_to_user_timeval(arg2,
6731 &value.it_interval)
6732 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6733 &value.it_value))
6734 goto efault;
6735 }
6736 }
6737 break;
6738 case TARGET_NR_stat:
6739 if (!(p = lock_user_string(arg1)))
6740 goto efault;
6741 ret = get_errno(stat(path(p), &st));
6742 unlock_user(p, arg1, 0);
6743 goto do_stat;
6744 case TARGET_NR_lstat:
6745 if (!(p = lock_user_string(arg1)))
6746 goto efault;
6747 ret = get_errno(lstat(path(p), &st));
6748 unlock_user(p, arg1, 0);
6749 goto do_stat;
6750 case TARGET_NR_fstat:
6751 {
6752 ret = get_errno(fstat(arg1, &st));
6753 do_stat:
6754 if (!is_error(ret)) {
6755 struct target_stat *target_st;
6756
6757 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6758 goto efault;
6759 memset(target_st, 0, sizeof(*target_st));
6760 __put_user(st.st_dev, &target_st->st_dev);
6761 __put_user(st.st_ino, &target_st->st_ino);
6762 __put_user(st.st_mode, &target_st->st_mode);
6763 __put_user(st.st_uid, &target_st->st_uid);
6764 __put_user(st.st_gid, &target_st->st_gid);
6765 __put_user(st.st_nlink, &target_st->st_nlink);
6766 __put_user(st.st_rdev, &target_st->st_rdev);
6767 __put_user(st.st_size, &target_st->st_size);
6768 __put_user(st.st_blksize, &target_st->st_blksize);
6769 __put_user(st.st_blocks, &target_st->st_blocks);
6770 __put_user(st.st_atime, &target_st->target_st_atime);
6771 __put_user(st.st_mtime, &target_st->target_st_mtime);
6772 __put_user(st.st_ctime, &target_st->target_st_ctime);
6773 unlock_user_struct(target_st, arg2, 1);
6774 }
6775 }
6776 break;
6777 #ifdef TARGET_NR_olduname
6778 case TARGET_NR_olduname:
6779 goto unimplemented;
6780 #endif
6781 #ifdef TARGET_NR_iopl
6782 case TARGET_NR_iopl:
6783 goto unimplemented;
6784 #endif
6785 case TARGET_NR_vhangup:
6786 ret = get_errno(vhangup());
6787 break;
6788 #ifdef TARGET_NR_idle
6789 case TARGET_NR_idle:
6790 goto unimplemented;
6791 #endif
6792 #ifdef TARGET_NR_syscall
6793 case TARGET_NR_syscall:
6794 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6795 arg6, arg7, arg8, 0);
6796 break;
6797 #endif
case TARGET_NR_wait4:
    {
        int status;
        abi_long status_ptr = arg2;      /* guest address for wstatus, may be 0 */
        struct rusage rusage, *rusage_ptr;
        abi_ulong target_rusage = arg4;  /* guest address for rusage, may be 0 */
        if (target_rusage)
            rusage_ptr = &rusage;
        else
            rusage_ptr = NULL;
        ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
        if (!is_error(ret)) {
            /* ret == 0 means WNOHANG with no child state change, so
             * there is no status to convert or report to the guest. */
            if (status_ptr && ret) {
                status = host_to_target_waitstatus(status);
                if (put_user_s32(status, status_ptr))
                    goto efault;
            }
            if (target_rusage)
                host_to_target_rusage(target_rusage, &rusage);
        }
    }
    break;
6820 #ifdef TARGET_NR_swapoff
6821 case TARGET_NR_swapoff:
6822 if (!(p = lock_user_string(arg1)))
6823 goto efault;
6824 ret = get_errno(swapoff(p));
6825 unlock_user(p, arg1, 0);
6826 break;
6827 #endif
6828 case TARGET_NR_sysinfo:
6829 {
6830 struct target_sysinfo *target_value;
6831 struct sysinfo value;
6832 ret = get_errno(sysinfo(&value));
6833 if (!is_error(ret) && arg1)
6834 {
6835 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6836 goto efault;
6837 __put_user(value.uptime, &target_value->uptime);
6838 __put_user(value.loads[0], &target_value->loads[0]);
6839 __put_user(value.loads[1], &target_value->loads[1]);
6840 __put_user(value.loads[2], &target_value->loads[2]);
6841 __put_user(value.totalram, &target_value->totalram);
6842 __put_user(value.freeram, &target_value->freeram);
6843 __put_user(value.sharedram, &target_value->sharedram);
6844 __put_user(value.bufferram, &target_value->bufferram);
6845 __put_user(value.totalswap, &target_value->totalswap);
6846 __put_user(value.freeswap, &target_value->freeswap);
6847 __put_user(value.procs, &target_value->procs);
6848 __put_user(value.totalhigh, &target_value->totalhigh);
6849 __put_user(value.freehigh, &target_value->freehigh);
6850 __put_user(value.mem_unit, &target_value->mem_unit);
6851 unlock_user_struct(target_value, arg1, 1);
6852 }
6853 }
6854 break;
6855 #ifdef TARGET_NR_ipc
6856 case TARGET_NR_ipc:
6857 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6858 break;
6859 #endif
6860 #ifdef TARGET_NR_semget
6861 case TARGET_NR_semget:
6862 ret = get_errno(semget(arg1, arg2, arg3));
6863 break;
6864 #endif
6865 #ifdef TARGET_NR_semop
6866 case TARGET_NR_semop:
6867 ret = do_semop(arg1, arg2, arg3);
6868 break;
6869 #endif
6870 #ifdef TARGET_NR_semctl
6871 case TARGET_NR_semctl:
6872 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6873 break;
6874 #endif
6875 #ifdef TARGET_NR_msgctl
6876 case TARGET_NR_msgctl:
6877 ret = do_msgctl(arg1, arg2, arg3);
6878 break;
6879 #endif
6880 #ifdef TARGET_NR_msgget
6881 case TARGET_NR_msgget:
6882 ret = get_errno(msgget(arg1, arg2));
6883 break;
6884 #endif
6885 #ifdef TARGET_NR_msgrcv
6886 case TARGET_NR_msgrcv:
6887 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6888 break;
6889 #endif
6890 #ifdef TARGET_NR_msgsnd
6891 case TARGET_NR_msgsnd:
6892 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6893 break;
6894 #endif
6895 #ifdef TARGET_NR_shmget
6896 case TARGET_NR_shmget:
6897 ret = get_errno(shmget(arg1, arg2, arg3));
6898 break;
6899 #endif
6900 #ifdef TARGET_NR_shmctl
6901 case TARGET_NR_shmctl:
6902 ret = do_shmctl(arg1, arg2, arg3);
6903 break;
6904 #endif
6905 #ifdef TARGET_NR_shmat
6906 case TARGET_NR_shmat:
6907 ret = do_shmat(arg1, arg2, arg3);
6908 break;
6909 #endif
6910 #ifdef TARGET_NR_shmdt
6911 case TARGET_NR_shmdt:
6912 ret = do_shmdt(arg1);
6913 break;
6914 #endif
6915 case TARGET_NR_fsync:
6916 ret = get_errno(fsync(arg1));
6917 break;
6918 case TARGET_NR_clone:
6919 /* Linux manages to have three different orderings for its
6920 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
6921 * match the kernel's CONFIG_CLONE_* settings.
6922 * Microblaze is further special in that it uses a sixth
6923 * implicit argument to clone for the TLS pointer.
6924 */
6925 #if defined(TARGET_MICROBLAZE)
6926 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6927 #elif defined(TARGET_CLONE_BACKWARDS)
6928 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6929 #elif defined(TARGET_CLONE_BACKWARDS2)
6930 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6931 #else
6932 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6933 #endif
6934 break;
6935 #ifdef __NR_exit_group
6936 /* new thread calls */
6937 case TARGET_NR_exit_group:
6938 #ifdef TARGET_GPROF
6939 _mcleanup();
6940 #endif
6941 gdb_exit(cpu_env, arg1);
6942 ret = get_errno(exit_group(arg1));
6943 break;
6944 #endif
6945 case TARGET_NR_setdomainname:
6946 if (!(p = lock_user_string(arg1)))
6947 goto efault;
6948 ret = get_errno(setdomainname(p, arg2));
6949 unlock_user(p, arg1, 0);
6950 break;
6951 case TARGET_NR_uname:
6952 /* no need to transcode because we use the linux syscall */
6953 {
6954 struct new_utsname * buf;
6955
6956 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6957 goto efault;
6958 ret = get_errno(sys_uname(buf));
6959 if (!is_error(ret)) {
6960 /* Overrite the native machine name with whatever is being
6961 emulated. */
6962 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6963 /* Allow the user to override the reported release. */
6964 if (qemu_uname_release && *qemu_uname_release)
6965 strcpy (buf->release, qemu_uname_release);
6966 }
6967 unlock_user_struct(buf, arg1, 1);
6968 }
6969 break;
6970 #ifdef TARGET_I386
6971 case TARGET_NR_modify_ldt:
6972 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6973 break;
6974 #if !defined(TARGET_X86_64)
6975 case TARGET_NR_vm86old:
6976 goto unimplemented;
6977 case TARGET_NR_vm86:
6978 ret = do_vm86(cpu_env, arg1, arg2);
6979 break;
6980 #endif
6981 #endif
6982 case TARGET_NR_adjtimex:
6983 goto unimplemented;
6984 #ifdef TARGET_NR_create_module
6985 case TARGET_NR_create_module:
6986 #endif
6987 case TARGET_NR_init_module:
6988 case TARGET_NR_delete_module:
6989 #ifdef TARGET_NR_get_kernel_syms
6990 case TARGET_NR_get_kernel_syms:
6991 #endif
6992 goto unimplemented;
6993 case TARGET_NR_quotactl:
6994 goto unimplemented;
6995 case TARGET_NR_getpgid:
6996 ret = get_errno(getpgid(arg1));
6997 break;
6998 case TARGET_NR_fchdir:
6999 ret = get_errno(fchdir(arg1));
7000 break;
7001 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7002 case TARGET_NR_bdflush:
7003 goto unimplemented;
7004 #endif
7005 #ifdef TARGET_NR_sysfs
7006 case TARGET_NR_sysfs:
7007 goto unimplemented;
7008 #endif
7009 case TARGET_NR_personality:
7010 ret = get_errno(personality(arg1));
7011 break;
7012 #ifdef TARGET_NR_afs_syscall
7013 case TARGET_NR_afs_syscall:
7014 goto unimplemented;
7015 #endif
7016 #ifdef TARGET_NR__llseek /* Not on alpha */
7017 case TARGET_NR__llseek:
7018 {
7019 int64_t res;
7020 #if !defined(__NR_llseek)
7021 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7022 if (res == -1) {
7023 ret = get_errno(res);
7024 } else {
7025 ret = 0;
7026 }
7027 #else
7028 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7029 #endif
7030 if ((ret == 0) && put_user_s64(res, arg4)) {
7031 goto efault;
7032 }
7033 }
7034 break;
7035 #endif
7036 case TARGET_NR_getdents:
7037 #ifdef __NR_getdents
7038 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7039 {
7040 struct target_dirent *target_dirp;
7041 struct linux_dirent *dirp;
7042 abi_long count = arg3;
7043
7044 dirp = malloc(count);
7045 if (!dirp) {
7046 ret = -TARGET_ENOMEM;
7047 goto fail;
7048 }
7049
7050 ret = get_errno(sys_getdents(arg1, dirp, count));
7051 if (!is_error(ret)) {
7052 struct linux_dirent *de;
7053 struct target_dirent *tde;
7054 int len = ret;
7055 int reclen, treclen;
7056 int count1, tnamelen;
7057
7058 count1 = 0;
7059 de = dirp;
7060 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7061 goto efault;
7062 tde = target_dirp;
7063 while (len > 0) {
7064 reclen = de->d_reclen;
7065 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7066 assert(tnamelen >= 0);
7067 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7068 assert(count1 + treclen <= count);
7069 tde->d_reclen = tswap16(treclen);
7070 tde->d_ino = tswapal(de->d_ino);
7071 tde->d_off = tswapal(de->d_off);
7072 memcpy(tde->d_name, de->d_name, tnamelen);
7073 de = (struct linux_dirent *)((char *)de + reclen);
7074 len -= reclen;
7075 tde = (struct target_dirent *)((char *)tde + treclen);
7076 count1 += treclen;
7077 }
7078 ret = count1;
7079 unlock_user(target_dirp, arg2, ret);
7080 }
7081 free(dirp);
7082 }
7083 #else
7084 {
7085 struct linux_dirent *dirp;
7086 abi_long count = arg3;
7087
7088 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7089 goto efault;
7090 ret = get_errno(sys_getdents(arg1, dirp, count));
7091 if (!is_error(ret)) {
7092 struct linux_dirent *de;
7093 int len = ret;
7094 int reclen;
7095 de = dirp;
7096 while (len > 0) {
7097 reclen = de->d_reclen;
7098 if (reclen > len)
7099 break;
7100 de->d_reclen = tswap16(reclen);
7101 tswapls(&de->d_ino);
7102 tswapls(&de->d_off);
7103 de = (struct linux_dirent *)((char *)de + reclen);
7104 len -= reclen;
7105 }
7106 }
7107 unlock_user(dirp, arg2, ret);
7108 }
7109 #endif
7110 #else
7111 /* Implement getdents in terms of getdents64 */
7112 {
7113 struct linux_dirent64 *dirp;
7114 abi_long count = arg3;
7115
7116 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7117 if (!dirp) {
7118 goto efault;
7119 }
7120 ret = get_errno(sys_getdents64(arg1, dirp, count));
7121 if (!is_error(ret)) {
7122 /* Convert the dirent64 structs to target dirent. We do this
7123 * in-place, since we can guarantee that a target_dirent is no
7124 * larger than a dirent64; however this means we have to be
7125 * careful to read everything before writing in the new format.
7126 */
7127 struct linux_dirent64 *de;
7128 struct target_dirent *tde;
7129 int len = ret;
7130 int tlen = 0;
7131
7132 de = dirp;
7133 tde = (struct target_dirent *)dirp;
7134 while (len > 0) {
7135 int namelen, treclen;
7136 int reclen = de->d_reclen;
7137 uint64_t ino = de->d_ino;
7138 int64_t off = de->d_off;
7139 uint8_t type = de->d_type;
7140
7141 namelen = strlen(de->d_name);
7142 treclen = offsetof(struct target_dirent, d_name)
7143 + namelen + 2;
7144 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7145
7146 memmove(tde->d_name, de->d_name, namelen + 1);
7147 tde->d_ino = tswapal(ino);
7148 tde->d_off = tswapal(off);
7149 tde->d_reclen = tswap16(treclen);
7150 /* The target_dirent type is in what was formerly a padding
7151 * byte at the end of the structure:
7152 */
7153 *(((char *)tde) + treclen - 1) = type;
7154
7155 de = (struct linux_dirent64 *)((char *)de + reclen);
7156 tde = (struct target_dirent *)((char *)tde + treclen);
7157 len -= reclen;
7158 tlen += treclen;
7159 }
7160 ret = tlen;
7161 }
7162 unlock_user(dirp, arg2, ret);
7163 }
7164 #endif
7165 break;
7166 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7167 case TARGET_NR_getdents64:
7168 {
7169 struct linux_dirent64 *dirp;
7170 abi_long count = arg3;
7171 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7172 goto efault;
7173 ret = get_errno(sys_getdents64(arg1, dirp, count));
7174 if (!is_error(ret)) {
7175 struct linux_dirent64 *de;
7176 int len = ret;
7177 int reclen;
7178 de = dirp;
7179 while (len > 0) {
7180 reclen = de->d_reclen;
7181 if (reclen > len)
7182 break;
7183 de->d_reclen = tswap16(reclen);
7184 tswap64s((uint64_t *)&de->d_ino);
7185 tswap64s((uint64_t *)&de->d_off);
7186 de = (struct linux_dirent64 *)((char *)de + reclen);
7187 len -= reclen;
7188 }
7189 }
7190 unlock_user(dirp, arg2, ret);
7191 }
7192 break;
7193 #endif /* TARGET_NR_getdents64 */
7194 #if defined(TARGET_NR__newselect)
7195 case TARGET_NR__newselect:
7196 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7197 break;
7198 #endif
7199 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7200 # ifdef TARGET_NR_poll
7201 case TARGET_NR_poll:
7202 # endif
7203 # ifdef TARGET_NR_ppoll
7204 case TARGET_NR_ppoll:
7205 # endif
7206 {
7207 struct target_pollfd *target_pfd;
7208 unsigned int nfds = arg2;
7209 int timeout = arg3;
7210 struct pollfd *pfd;
7211 unsigned int i;
7212
7213 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7214 if (!target_pfd)
7215 goto efault;
7216
7217 pfd = alloca(sizeof(struct pollfd) * nfds);
7218 for(i = 0; i < nfds; i++) {
7219 pfd[i].fd = tswap32(target_pfd[i].fd);
7220 pfd[i].events = tswap16(target_pfd[i].events);
7221 }
7222
7223 # ifdef TARGET_NR_ppoll
7224 if (num == TARGET_NR_ppoll) {
7225 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7226 target_sigset_t *target_set;
7227 sigset_t _set, *set = &_set;
7228
7229 if (arg3) {
7230 if (target_to_host_timespec(timeout_ts, arg3)) {
7231 unlock_user(target_pfd, arg1, 0);
7232 goto efault;
7233 }
7234 } else {
7235 timeout_ts = NULL;
7236 }
7237
7238 if (arg4) {
7239 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7240 if (!target_set) {
7241 unlock_user(target_pfd, arg1, 0);
7242 goto efault;
7243 }
7244 target_to_host_sigset(set, target_set);
7245 } else {
7246 set = NULL;
7247 }
7248
7249 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7250
7251 if (!is_error(ret) && arg3) {
7252 host_to_target_timespec(arg3, timeout_ts);
7253 }
7254 if (arg4) {
7255 unlock_user(target_set, arg4, 0);
7256 }
7257 } else
7258 # endif
7259 ret = get_errno(poll(pfd, nfds, timeout));
7260
7261 if (!is_error(ret)) {
7262 for(i = 0; i < nfds; i++) {
7263 target_pfd[i].revents = tswap16(pfd[i].revents);
7264 }
7265 }
7266 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7267 }
7268 break;
7269 #endif
7270 case TARGET_NR_flock:
7271 /* NOTE: the flock constant seems to be the same for every
7272 Linux platform */
7273 ret = get_errno(flock(arg1, arg2));
7274 break;
7275 case TARGET_NR_readv:
7276 {
7277 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7278 if (vec != NULL) {
7279 ret = get_errno(readv(arg1, vec, arg3));
7280 unlock_iovec(vec, arg2, arg3, 1);
7281 } else {
7282 ret = -host_to_target_errno(errno);
7283 }
7284 }
7285 break;
7286 case TARGET_NR_writev:
7287 {
7288 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7289 if (vec != NULL) {
7290 ret = get_errno(writev(arg1, vec, arg3));
7291 unlock_iovec(vec, arg2, arg3, 0);
7292 } else {
7293 ret = -host_to_target_errno(errno);
7294 }
7295 }
7296 break;
7297 case TARGET_NR_getsid:
7298 ret = get_errno(getsid(arg1));
7299 break;
7300 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7301 case TARGET_NR_fdatasync:
7302 ret = get_errno(fdatasync(arg1));
7303 break;
7304 #endif
7305 case TARGET_NR__sysctl:
7306 /* We don't implement this, but ENOTDIR is always a safe
7307 return value. */
7308 ret = -TARGET_ENOTDIR;
7309 break;
7310 case TARGET_NR_sched_getaffinity:
7311 {
7312 unsigned int mask_size;
7313 unsigned long *mask;
7314
7315 /*
7316 * sched_getaffinity needs multiples of ulong, so need to take
7317 * care of mismatches between target ulong and host ulong sizes.
7318 */
7319 if (arg2 & (sizeof(abi_ulong) - 1)) {
7320 ret = -TARGET_EINVAL;
7321 break;
7322 }
7323 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7324
7325 mask = alloca(mask_size);
7326 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7327
7328 if (!is_error(ret)) {
7329 if (copy_to_user(arg3, mask, ret)) {
7330 goto efault;
7331 }
7332 }
7333 }
7334 break;
7335 case TARGET_NR_sched_setaffinity:
7336 {
7337 unsigned int mask_size;
7338 unsigned long *mask;
7339
7340 /*
7341 * sched_setaffinity needs multiples of ulong, so need to take
7342 * care of mismatches between target ulong and host ulong sizes.
7343 */
7344 if (arg2 & (sizeof(abi_ulong) - 1)) {
7345 ret = -TARGET_EINVAL;
7346 break;
7347 }
7348 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7349
7350 mask = alloca(mask_size);
7351 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7352 goto efault;
7353 }
7354 memcpy(mask, p, arg2);
7355 unlock_user_struct(p, arg2, 0);
7356
7357 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7358 }
7359 break;
7360 case TARGET_NR_sched_setparam:
7361 {
7362 struct sched_param *target_schp;
7363 struct sched_param schp;
7364
7365 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7366 goto efault;
7367 schp.sched_priority = tswap32(target_schp->sched_priority);
7368 unlock_user_struct(target_schp, arg2, 0);
7369 ret = get_errno(sched_setparam(arg1, &schp));
7370 }
7371 break;
7372 case TARGET_NR_sched_getparam:
7373 {
7374 struct sched_param *target_schp;
7375 struct sched_param schp;
7376 ret = get_errno(sched_getparam(arg1, &schp));
7377 if (!is_error(ret)) {
7378 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7379 goto efault;
7380 target_schp->sched_priority = tswap32(schp.sched_priority);
7381 unlock_user_struct(target_schp, arg2, 1);
7382 }
7383 }
7384 break;
7385 case TARGET_NR_sched_setscheduler:
7386 {
7387 struct sched_param *target_schp;
7388 struct sched_param schp;
7389 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7390 goto efault;
7391 schp.sched_priority = tswap32(target_schp->sched_priority);
7392 unlock_user_struct(target_schp, arg3, 0);
7393 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7394 }
7395 break;
7396 case TARGET_NR_sched_getscheduler:
7397 ret = get_errno(sched_getscheduler(arg1));
7398 break;
7399 case TARGET_NR_sched_yield:
7400 ret = get_errno(sched_yield());
7401 break;
7402 case TARGET_NR_sched_get_priority_max:
7403 ret = get_errno(sched_get_priority_max(arg1));
7404 break;
7405 case TARGET_NR_sched_get_priority_min:
7406 ret = get_errno(sched_get_priority_min(arg1));
7407 break;
7408 case TARGET_NR_sched_rr_get_interval:
7409 {
7410 struct timespec ts;
7411 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7412 if (!is_error(ret)) {
7413 host_to_target_timespec(arg2, &ts);
7414 }
7415 }
7416 break;
case TARGET_NR_nanosleep:
    {
        struct timespec req, rem;
        /* NOTE(review): the return value of target_to_host_timespec()
         * is ignored, so a bad arg1 pointer is not reported as EFAULT
         * here — confirm against how other call sites handle it before
         * changing. */
        target_to_host_timespec(&req, arg1);
        ret = get_errno(nanosleep(&req, &rem));
        /* The remaining time is meaningful only when the sleep failed
         * (i.e. was interrupted, EINTR), which is when the kernel fills
         * it in — so copy it back to the guest only on error. */
        if (is_error(ret) && arg2) {
            host_to_target_timespec(arg2, &rem);
        }
    }
    break;
7427 #ifdef TARGET_NR_query_module
7428 case TARGET_NR_query_module:
7429 goto unimplemented;
7430 #endif
7431 #ifdef TARGET_NR_nfsservctl
7432 case TARGET_NR_nfsservctl:
7433 goto unimplemented;
7434 #endif
7435 case TARGET_NR_prctl:
7436 switch (arg1) {
7437 case PR_GET_PDEATHSIG:
7438 {
7439 int deathsig;
7440 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7441 if (!is_error(ret) && arg2
7442 && put_user_ual(deathsig, arg2)) {
7443 goto efault;
7444 }
7445 break;
7446 }
7447 #ifdef PR_GET_NAME
7448 case PR_GET_NAME:
7449 {
7450 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7451 if (!name) {
7452 goto efault;
7453 }
7454 ret = get_errno(prctl(arg1, (unsigned long)name,
7455 arg3, arg4, arg5));
7456 unlock_user(name, arg2, 16);
7457 break;
7458 }
7459 case PR_SET_NAME:
7460 {
7461 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7462 if (!name) {
7463 goto efault;
7464 }
7465 ret = get_errno(prctl(arg1, (unsigned long)name,
7466 arg3, arg4, arg5));
7467 unlock_user(name, arg2, 0);
7468 break;
7469 }
7470 #endif
7471 default:
7472 /* Most prctl options have no pointer arguments */
7473 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7474 break;
7475 }
7476 break;
7477 #ifdef TARGET_NR_arch_prctl
7478 case TARGET_NR_arch_prctl:
7479 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7480 ret = do_arch_prctl(cpu_env, arg1, arg2);
7481 break;
7482 #else
7483 goto unimplemented;
7484 #endif
7485 #endif
7486 #ifdef TARGET_NR_pread64
7487 case TARGET_NR_pread64:
7488 if (regpairs_aligned(cpu_env)) {
7489 arg4 = arg5;
7490 arg5 = arg6;
7491 }
7492 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7493 goto efault;
7494 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7495 unlock_user(p, arg2, ret);
7496 break;
7497 case TARGET_NR_pwrite64:
7498 if (regpairs_aligned(cpu_env)) {
7499 arg4 = arg5;
7500 arg5 = arg6;
7501 }
7502 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7503 goto efault;
7504 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7505 unlock_user(p, arg2, 0);
7506 break;
7507 #endif
7508 case TARGET_NR_getcwd:
7509 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7510 goto efault;
7511 ret = get_errno(sys_getcwd1(p, arg2));
7512 unlock_user(p, arg1, ret);
7513 break;
7514 case TARGET_NR_capget:
7515 goto unimplemented;
7516 case TARGET_NR_capset:
7517 goto unimplemented;
7518 case TARGET_NR_sigaltstack:
7519 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7520 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7521 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7522 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7523 break;
7524 #else
7525 goto unimplemented;
7526 #endif
7527
7528 #ifdef CONFIG_SENDFILE
7529 case TARGET_NR_sendfile:
7530 {
7531 off_t *offp = NULL;
7532 off_t off;
7533 if (arg3) {
7534 ret = get_user_sal(off, arg3);
7535 if (is_error(ret)) {
7536 break;
7537 }
7538 offp = &off;
7539 }
7540 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7541 if (!is_error(ret) && arg3) {
7542 abi_long ret2 = put_user_sal(off, arg3);
7543 if (is_error(ret2)) {
7544 ret = ret2;
7545 }
7546 }
7547 break;
7548 }
7549 #ifdef TARGET_NR_sendfile64
7550 case TARGET_NR_sendfile64:
7551 {
7552 off_t *offp = NULL;
7553 off_t off;
7554 if (arg3) {
7555 ret = get_user_s64(off, arg3);
7556 if (is_error(ret)) {
7557 break;
7558 }
7559 offp = &off;
7560 }
7561 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7562 if (!is_error(ret) && arg3) {
7563 abi_long ret2 = put_user_s64(off, arg3);
7564 if (is_error(ret2)) {
7565 ret = ret2;
7566 }
7567 }
7568 break;
7569 }
7570 #endif
7571 #else
7572 case TARGET_NR_sendfile:
7573 #ifdef TARGET_NR_sendfile64
7574 case TARGET_NR_sendfile64:
7575 #endif
7576 goto unimplemented;
7577 #endif
7578
7579 #ifdef TARGET_NR_getpmsg
7580 case TARGET_NR_getpmsg:
7581 goto unimplemented;
7582 #endif
7583 #ifdef TARGET_NR_putpmsg
7584 case TARGET_NR_putpmsg:
7585 goto unimplemented;
7586 #endif
7587 #ifdef TARGET_NR_vfork
7588 case TARGET_NR_vfork:
7589 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7590 0, 0, 0, 0));
7591 break;
7592 #endif
7593 #ifdef TARGET_NR_ugetrlimit
7594 case TARGET_NR_ugetrlimit:
7595 {
7596 struct rlimit rlim;
7597 int resource = target_to_host_resource(arg1);
7598 ret = get_errno(getrlimit(resource, &rlim));
7599 if (!is_error(ret)) {
7600 struct target_rlimit *target_rlim;
7601 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7602 goto efault;
7603 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7604 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7605 unlock_user_struct(target_rlim, arg2, 1);
7606 }
7607 break;
7608 }
7609 #endif
7610 #ifdef TARGET_NR_truncate64
7611 case TARGET_NR_truncate64:
7612 if (!(p = lock_user_string(arg1)))
7613 goto efault;
7614 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7615 unlock_user(p, arg1, 0);
7616 break;
7617 #endif
7618 #ifdef TARGET_NR_ftruncate64
7619 case TARGET_NR_ftruncate64:
7620 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7621 break;
7622 #endif
7623 #ifdef TARGET_NR_stat64
7624 case TARGET_NR_stat64:
7625 if (!(p = lock_user_string(arg1)))
7626 goto efault;
7627 ret = get_errno(stat(path(p), &st));
7628 unlock_user(p, arg1, 0);
7629 if (!is_error(ret))
7630 ret = host_to_target_stat64(cpu_env, arg2, &st);
7631 break;
7632 #endif
7633 #ifdef TARGET_NR_lstat64
7634 case TARGET_NR_lstat64:
7635 if (!(p = lock_user_string(arg1)))
7636 goto efault;
7637 ret = get_errno(lstat(path(p), &st));
7638 unlock_user(p, arg1, 0);
7639 if (!is_error(ret))
7640 ret = host_to_target_stat64(cpu_env, arg2, &st);
7641 break;
7642 #endif
7643 #ifdef TARGET_NR_fstat64
7644 case TARGET_NR_fstat64:
7645 ret = get_errno(fstat(arg1, &st));
7646 if (!is_error(ret))
7647 ret = host_to_target_stat64(cpu_env, arg2, &st);
7648 break;
7649 #endif
7650 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7651 #ifdef TARGET_NR_fstatat64
7652 case TARGET_NR_fstatat64:
7653 #endif
7654 #ifdef TARGET_NR_newfstatat
7655 case TARGET_NR_newfstatat:
7656 #endif
7657 if (!(p = lock_user_string(arg2)))
7658 goto efault;
7659 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7660 if (!is_error(ret))
7661 ret = host_to_target_stat64(cpu_env, arg3, &st);
7662 break;
7663 #endif
7664 case TARGET_NR_lchown:
7665 if (!(p = lock_user_string(arg1)))
7666 goto efault;
7667 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7668 unlock_user(p, arg1, 0);
7669 break;
7670 #ifdef TARGET_NR_getuid
7671 case TARGET_NR_getuid:
7672 ret = get_errno(high2lowuid(getuid()));
7673 break;
7674 #endif
7675 #ifdef TARGET_NR_getgid
7676 case TARGET_NR_getgid:
7677 ret = get_errno(high2lowgid(getgid()));
7678 break;
7679 #endif
7680 #ifdef TARGET_NR_geteuid
7681 case TARGET_NR_geteuid:
7682 ret = get_errno(high2lowuid(geteuid()));
7683 break;
7684 #endif
7685 #ifdef TARGET_NR_getegid
7686 case TARGET_NR_getegid:
7687 ret = get_errno(high2lowgid(getegid()));
7688 break;
7689 #endif
7690 case TARGET_NR_setreuid:
7691 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7692 break;
7693 case TARGET_NR_setregid:
7694 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7695 break;
7696 case TARGET_NR_getgroups:
7697 {
7698 int gidsetsize = arg1;
7699 target_id *target_grouplist;
7700 gid_t *grouplist;
7701 int i;
7702
7703 grouplist = alloca(gidsetsize * sizeof(gid_t));
7704 ret = get_errno(getgroups(gidsetsize, grouplist));
7705 if (gidsetsize == 0)
7706 break;
7707 if (!is_error(ret)) {
7708 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7709 if (!target_grouplist)
7710 goto efault;
7711 for(i = 0;i < ret; i++)
7712 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7713 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7714 }
7715 }
7716 break;
7717 case TARGET_NR_setgroups:
7718 {
7719 int gidsetsize = arg1;
7720 target_id *target_grouplist;
7721 gid_t *grouplist = NULL;
7722 int i;
7723 if (gidsetsize) {
7724 grouplist = alloca(gidsetsize * sizeof(gid_t));
7725 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7726 if (!target_grouplist) {
7727 ret = -TARGET_EFAULT;
7728 goto fail;
7729 }
7730 for (i = 0; i < gidsetsize; i++) {
7731 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7732 }
7733 unlock_user(target_grouplist, arg2, 0);
7734 }
7735 ret = get_errno(setgroups(gidsetsize, grouplist));
7736 }
7737 break;
7738 case TARGET_NR_fchown:
7739 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7740 break;
7741 #if defined(TARGET_NR_fchownat)
7742 case TARGET_NR_fchownat:
7743 if (!(p = lock_user_string(arg2)))
7744 goto efault;
7745 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7746 low2highgid(arg4), arg5));
7747 unlock_user(p, arg2, 0);
7748 break;
7749 #endif
7750 #ifdef TARGET_NR_setresuid
7751 case TARGET_NR_setresuid:
7752 ret = get_errno(setresuid(low2highuid(arg1),
7753 low2highuid(arg2),
7754 low2highuid(arg3)));
7755 break;
7756 #endif
7757 #ifdef TARGET_NR_getresuid
7758 case TARGET_NR_getresuid:
7759 {
7760 uid_t ruid, euid, suid;
7761 ret = get_errno(getresuid(&ruid, &euid, &suid));
7762 if (!is_error(ret)) {
7763 if (put_user_u16(high2lowuid(ruid), arg1)
7764 || put_user_u16(high2lowuid(euid), arg2)
7765 || put_user_u16(high2lowuid(suid), arg3))
7766 goto efault;
7767 }
7768 }
7769 break;
7770 #endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* Guard must test TARGET_NR_setresgid (it previously tested
         * TARGET_NR_getresgid, so targets defining only setresgid lost
         * this syscall entirely).  Convert 16-bit target gids to host
         * gids before calling the host syscall.
         */
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
7778 #ifdef TARGET_NR_getresgid
7779 case TARGET_NR_getresgid:
7780 {
7781 gid_t rgid, egid, sgid;
7782 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7783 if (!is_error(ret)) {
7784 if (put_user_u16(high2lowgid(rgid), arg1)
7785 || put_user_u16(high2lowgid(egid), arg2)
7786 || put_user_u16(high2lowgid(sgid), arg3))
7787 goto efault;
7788 }
7789 }
7790 break;
7791 #endif
7792 case TARGET_NR_chown:
7793 if (!(p = lock_user_string(arg1)))
7794 goto efault;
7795 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7796 unlock_user(p, arg1, 0);
7797 break;
7798 case TARGET_NR_setuid:
7799 ret = get_errno(setuid(low2highuid(arg1)));
7800 break;
7801 case TARGET_NR_setgid:
7802 ret = get_errno(setgid(low2highgid(arg1)));
7803 break;
7804 case TARGET_NR_setfsuid:
7805 ret = get_errno(setfsuid(arg1));
7806 break;
7807 case TARGET_NR_setfsgid:
7808 ret = get_errno(setfsgid(arg1));
7809 break;
7810
7811 #ifdef TARGET_NR_lchown32
7812 case TARGET_NR_lchown32:
7813 if (!(p = lock_user_string(arg1)))
7814 goto efault;
7815 ret = get_errno(lchown(p, arg2, arg3));
7816 unlock_user(p, arg1, 0);
7817 break;
7818 #endif
7819 #ifdef TARGET_NR_getuid32
7820 case TARGET_NR_getuid32:
7821 ret = get_errno(getuid());
7822 break;
7823 #endif
7824
7825 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7826 /* Alpha specific */
7827 case TARGET_NR_getxuid:
7828 {
7829 uid_t euid;
7830 euid=geteuid();
7831 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7832 }
7833 ret = get_errno(getuid());
7834 break;
7835 #endif
7836 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7837 /* Alpha specific */
7838 case TARGET_NR_getxgid:
7839 {
7840 uid_t egid;
7841 egid=getegid();
7842 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7843 }
7844 ret = get_errno(getgid());
7845 break;
7846 #endif
7847 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7848 /* Alpha specific */
7849 case TARGET_NR_osf_getsysinfo:
7850 ret = -TARGET_EOPNOTSUPP;
7851 switch (arg1) {
7852 case TARGET_GSI_IEEE_FP_CONTROL:
7853 {
7854 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7855
7856 /* Copied from linux ieee_fpcr_to_swcr. */
7857 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7858 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7859 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7860 | SWCR_TRAP_ENABLE_DZE
7861 | SWCR_TRAP_ENABLE_OVF);
7862 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7863 | SWCR_TRAP_ENABLE_INE);
7864 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7865 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7866
7867 if (put_user_u64 (swcr, arg2))
7868 goto efault;
7869 ret = 0;
7870 }
7871 break;
7872
7873 /* case GSI_IEEE_STATE_AT_SIGNAL:
7874 -- Not implemented in linux kernel.
7875 case GSI_UACPROC:
7876 -- Retrieves current unaligned access state; not much used.
7877 case GSI_PROC_TYPE:
7878 -- Retrieves implver information; surely not used.
7879 case GSI_GET_HWRPB:
7880 -- Grabs a copy of the HWRPB; surely not used.
7881 */
7882 }
7883 break;
7884 #endif
7885 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7886 /* Alpha specific */
7887 case TARGET_NR_osf_setsysinfo:
7888 ret = -TARGET_EOPNOTSUPP;
7889 switch (arg1) {
7890 case TARGET_SSI_IEEE_FP_CONTROL:
7891 {
7892 uint64_t swcr, fpcr, orig_fpcr;
7893
7894 if (get_user_u64 (swcr, arg2)) {
7895 goto efault;
7896 }
7897 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7898 fpcr = orig_fpcr & FPCR_DYN_MASK;
7899
7900 /* Copied from linux ieee_swcr_to_fpcr. */
7901 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7902 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7903 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7904 | SWCR_TRAP_ENABLE_DZE
7905 | SWCR_TRAP_ENABLE_OVF)) << 48;
7906 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7907 | SWCR_TRAP_ENABLE_INE)) << 57;
7908 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7909 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7910
7911 cpu_alpha_store_fpcr(cpu_env, fpcr);
7912 ret = 0;
7913 }
7914 break;
7915
7916 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7917 {
7918 uint64_t exc, fpcr, orig_fpcr;
7919 int si_code;
7920
7921 if (get_user_u64(exc, arg2)) {
7922 goto efault;
7923 }
7924
7925 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7926
7927 /* We only add to the exception status here. */
7928 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7929
7930 cpu_alpha_store_fpcr(cpu_env, fpcr);
7931 ret = 0;
7932
7933 /* Old exceptions are not signaled. */
7934 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7935
7936 /* If any exceptions set by this call,
7937 and are unmasked, send a signal. */
7938 si_code = 0;
7939 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7940 si_code = TARGET_FPE_FLTRES;
7941 }
7942 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7943 si_code = TARGET_FPE_FLTUND;
7944 }
7945 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7946 si_code = TARGET_FPE_FLTOVF;
7947 }
7948 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7949 si_code = TARGET_FPE_FLTDIV;
7950 }
7951 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7952 si_code = TARGET_FPE_FLTINV;
7953 }
7954 if (si_code != 0) {
7955 target_siginfo_t info;
7956 info.si_signo = SIGFPE;
7957 info.si_errno = 0;
7958 info.si_code = si_code;
7959 info._sifields._sigfault._addr
7960 = ((CPUArchState *)cpu_env)->pc;
7961 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7962 }
7963 }
7964 break;
7965
7966 /* case SSI_NVPAIRS:
7967 -- Used with SSIN_UACPROC to enable unaligned accesses.
7968 case SSI_IEEE_STATE_AT_SIGNAL:
7969 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7970 -- Not implemented in linux kernel
7971 */
7972 }
7973 break;
7974 #endif
7975 #ifdef TARGET_NR_osf_sigprocmask
7976 /* Alpha specific. */
7977 case TARGET_NR_osf_sigprocmask:
7978 {
7979 abi_ulong mask;
7980 int how;
7981 sigset_t set, oldset;
7982
7983 switch(arg1) {
7984 case TARGET_SIG_BLOCK:
7985 how = SIG_BLOCK;
7986 break;
7987 case TARGET_SIG_UNBLOCK:
7988 how = SIG_UNBLOCK;
7989 break;
7990 case TARGET_SIG_SETMASK:
7991 how = SIG_SETMASK;
7992 break;
7993 default:
7994 ret = -TARGET_EINVAL;
7995 goto fail;
7996 }
7997 mask = arg2;
7998 target_to_host_old_sigset(&set, &mask);
7999 sigprocmask(how, &set, &oldset);
8000 host_to_target_old_sigset(&mask, &oldset);
8001 ret = mask;
8002 }
8003 break;
8004 #endif
8005
8006 #ifdef TARGET_NR_getgid32
8007 case TARGET_NR_getgid32:
8008 ret = get_errno(getgid());
8009 break;
8010 #endif
8011 #ifdef TARGET_NR_geteuid32
8012 case TARGET_NR_geteuid32:
8013 ret = get_errno(geteuid());
8014 break;
8015 #endif
8016 #ifdef TARGET_NR_getegid32
8017 case TARGET_NR_getegid32:
8018 ret = get_errno(getegid());
8019 break;
8020 #endif
8021 #ifdef TARGET_NR_setreuid32
8022 case TARGET_NR_setreuid32:
8023 ret = get_errno(setreuid(arg1, arg2));
8024 break;
8025 #endif
8026 #ifdef TARGET_NR_setregid32
8027 case TARGET_NR_setregid32:
8028 ret = get_errno(setregid(arg1, arg2));
8029 break;
8030 #endif
8031 #ifdef TARGET_NR_getgroups32
8032 case TARGET_NR_getgroups32:
8033 {
8034 int gidsetsize = arg1;
8035 uint32_t *target_grouplist;
8036 gid_t *grouplist;
8037 int i;
8038
8039 grouplist = alloca(gidsetsize * sizeof(gid_t));
8040 ret = get_errno(getgroups(gidsetsize, grouplist));
8041 if (gidsetsize == 0)
8042 break;
8043 if (!is_error(ret)) {
8044 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8045 if (!target_grouplist) {
8046 ret = -TARGET_EFAULT;
8047 goto fail;
8048 }
8049 for(i = 0;i < ret; i++)
8050 target_grouplist[i] = tswap32(grouplist[i]);
8051 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8052 }
8053 }
8054 break;
8055 #endif
8056 #ifdef TARGET_NR_setgroups32
8057 case TARGET_NR_setgroups32:
8058 {
8059 int gidsetsize = arg1;
8060 uint32_t *target_grouplist;
8061 gid_t *grouplist;
8062 int i;
8063
8064 grouplist = alloca(gidsetsize * sizeof(gid_t));
8065 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8066 if (!target_grouplist) {
8067 ret = -TARGET_EFAULT;
8068 goto fail;
8069 }
8070 for(i = 0;i < gidsetsize; i++)
8071 grouplist[i] = tswap32(target_grouplist[i]);
8072 unlock_user(target_grouplist, arg2, 0);
8073 ret = get_errno(setgroups(gidsetsize, grouplist));
8074 }
8075 break;
8076 #endif
8077 #ifdef TARGET_NR_fchown32
8078 case TARGET_NR_fchown32:
8079 ret = get_errno(fchown(arg1, arg2, arg3));
8080 break;
8081 #endif
8082 #ifdef TARGET_NR_setresuid32
8083 case TARGET_NR_setresuid32:
8084 ret = get_errno(setresuid(arg1, arg2, arg3));
8085 break;
8086 #endif
8087 #ifdef TARGET_NR_getresuid32
8088 case TARGET_NR_getresuid32:
8089 {
8090 uid_t ruid, euid, suid;
8091 ret = get_errno(getresuid(&ruid, &euid, &suid));
8092 if (!is_error(ret)) {
8093 if (put_user_u32(ruid, arg1)
8094 || put_user_u32(euid, arg2)
8095 || put_user_u32(suid, arg3))
8096 goto efault;
8097 }
8098 }
8099 break;
8100 #endif
8101 #ifdef TARGET_NR_setresgid32
8102 case TARGET_NR_setresgid32:
8103 ret = get_errno(setresgid(arg1, arg2, arg3));
8104 break;
8105 #endif
8106 #ifdef TARGET_NR_getresgid32
8107 case TARGET_NR_getresgid32:
8108 {
8109 gid_t rgid, egid, sgid;
8110 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8111 if (!is_error(ret)) {
8112 if (put_user_u32(rgid, arg1)
8113 || put_user_u32(egid, arg2)
8114 || put_user_u32(sgid, arg3))
8115 goto efault;
8116 }
8117 }
8118 break;
8119 #endif
8120 #ifdef TARGET_NR_chown32
8121 case TARGET_NR_chown32:
8122 if (!(p = lock_user_string(arg1)))
8123 goto efault;
8124 ret = get_errno(chown(p, arg2, arg3));
8125 unlock_user(p, arg1, 0);
8126 break;
8127 #endif
8128 #ifdef TARGET_NR_setuid32
8129 case TARGET_NR_setuid32:
8130 ret = get_errno(setuid(arg1));
8131 break;
8132 #endif
8133 #ifdef TARGET_NR_setgid32
8134 case TARGET_NR_setgid32:
8135 ret = get_errno(setgid(arg1));
8136 break;
8137 #endif
8138 #ifdef TARGET_NR_setfsuid32
8139 case TARGET_NR_setfsuid32:
8140 ret = get_errno(setfsuid(arg1));
8141 break;
8142 #endif
8143 #ifdef TARGET_NR_setfsgid32
8144 case TARGET_NR_setfsgid32:
8145 ret = get_errno(setfsgid(arg1));
8146 break;
8147 #endif
8148
8149 case TARGET_NR_pivot_root:
8150 goto unimplemented;
8151 #ifdef TARGET_NR_mincore
8152 case TARGET_NR_mincore:
8153 {
8154 void *a;
8155 ret = -TARGET_EFAULT;
8156 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8157 goto efault;
8158 if (!(p = lock_user_string(arg3)))
8159 goto mincore_fail;
8160 ret = get_errno(mincore(a, arg2, p));
8161 unlock_user(p, arg3, ret);
8162 mincore_fail:
8163 unlock_user(a, arg1, 0);
8164 }
8165 break;
8166 #endif
8167 #ifdef TARGET_NR_arm_fadvise64_64
8168 case TARGET_NR_arm_fadvise64_64:
8169 {
8170 /*
8171 * arm_fadvise64_64 looks like fadvise64_64 but
8172 * with different argument order
8173 */
8174 abi_long temp;
8175 temp = arg3;
8176 arg3 = arg4;
8177 arg4 = temp;
8178 }
8179 #endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants for 4..7; remap to the
         * host POSIX_FADV_* values (4 and 5 are mapped to deliberately
         * invalid values so the host rejects them).
         */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        /* posix_fadvise() returns a positive errno value directly (it does
         * not set the errno variable), so it must be translated to the
         * target's errno numbering rather than negated raw — otherwise a
         * host errno value leaks into the guest.
         */
        ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
        break;
#endif
8199 #ifdef TARGET_NR_madvise
8200 case TARGET_NR_madvise:
8201 /* A straight passthrough may not be safe because qemu sometimes
8202 turns private file-backed mappings into anonymous mappings.
8203 This will break MADV_DONTNEED.
8204 This is a hint, so ignoring and returning success is ok. */
8205 ret = get_errno(0);
8206 break;
8207 #endif
8208 #if TARGET_ABI_BITS == 32
8209 case TARGET_NR_fcntl64:
8210 {
8211 int cmd;
8212 struct flock64 fl;
8213 struct target_flock64 *target_fl;
8214 #ifdef TARGET_ARM
8215 struct target_eabi_flock64 *target_efl;
8216 #endif
8217
8218 cmd = target_to_host_fcntl_cmd(arg2);
8219 if (cmd == -TARGET_EINVAL) {
8220 ret = cmd;
8221 break;
8222 }
8223
8224 switch(arg2) {
8225 case TARGET_F_GETLK64:
8226 #ifdef TARGET_ARM
8227 if (((CPUARMState *)cpu_env)->eabi) {
8228 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8229 goto efault;
8230 fl.l_type = tswap16(target_efl->l_type);
8231 fl.l_whence = tswap16(target_efl->l_whence);
8232 fl.l_start = tswap64(target_efl->l_start);
8233 fl.l_len = tswap64(target_efl->l_len);
8234 fl.l_pid = tswap32(target_efl->l_pid);
8235 unlock_user_struct(target_efl, arg3, 0);
8236 } else
8237 #endif
8238 {
8239 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8240 goto efault;
8241 fl.l_type = tswap16(target_fl->l_type);
8242 fl.l_whence = tswap16(target_fl->l_whence);
8243 fl.l_start = tswap64(target_fl->l_start);
8244 fl.l_len = tswap64(target_fl->l_len);
8245 fl.l_pid = tswap32(target_fl->l_pid);
8246 unlock_user_struct(target_fl, arg3, 0);
8247 }
8248 ret = get_errno(fcntl(arg1, cmd, &fl));
8249 if (ret == 0) {
8250 #ifdef TARGET_ARM
8251 if (((CPUARMState *)cpu_env)->eabi) {
8252 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8253 goto efault;
8254 target_efl->l_type = tswap16(fl.l_type);
8255 target_efl->l_whence = tswap16(fl.l_whence);
8256 target_efl->l_start = tswap64(fl.l_start);
8257 target_efl->l_len = tswap64(fl.l_len);
8258 target_efl->l_pid = tswap32(fl.l_pid);
8259 unlock_user_struct(target_efl, arg3, 1);
8260 } else
8261 #endif
8262 {
8263 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8264 goto efault;
8265 target_fl->l_type = tswap16(fl.l_type);
8266 target_fl->l_whence = tswap16(fl.l_whence);
8267 target_fl->l_start = tswap64(fl.l_start);
8268 target_fl->l_len = tswap64(fl.l_len);
8269 target_fl->l_pid = tswap32(fl.l_pid);
8270 unlock_user_struct(target_fl, arg3, 1);
8271 }
8272 }
8273 break;
8274
8275 case TARGET_F_SETLK64:
8276 case TARGET_F_SETLKW64:
8277 #ifdef TARGET_ARM
8278 if (((CPUARMState *)cpu_env)->eabi) {
8279 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8280 goto efault;
8281 fl.l_type = tswap16(target_efl->l_type);
8282 fl.l_whence = tswap16(target_efl->l_whence);
8283 fl.l_start = tswap64(target_efl->l_start);
8284 fl.l_len = tswap64(target_efl->l_len);
8285 fl.l_pid = tswap32(target_efl->l_pid);
8286 unlock_user_struct(target_efl, arg3, 0);
8287 } else
8288 #endif
8289 {
8290 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8291 goto efault;
8292 fl.l_type = tswap16(target_fl->l_type);
8293 fl.l_whence = tswap16(target_fl->l_whence);
8294 fl.l_start = tswap64(target_fl->l_start);
8295 fl.l_len = tswap64(target_fl->l_len);
8296 fl.l_pid = tswap32(target_fl->l_pid);
8297 unlock_user_struct(target_fl, arg3, 0);
8298 }
8299 ret = get_errno(fcntl(arg1, cmd, &fl));
8300 break;
8301 default:
8302 ret = do_fcntl(arg1, arg2, arg3);
8303 break;
8304 }
8305 break;
8306 }
8307 #endif
8308 #ifdef TARGET_NR_cacheflush
8309 case TARGET_NR_cacheflush:
8310 /* self-modifying code is handled automatically, so nothing needed */
8311 ret = 0;
8312 break;
8313 #endif
8314 #ifdef TARGET_NR_security
8315 case TARGET_NR_security:
8316 goto unimplemented;
8317 #endif
8318 #ifdef TARGET_NR_getpagesize
8319 case TARGET_NR_getpagesize:
8320 ret = TARGET_PAGE_SIZE;
8321 break;
8322 #endif
8323 case TARGET_NR_gettid:
8324 ret = get_errno(gettid());
8325 break;
8326 #ifdef TARGET_NR_readahead
8327 case TARGET_NR_readahead:
8328 #if TARGET_ABI_BITS == 32
8329 if (regpairs_aligned(cpu_env)) {
8330 arg2 = arg3;
8331 arg3 = arg4;
8332 arg4 = arg5;
8333 }
8334 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8335 #else
8336 ret = get_errno(readahead(arg1, arg2, arg3));
8337 #endif
8338 break;
8339 #endif
8340 #ifdef CONFIG_ATTR
8341 #ifdef TARGET_NR_setxattr
8342 case TARGET_NR_listxattr:
8343 case TARGET_NR_llistxattr:
8344 {
8345 void *p, *b = 0;
8346 if (arg2) {
8347 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8348 if (!b) {
8349 ret = -TARGET_EFAULT;
8350 break;
8351 }
8352 }
8353 p = lock_user_string(arg1);
8354 if (p) {
8355 if (num == TARGET_NR_listxattr) {
8356 ret = get_errno(listxattr(p, b, arg3));
8357 } else {
8358 ret = get_errno(llistxattr(p, b, arg3));
8359 }
8360 } else {
8361 ret = -TARGET_EFAULT;
8362 }
8363 unlock_user(p, arg1, 0);
8364 unlock_user(b, arg2, arg3);
8365 break;
8366 }
8367 case TARGET_NR_flistxattr:
8368 {
8369 void *b = 0;
8370 if (arg2) {
8371 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8372 if (!b) {
8373 ret = -TARGET_EFAULT;
8374 break;
8375 }
8376 }
8377 ret = get_errno(flistxattr(arg1, b, arg3));
8378 unlock_user(b, arg2, arg3);
8379 break;
8380 }
8381 case TARGET_NR_setxattr:
8382 case TARGET_NR_lsetxattr:
8383 {
8384 void *p, *n, *v = 0;
8385 if (arg3) {
8386 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8387 if (!v) {
8388 ret = -TARGET_EFAULT;
8389 break;
8390 }
8391 }
8392 p = lock_user_string(arg1);
8393 n = lock_user_string(arg2);
8394 if (p && n) {
8395 if (num == TARGET_NR_setxattr) {
8396 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8397 } else {
8398 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8399 }
8400 } else {
8401 ret = -TARGET_EFAULT;
8402 }
8403 unlock_user(p, arg1, 0);
8404 unlock_user(n, arg2, 0);
8405 unlock_user(v, arg3, 0);
8406 }
8407 break;
8408 case TARGET_NR_fsetxattr:
8409 {
8410 void *n, *v = 0;
8411 if (arg3) {
8412 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8413 if (!v) {
8414 ret = -TARGET_EFAULT;
8415 break;
8416 }
8417 }
8418 n = lock_user_string(arg2);
8419 if (n) {
8420 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8421 } else {
8422 ret = -TARGET_EFAULT;
8423 }
8424 unlock_user(n, arg2, 0);
8425 unlock_user(v, arg3, 0);
8426 }
8427 break;
8428 case TARGET_NR_getxattr:
8429 case TARGET_NR_lgetxattr:
8430 {
8431 void *p, *n, *v = 0;
8432 if (arg3) {
8433 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8434 if (!v) {
8435 ret = -TARGET_EFAULT;
8436 break;
8437 }
8438 }
8439 p = lock_user_string(arg1);
8440 n = lock_user_string(arg2);
8441 if (p && n) {
8442 if (num == TARGET_NR_getxattr) {
8443 ret = get_errno(getxattr(p, n, v, arg4));
8444 } else {
8445 ret = get_errno(lgetxattr(p, n, v, arg4));
8446 }
8447 } else {
8448 ret = -TARGET_EFAULT;
8449 }
8450 unlock_user(p, arg1, 0);
8451 unlock_user(n, arg2, 0);
8452 unlock_user(v, arg3, arg4);
8453 }
8454 break;
8455 case TARGET_NR_fgetxattr:
8456 {
8457 void *n, *v = 0;
8458 if (arg3) {
8459 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8460 if (!v) {
8461 ret = -TARGET_EFAULT;
8462 break;
8463 }
8464 }
8465 n = lock_user_string(arg2);
8466 if (n) {
8467 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8468 } else {
8469 ret = -TARGET_EFAULT;
8470 }
8471 unlock_user(n, arg2, 0);
8472 unlock_user(v, arg3, arg4);
8473 }
8474 break;
8475 case TARGET_NR_removexattr:
8476 case TARGET_NR_lremovexattr:
8477 {
8478 void *p, *n;
8479 p = lock_user_string(arg1);
8480 n = lock_user_string(arg2);
8481 if (p && n) {
8482 if (num == TARGET_NR_removexattr) {
8483 ret = get_errno(removexattr(p, n));
8484 } else {
8485 ret = get_errno(lremovexattr(p, n));
8486 }
8487 } else {
8488 ret = -TARGET_EFAULT;
8489 }
8490 unlock_user(p, arg1, 0);
8491 unlock_user(n, arg2, 0);
8492 }
8493 break;
8494 case TARGET_NR_fremovexattr:
8495 {
8496 void *n;
8497 n = lock_user_string(arg2);
8498 if (n) {
8499 ret = get_errno(fremovexattr(arg1, n));
8500 } else {
8501 ret = -TARGET_EFAULT;
8502 }
8503 unlock_user(n, arg2, 0);
8504 }
8505 break;
8506 #endif
8507 #endif /* CONFIG_ATTR */
8508 #ifdef TARGET_NR_set_thread_area
8509 case TARGET_NR_set_thread_area:
8510 #if defined(TARGET_MIPS)
8511 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8512 ret = 0;
8513 break;
8514 #elif defined(TARGET_CRIS)
8515 if (arg1 & 0xff)
8516 ret = -TARGET_EINVAL;
8517 else {
8518 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8519 ret = 0;
8520 }
8521 break;
8522 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8523 ret = do_set_thread_area(cpu_env, arg1);
8524 break;
8525 #elif defined(TARGET_M68K)
8526 {
8527 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8528 ts->tp_value = arg1;
8529 break;
8530 }
8531 #else
8532 goto unimplemented_nowarn;
8533 #endif
8534 #endif
8535 #ifdef TARGET_NR_get_thread_area
8536 case TARGET_NR_get_thread_area:
8537 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8538 ret = do_get_thread_area(cpu_env, arg1);
8539 break;
8540 #elif defined(TARGET_M68K)
8541 {
8542 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8543 ret = ts->tp_value;
8544 break;
8545 }
8546 #else
8547 goto unimplemented_nowarn;
8548 #endif
8549 #endif
8550 #ifdef TARGET_NR_getdomainname
8551 case TARGET_NR_getdomainname:
8552 goto unimplemented_nowarn;
8553 #endif
8554
8555 #ifdef TARGET_NR_clock_gettime
8556 case TARGET_NR_clock_gettime:
8557 {
8558 struct timespec ts;
8559 ret = get_errno(clock_gettime(arg1, &ts));
8560 if (!is_error(ret)) {
8561 host_to_target_timespec(arg2, &ts);
8562 }
8563 break;
8564 }
8565 #endif
8566 #ifdef TARGET_NR_clock_getres
8567 case TARGET_NR_clock_getres:
8568 {
8569 struct timespec ts;
8570 ret = get_errno(clock_getres(arg1, &ts));
8571 if (!is_error(ret)) {
8572 host_to_target_timespec(arg2, &ts);
8573 }
8574 break;
8575 }
8576 #endif
8577 #ifdef TARGET_NR_clock_nanosleep
8578 case TARGET_NR_clock_nanosleep:
8579 {
8580 struct timespec ts;
8581 target_to_host_timespec(&ts, arg3);
8582 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8583 if (arg4)
8584 host_to_target_timespec(arg4, &ts);
8585 break;
8586 }
8587 #endif
8588
8589 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8590 case TARGET_NR_set_tid_address:
8591 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8592 break;
8593 #endif
8594
8595 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8596 case TARGET_NR_tkill:
8597 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8598 break;
8599 #endif
8600
8601 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8602 case TARGET_NR_tgkill:
8603 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8604 target_to_host_signal(arg3)));
8605 break;
8606 #endif
8607
8608 #ifdef TARGET_NR_set_robust_list
8609 case TARGET_NR_set_robust_list:
8610 case TARGET_NR_get_robust_list:
8611 /* The ABI for supporting robust futexes has userspace pass
8612 * the kernel a pointer to a linked list which is updated by
8613 * userspace after the syscall; the list is walked by the kernel
8614 * when the thread exits. Since the linked list in QEMU guest
8615 * memory isn't a valid linked list for the host and we have
8616 * no way to reliably intercept the thread-death event, we can't
8617 * support these. Silently return ENOSYS so that guest userspace
8618 * falls back to a non-robust futex implementation (which should
8619 * be OK except in the corner case of the guest crashing while
8620 * holding a mutex that is shared with another process via
8621 * shared memory).
8622 */
8623 goto unimplemented_nowarn;
8624 #endif
8625
8626 #if defined(TARGET_NR_utimensat)
8627 case TARGET_NR_utimensat:
8628 {
8629 struct timespec *tsp, ts[2];
8630 if (!arg3) {
8631 tsp = NULL;
8632 } else {
8633 target_to_host_timespec(ts, arg3);
8634 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8635 tsp = ts;
8636 }
8637 if (!arg2)
8638 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8639 else {
8640 if (!(p = lock_user_string(arg2))) {
8641 ret = -TARGET_EFAULT;
8642 goto fail;
8643 }
8644 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8645 unlock_user(p, arg2, 0);
8646 }
8647 }
8648 break;
8649 #endif
8650 case TARGET_NR_futex:
8651 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8652 break;
8653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8654 case TARGET_NR_inotify_init:
8655 ret = get_errno(sys_inotify_init());
8656 break;
8657 #endif
8658 #ifdef CONFIG_INOTIFY1
8659 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8660 case TARGET_NR_inotify_init1:
8661 ret = get_errno(sys_inotify_init1(arg1));
8662 break;
8663 #endif
8664 #endif
8665 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8666 case TARGET_NR_inotify_add_watch:
8667 p = lock_user_string(arg2);
8668 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8669 unlock_user(p, arg2, 0);
8670 break;
8671 #endif
8672 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8673 case TARGET_NR_inotify_rm_watch:
8674 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8675 break;
8676 #endif
8677
8678 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8679 case TARGET_NR_mq_open:
8680 {
8681 struct mq_attr posix_mq_attr;
8682
8683 p = lock_user_string(arg1 - 1);
8684 if (arg4 != 0)
8685 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8686 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8687 unlock_user (p, arg1, 0);
8688 }
8689 break;
8690
8691 case TARGET_NR_mq_unlink:
8692 p = lock_user_string(arg1 - 1);
8693 ret = get_errno(mq_unlink(p));
8694 unlock_user (p, arg1, 0);
8695 break;
8696
8697 case TARGET_NR_mq_timedsend:
8698 {
8699 struct timespec ts;
8700
8701 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8702 if (arg5 != 0) {
8703 target_to_host_timespec(&ts, arg5);
8704 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8705 host_to_target_timespec(arg5, &ts);
8706 }
8707 else
8708 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8709 unlock_user (p, arg2, arg3);
8710 }
8711 break;
8712
8713 case TARGET_NR_mq_timedreceive:
8714 {
8715 struct timespec ts;
8716 unsigned int prio;
8717
8718 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8719 if (arg5 != 0) {
8720 target_to_host_timespec(&ts, arg5);
8721 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8722 host_to_target_timespec(arg5, &ts);
8723 }
8724 else
8725 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8726 unlock_user (p, arg2, arg3);
8727 if (arg4 != 0)
8728 put_user_u32(prio, arg4);
8729 }
8730 break;
8731
8732 /* Not implemented for now... */
8733 /* case TARGET_NR_mq_notify: */
8734 /* break; */
8735
8736 case TARGET_NR_mq_getsetattr:
8737 {
8738 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8739 ret = 0;
8740 if (arg3 != 0) {
8741 ret = mq_getattr(arg1, &posix_mq_attr_out);
8742 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8743 }
8744 if (arg2 != 0) {
8745 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8746 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8747 }
8748
8749 }
8750 break;
8751 #endif
8752
8753 #ifdef CONFIG_SPLICE
8754 #ifdef TARGET_NR_tee
8755 case TARGET_NR_tee:
8756 {
8757 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8758 }
8759 break;
8760 #endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                /* The output offset lives at arg4; it was previously read
                 * from arg2, so splice with an out-offset used the input
                 * offset's guest address.
                 */
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
8778 #ifdef TARGET_NR_vmsplice
8779 case TARGET_NR_vmsplice:
8780 {
8781 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8782 if (vec != NULL) {
8783 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8784 unlock_iovec(vec, arg2, arg3, 0);
8785 } else {
8786 ret = -host_to_target_errno(errno);
8787 }
8788 }
8789 break;
8790 #endif
8791 #endif /* CONFIG_SPLICE */
8792 #ifdef CONFIG_EVENTFD
8793 #if defined(TARGET_NR_eventfd)
8794 case TARGET_NR_eventfd:
8795 ret = get_errno(eventfd(arg1, 0));
8796 break;
8797 #endif
8798 #if defined(TARGET_NR_eventfd2)
8799 case TARGET_NR_eventfd2:
8800 {
8801 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8802 if (arg2 & TARGET_O_NONBLOCK) {
8803 host_flags |= O_NONBLOCK;
8804 }
8805 if (arg2 & TARGET_O_CLOEXEC) {
8806 host_flags |= O_CLOEXEC;
8807 }
8808 ret = get_errno(eventfd(arg1, host_flags));
8809 break;
8810 }
8811 #endif
8812 #endif /* CONFIG_EVENTFD */
8813 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8814 case TARGET_NR_fallocate:
8815 #if TARGET_ABI_BITS == 32
8816 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8817 target_offset64(arg5, arg6)));
8818 #else
8819 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8820 #endif
8821 break;
8822 #endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* 32-bit MIPS aligns 64-bit register pairs, so offset/nbytes
         * start at arg3 and the flags word lands in arg7. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered:
         * the flags word arrives in arg2 so the 64-bit pairs stay
         * register-aligned. */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* arg1 is the historical size hint; passed through unchanged. */
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        /* NOTE(review): arg1 is a flags word passed through untranslated
         * -- assumes target and host EPOLL_CLOEXEC values agree; confirm
         * for all supported targets. */
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        /* arg4 may legitimately be 0 (e.g. for a delete op), in which
         * case a NULL event pointer is passed to the host. */
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation for epoll_wait and epoll_pwait: collect
         * events into a host-side array, then byte-swap them into the
         * guest's buffer on success. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        /* NOTE(review): maxevents is guest-controlled and unvalidated; a
         * huge or negative value makes this alloca misbehave -- confirm
         * whether a bound check is needed here. */
        ep = alloca(maxevents * sizeof(struct epoll_event));

        /* Dispatch on the original syscall number to pick the variant. */
        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    /* Release the events buffer before bailing out. */
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                /* NULL sigmask: leave the signal mask unchanged. */
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* On success ret is the number of ready events; swap each one
             * back into the guest's buffer. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        /* NOTE(review): on failure ret is a negative errno, so this passes
         * a negative length to unlock_user -- confirm unlock_user tolerates
         * that (it appears to ignore len except under DEBUG_REMAP). */
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        /* arg3 == 0 means "query only": no new limit is installed. */
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        /* Copy the previous limits back to the guest only when requested
         * (arg4 non-NULL) and the host call succeeded. */
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Write the host's hostname directly into the guest buffer of
         * arg2 bytes at guest address arg1. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
    default:
    unimplemented:
        /* Reached for unknown syscall numbers and via "goto unimplemented"
         * from syscalls this build recognizes but does not emulate. */
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
        /* Same as above but without the log line, for syscalls that guests
         * probe routinely (hence "nowarn"). */
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    /* Common exit for guest-memory access failures in the cases above. */
    ret = -TARGET_EFAULT;
    goto fail;
}