qemu.git (git.proxmox.com): linux-user/syscall.c, blob ed2c9305b1347e38bf82611ee191ea9012d49e42
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include "linux_loop.h"
109 #include "cpu-uname.h"
110
111 #include "qemu.h"
112
113 #if defined(CONFIG_USE_NPTL)
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
116 #else
117 /* XXX: Hardcode the above values. */
118 #define CLONE_NPTL_FLAGS2 0
119 #endif
120
121 //#define DEBUG
122
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
126
127
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
135
136 #define _syscall0(type,name) \
137 static type name (void) \
138 { \
139 return syscall(__NR_##name); \
140 }
141
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
144 { \
145 return syscall(__NR_##name, arg1); \
146 }
147
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
150 { \
151 return syscall(__NR_##name, arg1, arg2); \
152 }
153
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
156 { \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
158 }
159
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
162 { \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
164 }
165
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
169 { \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
171 }
172
173
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 type6 arg6) \
178 { \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
180 }
181
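/*
 * For illustration, a use such as
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 * expands to a thin static wrapper:
 *     static int sys_tkill(int tid, int sig)
 *     { return syscall(__NR_sys_tkill, tid, sig); }
 * i.e. these wrappers return -1 and set the *host* errno on failure,
 * which callers later fold into target errnos via get_errno().
 */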
182
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_tgkill __NR_tgkill
191 #define __NR_sys_tkill __NR_tkill
192 #define __NR_sys_futex __NR_futex
193 #define __NR_sys_inotify_init __NR_inotify_init
194 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
195 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
196
197 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
198 defined(__s390x__)
199 #define __NR__llseek __NR_lseek
200 #endif
201
202 #ifdef __NR_gettid
203 _syscall0(int, gettid)
204 #else
205 /* This is a replacement for the host gettid() and must return a host
206 errno. */
207 static int gettid(void) {
208 return -ENOSYS;
209 }
210 #endif
211 #ifdef __NR_getdents
212 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
213 #endif
214 #if !defined(__NR_getdents) || \
215 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
220 loff_t *, res, uint, wh);
221 #endif
222 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
223 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
226 #endif
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill,int,tid,int,sig)
229 #endif
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group,int,error_code)
232 #endif
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address,int *,tidptr)
235 #endif
236 #if defined(CONFIG_USE_NPTL)
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
239 const struct timespec *,timeout,int *,uaddr2,int,val3)
240 #endif
241 #endif
242 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
243 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
244 unsigned long *, user_mask_ptr);
245 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
246 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
249 void *, arg);
250
251 static bitmask_transtbl fcntl_flags_tbl[] = {
252 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
253 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
254 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
255 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
256 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
257 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
258 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
259 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
260 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
261 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
262 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
263 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
264 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
265 #if defined(O_DIRECT)
266 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
267 #endif
268 #if defined(O_NOATIME)
269 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
270 #endif
271 #if defined(O_CLOEXEC)
272 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
273 #endif
274 #if defined(O_PATH)
275 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
276 #endif
277 /* Don't terminate the list prematurely on 64-bit host+guest. */
278 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
279 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
280 #endif
281 { 0, 0, 0, 0 }
282 };
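/*
 * Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: a generic helper (presumably target_to_host_bitmask() and
 * its inverse) tests the guest flag word against target_mask/target_bits
 * and sets host_bits in the host flag word, so e.g. a guest O_APPEND maps
 * to the host O_APPEND even when the numeric values differ between ABIs.
 */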
283
284 #define COPY_UTSNAME_FIELD(dest, src) \
285 do { \
286 /* __NEW_UTS_LEN doesn't include terminating null */ \
287 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
288 (dest)[__NEW_UTS_LEN] = '\0'; \
289 } while (0)
290
291 static int sys_uname(struct new_utsname *buf)
292 {
293 struct utsname uts_buf;
294
295 if (uname(&uts_buf) < 0)
296 return (-1);
297
298 /*
299 * Just in case these have some differences, we
300 * translate utsname to new_utsname (which is the
301 * struct linux kernel uses).
302 */
303
304 memset(buf, 0, sizeof(*buf));
305 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
306 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
307 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
308 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
309 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
310 #ifdef _GNU_SOURCE
311 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
312 #endif
313 return (0);
314
315 #undef COPY_UTSNAME_FIELD
316 }
317
318 static int sys_getcwd1(char *buf, size_t size)
319 {
320 if (getcwd(buf, size) == NULL) {
321 /* getcwd() sets errno */
322 return (-1);
323 }
324 return strlen(buf)+1;
325 }
326
327 #ifdef TARGET_NR_openat
328 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
329 {
330 /*
331      * open(2) takes an extra 'mode' parameter when called with
332      * the O_CREAT flag.
333 */
334 if ((flags & O_CREAT) != 0) {
335 return (openat(dirfd, pathname, flags, mode));
336 }
337 return (openat(dirfd, pathname, flags));
338 }
339 #endif
340
341 #ifdef CONFIG_UTIMENSAT
342 static int sys_utimensat(int dirfd, const char *pathname,
343 const struct timespec times[2], int flags)
344 {
345 if (pathname == NULL)
346 return futimens(dirfd, times);
347 else
348 return utimensat(dirfd, pathname, times, flags);
349 }
350 #else
351 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
352 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
353 const struct timespec *,tsp,int,flags)
354 #endif
355 #endif /* CONFIG_UTIMENSAT */
356
357 #ifdef CONFIG_INOTIFY
358 #include <sys/inotify.h>
359
360 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
361 static int sys_inotify_init(void)
362 {
363 return (inotify_init());
364 }
365 #endif
366 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
367 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
368 {
369 return (inotify_add_watch(fd, pathname, mask));
370 }
371 #endif
372 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
373 static int sys_inotify_rm_watch(int fd, int32_t wd)
374 {
375 return (inotify_rm_watch(fd, wd));
376 }
377 #endif
378 #ifdef CONFIG_INOTIFY1
379 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
380 static int sys_inotify_init1(int flags)
381 {
382 return (inotify_init1(flags));
383 }
384 #endif
385 #endif
386 #else
387 /* Userspace can usually survive runtime without inotify */
388 #undef TARGET_NR_inotify_init
389 #undef TARGET_NR_inotify_init1
390 #undef TARGET_NR_inotify_add_watch
391 #undef TARGET_NR_inotify_rm_watch
392 #endif /* CONFIG_INOTIFY */
393
394 #if defined(TARGET_NR_ppoll)
395 #ifndef __NR_ppoll
396 # define __NR_ppoll -1
397 #endif
398 #define __NR_sys_ppoll __NR_ppoll
399 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
400 struct timespec *, timeout, const __sigset_t *, sigmask,
401 size_t, sigsetsize)
402 #endif
403
404 #if defined(TARGET_NR_pselect6)
405 #ifndef __NR_pselect6
406 # define __NR_pselect6 -1
407 #endif
408 #define __NR_sys_pselect6 __NR_pselect6
409 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
410 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
411 #endif
412
413 #if defined(TARGET_NR_prlimit64)
414 #ifndef __NR_prlimit64
415 # define __NR_prlimit64 -1
416 #endif
417 #define __NR_sys_prlimit64 __NR_prlimit64
418 /* The glibc rlimit structure may not match the one used by the underlying syscall */
419 struct host_rlimit64 {
420 uint64_t rlim_cur;
421 uint64_t rlim_max;
422 };
423 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
424 const struct host_rlimit64 *, new_limit,
425 struct host_rlimit64 *, old_limit)
426 #endif
427
428 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
429 #ifdef TARGET_ARM
430 static inline int regpairs_aligned(void *cpu_env) {
431 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
432 }
433 #elif defined(TARGET_MIPS)
434 static inline int regpairs_aligned(void *cpu_env) { return 1; }
435 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
436 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
437 * of registers which translates to the same as ARM/MIPS, because we start with
438 * r3 as arg1 */
439 static inline int regpairs_aligned(void *cpu_env) { return 1; }
440 #else
441 static inline int regpairs_aligned(void *cpu_env) { return 0; }
442 #endif
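/*
 * Example of what regpairs_aligned() controls: on ARM EABI a 64-bit
 * syscall argument must start in an even-numbered register, so for a
 * call such as pread64(fd, buf, count, offset) the caller inserts a dummy
 * padding argument and the 64-bit offset is then reassembled from an
 * aligned register pair; on ABIs where regpairs_aligned() returns 0 no
 * such padding word exists. (Illustrative summary, not exhaustive.)
 */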
443
444 #define ERRNO_TABLE_SIZE 1200
445
446 /* target_to_host_errno_table[] is initialized from
447 * host_to_target_errno_table[] in syscall_init(). */
448 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
449 };
450
451 /*
452 * This list is the union of errno values overridden in asm-<arch>/errno.h
453 * minus the errnos that are not actually generic to all archs.
454 */
455 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
456 [EIDRM] = TARGET_EIDRM,
457 [ECHRNG] = TARGET_ECHRNG,
458 [EL2NSYNC] = TARGET_EL2NSYNC,
459 [EL3HLT] = TARGET_EL3HLT,
460 [EL3RST] = TARGET_EL3RST,
461 [ELNRNG] = TARGET_ELNRNG,
462 [EUNATCH] = TARGET_EUNATCH,
463 [ENOCSI] = TARGET_ENOCSI,
464 [EL2HLT] = TARGET_EL2HLT,
465 [EDEADLK] = TARGET_EDEADLK,
466 [ENOLCK] = TARGET_ENOLCK,
467 [EBADE] = TARGET_EBADE,
468 [EBADR] = TARGET_EBADR,
469 [EXFULL] = TARGET_EXFULL,
470 [ENOANO] = TARGET_ENOANO,
471 [EBADRQC] = TARGET_EBADRQC,
472 [EBADSLT] = TARGET_EBADSLT,
473 [EBFONT] = TARGET_EBFONT,
474 [ENOSTR] = TARGET_ENOSTR,
475 [ENODATA] = TARGET_ENODATA,
476 [ETIME] = TARGET_ETIME,
477 [ENOSR] = TARGET_ENOSR,
478 [ENONET] = TARGET_ENONET,
479 [ENOPKG] = TARGET_ENOPKG,
480 [EREMOTE] = TARGET_EREMOTE,
481 [ENOLINK] = TARGET_ENOLINK,
482 [EADV] = TARGET_EADV,
483 [ESRMNT] = TARGET_ESRMNT,
484 [ECOMM] = TARGET_ECOMM,
485 [EPROTO] = TARGET_EPROTO,
486 [EDOTDOT] = TARGET_EDOTDOT,
487 [EMULTIHOP] = TARGET_EMULTIHOP,
488 [EBADMSG] = TARGET_EBADMSG,
489 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
490 [EOVERFLOW] = TARGET_EOVERFLOW,
491 [ENOTUNIQ] = TARGET_ENOTUNIQ,
492 [EBADFD] = TARGET_EBADFD,
493 [EREMCHG] = TARGET_EREMCHG,
494 [ELIBACC] = TARGET_ELIBACC,
495 [ELIBBAD] = TARGET_ELIBBAD,
496 [ELIBSCN] = TARGET_ELIBSCN,
497 [ELIBMAX] = TARGET_ELIBMAX,
498 [ELIBEXEC] = TARGET_ELIBEXEC,
499 [EILSEQ] = TARGET_EILSEQ,
500 [ENOSYS] = TARGET_ENOSYS,
501 [ELOOP] = TARGET_ELOOP,
502 [ERESTART] = TARGET_ERESTART,
503 [ESTRPIPE] = TARGET_ESTRPIPE,
504 [ENOTEMPTY] = TARGET_ENOTEMPTY,
505 [EUSERS] = TARGET_EUSERS,
506 [ENOTSOCK] = TARGET_ENOTSOCK,
507 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
508 [EMSGSIZE] = TARGET_EMSGSIZE,
509 [EPROTOTYPE] = TARGET_EPROTOTYPE,
510 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
511 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
512 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
513 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
514 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
515 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
516 [EADDRINUSE] = TARGET_EADDRINUSE,
517 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
518 [ENETDOWN] = TARGET_ENETDOWN,
519 [ENETUNREACH] = TARGET_ENETUNREACH,
520 [ENETRESET] = TARGET_ENETRESET,
521 [ECONNABORTED] = TARGET_ECONNABORTED,
522 [ECONNRESET] = TARGET_ECONNRESET,
523 [ENOBUFS] = TARGET_ENOBUFS,
524 [EISCONN] = TARGET_EISCONN,
525 [ENOTCONN] = TARGET_ENOTCONN,
526 [EUCLEAN] = TARGET_EUCLEAN,
527 [ENOTNAM] = TARGET_ENOTNAM,
528 [ENAVAIL] = TARGET_ENAVAIL,
529 [EISNAM] = TARGET_EISNAM,
530 [EREMOTEIO] = TARGET_EREMOTEIO,
531 [ESHUTDOWN] = TARGET_ESHUTDOWN,
532 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
533 [ETIMEDOUT] = TARGET_ETIMEDOUT,
534 [ECONNREFUSED] = TARGET_ECONNREFUSED,
535 [EHOSTDOWN] = TARGET_EHOSTDOWN,
536 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
537 [EALREADY] = TARGET_EALREADY,
538 [EINPROGRESS] = TARGET_EINPROGRESS,
539 [ESTALE] = TARGET_ESTALE,
540 [ECANCELED] = TARGET_ECANCELED,
541 [ENOMEDIUM] = TARGET_ENOMEDIUM,
542 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
543 #ifdef ENOKEY
544 [ENOKEY] = TARGET_ENOKEY,
545 #endif
546 #ifdef EKEYEXPIRED
547 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
548 #endif
549 #ifdef EKEYREVOKED
550 [EKEYREVOKED] = TARGET_EKEYREVOKED,
551 #endif
552 #ifdef EKEYREJECTED
553 [EKEYREJECTED] = TARGET_EKEYREJECTED,
554 #endif
555 #ifdef EOWNERDEAD
556 [EOWNERDEAD] = TARGET_EOWNERDEAD,
557 #endif
558 #ifdef ENOTRECOVERABLE
559 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
560 #endif
561 };
562
563 static inline int host_to_target_errno(int err)
564 {
565 if(host_to_target_errno_table[err])
566 return host_to_target_errno_table[err];
567 return err;
568 }
569
570 static inline int target_to_host_errno(int err)
571 {
572 if (target_to_host_errno_table[err])
573 return target_to_host_errno_table[err];
574 return err;
575 }
576
577 static inline abi_long get_errno(abi_long ret)
578 {
579 if (ret == -1)
580 return -host_to_target_errno(errno);
581 else
582 return ret;
583 }
584
585 static inline int is_error(abi_long ret)
586 {
587 return (abi_ulong)ret >= (abi_ulong)(-4096);
588 }
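/*
 * Typical calling convention built on these helpers (a minimal sketch):
 *
 *     abi_long ret = get_errno(open(path, flags, mode));
 *     if (is_error(ret)) {
 *         return ret;   /* already a negative TARGET_Exxx value */
 *     }
 *
 * i.e. host results are folded into a single abi_long in which small
 * negative values (as tested by is_error()) carry target errnos, similar
 * to the kernel's own in-band error convention.
 */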
589
590 char *target_strerror(int err)
591 {
592 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
593 return NULL;
594 }
595 return strerror(target_to_host_errno(err));
596 }
597
598 static abi_ulong target_brk;
599 static abi_ulong target_original_brk;
600 static abi_ulong brk_page;
601
602 void target_set_brk(abi_ulong new_brk)
603 {
604 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
605 brk_page = HOST_PAGE_ALIGN(target_brk);
606 }
607
608 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
609 #define DEBUGF_BRK(message, args...)
610
611 /* do_brk() must return target values and target errnos. */
612 abi_long do_brk(abi_ulong new_brk)
613 {
614 abi_long mapped_addr;
615 int new_alloc_size;
616
617 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
618
619 if (!new_brk) {
620 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
621 return target_brk;
622 }
623 if (new_brk < target_original_brk) {
624 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
625 target_brk);
626 return target_brk;
627 }
628
629 /* If the new brk is less than the highest page reserved to the
630 * target heap allocation, set it and we're almost done... */
631 if (new_brk <= brk_page) {
632 /* Heap contents are initialized to zero, as for anonymous
633 * mapped pages. */
634 if (new_brk > target_brk) {
635 memset(g2h(target_brk), 0, new_brk - target_brk);
636 }
637 target_brk = new_brk;
638 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
639 return target_brk;
640 }
641
642 /* We need to allocate more memory after the brk... Note that
643 * we don't use MAP_FIXED because that will map over the top of
644 * any existing mapping (like the one with the host libc or qemu
645 * itself); instead we treat "mapped but at wrong address" as
646 * a failure and unmap again.
647 */
648 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
649 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
650 PROT_READ|PROT_WRITE,
651 MAP_ANON|MAP_PRIVATE, 0, 0));
652
653 if (mapped_addr == brk_page) {
654 /* Heap contents are initialized to zero, as for anonymous
655 * mapped pages. Technically the new pages are already
656 * initialized to zero since they *are* anonymous mapped
657 * pages, however we have to take care with the contents that
658 * come from the remaining part of the previous page: it may
659      * contain garbage data from previous heap usage (the heap may have
660      * grown and then shrunk). */
661 memset(g2h(target_brk), 0, brk_page - target_brk);
662
663 target_brk = new_brk;
664 brk_page = HOST_PAGE_ALIGN(target_brk);
665 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
666 target_brk);
667 return target_brk;
668 } else if (mapped_addr != -1) {
669 /* Mapped but at wrong address, meaning there wasn't actually
670 * enough space for this brk.
671 */
672 target_munmap(mapped_addr, new_alloc_size);
673 mapped_addr = -1;
674 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
675 }
676 else {
677 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
678 }
679
680 #if defined(TARGET_ALPHA)
681 /* We (partially) emulate OSF/1 on Alpha, which requires we
682 return a proper errno, not an unchanged brk value. */
683 return -TARGET_ENOMEM;
684 #endif
685 /* For everything else, return the previous break. */
686 return target_brk;
687 }
688
689 static inline abi_long copy_from_user_fdset(fd_set *fds,
690 abi_ulong target_fds_addr,
691 int n)
692 {
693 int i, nw, j, k;
694 abi_ulong b, *target_fds;
695
696 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
697 if (!(target_fds = lock_user(VERIFY_READ,
698 target_fds_addr,
699 sizeof(abi_ulong) * nw,
700 1)))
701 return -TARGET_EFAULT;
702
703 FD_ZERO(fds);
704 k = 0;
705 for (i = 0; i < nw; i++) {
706 /* grab the abi_ulong */
707 __get_user(b, &target_fds[i]);
708 for (j = 0; j < TARGET_ABI_BITS; j++) {
709 /* check the bit inside the abi_ulong */
710 if ((b >> j) & 1)
711 FD_SET(k, fds);
712 k++;
713 }
714 }
715
716 unlock_user(target_fds, target_fds_addr, 0);
717
718 return 0;
719 }
720
721 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
722 abi_ulong target_fds_addr,
723 int n)
724 {
725 if (target_fds_addr) {
726 if (copy_from_user_fdset(fds, target_fds_addr, n))
727 return -TARGET_EFAULT;
728 *fds_ptr = fds;
729 } else {
730 *fds_ptr = NULL;
731 }
732 return 0;
733 }
734
735 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
736 const fd_set *fds,
737 int n)
738 {
739 int i, nw, j, k;
740 abi_long v;
741 abi_ulong *target_fds;
742
743 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
744 if (!(target_fds = lock_user(VERIFY_WRITE,
745 target_fds_addr,
746 sizeof(abi_ulong) * nw,
747 0)))
748 return -TARGET_EFAULT;
749
750 k = 0;
751 for (i = 0; i < nw; i++) {
752 v = 0;
753 for (j = 0; j < TARGET_ABI_BITS; j++) {
754 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
755 k++;
756 }
757 __put_user(v, &target_fds[i]);
758 }
759
760 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
761
762 return 0;
763 }
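/*
 * Note on the bit-by-bit loops above: the guest fd_set is laid out as an
 * array of abi_ulong words in guest byte order, so a plain memcpy would be
 * wrong whenever the guest and host differ in word size or endianness;
 * copying one descriptor bit at a time via FD_SET()/FD_ISSET() keeps the
 * conversion correct for any host/guest combination.
 */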
764
765 #if defined(__alpha__)
766 #define HOST_HZ 1024
767 #else
768 #define HOST_HZ 100
769 #endif
770
771 static inline abi_long host_to_target_clock_t(long ticks)
772 {
773 #if HOST_HZ == TARGET_HZ
774 return ticks;
775 #else
776 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
777 #endif
778 }
779
780 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
781 const struct rusage *rusage)
782 {
783 struct target_rusage *target_rusage;
784
785 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
786 return -TARGET_EFAULT;
787 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
788 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
789 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
790 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
791 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
792 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
793 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
794 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
795 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
796 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
797 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
798 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
799 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
800 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
801 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
802 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
803 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
804 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
805 unlock_user_struct(target_rusage, target_addr, 1);
806
807 return 0;
808 }
809
810 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
811 {
812 abi_ulong target_rlim_swap;
813 rlim_t result;
814
815 target_rlim_swap = tswapal(target_rlim);
816 if (target_rlim_swap == TARGET_RLIM_INFINITY)
817 return RLIM_INFINITY;
818
819 result = target_rlim_swap;
820 if (target_rlim_swap != (rlim_t)result)
821 return RLIM_INFINITY;
822
823 return result;
824 }
825
826 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
827 {
828 abi_ulong target_rlim_swap;
829 abi_ulong result;
830
831 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
832 target_rlim_swap = TARGET_RLIM_INFINITY;
833 else
834 target_rlim_swap = rlim;
835 result = tswapal(target_rlim_swap);
836
837 return result;
838 }
839
840 static inline int target_to_host_resource(int code)
841 {
842 switch (code) {
843 case TARGET_RLIMIT_AS:
844 return RLIMIT_AS;
845 case TARGET_RLIMIT_CORE:
846 return RLIMIT_CORE;
847 case TARGET_RLIMIT_CPU:
848 return RLIMIT_CPU;
849 case TARGET_RLIMIT_DATA:
850 return RLIMIT_DATA;
851 case TARGET_RLIMIT_FSIZE:
852 return RLIMIT_FSIZE;
853 case TARGET_RLIMIT_LOCKS:
854 return RLIMIT_LOCKS;
855 case TARGET_RLIMIT_MEMLOCK:
856 return RLIMIT_MEMLOCK;
857 case TARGET_RLIMIT_MSGQUEUE:
858 return RLIMIT_MSGQUEUE;
859 case TARGET_RLIMIT_NICE:
860 return RLIMIT_NICE;
861 case TARGET_RLIMIT_NOFILE:
862 return RLIMIT_NOFILE;
863 case TARGET_RLIMIT_NPROC:
864 return RLIMIT_NPROC;
865 case TARGET_RLIMIT_RSS:
866 return RLIMIT_RSS;
867 case TARGET_RLIMIT_RTPRIO:
868 return RLIMIT_RTPRIO;
869 case TARGET_RLIMIT_SIGPENDING:
870 return RLIMIT_SIGPENDING;
871 case TARGET_RLIMIT_STACK:
872 return RLIMIT_STACK;
873 default:
874 return code;
875 }
876 }
877
878 static inline abi_long copy_from_user_timeval(struct timeval *tv,
879 abi_ulong target_tv_addr)
880 {
881 struct target_timeval *target_tv;
882
883 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
884 return -TARGET_EFAULT;
885
886 __get_user(tv->tv_sec, &target_tv->tv_sec);
887 __get_user(tv->tv_usec, &target_tv->tv_usec);
888
889 unlock_user_struct(target_tv, target_tv_addr, 0);
890
891 return 0;
892 }
893
894 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
895 const struct timeval *tv)
896 {
897 struct target_timeval *target_tv;
898
899 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
900 return -TARGET_EFAULT;
901
902 __put_user(tv->tv_sec, &target_tv->tv_sec);
903 __put_user(tv->tv_usec, &target_tv->tv_usec);
904
905 unlock_user_struct(target_tv, target_tv_addr, 1);
906
907 return 0;
908 }
909
910 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
911 #include <mqueue.h>
912
913 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
914 abi_ulong target_mq_attr_addr)
915 {
916 struct target_mq_attr *target_mq_attr;
917
918 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
919 target_mq_attr_addr, 1))
920 return -TARGET_EFAULT;
921
922 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
923 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
924 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
925 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
926
927 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
928
929 return 0;
930 }
931
932 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
933 const struct mq_attr *attr)
934 {
935 struct target_mq_attr *target_mq_attr;
936
937 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
938 target_mq_attr_addr, 0))
939 return -TARGET_EFAULT;
940
941 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
942 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
943 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
944 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
945
946 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
947
948 return 0;
949 }
950 #endif
951
952 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
953 /* do_select() must return target values and target errnos. */
954 static abi_long do_select(int n,
955 abi_ulong rfd_addr, abi_ulong wfd_addr,
956 abi_ulong efd_addr, abi_ulong target_tv_addr)
957 {
958 fd_set rfds, wfds, efds;
959 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
960 struct timeval tv, *tv_ptr;
961 abi_long ret;
962
963 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
964 if (ret) {
965 return ret;
966 }
967 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
968 if (ret) {
969 return ret;
970 }
971 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
972 if (ret) {
973 return ret;
974 }
975
976 if (target_tv_addr) {
977 if (copy_from_user_timeval(&tv, target_tv_addr))
978 return -TARGET_EFAULT;
979 tv_ptr = &tv;
980 } else {
981 tv_ptr = NULL;
982 }
983
984 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
985
986 if (!is_error(ret)) {
987 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
988 return -TARGET_EFAULT;
989 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
990 return -TARGET_EFAULT;
991 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
992 return -TARGET_EFAULT;
993
994 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
995 return -TARGET_EFAULT;
996 }
997
998 return ret;
999 }
1000 #endif
1001
1002 static abi_long do_pipe2(int host_pipe[], int flags)
1003 {
1004 #ifdef CONFIG_PIPE2
1005 return pipe2(host_pipe, flags);
1006 #else
1007 return -ENOSYS;
1008 #endif
1009 }
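/*
 * CONFIG_PIPE2 is expected to be set by the configure script when the
 * host provides pipe2() (added in Linux 2.6.27 / glibc 2.9); without it a
 * guest pipe2() request with non-zero flags simply fails with ENOSYS
 * rather than silently dropping the flags.
 */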
1010
1011 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1012 int flags, int is_pipe2)
1013 {
1014 int host_pipe[2];
1015 abi_long ret;
1016 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1017
1018 if (is_error(ret))
1019 return get_errno(ret);
1020
1021 /* Several targets have special calling conventions for the original
1022 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1023 if (!is_pipe2) {
1024 #if defined(TARGET_ALPHA)
1025 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1026 return host_pipe[0];
1027 #elif defined(TARGET_MIPS)
1028 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1029 return host_pipe[0];
1030 #elif defined(TARGET_SH4)
1031 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1032 return host_pipe[0];
1033 #endif
1034 }
1035
1036 if (put_user_s32(host_pipe[0], pipedes)
1037 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1038 return -TARGET_EFAULT;
1039 return get_errno(ret);
1040 }
1041
1042 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1043 abi_ulong target_addr,
1044 socklen_t len)
1045 {
1046 struct target_ip_mreqn *target_smreqn;
1047
1048 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1049 if (!target_smreqn)
1050 return -TARGET_EFAULT;
1051 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1052 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1053 if (len == sizeof(struct target_ip_mreqn))
1054 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1055 unlock_user(target_smreqn, target_addr, 0);
1056
1057 return 0;
1058 }
1059
1060 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1061 abi_ulong target_addr,
1062 socklen_t len)
1063 {
1064 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1065 sa_family_t sa_family;
1066 struct target_sockaddr *target_saddr;
1067
1068 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1069 if (!target_saddr)
1070 return -TARGET_EFAULT;
1071
1072 sa_family = tswap16(target_saddr->sa_family);
1073
1074     /* Oops. The caller might send an incomplete sun_path; sun_path
1075 * must be terminated by \0 (see the manual page), but
1076 * unfortunately it is quite common to specify sockaddr_un
1077 * length as "strlen(x->sun_path)" while it should be
1078 * "strlen(...) + 1". We'll fix that here if needed.
1079      * The Linux kernel applies a similar fixup.
1080 */
1081
1082 if (sa_family == AF_UNIX) {
1083 if (len < unix_maxlen && len > 0) {
1084 char *cp = (char*)target_saddr;
1085
1086 if ( cp[len-1] && !cp[len] )
1087 len++;
1088 }
1089 if (len > unix_maxlen)
1090 len = unix_maxlen;
1091 }
1092
1093 memcpy(addr, target_saddr, len);
1094 addr->sa_family = sa_family;
1095 unlock_user(target_saddr, target_addr, 0);
1096
1097 return 0;
1098 }
1099
1100 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1101 struct sockaddr *addr,
1102 socklen_t len)
1103 {
1104 struct target_sockaddr *target_saddr;
1105
1106 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1107 if (!target_saddr)
1108 return -TARGET_EFAULT;
1109 memcpy(target_saddr, addr, len);
1110 target_saddr->sa_family = tswap16(addr->sa_family);
1111 unlock_user(target_saddr, target_addr, len);
1112
1113 return 0;
1114 }
1115
1116 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1117 struct target_msghdr *target_msgh)
1118 {
1119 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1120 abi_long msg_controllen;
1121 abi_ulong target_cmsg_addr;
1122 struct target_cmsghdr *target_cmsg;
1123 socklen_t space = 0;
1124
1125 msg_controllen = tswapal(target_msgh->msg_controllen);
1126 if (msg_controllen < sizeof (struct target_cmsghdr))
1127 goto the_end;
1128 target_cmsg_addr = tswapal(target_msgh->msg_control);
1129 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1130 if (!target_cmsg)
1131 return -TARGET_EFAULT;
1132
1133 while (cmsg && target_cmsg) {
1134 void *data = CMSG_DATA(cmsg);
1135 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1136
1137 int len = tswapal(target_cmsg->cmsg_len)
1138 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1139
1140 space += CMSG_SPACE(len);
1141 if (space > msgh->msg_controllen) {
1142 space -= CMSG_SPACE(len);
1143 gemu_log("Host cmsg overflow\n");
1144 break;
1145 }
1146
1147 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1148 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1149 cmsg->cmsg_len = CMSG_LEN(len);
1150
1151 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1152 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1153 memcpy(data, target_data, len);
1154 } else {
1155 int *fd = (int *)data;
1156 int *target_fd = (int *)target_data;
1157 int i, numfds = len / sizeof(int);
1158
1159 for (i = 0; i < numfds; i++)
1160 fd[i] = tswap32(target_fd[i]);
1161 }
1162
1163 cmsg = CMSG_NXTHDR(msgh, cmsg);
1164 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1165 }
1166 unlock_user(target_cmsg, target_cmsg_addr, 0);
1167 the_end:
1168 msgh->msg_controllen = space;
1169 return 0;
1170 }
1171
1172 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1173 struct msghdr *msgh)
1174 {
1175 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1176 abi_long msg_controllen;
1177 abi_ulong target_cmsg_addr;
1178 struct target_cmsghdr *target_cmsg;
1179 socklen_t space = 0;
1180
1181 msg_controllen = tswapal(target_msgh->msg_controllen);
1182 if (msg_controllen < sizeof (struct target_cmsghdr))
1183 goto the_end;
1184 target_cmsg_addr = tswapal(target_msgh->msg_control);
1185 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1186 if (!target_cmsg)
1187 return -TARGET_EFAULT;
1188
1189 while (cmsg && target_cmsg) {
1190 void *data = CMSG_DATA(cmsg);
1191 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1192
1193 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1194
1195 space += TARGET_CMSG_SPACE(len);
1196 if (space > msg_controllen) {
1197 space -= TARGET_CMSG_SPACE(len);
1198 gemu_log("Target cmsg overflow\n");
1199 break;
1200 }
1201
1202 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1203 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1204 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1205
1206 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1207 (cmsg->cmsg_type == SCM_RIGHTS)) {
1208 int *fd = (int *)data;
1209 int *target_fd = (int *)target_data;
1210 int i, numfds = len / sizeof(int);
1211
1212 for (i = 0; i < numfds; i++)
1213 target_fd[i] = tswap32(fd[i]);
1214 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1215 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1216 (len == sizeof(struct timeval))) {
1217 /* copy struct timeval to target */
1218 struct timeval *tv = (struct timeval *)data;
1219 struct target_timeval *target_tv =
1220 (struct target_timeval *)target_data;
1221
1222 target_tv->tv_sec = tswapal(tv->tv_sec);
1223 target_tv->tv_usec = tswapal(tv->tv_usec);
1224 } else {
1225 gemu_log("Unsupported ancillary data: %d/%d\n",
1226 cmsg->cmsg_level, cmsg->cmsg_type);
1227 memcpy(target_data, data, len);
1228 }
1229
1230 cmsg = CMSG_NXTHDR(msgh, cmsg);
1231 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1232 }
1233 unlock_user(target_cmsg, target_cmsg_addr, space);
1234 the_end:
1235 target_msgh->msg_controllen = tswapal(space);
1236 return 0;
1237 }
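/*
 * Summary of the two cmsg converters above: lengths are recomputed with
 * the respective CMSG_xxx/TARGET_CMSG_xxx macros because header size and
 * alignment can differ between host and guest; only SCM_RIGHTS (an array
 * of 32-bit fds) and, on the receive path, SO_TIMESTAMP (a struct timeval)
 * get field-wise translation, while any other ancillary payload is copied
 * verbatim and a warning is logged via gemu_log().
 */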
1238
1239 /* do_setsockopt() Must return target values and target errnos. */
1240 static abi_long do_setsockopt(int sockfd, int level, int optname,
1241 abi_ulong optval_addr, socklen_t optlen)
1242 {
1243 abi_long ret;
1244 int val;
1245 struct ip_mreqn *ip_mreq;
1246 struct ip_mreq_source *ip_mreq_source;
1247
1248 switch(level) {
1249 case SOL_TCP:
1250 /* TCP options all take an 'int' value. */
1251 if (optlen < sizeof(uint32_t))
1252 return -TARGET_EINVAL;
1253
1254 if (get_user_u32(val, optval_addr))
1255 return -TARGET_EFAULT;
1256 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1257 break;
1258 case SOL_IP:
1259 switch(optname) {
1260 case IP_TOS:
1261 case IP_TTL:
1262 case IP_HDRINCL:
1263 case IP_ROUTER_ALERT:
1264 case IP_RECVOPTS:
1265 case IP_RETOPTS:
1266 case IP_PKTINFO:
1267 case IP_MTU_DISCOVER:
1268 case IP_RECVERR:
1269 case IP_RECVTOS:
1270 #ifdef IP_FREEBIND
1271 case IP_FREEBIND:
1272 #endif
1273 case IP_MULTICAST_TTL:
1274 case IP_MULTICAST_LOOP:
1275 val = 0;
1276 if (optlen >= sizeof(uint32_t)) {
1277 if (get_user_u32(val, optval_addr))
1278 return -TARGET_EFAULT;
1279 } else if (optlen >= 1) {
1280 if (get_user_u8(val, optval_addr))
1281 return -TARGET_EFAULT;
1282 }
1283 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1284 break;
1285 case IP_ADD_MEMBERSHIP:
1286 case IP_DROP_MEMBERSHIP:
1287 if (optlen < sizeof (struct target_ip_mreq) ||
1288 optlen > sizeof (struct target_ip_mreqn))
1289 return -TARGET_EINVAL;
1290
1291 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1292 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1293 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1294 break;
1295
1296 case IP_BLOCK_SOURCE:
1297 case IP_UNBLOCK_SOURCE:
1298 case IP_ADD_SOURCE_MEMBERSHIP:
1299 case IP_DROP_SOURCE_MEMBERSHIP:
1300 if (optlen != sizeof (struct target_ip_mreq_source))
1301 return -TARGET_EINVAL;
1302
1303 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1304 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1305 unlock_user (ip_mreq_source, optval_addr, 0);
1306 break;
1307
1308 default:
1309 goto unimplemented;
1310 }
1311 break;
1312 case SOL_RAW:
1313 switch (optname) {
1314 case ICMP_FILTER:
1315         /* struct icmp_filter takes a u32 value */
1316 if (optlen < sizeof(uint32_t)) {
1317 return -TARGET_EINVAL;
1318 }
1319
1320 if (get_user_u32(val, optval_addr)) {
1321 return -TARGET_EFAULT;
1322 }
1323 ret = get_errno(setsockopt(sockfd, level, optname,
1324 &val, sizeof(val)));
1325 break;
1326
1327 default:
1328 goto unimplemented;
1329 }
1330 break;
1331 case TARGET_SOL_SOCKET:
1332 switch (optname) {
1333 case TARGET_SO_RCVTIMEO:
1334 {
1335 struct timeval tv;
1336
1337 optname = SO_RCVTIMEO;
1338
1339 set_timeout:
1340 if (optlen != sizeof(struct target_timeval)) {
1341 return -TARGET_EINVAL;
1342 }
1343
1344 if (copy_from_user_timeval(&tv, optval_addr)) {
1345 return -TARGET_EFAULT;
1346 }
1347
1348 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1349 &tv, sizeof(tv)));
1350 return ret;
1351 }
1352 case TARGET_SO_SNDTIMEO:
1353 optname = SO_SNDTIMEO;
1354 goto set_timeout;
1355 /* Options with 'int' argument. */
1356 case TARGET_SO_DEBUG:
1357 optname = SO_DEBUG;
1358 break;
1359 case TARGET_SO_REUSEADDR:
1360 optname = SO_REUSEADDR;
1361 break;
1362 case TARGET_SO_TYPE:
1363 optname = SO_TYPE;
1364 break;
1365 case TARGET_SO_ERROR:
1366 optname = SO_ERROR;
1367 break;
1368 case TARGET_SO_DONTROUTE:
1369 optname = SO_DONTROUTE;
1370 break;
1371 case TARGET_SO_BROADCAST:
1372 optname = SO_BROADCAST;
1373 break;
1374 case TARGET_SO_SNDBUF:
1375 optname = SO_SNDBUF;
1376 break;
1377 case TARGET_SO_RCVBUF:
1378 optname = SO_RCVBUF;
1379 break;
1380 case TARGET_SO_KEEPALIVE:
1381 optname = SO_KEEPALIVE;
1382 break;
1383 case TARGET_SO_OOBINLINE:
1384 optname = SO_OOBINLINE;
1385 break;
1386 case TARGET_SO_NO_CHECK:
1387 optname = SO_NO_CHECK;
1388 break;
1389 case TARGET_SO_PRIORITY:
1390 optname = SO_PRIORITY;
1391 break;
1392 #ifdef SO_BSDCOMPAT
1393 case TARGET_SO_BSDCOMPAT:
1394 optname = SO_BSDCOMPAT;
1395 break;
1396 #endif
1397 case TARGET_SO_PASSCRED:
1398 optname = SO_PASSCRED;
1399 break;
1400 case TARGET_SO_TIMESTAMP:
1401 optname = SO_TIMESTAMP;
1402 break;
1403 case TARGET_SO_RCVLOWAT:
1404 optname = SO_RCVLOWAT;
1405 break;
1407 default:
1408 goto unimplemented;
1409 }
1410 if (optlen < sizeof(uint32_t))
1411 return -TARGET_EINVAL;
1412
1413 if (get_user_u32(val, optval_addr))
1414 return -TARGET_EFAULT;
1415 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1416 break;
1417 default:
1418 unimplemented:
1419 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1420 ret = -TARGET_ENOPROTOOPT;
1421 }
1422 return ret;
1423 }
1424
1425 /* do_getsockopt() Must return target values and target errnos. */
1426 static abi_long do_getsockopt(int sockfd, int level, int optname,
1427 abi_ulong optval_addr, abi_ulong optlen)
1428 {
1429 abi_long ret;
1430 int len, val;
1431 socklen_t lv;
1432
1433 switch(level) {
1434 case TARGET_SOL_SOCKET:
1435 level = SOL_SOCKET;
1436 switch (optname) {
1437 /* These don't just return a single integer */
1438 case TARGET_SO_LINGER:
1439 case TARGET_SO_RCVTIMEO:
1440 case TARGET_SO_SNDTIMEO:
1441 case TARGET_SO_PEERNAME:
1442 goto unimplemented;
1443 case TARGET_SO_PEERCRED: {
1444 struct ucred cr;
1445 socklen_t crlen;
1446 struct target_ucred *tcr;
1447
1448 if (get_user_u32(len, optlen)) {
1449 return -TARGET_EFAULT;
1450 }
1451 if (len < 0) {
1452 return -TARGET_EINVAL;
1453 }
1454
1455 crlen = sizeof(cr);
1456 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1457 &cr, &crlen));
1458 if (ret < 0) {
1459 return ret;
1460 }
1461 if (len > crlen) {
1462 len = crlen;
1463 }
1464 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1465 return -TARGET_EFAULT;
1466 }
1467 __put_user(cr.pid, &tcr->pid);
1468 __put_user(cr.uid, &tcr->uid);
1469 __put_user(cr.gid, &tcr->gid);
1470 unlock_user_struct(tcr, optval_addr, 1);
1471 if (put_user_u32(len, optlen)) {
1472 return -TARGET_EFAULT;
1473 }
1474 break;
1475 }
1476 /* Options with 'int' argument. */
1477 case TARGET_SO_DEBUG:
1478 optname = SO_DEBUG;
1479 goto int_case;
1480 case TARGET_SO_REUSEADDR:
1481 optname = SO_REUSEADDR;
1482 goto int_case;
1483 case TARGET_SO_TYPE:
1484 optname = SO_TYPE;
1485 goto int_case;
1486 case TARGET_SO_ERROR:
1487 optname = SO_ERROR;
1488 goto int_case;
1489 case TARGET_SO_DONTROUTE:
1490 optname = SO_DONTROUTE;
1491 goto int_case;
1492 case TARGET_SO_BROADCAST:
1493 optname = SO_BROADCAST;
1494 goto int_case;
1495 case TARGET_SO_SNDBUF:
1496 optname = SO_SNDBUF;
1497 goto int_case;
1498 case TARGET_SO_RCVBUF:
1499 optname = SO_RCVBUF;
1500 goto int_case;
1501 case TARGET_SO_KEEPALIVE:
1502 optname = SO_KEEPALIVE;
1503 goto int_case;
1504 case TARGET_SO_OOBINLINE:
1505 optname = SO_OOBINLINE;
1506 goto int_case;
1507 case TARGET_SO_NO_CHECK:
1508 optname = SO_NO_CHECK;
1509 goto int_case;
1510 case TARGET_SO_PRIORITY:
1511 optname = SO_PRIORITY;
1512 goto int_case;
1513 #ifdef SO_BSDCOMPAT
1514 case TARGET_SO_BSDCOMPAT:
1515 optname = SO_BSDCOMPAT;
1516 goto int_case;
1517 #endif
1518 case TARGET_SO_PASSCRED:
1519 optname = SO_PASSCRED;
1520 goto int_case;
1521 case TARGET_SO_TIMESTAMP:
1522 optname = SO_TIMESTAMP;
1523 goto int_case;
1524 case TARGET_SO_RCVLOWAT:
1525 optname = SO_RCVLOWAT;
1526 goto int_case;
1527 default:
1528 goto int_case;
1529 }
1530 break;
1531 case SOL_TCP:
1532 /* TCP options all take an 'int' value. */
1533 int_case:
1534 if (get_user_u32(len, optlen))
1535 return -TARGET_EFAULT;
1536 if (len < 0)
1537 return -TARGET_EINVAL;
1538 lv = sizeof(lv);
1539 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1540 if (ret < 0)
1541 return ret;
1542 if (len > lv)
1543 len = lv;
1544 if (len == 4) {
1545 if (put_user_u32(val, optval_addr))
1546 return -TARGET_EFAULT;
1547 } else {
1548 if (put_user_u8(val, optval_addr))
1549 return -TARGET_EFAULT;
1550 }
1551 if (put_user_u32(len, optlen))
1552 return -TARGET_EFAULT;
1553 break;
1554 case SOL_IP:
1555 switch(optname) {
1556 case IP_TOS:
1557 case IP_TTL:
1558 case IP_HDRINCL:
1559 case IP_ROUTER_ALERT:
1560 case IP_RECVOPTS:
1561 case IP_RETOPTS:
1562 case IP_PKTINFO:
1563 case IP_MTU_DISCOVER:
1564 case IP_RECVERR:
1565 case IP_RECVTOS:
1566 #ifdef IP_FREEBIND
1567 case IP_FREEBIND:
1568 #endif
1569 case IP_MULTICAST_TTL:
1570 case IP_MULTICAST_LOOP:
1571 if (get_user_u32(len, optlen))
1572 return -TARGET_EFAULT;
1573 if (len < 0)
1574 return -TARGET_EINVAL;
1575 lv = sizeof(lv);
1576 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1577 if (ret < 0)
1578 return ret;
1579 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1580 len = 1;
1581 if (put_user_u32(len, optlen)
1582 || put_user_u8(val, optval_addr))
1583 return -TARGET_EFAULT;
1584 } else {
1585 if (len > sizeof(int))
1586 len = sizeof(int);
1587 if (put_user_u32(len, optlen)
1588 || put_user_u32(val, optval_addr))
1589 return -TARGET_EFAULT;
1590 }
1591 break;
1592 default:
1593 ret = -TARGET_ENOPROTOOPT;
1594 break;
1595 }
1596 break;
1597 default:
1598 unimplemented:
1599 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1600 level, optname);
1601 ret = -TARGET_EOPNOTSUPP;
1602 break;
1603 }
1604 return ret;
1605 }
1606
1607 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1608 int count, int copy)
1609 {
1610 struct target_iovec *target_vec;
1611 struct iovec *vec;
1612 abi_ulong total_len, max_len;
1613 int i;
1614
1615 if (count == 0) {
1616 errno = 0;
1617 return NULL;
1618 }
1619 if (count < 0 || count > IOV_MAX) {
1620 errno = EINVAL;
1621 return NULL;
1622 }
1623
1624 vec = calloc(count, sizeof(struct iovec));
1625 if (vec == NULL) {
1626 errno = ENOMEM;
1627 return NULL;
1628 }
1629
1630 target_vec = lock_user(VERIFY_READ, target_addr,
1631 count * sizeof(struct target_iovec), 1);
1632 if (target_vec == NULL) {
1633 errno = EFAULT;
1634 goto fail2;
1635 }
1636
1637 /* ??? If host page size > target page size, this will result in a
1638 value larger than what we can actually support. */
1639 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1640 total_len = 0;
1641
1642 for (i = 0; i < count; i++) {
1643 abi_ulong base = tswapal(target_vec[i].iov_base);
1644 abi_long len = tswapal(target_vec[i].iov_len);
1645
1646 if (len < 0) {
1647 errno = EINVAL;
1648 goto fail;
1649 } else if (len == 0) {
1650 /* Zero length pointer is ignored. */
1651 vec[i].iov_base = 0;
1652 } else {
1653 vec[i].iov_base = lock_user(type, base, len, copy);
1654 if (!vec[i].iov_base) {
1655 errno = EFAULT;
1656 goto fail;
1657 }
1658 if (len > max_len - total_len) {
1659 len = max_len - total_len;
1660 }
1661 }
1662 vec[i].iov_len = len;
1663 total_len += len;
1664 }
1665
1666 unlock_user(target_vec, target_addr, 0);
1667 return vec;
1668
1669 fail:
1670 free(vec);
1671 fail2:
1672 unlock_user(target_vec, target_addr, 0);
1673 return NULL;
1674 }
1675
1676 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1677 int count, int copy)
1678 {
1679 struct target_iovec *target_vec;
1680 int i;
1681
1682 target_vec = lock_user(VERIFY_READ, target_addr,
1683 count * sizeof(struct target_iovec), 1);
1684 if (target_vec) {
1685 for (i = 0; i < count; i++) {
1686 abi_ulong base = tswapal(target_vec[i].iov_base);
1687             abi_long len = tswapal(target_vec[i].iov_len);
1688 if (len < 0) {
1689 break;
1690 }
1691 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1692 }
1693 unlock_user(target_vec, target_addr, 0);
1694 }
1695
1696 free(vec);
1697 }
1698
1699 /* do_socket() Must return target values and target errnos. */
1700 static abi_long do_socket(int domain, int type, int protocol)
1701 {
1702 #if defined(TARGET_MIPS)
1703 switch(type) {
1704 case TARGET_SOCK_DGRAM:
1705 type = SOCK_DGRAM;
1706 break;
1707 case TARGET_SOCK_STREAM:
1708 type = SOCK_STREAM;
1709 break;
1710 case TARGET_SOCK_RAW:
1711 type = SOCK_RAW;
1712 break;
1713 case TARGET_SOCK_RDM:
1714 type = SOCK_RDM;
1715 break;
1716 case TARGET_SOCK_SEQPACKET:
1717 type = SOCK_SEQPACKET;
1718 break;
1719 case TARGET_SOCK_PACKET:
1720 type = SOCK_PACKET;
1721 break;
1722 }
1723 #endif
1724 if (domain == PF_NETLINK)
1725         return -TARGET_EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1726 return get_errno(socket(domain, type, protocol));
1727 }
1728
1729 /* do_bind() Must return target values and target errnos. */
1730 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1731 socklen_t addrlen)
1732 {
1733 void *addr;
1734 abi_long ret;
1735
1736 if ((int)addrlen < 0) {
1737 return -TARGET_EINVAL;
1738 }
1739
1740 addr = alloca(addrlen+1);
1741
1742 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1743 if (ret)
1744 return ret;
1745
1746 return get_errno(bind(sockfd, addr, addrlen));
1747 }
1748
1749 /* do_connect() Must return target values and target errnos. */
1750 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1751 socklen_t addrlen)
1752 {
1753 void *addr;
1754 abi_long ret;
1755
1756 if ((int)addrlen < 0) {
1757 return -TARGET_EINVAL;
1758 }
1759
1760 addr = alloca(addrlen);
1761
1762 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1763 if (ret)
1764 return ret;
1765
1766 return get_errno(connect(sockfd, addr, addrlen));
1767 }
1768
1769 /* do_sendrecvmsg() Must return target values and target errnos. */
1770 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1771 int flags, int send)
1772 {
1773 abi_long ret, len;
1774 struct target_msghdr *msgp;
1775 struct msghdr msg;
1776 int count;
1777 struct iovec *vec;
1778 abi_ulong target_vec;
1779
1780 /* FIXME */
1781 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1782 msgp,
1783 target_msg,
1784 send ? 1 : 0))
1785 return -TARGET_EFAULT;
1786 if (msgp->msg_name) {
1787 msg.msg_namelen = tswap32(msgp->msg_namelen);
1788 msg.msg_name = alloca(msg.msg_namelen);
1789 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1790 msg.msg_namelen);
1791 if (ret) {
1792 goto out2;
1793 }
1794 } else {
1795 msg.msg_name = NULL;
1796 msg.msg_namelen = 0;
1797 }
1798 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1799 msg.msg_control = alloca(msg.msg_controllen);
1800 msg.msg_flags = tswap32(msgp->msg_flags);
1801
1802 count = tswapal(msgp->msg_iovlen);
1803 target_vec = tswapal(msgp->msg_iov);
1804 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1805 target_vec, count, send);
1806 if (vec == NULL) {
1807 ret = -host_to_target_errno(errno);
1808 goto out2;
1809 }
1810 msg.msg_iovlen = count;
1811 msg.msg_iov = vec;
1812
1813 if (send) {
1814 ret = target_to_host_cmsg(&msg, msgp);
1815 if (ret == 0)
1816 ret = get_errno(sendmsg(fd, &msg, flags));
1817 } else {
1818 ret = get_errno(recvmsg(fd, &msg, flags));
1819 if (!is_error(ret)) {
1820 len = ret;
1821 ret = host_to_target_cmsg(msgp, &msg);
1822 if (!is_error(ret)) {
1823 msgp->msg_namelen = tswap32(msg.msg_namelen);
1824 if (msg.msg_name != NULL) {
1825 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1826 msg.msg_name, msg.msg_namelen);
1827 if (ret) {
1828 goto out;
1829 }
1830 }
1831
1832 ret = len;
1833 }
1834 }
1835 }
1836
1837 out:
1838 unlock_iovec(vec, target_vec, count, !send);
1839 out2:
1840 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1841 return ret;
1842 }
1843
1844 /* If we don't have a system accept4() then just call accept.
1845 * The callsites to do_accept4() will ensure that they don't
1846 * pass a non-zero flags argument in this config.
1847 */
1848 #ifndef CONFIG_ACCEPT4
1849 static inline int accept4(int sockfd, struct sockaddr *addr,
1850 socklen_t *addrlen, int flags)
1851 {
1852 assert(flags == 0);
1853 return accept(sockfd, addr, addrlen);
1854 }
1855 #endif
1856
1857 /* do_accept4() Must return target values and target errnos. */
1858 static abi_long do_accept4(int fd, abi_ulong target_addr,
1859 abi_ulong target_addrlen_addr, int flags)
1860 {
1861 socklen_t addrlen;
1862 void *addr;
1863 abi_long ret;
1864
1865 if (target_addr == 0) {
1866 return get_errno(accept4(fd, NULL, NULL, flags));
1867 }
1868
1869 /* linux returns EINVAL if addrlen pointer is invalid */
1870 if (get_user_u32(addrlen, target_addrlen_addr))
1871 return -TARGET_EINVAL;
1872
1873 if ((int)addrlen < 0) {
1874 return -TARGET_EINVAL;
1875 }
1876
1877 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1878 return -TARGET_EINVAL;
1879
1880 addr = alloca(addrlen);
1881
1882 ret = get_errno(accept4(fd, addr, &addrlen, flags));
1883 if (!is_error(ret)) {
1884 host_to_target_sockaddr(target_addr, addr, addrlen);
1885 if (put_user_u32(addrlen, target_addrlen_addr))
1886 ret = -TARGET_EFAULT;
1887 }
1888 return ret;
1889 }
1890
1891 /* do_getpeername() Must return target values and target errnos. */
1892 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1893 abi_ulong target_addrlen_addr)
1894 {
1895 socklen_t addrlen;
1896 void *addr;
1897 abi_long ret;
1898
1899 if (get_user_u32(addrlen, target_addrlen_addr))
1900 return -TARGET_EFAULT;
1901
1902 if ((int)addrlen < 0) {
1903 return -TARGET_EINVAL;
1904 }
1905
1906 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1907 return -TARGET_EFAULT;
1908
1909 addr = alloca(addrlen);
1910
1911 ret = get_errno(getpeername(fd, addr, &addrlen));
1912 if (!is_error(ret)) {
1913 host_to_target_sockaddr(target_addr, addr, addrlen);
1914 if (put_user_u32(addrlen, target_addrlen_addr))
1915 ret = -TARGET_EFAULT;
1916 }
1917 return ret;
1918 }
1919
1920 /* do_getsockname() Must return target values and target errnos. */
1921 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1922 abi_ulong target_addrlen_addr)
1923 {
1924 socklen_t addrlen;
1925 void *addr;
1926 abi_long ret;
1927
1928 if (get_user_u32(addrlen, target_addrlen_addr))
1929 return -TARGET_EFAULT;
1930
1931 if ((int)addrlen < 0) {
1932 return -TARGET_EINVAL;
1933 }
1934
1935 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1936 return -TARGET_EFAULT;
1937
1938 addr = alloca(addrlen);
1939
1940 ret = get_errno(getsockname(fd, addr, &addrlen));
1941 if (!is_error(ret)) {
1942 host_to_target_sockaddr(target_addr, addr, addrlen);
1943 if (put_user_u32(addrlen, target_addrlen_addr))
1944 ret = -TARGET_EFAULT;
1945 }
1946 return ret;
1947 }
1948
1949 /* do_socketpair() Must return target values and target errnos. */
1950 static abi_long do_socketpair(int domain, int type, int protocol,
1951 abi_ulong target_tab_addr)
1952 {
1953 int tab[2];
1954 abi_long ret;
1955
1956 ret = get_errno(socketpair(domain, type, protocol, tab));
1957 if (!is_error(ret)) {
1958 if (put_user_s32(tab[0], target_tab_addr)
1959 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1960 ret = -TARGET_EFAULT;
1961 }
1962 return ret;
1963 }
1964
1965 /* do_sendto() Must return target values and target errnos. */
1966 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1967 abi_ulong target_addr, socklen_t addrlen)
1968 {
1969 void *addr;
1970 void *host_msg;
1971 abi_long ret;
1972
1973 if ((int)addrlen < 0) {
1974 return -TARGET_EINVAL;
1975 }
1976
1977 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1978 if (!host_msg)
1979 return -TARGET_EFAULT;
1980 if (target_addr) {
1981 addr = alloca(addrlen);
1982 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1983 if (ret) {
1984 unlock_user(host_msg, msg, 0);
1985 return ret;
1986 }
1987 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1988 } else {
1989 ret = get_errno(send(fd, host_msg, len, flags));
1990 }
1991 unlock_user(host_msg, msg, 0);
1992 return ret;
1993 }
1994
1995 /* do_recvfrom() Must return target values and target errnos. */
1996 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1997 abi_ulong target_addr,
1998 abi_ulong target_addrlen)
1999 {
2000 socklen_t addrlen;
2001 void *addr;
2002 void *host_msg;
2003 abi_long ret;
2004
2005 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2006 if (!host_msg)
2007 return -TARGET_EFAULT;
2008 if (target_addr) {
2009 if (get_user_u32(addrlen, target_addrlen)) {
2010 ret = -TARGET_EFAULT;
2011 goto fail;
2012 }
2013 if ((int)addrlen < 0) {
2014 ret = -TARGET_EINVAL;
2015 goto fail;
2016 }
2017 addr = alloca(addrlen);
2018 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2019 } else {
2020 addr = NULL; /* To keep compiler quiet. */
2021 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2022 }
2023 if (!is_error(ret)) {
2024 if (target_addr) {
2025 host_to_target_sockaddr(target_addr, addr, addrlen);
2026 if (put_user_u32(addrlen, target_addrlen)) {
2027 ret = -TARGET_EFAULT;
2028 goto fail;
2029 }
2030 }
2031 unlock_user(host_msg, msg, len);
2032 } else {
2033 fail:
2034 unlock_user(host_msg, msg, 0);
2035 }
2036 return ret;
2037 }
2038
2039 #ifdef TARGET_NR_socketcall
2040 /* do_socketcall() Must return target values and target errnos. */
2041 static abi_long do_socketcall(int num, abi_ulong vptr)
2042 {
2043 abi_long ret;
2044 const int n = sizeof(abi_ulong);
2045
2046 switch(num) {
2047 case SOCKOP_socket:
2048 {
2049 abi_ulong domain, type, protocol;
2050
2051 if (get_user_ual(domain, vptr)
2052 || get_user_ual(type, vptr + n)
2053 || get_user_ual(protocol, vptr + 2 * n))
2054 return -TARGET_EFAULT;
2055
2056 ret = do_socket(domain, type, protocol);
2057 }
2058 break;
2059 case SOCKOP_bind:
2060 {
2061 abi_ulong sockfd;
2062 abi_ulong target_addr;
2063 socklen_t addrlen;
2064
2065 if (get_user_ual(sockfd, vptr)
2066 || get_user_ual(target_addr, vptr + n)
2067 || get_user_ual(addrlen, vptr + 2 * n))
2068 return -TARGET_EFAULT;
2069
2070 ret = do_bind(sockfd, target_addr, addrlen);
2071 }
2072 break;
2073 case SOCKOP_connect:
2074 {
2075 abi_ulong sockfd;
2076 abi_ulong target_addr;
2077 socklen_t addrlen;
2078
2079 if (get_user_ual(sockfd, vptr)
2080 || get_user_ual(target_addr, vptr + n)
2081 || get_user_ual(addrlen, vptr + 2 * n))
2082 return -TARGET_EFAULT;
2083
2084 ret = do_connect(sockfd, target_addr, addrlen);
2085 }
2086 break;
2087 case SOCKOP_listen:
2088 {
2089 abi_ulong sockfd, backlog;
2090
2091 if (get_user_ual(sockfd, vptr)
2092 || get_user_ual(backlog, vptr + n))
2093 return -TARGET_EFAULT;
2094
2095 ret = get_errno(listen(sockfd, backlog));
2096 }
2097 break;
2098 case SOCKOP_accept:
2099 {
2100 abi_ulong sockfd;
2101 abi_ulong target_addr, target_addrlen;
2102
2103 if (get_user_ual(sockfd, vptr)
2104 || get_user_ual(target_addr, vptr + n)
2105 || get_user_ual(target_addrlen, vptr + 2 * n))
2106 return -TARGET_EFAULT;
2107
2108 ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2109 }
2110 break;
2111 case SOCKOP_getsockname:
2112 {
2113 abi_ulong sockfd;
2114 abi_ulong target_addr, target_addrlen;
2115
2116 if (get_user_ual(sockfd, vptr)
2117 || get_user_ual(target_addr, vptr + n)
2118 || get_user_ual(target_addrlen, vptr + 2 * n))
2119 return -TARGET_EFAULT;
2120
2121 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2122 }
2123 break;
2124 case SOCKOP_getpeername:
2125 {
2126 abi_ulong sockfd;
2127 abi_ulong target_addr, target_addrlen;
2128
2129 if (get_user_ual(sockfd, vptr)
2130 || get_user_ual(target_addr, vptr + n)
2131 || get_user_ual(target_addrlen, vptr + 2 * n))
2132 return -TARGET_EFAULT;
2133
2134 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2135 }
2136 break;
2137 case SOCKOP_socketpair:
2138 {
2139 abi_ulong domain, type, protocol;
2140 abi_ulong tab;
2141
2142 if (get_user_ual(domain, vptr)
2143 || get_user_ual(type, vptr + n)
2144 || get_user_ual(protocol, vptr + 2 * n)
2145 || get_user_ual(tab, vptr + 3 * n))
2146 return -TARGET_EFAULT;
2147
2148 ret = do_socketpair(domain, type, protocol, tab);
2149 }
2150 break;
2151 case SOCKOP_send:
2152 {
2153 abi_ulong sockfd;
2154 abi_ulong msg;
2155 size_t len;
2156 abi_ulong flags;
2157
2158 if (get_user_ual(sockfd, vptr)
2159 || get_user_ual(msg, vptr + n)
2160 || get_user_ual(len, vptr + 2 * n)
2161 || get_user_ual(flags, vptr + 3 * n))
2162 return -TARGET_EFAULT;
2163
2164 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2165 }
2166 break;
2167 case SOCKOP_recv:
2168 {
2169 abi_ulong sockfd;
2170 abi_ulong msg;
2171 size_t len;
2172 abi_ulong flags;
2173
2174 if (get_user_ual(sockfd, vptr)
2175 || get_user_ual(msg, vptr + n)
2176 || get_user_ual(len, vptr + 2 * n)
2177 || get_user_ual(flags, vptr + 3 * n))
2178 return -TARGET_EFAULT;
2179
2180 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2181 }
2182 break;
2183 case SOCKOP_sendto:
2184 {
2185 abi_ulong sockfd;
2186 abi_ulong msg;
2187 size_t len;
2188 abi_ulong flags;
2189 abi_ulong addr;
2190 socklen_t addrlen;
2191
2192 if (get_user_ual(sockfd, vptr)
2193 || get_user_ual(msg, vptr + n)
2194 || get_user_ual(len, vptr + 2 * n)
2195 || get_user_ual(flags, vptr + 3 * n)
2196 || get_user_ual(addr, vptr + 4 * n)
2197 || get_user_ual(addrlen, vptr + 5 * n))
2198 return -TARGET_EFAULT;
2199
2200 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2201 }
2202 break;
2203 case SOCKOP_recvfrom:
2204 {
2205 abi_ulong sockfd;
2206 abi_ulong msg;
2207 size_t len;
2208 abi_ulong flags;
2209 abi_ulong addr;
2210 socklen_t addrlen;
2211
2212 if (get_user_ual(sockfd, vptr)
2213 || get_user_ual(msg, vptr + n)
2214 || get_user_ual(len, vptr + 2 * n)
2215 || get_user_ual(flags, vptr + 3 * n)
2216 || get_user_ual(addr, vptr + 4 * n)
2217 || get_user_ual(addrlen, vptr + 5 * n))
2218 return -TARGET_EFAULT;
2219
2220 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2221 }
2222 break;
2223 case SOCKOP_shutdown:
2224 {
2225 abi_ulong sockfd, how;
2226
2227 if (get_user_ual(sockfd, vptr)
2228 || get_user_ual(how, vptr + n))
2229 return -TARGET_EFAULT;
2230
2231 ret = get_errno(shutdown(sockfd, how));
2232 }
2233 break;
2234 case SOCKOP_sendmsg:
2235 case SOCKOP_recvmsg:
2236 {
2237 abi_ulong fd;
2238 abi_ulong target_msg;
2239 abi_ulong flags;
2240
2241 if (get_user_ual(fd, vptr)
2242 || get_user_ual(target_msg, vptr + n)
2243 || get_user_ual(flags, vptr + 2 * n))
2244 return -TARGET_EFAULT;
2245
2246 ret = do_sendrecvmsg(fd, target_msg, flags,
2247 (num == SOCKOP_sendmsg));
2248 }
2249 break;
2250 case SOCKOP_setsockopt:
2251 {
2252 abi_ulong sockfd;
2253 abi_ulong level;
2254 abi_ulong optname;
2255 abi_ulong optval;
2256 socklen_t optlen;
2257
2258 if (get_user_ual(sockfd, vptr)
2259 || get_user_ual(level, vptr + n)
2260 || get_user_ual(optname, vptr + 2 * n)
2261 || get_user_ual(optval, vptr + 3 * n)
2262 || get_user_ual(optlen, vptr + 4 * n))
2263 return -TARGET_EFAULT;
2264
2265 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2266 }
2267 break;
2268 case SOCKOP_getsockopt:
2269 {
2270 abi_ulong sockfd;
2271 abi_ulong level;
2272 abi_ulong optname;
2273 abi_ulong optval;
2274 socklen_t optlen;
2275
2276 if (get_user_ual(sockfd, vptr)
2277 || get_user_ual(level, vptr + n)
2278 || get_user_ual(optname, vptr + 2 * n)
2279 || get_user_ual(optval, vptr + 3 * n)
2280 || get_user_ual(optlen, vptr + 4 * n))
2281 return -TARGET_EFAULT;
2282
2283 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2284 }
2285 break;
2286 default:
2287 gemu_log("Unsupported socketcall: %d\n", num);
2288 ret = -TARGET_ENOSYS;
2289 break;
2290 }
2291 return ret;
2292 }
2293 #endif
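/* Illustrative sketch (variable names are made up): on targets such as i386
 * the guest C library funnels every socket operation through socketcall(2),
 * passing a call number plus a pointer to an array of word-sized arguments.
 * A guest connect() therefore reaches do_socketcall() roughly as:
 *
 *     unsigned long args[3];
 *     args[0] = sockfd;
 *     args[1] = (unsigned long)&sa;       // struct sockaddr in guest memory
 *     args[2] = sizeof(sa);
 *     syscall(__NR_socketcall, SOCKOP_connect, args);
 *
 * where SOCKOP_connect corresponds to the guest's SYS_CONNECT value.  That
 * layout is why each case above walks vptr in steps of n = sizeof(abi_ulong)
 * using get_user_ual().
 */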
2294
2295 #define N_SHM_REGIONS 32
2296
2297 static struct shm_region {
2298 abi_ulong start;
2299 abi_ulong size;
2300 } shm_regions[N_SHM_REGIONS];
2301
2302 struct target_ipc_perm
2303 {
2304 abi_long __key;
2305 abi_ulong uid;
2306 abi_ulong gid;
2307 abi_ulong cuid;
2308 abi_ulong cgid;
2309 unsigned short int mode;
2310 unsigned short int __pad1;
2311 unsigned short int __seq;
2312 unsigned short int __pad2;
2313 abi_ulong __unused1;
2314 abi_ulong __unused2;
2315 };
2316
2317 struct target_semid_ds
2318 {
2319 struct target_ipc_perm sem_perm;
2320 abi_ulong sem_otime;
2321 abi_ulong __unused1;
2322 abi_ulong sem_ctime;
2323 abi_ulong __unused2;
2324 abi_ulong sem_nsems;
2325 abi_ulong __unused3;
2326 abi_ulong __unused4;
2327 };
2328
2329 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2330 abi_ulong target_addr)
2331 {
2332 struct target_ipc_perm *target_ip;
2333 struct target_semid_ds *target_sd;
2334
2335 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2336 return -TARGET_EFAULT;
2337 target_ip = &(target_sd->sem_perm);
2338 host_ip->__key = tswapal(target_ip->__key);
2339 host_ip->uid = tswapal(target_ip->uid);
2340 host_ip->gid = tswapal(target_ip->gid);
2341 host_ip->cuid = tswapal(target_ip->cuid);
2342 host_ip->cgid = tswapal(target_ip->cgid);
2343 host_ip->mode = tswap16(target_ip->mode);
2344 unlock_user_struct(target_sd, target_addr, 0);
2345 return 0;
2346 }
2347
2348 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2349 struct ipc_perm *host_ip)
2350 {
2351 struct target_ipc_perm *target_ip;
2352 struct target_semid_ds *target_sd;
2353
2354 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2355 return -TARGET_EFAULT;
2356 target_ip = &(target_sd->sem_perm);
2357 target_ip->__key = tswapal(host_ip->__key);
2358 target_ip->uid = tswapal(host_ip->uid);
2359 target_ip->gid = tswapal(host_ip->gid);
2360 target_ip->cuid = tswapal(host_ip->cuid);
2361 target_ip->cgid = tswapal(host_ip->cgid);
2362 target_ip->mode = tswap16(host_ip->mode);
2363 unlock_user_struct(target_sd, target_addr, 1);
2364 return 0;
2365 }
2366
2367 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2368 abi_ulong target_addr)
2369 {
2370 struct target_semid_ds *target_sd;
2371
2372 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2373 return -TARGET_EFAULT;
2374 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2375 return -TARGET_EFAULT;
2376 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2377 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2378 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2379 unlock_user_struct(target_sd, target_addr, 0);
2380 return 0;
2381 }
2382
2383 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2384 struct semid_ds *host_sd)
2385 {
2386 struct target_semid_ds *target_sd;
2387
2388 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2389 return -TARGET_EFAULT;
2390 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2391 return -TARGET_EFAULT;
2392 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2393 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2394 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2395 unlock_user_struct(target_sd, target_addr, 1);
2396 return 0;
2397 }
2398
2399 struct target_seminfo {
2400 int semmap;
2401 int semmni;
2402 int semmns;
2403 int semmnu;
2404 int semmsl;
2405 int semopm;
2406 int semume;
2407 int semusz;
2408 int semvmx;
2409 int semaem;
2410 };
2411
2412 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2413 struct seminfo *host_seminfo)
2414 {
2415 struct target_seminfo *target_seminfo;
2416 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2417 return -TARGET_EFAULT;
2418 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2419 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2420 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2421 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2422 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2423 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2424 __put_user(host_seminfo->semume, &target_seminfo->semume);
2425 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2426 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2427 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2428 unlock_user_struct(target_seminfo, target_addr, 1);
2429 return 0;
2430 }
2431
2432 union semun {
2433 int val;
2434 struct semid_ds *buf;
2435 unsigned short *array;
2436 struct seminfo *__buf;
2437 };
2438
2439 union target_semun {
2440 int val;
2441 abi_ulong buf;
2442 abi_ulong array;
2443 abi_ulong __buf;
2444 };
2445
2446 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2447 abi_ulong target_addr)
2448 {
2449 int nsems;
2450 unsigned short *array;
2451 union semun semun;
2452 struct semid_ds semid_ds;
2453 int i, ret;
2454
2455 semun.buf = &semid_ds;
2456
2457 ret = semctl(semid, 0, IPC_STAT, semun);
2458 if (ret == -1)
2459 return get_errno(ret);
2460
2461 nsems = semid_ds.sem_nsems;
2462
2463 *host_array = malloc(nsems*sizeof(unsigned short));
2464 array = lock_user(VERIFY_READ, target_addr,
2465 nsems*sizeof(unsigned short), 1);
2466 if (!array)
2467 return -TARGET_EFAULT;
2468
2469 for(i=0; i<nsems; i++) {
2470 __get_user((*host_array)[i], &array[i]);
2471 }
2472 unlock_user(array, target_addr, 0);
2473
2474 return 0;
2475 }
2476
2477 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2478 unsigned short **host_array)
2479 {
2480 int nsems;
2481 unsigned short *array;
2482 union semun semun;
2483 struct semid_ds semid_ds;
2484 int i, ret;
2485
2486 semun.buf = &semid_ds;
2487
2488 ret = semctl(semid, 0, IPC_STAT, semun);
2489 if (ret == -1)
2490 return get_errno(ret);
2491
2492 nsems = semid_ds.sem_nsems;
2493
2494 array = lock_user(VERIFY_WRITE, target_addr,
2495 nsems*sizeof(unsigned short), 0);
2496 if (!array)
2497 return -TARGET_EFAULT;
2498
2499 for(i=0; i<nsems; i++) {
2500 __put_user((*host_array)[i], &array[i]);
2501 }
2502 free(*host_array);
2503 unlock_user(array, target_addr, 1);
2504
2505 return 0;
2506 }
2507
2508 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2509 union target_semun target_su)
2510 {
2511 union semun arg;
2512 struct semid_ds dsarg;
2513 unsigned short *array = NULL;
2514 struct seminfo seminfo;
2515 abi_long ret = -TARGET_EINVAL;
2516 abi_long err;
2517 cmd &= 0xff;
2518
2519 switch( cmd ) {
2520 case GETVAL:
2521 case SETVAL:
2522 arg.val = tswap32(target_su.val);
2523 ret = get_errno(semctl(semid, semnum, cmd, arg));
2524 target_su.val = tswap32(arg.val);
2525 break;
2526 case GETALL:
2527 case SETALL:
2528 err = target_to_host_semarray(semid, &array, target_su.array);
2529 if (err)
2530 return err;
2531 arg.array = array;
2532 ret = get_errno(semctl(semid, semnum, cmd, arg));
2533 err = host_to_target_semarray(semid, target_su.array, &array);
2534 if (err)
2535 return err;
2536 break;
2537 case IPC_STAT:
2538 case IPC_SET:
2539 case SEM_STAT:
2540 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2541 if (err)
2542 return err;
2543 arg.buf = &dsarg;
2544 ret = get_errno(semctl(semid, semnum, cmd, arg));
2545 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2546 if (err)
2547 return err;
2548 break;
2549 case IPC_INFO:
2550 case SEM_INFO:
2551 arg.__buf = &seminfo;
2552 ret = get_errno(semctl(semid, semnum, cmd, arg));
2553 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2554 if (err)
2555 return err;
2556 break;
2557 case IPC_RMID:
2558 case GETPID:
2559 case GETNCNT:
2560 case GETZCNT:
2561 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2562 break;
2563 }
2564
2565 return ret;
2566 }
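/* Illustrative sketch of the guest-side convention handled above: semctl(2)
 * takes an optional union semun whose meaning depends on cmd, which is why
 * do_semctl() interprets target_su differently per case (identifiers below
 * are examples only):
 *
 *     unsigned short vals[4] = { 1, 0, 0, 1 };
 *     union semun su;
 *     su.array = vals;
 *     semctl(semid, 0, SETALL, su);       // handled by the GETALL/SETALL arm
 *     su.val = 2;
 *     semctl(semid, 1, SETVAL, su);       // handled by the GETVAL/SETVAL arm
 *
 * Guest code normally defines union semun itself, as SUSv3 requires.
 */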
2567
2568 struct target_sembuf {
2569 unsigned short sem_num;
2570 short sem_op;
2571 short sem_flg;
2572 };
2573
2574 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2575 abi_ulong target_addr,
2576 unsigned nsops)
2577 {
2578 struct target_sembuf *target_sembuf;
2579 int i;
2580
2581 target_sembuf = lock_user(VERIFY_READ, target_addr,
2582 nsops*sizeof(struct target_sembuf), 1);
2583 if (!target_sembuf)
2584 return -TARGET_EFAULT;
2585
2586 for(i=0; i<nsops; i++) {
2587 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2588 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2589 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2590 }
2591
2592 unlock_user(target_sembuf, target_addr, 0);
2593
2594 return 0;
2595 }
2596
2597 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2598 {
2599 struct sembuf sops[nsops];
2600
2601 if (target_to_host_sembuf(sops, ptr, nsops))
2602 return -TARGET_EFAULT;
2603
2604 return get_errno(semop(semid, sops, nsops));
2605 }
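/* Illustrative sketch: the target_sembuf array converted above corresponds
 * to an ordinary guest semop(2) call, for example a classic "P" operation:
 *
 *     struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *     semop(semid, &op, 1);
 *
 * target_to_host_sembuf() copies the three fields of each entry with
 * __get_user(), which takes care of any byte-order difference.
 */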
2606
2607 struct target_msqid_ds
2608 {
2609 struct target_ipc_perm msg_perm;
2610 abi_ulong msg_stime;
2611 #if TARGET_ABI_BITS == 32
2612 abi_ulong __unused1;
2613 #endif
2614 abi_ulong msg_rtime;
2615 #if TARGET_ABI_BITS == 32
2616 abi_ulong __unused2;
2617 #endif
2618 abi_ulong msg_ctime;
2619 #if TARGET_ABI_BITS == 32
2620 abi_ulong __unused3;
2621 #endif
2622 abi_ulong __msg_cbytes;
2623 abi_ulong msg_qnum;
2624 abi_ulong msg_qbytes;
2625 abi_ulong msg_lspid;
2626 abi_ulong msg_lrpid;
2627 abi_ulong __unused4;
2628 abi_ulong __unused5;
2629 };
2630
2631 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2632 abi_ulong target_addr)
2633 {
2634 struct target_msqid_ds *target_md;
2635
2636 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2637 return -TARGET_EFAULT;
2638 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2639 return -TARGET_EFAULT;
2640 host_md->msg_stime = tswapal(target_md->msg_stime);
2641 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2642 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2643 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2644 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2645 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2646 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2647 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2648 unlock_user_struct(target_md, target_addr, 0);
2649 return 0;
2650 }
2651
2652 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2653 struct msqid_ds *host_md)
2654 {
2655 struct target_msqid_ds *target_md;
2656
2657 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2658 return -TARGET_EFAULT;
2659 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2660 return -TARGET_EFAULT;
2661 target_md->msg_stime = tswapal(host_md->msg_stime);
2662 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2663 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2664 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2665 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2666 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2667 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2668 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2669 unlock_user_struct(target_md, target_addr, 1);
2670 return 0;
2671 }
2672
2673 struct target_msginfo {
2674 int msgpool;
2675 int msgmap;
2676 int msgmax;
2677 int msgmnb;
2678 int msgmni;
2679 int msgssz;
2680 int msgtql;
2681 unsigned short int msgseg;
2682 };
2683
2684 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2685 struct msginfo *host_msginfo)
2686 {
2687 struct target_msginfo *target_msginfo;
2688 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2689 return -TARGET_EFAULT;
2690 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2691 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2692 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2693 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2694 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2695 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2696 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2697 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2698 unlock_user_struct(target_msginfo, target_addr, 1);
2699 return 0;
2700 }
2701
2702 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2703 {
2704 struct msqid_ds dsarg;
2705 struct msginfo msginfo;
2706 abi_long ret = -TARGET_EINVAL;
2707
2708 cmd &= 0xff;
2709
2710 switch (cmd) {
2711 case IPC_STAT:
2712 case IPC_SET:
2713 case MSG_STAT:
2714 if (target_to_host_msqid_ds(&dsarg,ptr))
2715 return -TARGET_EFAULT;
2716 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2717 if (host_to_target_msqid_ds(ptr,&dsarg))
2718 return -TARGET_EFAULT;
2719 break;
2720 case IPC_RMID:
2721 ret = get_errno(msgctl(msgid, cmd, NULL));
2722 break;
2723 case IPC_INFO:
2724 case MSG_INFO:
2725 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2726 if (host_to_target_msginfo(ptr, &msginfo))
2727 return -TARGET_EFAULT;
2728 break;
2729 }
2730
2731 return ret;
2732 }
2733
2734 struct target_msgbuf {
2735 abi_long mtype;
2736 char mtext[1];
2737 };
2738
2739 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2740 unsigned int msgsz, int msgflg)
2741 {
2742 struct target_msgbuf *target_mb;
2743 struct msgbuf *host_mb;
2744 abi_long ret = 0;
2745
2746 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2747 return -TARGET_EFAULT;
2748 host_mb = malloc(msgsz+sizeof(long));
2749 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2750 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2751 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2752 free(host_mb);
2753 unlock_user_struct(target_mb, msgp, 0);
2754
2755 return ret;
2756 }
2757
2758 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2759 unsigned int msgsz, abi_long msgtyp,
2760 int msgflg)
2761 {
2762 struct target_msgbuf *target_mb;
2763 char *target_mtext;
2764 struct msgbuf *host_mb;
2765 abi_long ret = 0;
2766
2767 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2768 return -TARGET_EFAULT;
2769
2770 host_mb = g_malloc(msgsz+sizeof(long));
2771 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2772
2773 if (ret > 0) {
2774 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2775 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2776 if (!target_mtext) {
2777 ret = -TARGET_EFAULT;
2778 goto end;
2779 }
2780 memcpy(target_mb->mtext, host_mb->mtext, ret);
2781 unlock_user(target_mtext, target_mtext_addr, ret);
2782 }
2783
2784 target_mb->mtype = tswapal(host_mb->mtype);
2785
2786 end:
2787 if (target_mb)
2788 unlock_user_struct(target_mb, msgp, 1);
2789 g_free(host_mb);
2790 return ret;
2791 }
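/* Illustrative sketch of the guest structures these helpers marshal: SysV
 * message queues use a caller-defined buffer whose first field is the
 * message type, mirrored here by struct target_msgbuf (sizes below are
 * arbitrary):
 *
 *     struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *     msgsnd(msqid, &m, sizeof(m.mtext), 0);
 *     msgrcv(msqid, &m, sizeof(m.mtext), 1, 0);
 *
 * do_msgsnd() and do_msgrcv() allocate a host-sized msgbuf, swap mtype
 * between the guest abi_long and the host long, and copy mtext verbatim.
 */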
2792
2793 struct target_shmid_ds
2794 {
2795 struct target_ipc_perm shm_perm;
2796 abi_ulong shm_segsz;
2797 abi_ulong shm_atime;
2798 #if TARGET_ABI_BITS == 32
2799 abi_ulong __unused1;
2800 #endif
2801 abi_ulong shm_dtime;
2802 #if TARGET_ABI_BITS == 32
2803 abi_ulong __unused2;
2804 #endif
2805 abi_ulong shm_ctime;
2806 #if TARGET_ABI_BITS == 32
2807 abi_ulong __unused3;
2808 #endif
2809 int shm_cpid;
2810 int shm_lpid;
2811 abi_ulong shm_nattch;
2812 unsigned long int __unused4;
2813 unsigned long int __unused5;
2814 };
2815
2816 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2817 abi_ulong target_addr)
2818 {
2819 struct target_shmid_ds *target_sd;
2820
2821 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2822 return -TARGET_EFAULT;
2823 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2824 return -TARGET_EFAULT;
2825 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2826 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2827 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2828 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2829 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2830 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2831 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2832 unlock_user_struct(target_sd, target_addr, 0);
2833 return 0;
2834 }
2835
2836 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2837 struct shmid_ds *host_sd)
2838 {
2839 struct target_shmid_ds *target_sd;
2840
2841 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2842 return -TARGET_EFAULT;
2843 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2844 return -TARGET_EFAULT;
2845 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2846 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2847 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2848 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2849 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2850 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2851 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2852 unlock_user_struct(target_sd, target_addr, 1);
2853 return 0;
2854 }
2855
2856 struct target_shminfo {
2857 abi_ulong shmmax;
2858 abi_ulong shmmin;
2859 abi_ulong shmmni;
2860 abi_ulong shmseg;
2861 abi_ulong shmall;
2862 };
2863
2864 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2865 struct shminfo *host_shminfo)
2866 {
2867 struct target_shminfo *target_shminfo;
2868 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2869 return -TARGET_EFAULT;
2870 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2871 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2872 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2873 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2874 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2875 unlock_user_struct(target_shminfo, target_addr, 1);
2876 return 0;
2877 }
2878
2879 struct target_shm_info {
2880 int used_ids;
2881 abi_ulong shm_tot;
2882 abi_ulong shm_rss;
2883 abi_ulong shm_swp;
2884 abi_ulong swap_attempts;
2885 abi_ulong swap_successes;
2886 };
2887
2888 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2889 struct shm_info *host_shm_info)
2890 {
2891 struct target_shm_info *target_shm_info;
2892 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2893 return -TARGET_EFAULT;
2894 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2895 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2896 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2897 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2898 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2899 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2900 unlock_user_struct(target_shm_info, target_addr, 1);
2901 return 0;
2902 }
2903
2904 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2905 {
2906 struct shmid_ds dsarg;
2907 struct shminfo shminfo;
2908 struct shm_info shm_info;
2909 abi_long ret = -TARGET_EINVAL;
2910
2911 cmd &= 0xff;
2912
2913 switch(cmd) {
2914 case IPC_STAT:
2915 case IPC_SET:
2916 case SHM_STAT:
2917 if (target_to_host_shmid_ds(&dsarg, buf))
2918 return -TARGET_EFAULT;
2919 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2920 if (host_to_target_shmid_ds(buf, &dsarg))
2921 return -TARGET_EFAULT;
2922 break;
2923 case IPC_INFO:
2924 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2925 if (host_to_target_shminfo(buf, &shminfo))
2926 return -TARGET_EFAULT;
2927 break;
2928 case SHM_INFO:
2929 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2930 if (host_to_target_shm_info(buf, &shm_info))
2931 return -TARGET_EFAULT;
2932 break;
2933 case IPC_RMID:
2934 case SHM_LOCK:
2935 case SHM_UNLOCK:
2936 ret = get_errno(shmctl(shmid, cmd, NULL));
2937 break;
2938 }
2939
2940 return ret;
2941 }
2942
2943 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2944 {
2945 abi_long raddr;
2946 void *host_raddr;
2947 struct shmid_ds shm_info;
2948 int i,ret;
2949
2950 /* find out the length of the shared memory segment */
2951 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2952 if (is_error(ret)) {
2953 /* can't get length, bail out */
2954 return ret;
2955 }
2956
2957 mmap_lock();
2958
2959 if (shmaddr)
2960 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2961 else {
2962 abi_ulong mmap_start;
2963
2964 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2965
2966 if (mmap_start == -1) {
2967 errno = ENOMEM;
2968 host_raddr = (void *)-1;
2969 } else
2970 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2971 }
2972
2973 if (host_raddr == (void *)-1) {
2974 mmap_unlock();
2975 return get_errno((long)host_raddr);
2976 }
2977 raddr=h2g((unsigned long)host_raddr);
2978
2979 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2980 PAGE_VALID | PAGE_READ |
2981 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2982
2983 for (i = 0; i < N_SHM_REGIONS; i++) {
2984 if (shm_regions[i].start == 0) {
2985 shm_regions[i].start = raddr;
2986 shm_regions[i].size = shm_info.shm_segsz;
2987 break;
2988 }
2989 }
2990
2991 mmap_unlock();
2992 return raddr;
2993
2994 }
2995
2996 static inline abi_long do_shmdt(abi_ulong shmaddr)
2997 {
2998 int i;
2999
3000 for (i = 0; i < N_SHM_REGIONS; ++i) {
3001 if (shm_regions[i].start == shmaddr) {
3002 shm_regions[i].start = 0;
3003 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3004 break;
3005 }
3006 }
3007
3008 return get_errno(shmdt(g2h(shmaddr)));
3009 }
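/* Illustrative sketch: the shm_regions[] bookkeeping exists because
 * shmdt(2) only receives an address, so the emulator has to remember how
 * large each attached segment was in order to clear its page flags again.
 * From the guest's point of view this is just the usual attach/detach pair:
 *
 *     void *p = shmat(shmid, NULL, 0);    // do_shmat() picks a vma and
 *                                         // records { start, size }
 *     ...
 *     shmdt(p);                           // do_shmdt() looks the size up
 *                                         // and resets the page flags
 */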
3010
3011 #ifdef TARGET_NR_ipc
3012 /* ??? This only works with linear mappings. */
3013 /* do_ipc() must return target values and target errnos. */
3014 static abi_long do_ipc(unsigned int call, int first,
3015 int second, int third,
3016 abi_long ptr, abi_long fifth)
3017 {
3018 int version;
3019 abi_long ret = 0;
3020
3021 version = call >> 16;
3022 call &= 0xffff;
3023
3024 switch (call) {
3025 case IPCOP_semop:
3026 ret = do_semop(first, ptr, second);
3027 break;
3028
3029 case IPCOP_semget:
3030 ret = get_errno(semget(first, second, third));
3031 break;
3032
3033 case IPCOP_semctl:
3034 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3035 break;
3036
3037 case IPCOP_msgget:
3038 ret = get_errno(msgget(first, second));
3039 break;
3040
3041 case IPCOP_msgsnd:
3042 ret = do_msgsnd(first, ptr, second, third);
3043 break;
3044
3045 case IPCOP_msgctl:
3046 ret = do_msgctl(first, second, ptr);
3047 break;
3048
3049 case IPCOP_msgrcv:
3050 switch (version) {
3051 case 0:
3052 {
3053 struct target_ipc_kludge {
3054 abi_long msgp;
3055 abi_long msgtyp;
3056 } *tmp;
3057
3058 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3059 ret = -TARGET_EFAULT;
3060 break;
3061 }
3062
3063 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3064
3065 unlock_user_struct(tmp, ptr, 0);
3066 break;
3067 }
3068 default:
3069 ret = do_msgrcv(first, ptr, second, fifth, third);
3070 }
3071 break;
3072
3073 case IPCOP_shmat:
3074 switch (version) {
3075 default:
3076 {
3077 abi_ulong raddr;
3078 raddr = do_shmat(first, ptr, second);
3079 if (is_error(raddr))
3080 return get_errno(raddr);
3081 if (put_user_ual(raddr, third))
3082 return -TARGET_EFAULT;
3083 break;
3084 }
3085 case 1:
3086 ret = -TARGET_EINVAL;
3087 break;
3088 }
3089 break;
3090 case IPCOP_shmdt:
3091 ret = do_shmdt(ptr);
3092 break;
3093
3094 case IPCOP_shmget:
3095 /* IPC_* flag values are the same on all linux platforms */
3096 ret = get_errno(shmget(first, second, third));
3097 break;
3098
3099 /* IPC_* and SHM_* command values are the same on all linux platforms */
3100 case IPCOP_shmctl:
3101 ret = do_shmctl(first, second, third);
3102 break;
3103 default:
3104 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3105 ret = -TARGET_ENOSYS;
3106 break;
3107 }
3108 return ret;
3109 }
3110 #endif
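/* Illustrative sketch (constants shown by their QEMU names): like
 * socketcall, some targets multiplex all SysV IPC through a single ipc(2)
 * syscall, which do_ipc() demultiplexes.  A guest semop() on such a target
 * arrives roughly as:
 *
 *     syscall(__NR_ipc, IPCOP_semop, semid, nsops, 0, sops);
 *
 * i.e. the call number in the low 16 bits (an interface version may sit in
 * the high bits), then first/second/third as plain ints and ptr as a guest
 * pointer, matching the unpacking at the top of do_ipc().  IPCOP_semop
 * corresponds to the guest's SEMOP value.
 */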
3111
3112 /* kernel structure types definitions */
3113
3114 #define STRUCT(name, ...) STRUCT_ ## name,
3115 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3116 enum {
3117 #include "syscall_types.h"
3118 };
3119 #undef STRUCT
3120 #undef STRUCT_SPECIAL
3121
3122 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3123 #define STRUCT_SPECIAL(name)
3124 #include "syscall_types.h"
3125 #undef STRUCT
3126 #undef STRUCT_SPECIAL
3127
3128 typedef struct IOCTLEntry IOCTLEntry;
3129
3130 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3131 int fd, abi_long cmd, abi_long arg);
3132
3133 struct IOCTLEntry {
3134 unsigned int target_cmd;
3135 unsigned int host_cmd;
3136 const char *name;
3137 int access;
3138 do_ioctl_fn *do_ioctl;
3139 const argtype arg_type[5];
3140 };
3141
3142 #define IOC_R 0x0001
3143 #define IOC_W 0x0002
3144 #define IOC_RW (IOC_R | IOC_W)
3145
3146 #define MAX_STRUCT_SIZE 4096
3147
3148 #ifdef CONFIG_FIEMAP
3149 /* So fiemap access checks don't overflow on 32 bit systems.
3150 * This is very slightly smaller than the limit imposed by
3151 * the underlying kernel.
3152 */
3153 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3154 / sizeof(struct fiemap_extent))
3155
3156 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3157 int fd, abi_long cmd, abi_long arg)
3158 {
3159 /* The parameter for this ioctl is a struct fiemap followed
3160 * by an array of struct fiemap_extent whose size is set
3161 * in fiemap->fm_extent_count. The array is filled in by the
3162 * ioctl.
3163 */
3164 int target_size_in, target_size_out;
3165 struct fiemap *fm;
3166 const argtype *arg_type = ie->arg_type;
3167 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3168 void *argptr, *p;
3169 abi_long ret;
3170 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3171 uint32_t outbufsz;
3172 int free_fm = 0;
3173
3174 assert(arg_type[0] == TYPE_PTR);
3175 assert(ie->access == IOC_RW);
3176 arg_type++;
3177 target_size_in = thunk_type_size(arg_type, 0);
3178 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3179 if (!argptr) {
3180 return -TARGET_EFAULT;
3181 }
3182 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3183 unlock_user(argptr, arg, 0);
3184 fm = (struct fiemap *)buf_temp;
3185 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3186 return -TARGET_EINVAL;
3187 }
3188
3189 outbufsz = sizeof (*fm) +
3190 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3191
3192 if (outbufsz > MAX_STRUCT_SIZE) {
3193 /* We can't fit all the extents into the fixed size buffer.
3194 * Allocate one that is large enough and use it instead.
3195 */
3196 fm = malloc(outbufsz);
3197 if (!fm) {
3198 return -TARGET_ENOMEM;
3199 }
3200 memcpy(fm, buf_temp, sizeof(struct fiemap));
3201 free_fm = 1;
3202 }
3203 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3204 if (!is_error(ret)) {
3205 target_size_out = target_size_in;
3206 /* An extent_count of 0 means we were only counting the extents
3207 * so there are no structs to copy
3208 */
3209 if (fm->fm_extent_count != 0) {
3210 target_size_out += fm->fm_mapped_extents * extent_size;
3211 }
3212 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3213 if (!argptr) {
3214 ret = -TARGET_EFAULT;
3215 } else {
3216 /* Convert the struct fiemap */
3217 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3218 if (fm->fm_extent_count != 0) {
3219 p = argptr + target_size_in;
3220 /* ...and then all the struct fiemap_extents */
3221 for (i = 0; i < fm->fm_mapped_extents; i++) {
3222 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3223 THUNK_TARGET);
3224 p += extent_size;
3225 }
3226 }
3227 unlock_user(argptr, arg, target_size_out);
3228 }
3229 }
3230 if (free_fm) {
3231 free(fm);
3232 }
3233 return ret;
3234 }
3235 #endif
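/* Illustrative sketch of the guest-side call translated above: the
 * FS_IOC_FIEMAP argument is a struct fiemap immediately followed by
 * fm_extent_count extent slots, and the kernel reports how many it filled
 * in fm_mapped_extents (the extent count of 16 below is arbitrary):
 *
 *     struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                   16 * sizeof(struct fiemap_extent));
 *     fm->fm_start = 0;
 *     fm->fm_length = ~0ULL;              // map the whole file
 *     fm->fm_extent_count = 16;
 *     ioctl(file_fd, FS_IOC_FIEMAP, fm);
 *     // fm->fm_mapped_extents entries of fm->fm_extents[] are now valid
 *
 * This variable-length tail is exactly why the handler cannot always use
 * the fixed-size buf_temp and may need to allocate a larger buffer.
 */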
3236
3237 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3238 int fd, abi_long cmd, abi_long arg)
3239 {
3240 const argtype *arg_type = ie->arg_type;
3241 int target_size;
3242 void *argptr;
3243 int ret;
3244 struct ifconf *host_ifconf;
3245 uint32_t outbufsz;
3246 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3247 int target_ifreq_size;
3248 int nb_ifreq;
3249 int free_buf = 0;
3250 int i;
3251 int target_ifc_len;
3252 abi_long target_ifc_buf;
3253 int host_ifc_len;
3254 char *host_ifc_buf;
3255
3256 assert(arg_type[0] == TYPE_PTR);
3257 assert(ie->access == IOC_RW);
3258
3259 arg_type++;
3260 target_size = thunk_type_size(arg_type, 0);
3261
3262 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3263 if (!argptr)
3264 return -TARGET_EFAULT;
3265 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3266 unlock_user(argptr, arg, 0);
3267
3268 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3269 target_ifc_len = host_ifconf->ifc_len;
3270 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3271
3272 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3273 nb_ifreq = target_ifc_len / target_ifreq_size;
3274 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3275
3276 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3277 if (outbufsz > MAX_STRUCT_SIZE) {
3278 /* We can't fit all the ifreq entries into the fixed size buffer.
3279 * Allocate one that is large enough and use it instead.
3280 */
3281 host_ifconf = malloc(outbufsz);
3282 if (!host_ifconf) {
3283 return -TARGET_ENOMEM;
3284 }
3285 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3286 free_buf = 1;
3287 }
3288 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3289
3290 host_ifconf->ifc_len = host_ifc_len;
3291 host_ifconf->ifc_buf = host_ifc_buf;
3292
3293 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3294 if (!is_error(ret)) {
3295 /* convert host ifc_len to target ifc_len */
3296
3297 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3298 target_ifc_len = nb_ifreq * target_ifreq_size;
3299 host_ifconf->ifc_len = target_ifc_len;
3300
3301 /* restore target ifc_buf */
3302
3303 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3304
3305 /* copy struct ifconf to target user */
3306
3307 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3308 if (!argptr)
3309 return -TARGET_EFAULT;
3310 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3311 unlock_user(argptr, arg, target_size);
3312
3313 /* copy ifreq[] to target user */
3314
3315 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3316 for (i = 0; i < nb_ifreq ; i++) {
3317 thunk_convert(argptr + i * target_ifreq_size,
3318 host_ifc_buf + i * sizeof(struct ifreq),
3319 ifreq_arg_type, THUNK_TARGET);
3320 }
3321 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3322 }
3323
3324 if (free_buf) {
3325 free(host_ifconf);
3326 }
3327
3328 return ret;
3329 }
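/* Illustrative sketch of the SIOCGIFCONF usage handled above: the caller
 * supplies a buffer and its length, and the kernel rewrites ifc_len to the
 * number of bytes actually used (the array size of 8 is arbitrary):
 *
 *     struct ifreq reqs[8];
 *     struct ifconf ifc;
 *     ifc.ifc_len = sizeof(reqs);
 *     ifc.ifc_buf = (char *)reqs;
 *     ioctl(sock_fd, SIOCGIFCONF, &ifc);
 *     // ifc.ifc_len / sizeof(struct ifreq) entries were returned
 *
 * Because the guest and host struct ifreq sizes can differ, the handler
 * recomputes ifc_len in both directions and converts each array element
 * with thunk_convert().
 */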
3330
3331 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3332 abi_long cmd, abi_long arg)
3333 {
3334 void *argptr;
3335 struct dm_ioctl *host_dm;
3336 abi_long guest_data;
3337 uint32_t guest_data_size;
3338 int target_size;
3339 const argtype *arg_type = ie->arg_type;
3340 abi_long ret;
3341 void *big_buf = NULL;
3342 char *host_data;
3343
3344 arg_type++;
3345 target_size = thunk_type_size(arg_type, 0);
3346 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3347 if (!argptr) {
3348 ret = -TARGET_EFAULT;
3349 goto out;
3350 }
3351 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3352 unlock_user(argptr, arg, 0);
3353
3354 /* buf_temp is too small, so fetch things into a bigger buffer */
3355 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3356 memcpy(big_buf, buf_temp, target_size);
3357 buf_temp = big_buf;
3358 host_dm = big_buf;
3359
3360 guest_data = arg + host_dm->data_start;
3361 if ((guest_data - arg) < 0) {
3362 ret = -EINVAL;
3363 goto out;
3364 }
3365 guest_data_size = host_dm->data_size - host_dm->data_start;
3366 host_data = (char*)host_dm + host_dm->data_start;
3367
3368 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3369 switch (ie->host_cmd) {
3370 case DM_REMOVE_ALL:
3371 case DM_LIST_DEVICES:
3372 case DM_DEV_CREATE:
3373 case DM_DEV_REMOVE:
3374 case DM_DEV_SUSPEND:
3375 case DM_DEV_STATUS:
3376 case DM_DEV_WAIT:
3377 case DM_TABLE_STATUS:
3378 case DM_TABLE_CLEAR:
3379 case DM_TABLE_DEPS:
3380 case DM_LIST_VERSIONS:
3381 /* no input data */
3382 break;
3383 case DM_DEV_RENAME:
3384 case DM_DEV_SET_GEOMETRY:
3385 /* data contains only strings */
3386 memcpy(host_data, argptr, guest_data_size);
3387 break;
3388 case DM_TARGET_MSG:
3389 memcpy(host_data, argptr, guest_data_size);
3390 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3391 break;
3392 case DM_TABLE_LOAD:
3393 {
3394 void *gspec = argptr;
3395 void *cur_data = host_data;
3396 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3397 int spec_size = thunk_type_size(arg_type, 0);
3398 int i;
3399
3400 for (i = 0; i < host_dm->target_count; i++) {
3401 struct dm_target_spec *spec = cur_data;
3402 uint32_t next;
3403 int slen;
3404
3405 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3406 slen = strlen((char*)gspec + spec_size) + 1;
3407 next = spec->next;
3408 spec->next = sizeof(*spec) + slen;
3409 strcpy((char*)&spec[1], gspec + spec_size);
3410 gspec += next;
3411 cur_data += spec->next;
3412 }
3413 break;
3414 }
3415 default:
3416 ret = -TARGET_EINVAL;
3417 goto out;
3418 }
3419 unlock_user(argptr, guest_data, 0);
3420
3421 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3422 if (!is_error(ret)) {
3423 guest_data = arg + host_dm->data_start;
3424 guest_data_size = host_dm->data_size - host_dm->data_start;
3425 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3426 switch (ie->host_cmd) {
3427 case DM_REMOVE_ALL:
3428 case DM_DEV_CREATE:
3429 case DM_DEV_REMOVE:
3430 case DM_DEV_RENAME:
3431 case DM_DEV_SUSPEND:
3432 case DM_DEV_STATUS:
3433 case DM_TABLE_LOAD:
3434 case DM_TABLE_CLEAR:
3435 case DM_TARGET_MSG:
3436 case DM_DEV_SET_GEOMETRY:
3437 /* no return data */
3438 break;
3439 case DM_LIST_DEVICES:
3440 {
3441 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3442 uint32_t remaining_data = guest_data_size;
3443 void *cur_data = argptr;
3444 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3445 int nl_size = 12; /* can't use thunk_size due to alignment */
3446
3447 while (1) {
3448 uint32_t next = nl->next;
3449 if (next) {
3450 nl->next = nl_size + (strlen(nl->name) + 1);
3451 }
3452 if (remaining_data < nl->next) {
3453 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3454 break;
3455 }
3456 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3457 strcpy(cur_data + nl_size, nl->name);
3458 cur_data += nl->next;
3459 remaining_data -= nl->next;
3460 if (!next) {
3461 break;
3462 }
3463 nl = (void*)nl + next;
3464 }
3465 break;
3466 }
3467 case DM_DEV_WAIT:
3468 case DM_TABLE_STATUS:
3469 {
3470 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3471 void *cur_data = argptr;
3472 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3473 int spec_size = thunk_type_size(arg_type, 0);
3474 int i;
3475
3476 for (i = 0; i < host_dm->target_count; i++) {
3477 uint32_t next = spec->next;
3478 int slen = strlen((char*)&spec[1]) + 1;
3479 spec->next = (cur_data - argptr) + spec_size + slen;
3480 if (guest_data_size < spec->next) {
3481 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3482 break;
3483 }
3484 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3485 strcpy(cur_data + spec_size, (char*)&spec[1]);
3486 cur_data = argptr + spec->next;
3487 spec = (void*)host_dm + host_dm->data_start + next;
3488 }
3489 break;
3490 }
3491 case DM_TABLE_DEPS:
3492 {
3493 void *hdata = (void*)host_dm + host_dm->data_start;
3494 int count = *(uint32_t*)hdata;
3495 uint64_t *hdev = hdata + 8;
3496 uint64_t *gdev = argptr + 8;
3497 int i;
3498
3499 *(uint32_t*)argptr = tswap32(count);
3500 for (i = 0; i < count; i++) {
3501 *gdev = tswap64(*hdev);
3502 gdev++;
3503 hdev++;
3504 }
3505 break;
3506 }
3507 case DM_LIST_VERSIONS:
3508 {
3509 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3510 uint32_t remaining_data = guest_data_size;
3511 void *cur_data = argptr;
3512 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3513 int vers_size = thunk_type_size(arg_type, 0);
3514
3515 while (1) {
3516 uint32_t next = vers->next;
3517 if (next) {
3518 vers->next = vers_size + (strlen(vers->name) + 1);
3519 }
3520 if (remaining_data < vers->next) {
3521 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3522 break;
3523 }
3524 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3525 strcpy(cur_data + vers_size, vers->name);
3526 cur_data += vers->next;
3527 remaining_data -= vers->next;
3528 if (!next) {
3529 break;
3530 }
3531 vers = (void*)vers + next;
3532 }
3533 break;
3534 }
3535 default:
3536 ret = -TARGET_EINVAL;
3537 goto out;
3538 }
3539 unlock_user(argptr, guest_data, guest_data_size);
3540
3541 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3542 if (!argptr) {
3543 ret = -TARGET_EFAULT;
3544 goto out;
3545 }
3546 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3547 unlock_user(argptr, arg, target_size);
3548 }
3549 out:
3550 g_free(big_buf);
3551 return ret;
3552 }
3553
3554 static IOCTLEntry ioctl_entries[] = {
3555 #define IOCTL(cmd, access, ...) \
3556 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3557 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3558 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3559 #include "ioctls.h"
3560 { 0, 0, },
3561 };
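/* Illustrative sketch, the real definitions live in ioctls.h: each IOCTL()
 * or IOCTL_SPECIAL() line expands to one IOCTLEntry.  A read-only ioctl
 * returning a struct would be declared roughly as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * which expands to { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 * { MK_PTR(MK_STRUCT(STRUCT_winsize)) } } and is handled by the generic
 * IOC_R path in do_ioctl() below.
 */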
3562
3563 /* ??? Implement proper locking for ioctls. */
3564 /* do_ioctl() Must return target values and target errnos. */
3565 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3566 {
3567 const IOCTLEntry *ie;
3568 const argtype *arg_type;
3569 abi_long ret;
3570 uint8_t buf_temp[MAX_STRUCT_SIZE];
3571 int target_size;
3572 void *argptr;
3573
3574 ie = ioctl_entries;
3575 for(;;) {
3576 if (ie->target_cmd == 0) {
3577 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3578 return -TARGET_ENOSYS;
3579 }
3580 if (ie->target_cmd == cmd)
3581 break;
3582 ie++;
3583 }
3584 arg_type = ie->arg_type;
3585 #if defined(DEBUG)
3586 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3587 #endif
3588 if (ie->do_ioctl) {
3589 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3590 }
3591
3592 switch(arg_type[0]) {
3593 case TYPE_NULL:
3594 /* no argument */
3595 ret = get_errno(ioctl(fd, ie->host_cmd));
3596 break;
3597 case TYPE_PTRVOID:
3598 case TYPE_INT:
3599 /* int argument */
3600 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3601 break;
3602 case TYPE_PTR:
3603 arg_type++;
3604 target_size = thunk_type_size(arg_type, 0);
3605 switch(ie->access) {
3606 case IOC_R:
3607 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3608 if (!is_error(ret)) {
3609 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3610 if (!argptr)
3611 return -TARGET_EFAULT;
3612 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3613 unlock_user(argptr, arg, target_size);
3614 }
3615 break;
3616 case IOC_W:
3617 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3618 if (!argptr)
3619 return -TARGET_EFAULT;
3620 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3621 unlock_user(argptr, arg, 0);
3622 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3623 break;
3624 default:
3625 case IOC_RW:
3626 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3627 if (!argptr)
3628 return -TARGET_EFAULT;
3629 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3630 unlock_user(argptr, arg, 0);
3631 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3632 if (!is_error(ret)) {
3633 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3634 if (!argptr)
3635 return -TARGET_EFAULT;
3636 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3637 unlock_user(argptr, arg, target_size);
3638 }
3639 break;
3640 }
3641 break;
3642 default:
3643 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3644 (long)cmd, arg_type[0]);
3645 ret = -TARGET_ENOSYS;
3646 break;
3647 }
3648 return ret;
3649 }
3650
3651 static const bitmask_transtbl iflag_tbl[] = {
3652 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3653 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3654 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3655 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3656 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3657 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3658 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3659 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3660 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3661 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3662 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3663 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3664 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3665 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3666 { 0, 0, 0, 0 }
3667 };
3668
3669 static const bitmask_transtbl oflag_tbl[] = {
3670 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3671 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3672 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3673 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3674 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3675 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3676 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3677 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3678 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3679 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3680 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3681 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3682 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3683 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3684 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3685 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3686 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3687 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3688 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3689 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3690 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3691 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3692 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3693 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3694 { 0, 0, 0, 0 }
3695 };
3696
3697 static const bitmask_transtbl cflag_tbl[] = {
3698 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3699 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3700 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3701 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3702 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3703 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3704 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3705 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3706 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3707 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3708 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3709 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3710 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3711 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3712 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3713 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3714 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3715 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3716 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3717 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3718 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3719 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3720 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3721 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3722 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3723 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3724 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3725 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3726 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3727 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3728 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3729 { 0, 0, 0, 0 }
3730 };
3731
3732 static const bitmask_transtbl lflag_tbl[] = {
3733 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3734 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3735 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3736 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3737 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3738 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3739 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3740 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3741 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3742 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3743 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3744 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3745 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3746 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3747 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3748 { 0, 0, 0, 0 }
3749 };
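/* The four tables above feed the generic bitmask translation helpers: each
 * row is { target_mask, target_bits, host_mask, host_bits }, and a flag word
 * is translated by testing (value & target_mask) == target_bits and, on a
 * match, OR-ing in the corresponding host bits (and symmetrically for the
 * reverse direction).  Single-bit flags use the same value as mask and bits;
 * multi-bit fields such as CSIZE or CBAUD list one row per legal value, so a
 * target c_cflag containing TARGET_CS7 matches the
 * { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 } row and contributes host CS7.
 */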
3750
3751 static void target_to_host_termios (void *dst, const void *src)
3752 {
3753 struct host_termios *host = dst;
3754 const struct target_termios *target = src;
3755
3756 host->c_iflag =
3757 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3758 host->c_oflag =
3759 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3760 host->c_cflag =
3761 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3762 host->c_lflag =
3763 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3764 host->c_line = target->c_line;
3765
3766 memset(host->c_cc, 0, sizeof(host->c_cc));
3767 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3768 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3769 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3770 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3771 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3772 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3773 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3774 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3775 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3776 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3777 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3778 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3779 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3780 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3781 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3782 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3783 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3784 }
3785
3786 static void host_to_target_termios (void *dst, const void *src)
3787 {
3788 struct target_termios *target = dst;
3789 const struct host_termios *host = src;
3790
3791 target->c_iflag =
3792 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3793 target->c_oflag =
3794 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3795 target->c_cflag =
3796 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3797 target->c_lflag =
3798 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3799 target->c_line = host->c_line;
3800
3801 memset(target->c_cc, 0, sizeof(target->c_cc));
3802 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3803 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3804 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3805 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3806 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3807 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3808 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3809 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3810 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3811 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3812 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3813 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3814 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3815 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3816 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3817 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3818 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3819 }
3820
3821 static const StructEntry struct_termios_def = {
3822 .convert = { host_to_target_termios, target_to_host_termios },
3823 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3824 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3825 };
3826
3827 static bitmask_transtbl mmap_flags_tbl[] = {
3828 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3829 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3830 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3831 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3832 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3833 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3834 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3835 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3836 { 0, 0, 0, 0 }
3837 };
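
/*
 * Descriptive note on these translation tables (based on the thunk helpers
 * used throughout this file): each bitmask_transtbl row is
 * { target_mask, target_bits, host_mask, host_bits }.  target_to_host_bitmask()
 * tests (value & target_mask) == target_bits for every row and, on a match,
 * ORs in host_bits; host_to_target_bitmask() works the same way in reverse.
 * On many host/target pairs the numeric values coincide and the conversion
 * is a no-op, but the table keeps things correct where encodings differ
 * (MIPS guests, for instance, use a different MAP_ANONYMOUS value).
 */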
3838
3839 #if defined(TARGET_I386)
3840
3841 /* NOTE: there is really only one LDT, shared by all the threads */
3842 static uint8_t *ldt_table;
3843
3844 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3845 {
3846 int size;
3847 void *p;
3848
3849 if (!ldt_table)
3850 return 0;
3851 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3852 if (size > bytecount)
3853 size = bytecount;
3854 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3855 if (!p)
3856 return -TARGET_EFAULT;
3857 /* ??? Should this be byteswapped? */
3858 memcpy(p, ldt_table, size);
3859 unlock_user(p, ptr, size);
3860 return size;
3861 }
3862
3863 /* XXX: add locking support */
3864 static abi_long write_ldt(CPUX86State *env,
3865 abi_ulong ptr, unsigned long bytecount, int oldmode)
3866 {
3867 struct target_modify_ldt_ldt_s ldt_info;
3868 struct target_modify_ldt_ldt_s *target_ldt_info;
3869 int seg_32bit, contents, read_exec_only, limit_in_pages;
3870 int seg_not_present, useable, lm;
3871 uint32_t *lp, entry_1, entry_2;
3872
3873 if (bytecount != sizeof(ldt_info))
3874 return -TARGET_EINVAL;
3875 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3876 return -TARGET_EFAULT;
3877 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3878 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3879 ldt_info.limit = tswap32(target_ldt_info->limit);
3880 ldt_info.flags = tswap32(target_ldt_info->flags);
3881 unlock_user_struct(target_ldt_info, ptr, 0);
3882
3883 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3884 return -TARGET_EINVAL;
3885 seg_32bit = ldt_info.flags & 1;
3886 contents = (ldt_info.flags >> 1) & 3;
3887 read_exec_only = (ldt_info.flags >> 3) & 1;
3888 limit_in_pages = (ldt_info.flags >> 4) & 1;
3889 seg_not_present = (ldt_info.flags >> 5) & 1;
3890 useable = (ldt_info.flags >> 6) & 1;
3891 #ifdef TARGET_ABI32
3892 lm = 0;
3893 #else
3894 lm = (ldt_info.flags >> 7) & 1;
3895 #endif
3896 if (contents == 3) {
3897 if (oldmode)
3898 return -TARGET_EINVAL;
3899 if (seg_not_present == 0)
3900 return -TARGET_EINVAL;
3901 }
3902 /* allocate the LDT */
3903 if (!ldt_table) {
3904 env->ldt.base = target_mmap(0,
3905 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3906 PROT_READ|PROT_WRITE,
3907 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3908 if (env->ldt.base == -1)
3909 return -TARGET_ENOMEM;
3910 memset(g2h(env->ldt.base), 0,
3911 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3912 env->ldt.limit = 0xffff;
3913 ldt_table = g2h(env->ldt.base);
3914 }
3915
3916 /* NOTE: same code as Linux kernel */
3917 /* Allow LDTs to be cleared by the user. */
3918 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3919 if (oldmode ||
3920 (contents == 0 &&
3921 read_exec_only == 1 &&
3922 seg_32bit == 0 &&
3923 limit_in_pages == 0 &&
3924 seg_not_present == 1 &&
3925 useable == 0 )) {
3926 entry_1 = 0;
3927 entry_2 = 0;
3928 goto install;
3929 }
3930 }
3931
3932 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3933 (ldt_info.limit & 0x0ffff);
3934 entry_2 = (ldt_info.base_addr & 0xff000000) |
3935 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3936 (ldt_info.limit & 0xf0000) |
3937 ((read_exec_only ^ 1) << 9) |
3938 (contents << 10) |
3939 ((seg_not_present ^ 1) << 15) |
3940 (seg_32bit << 22) |
3941 (limit_in_pages << 23) |
3942 (lm << 21) |
3943 0x7000;
3944 if (!oldmode)
3945 entry_2 |= (useable << 20);
3946
3947 /* Install the new entry ... */
3948 install:
3949 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3950 lp[0] = tswap32(entry_1);
3951 lp[1] = tswap32(entry_2);
3952 return 0;
3953 }
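
/*
 * Illustrative note on the descriptor packing above (this is the standard
 * IA-32 segment descriptor layout, not something QEMU-specific): entry_1 is
 * the low dword -- limit[15:0] in bits 0-15 and base[15:0] in bits 16-31 --
 * and entry_2 is the high dword -- base[23:16], access rights, limit[19:16],
 * the flag bits and base[31:24].  The constant 0x7000 sets S=1 (code/data
 * segment) and DPL=3, i.e. a user-mode descriptor, mirroring the LDT entry
 * construction in the Linux kernel that the NOTE above refers to.
 */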
3954
3955 /* specific and weird i386 syscalls */
3956 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3957 unsigned long bytecount)
3958 {
3959 abi_long ret;
3960
3961 switch (func) {
3962 case 0:
3963 ret = read_ldt(ptr, bytecount);
3964 break;
3965 case 1:
3966 ret = write_ldt(env, ptr, bytecount, 1);
3967 break;
3968 case 0x11:
3969 ret = write_ldt(env, ptr, bytecount, 0);
3970 break;
3971 default:
3972 ret = -TARGET_ENOSYS;
3973 break;
3974 }
3975 return ret;
3976 }
3977
3978 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3979 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3980 {
3981 uint64_t *gdt_table = g2h(env->gdt.base);
3982 struct target_modify_ldt_ldt_s ldt_info;
3983 struct target_modify_ldt_ldt_s *target_ldt_info;
3984 int seg_32bit, contents, read_exec_only, limit_in_pages;
3985 int seg_not_present, useable, lm;
3986 uint32_t *lp, entry_1, entry_2;
3987 int i;
3988
3989 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3990 if (!target_ldt_info)
3991 return -TARGET_EFAULT;
3992 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3993 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3994 ldt_info.limit = tswap32(target_ldt_info->limit);
3995 ldt_info.flags = tswap32(target_ldt_info->flags);
3996 if (ldt_info.entry_number == -1) {
3997 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3998 if (gdt_table[i] == 0) {
3999 ldt_info.entry_number = i;
4000 target_ldt_info->entry_number = tswap32(i);
4001 break;
4002 }
4003 }
4004 }
4005 unlock_user_struct(target_ldt_info, ptr, 1);
4006
4007 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4008 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4009 return -TARGET_EINVAL;
4010 seg_32bit = ldt_info.flags & 1;
4011 contents = (ldt_info.flags >> 1) & 3;
4012 read_exec_only = (ldt_info.flags >> 3) & 1;
4013 limit_in_pages = (ldt_info.flags >> 4) & 1;
4014 seg_not_present = (ldt_info.flags >> 5) & 1;
4015 useable = (ldt_info.flags >> 6) & 1;
4016 #ifdef TARGET_ABI32
4017 lm = 0;
4018 #else
4019 lm = (ldt_info.flags >> 7) & 1;
4020 #endif
4021
4022 if (contents == 3) {
4023 if (seg_not_present == 0)
4024 return -TARGET_EINVAL;
4025 }
4026
4027 /* NOTE: same code as Linux kernel */
4028 /* Allow LDTs to be cleared by the user. */
4029 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4030 if ((contents == 0 &&
4031 read_exec_only == 1 &&
4032 seg_32bit == 0 &&
4033 limit_in_pages == 0 &&
4034 seg_not_present == 1 &&
4035 useable == 0 )) {
4036 entry_1 = 0;
4037 entry_2 = 0;
4038 goto install;
4039 }
4040 }
4041
4042 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4043 (ldt_info.limit & 0x0ffff);
4044 entry_2 = (ldt_info.base_addr & 0xff000000) |
4045 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4046 (ldt_info.limit & 0xf0000) |
4047 ((read_exec_only ^ 1) << 9) |
4048 (contents << 10) |
4049 ((seg_not_present ^ 1) << 15) |
4050 (seg_32bit << 22) |
4051 (limit_in_pages << 23) |
4052 (useable << 20) |
4053 (lm << 21) |
4054 0x7000;
4055
4056 /* Install the new entry ... */
4057 install:
4058 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4059 lp[0] = tswap32(entry_1);
4060 lp[1] = tswap32(entry_2);
4061 return 0;
4062 }
4063
4064 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4065 {
4066 struct target_modify_ldt_ldt_s *target_ldt_info;
4067 uint64_t *gdt_table = g2h(env->gdt.base);
4068 uint32_t base_addr, limit, flags;
4069 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4070 int seg_not_present, useable, lm;
4071 uint32_t *lp, entry_1, entry_2;
4072
4073 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4074 if (!target_ldt_info)
4075 return -TARGET_EFAULT;
4076 idx = tswap32(target_ldt_info->entry_number);
4077 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4078 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4079 unlock_user_struct(target_ldt_info, ptr, 1);
4080 return -TARGET_EINVAL;
4081 }
4082 lp = (uint32_t *)(gdt_table + idx);
4083 entry_1 = tswap32(lp[0]);
4084 entry_2 = tswap32(lp[1]);
4085
4086 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4087 contents = (entry_2 >> 10) & 3;
4088 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4089 seg_32bit = (entry_2 >> 22) & 1;
4090 limit_in_pages = (entry_2 >> 23) & 1;
4091 useable = (entry_2 >> 20) & 1;
4092 #ifdef TARGET_ABI32
4093 lm = 0;
4094 #else
4095 lm = (entry_2 >> 21) & 1;
4096 #endif
4097 flags = (seg_32bit << 0) | (contents << 1) |
4098 (read_exec_only << 3) | (limit_in_pages << 4) |
4099 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4100 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4101 base_addr = (entry_1 >> 16) |
4102 (entry_2 & 0xff000000) |
4103 ((entry_2 & 0xff) << 16);
4104 target_ldt_info->base_addr = tswapal(base_addr);
4105 target_ldt_info->limit = tswap32(limit);
4106 target_ldt_info->flags = tswap32(flags);
4107 unlock_user_struct(target_ldt_info, ptr, 1);
4108 return 0;
4109 }
4110 #endif /* TARGET_I386 && TARGET_ABI32 */
4111
4112 #ifndef TARGET_ABI32
4113 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4114 {
4115 abi_long ret = 0;
4116 abi_ulong val;
4117 int idx;
4118
4119 switch(code) {
4120 case TARGET_ARCH_SET_GS:
4121 case TARGET_ARCH_SET_FS:
4122 if (code == TARGET_ARCH_SET_GS)
4123 idx = R_GS;
4124 else
4125 idx = R_FS;
4126 cpu_x86_load_seg(env, idx, 0);
4127 env->segs[idx].base = addr;
4128 break;
4129 case TARGET_ARCH_GET_GS:
4130 case TARGET_ARCH_GET_FS:
4131 if (code == TARGET_ARCH_GET_GS)
4132 idx = R_GS;
4133 else
4134 idx = R_FS;
4135 val = env->segs[idx].base;
4136 if (put_user(val, addr, abi_ulong))
4137 ret = -TARGET_EFAULT;
4138 break;
4139 default:
4140 ret = -TARGET_EINVAL;
4141 break;
4142 }
4143 return ret;
4144 }
4145 #endif
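
/*
 * Note: TARGET_ARCH_SET_FS/TARGET_ARCH_SET_GS change the emulated FS/GS
 * segment base, which is how 64-bit guest code reaches its thread-local
 * storage, so arch_prctl() is effectively the x86-64 counterpart of
 * set_thread_area() above.  Only the base address is emulated here; the
 * selector itself is first loaded with a null value.
 */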
4146
4147 #endif /* defined(TARGET_I386) */
4148
4149 #define NEW_STACK_SIZE 0x40000
4150
4151 #if defined(CONFIG_USE_NPTL)
4152
4153 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4154 typedef struct {
4155 CPUArchState *env;
4156 pthread_mutex_t mutex;
4157 pthread_cond_t cond;
4158 pthread_t thread;
4159 uint32_t tid;
4160 abi_ulong child_tidptr;
4161 abi_ulong parent_tidptr;
4162 sigset_t sigmask;
4163 } new_thread_info;
4164
4165 static void *clone_func(void *arg)
4166 {
4167 new_thread_info *info = arg;
4168 CPUArchState *env;
4169 CPUState *cpu;
4170 TaskState *ts;
4171
4172 env = info->env;
4173 cpu = ENV_GET_CPU(env);
4174 thread_env = env;
4175 ts = (TaskState *)thread_env->opaque;
4176 info->tid = gettid();
4177 cpu->host_tid = info->tid;
4178 task_settid(ts);
4179 if (info->child_tidptr)
4180 put_user_u32(info->tid, info->child_tidptr);
4181 if (info->parent_tidptr)
4182 put_user_u32(info->tid, info->parent_tidptr);
4183 /* Enable signals. */
4184 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4185 /* Signal to the parent that we're ready. */
4186 pthread_mutex_lock(&info->mutex);
4187 pthread_cond_broadcast(&info->cond);
4188 pthread_mutex_unlock(&info->mutex);
4189 /* Wait until the parent has finished initializing the TLS state. */
4190 pthread_mutex_lock(&clone_lock);
4191 pthread_mutex_unlock(&clone_lock);
4192 cpu_loop(env);
4193 /* never exits */
4194 return NULL;
4195 }
4196 #else
4197
4198 static int clone_func(void *arg)
4199 {
4200 CPUArchState *env = arg;
4201 cpu_loop(env);
4202 /* never exits */
4203 return 0;
4204 }
4205 #endif
4206
4207 /* do_fork() must return host values and target errnos (unlike most
4208 do_*() functions). */
4209 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4210 abi_ulong parent_tidptr, target_ulong newtls,
4211 abi_ulong child_tidptr)
4212 {
4213 int ret;
4214 TaskState *ts;
4215 CPUArchState *new_env;
4216 #if defined(CONFIG_USE_NPTL)
4217 unsigned int nptl_flags;
4218 sigset_t sigmask;
4219 #else
4220 uint8_t *new_stack;
4221 #endif
4222
4223 /* Emulate vfork() with fork() */
4224 if (flags & CLONE_VFORK)
4225 flags &= ~(CLONE_VFORK | CLONE_VM);
4226
4227 if (flags & CLONE_VM) {
4228 TaskState *parent_ts = (TaskState *)env->opaque;
4229 #if defined(CONFIG_USE_NPTL)
4230 new_thread_info info;
4231 pthread_attr_t attr;
4232 #endif
4233 ts = g_malloc0(sizeof(TaskState));
4234 init_task_state(ts);
4235 /* we create a new CPU instance. */
4236 new_env = cpu_copy(env);
4237 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4238 cpu_reset(ENV_GET_CPU(new_env));
4239 #endif
4240 /* Init regs that differ from the parent. */
4241 cpu_clone_regs(new_env, newsp);
4242 new_env->opaque = ts;
4243 ts->bprm = parent_ts->bprm;
4244 ts->info = parent_ts->info;
4245 #if defined(CONFIG_USE_NPTL)
4246 nptl_flags = flags;
4247 flags &= ~CLONE_NPTL_FLAGS2;
4248
4249 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4250 ts->child_tidptr = child_tidptr;
4251 }
4252
4253 if (nptl_flags & CLONE_SETTLS)
4254 cpu_set_tls (new_env, newtls);
4255
4256 /* Grab a mutex so that thread setup appears atomic. */
4257 pthread_mutex_lock(&clone_lock);
4258
4259 memset(&info, 0, sizeof(info));
4260 pthread_mutex_init(&info.mutex, NULL);
4261 pthread_mutex_lock(&info.mutex);
4262 pthread_cond_init(&info.cond, NULL);
4263 info.env = new_env;
4264 if (nptl_flags & CLONE_CHILD_SETTID)
4265 info.child_tidptr = child_tidptr;
4266 if (nptl_flags & CLONE_PARENT_SETTID)
4267 info.parent_tidptr = parent_tidptr;
4268
4269 ret = pthread_attr_init(&attr);
4270 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4271 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4272 /* It is not safe to deliver signals until the child has finished
4273 initializing, so temporarily block all signals. */
4274 sigfillset(&sigmask);
4275 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4276
4277 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4278 /* TODO: Free new CPU state if thread creation failed. */
4279
4280 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4281 pthread_attr_destroy(&attr);
4282 if (ret == 0) {
4283 /* Wait for the child to initialize. */
4284 pthread_cond_wait(&info.cond, &info.mutex);
4285 ret = info.tid;
4286 if (flags & CLONE_PARENT_SETTID)
4287 put_user_u32(ret, parent_tidptr);
4288 } else {
4289 ret = -1;
4290 }
4291 pthread_mutex_unlock(&info.mutex);
4292 pthread_cond_destroy(&info.cond);
4293 pthread_mutex_destroy(&info.mutex);
4294 pthread_mutex_unlock(&clone_lock);
4295 #else
4296 if (flags & CLONE_NPTL_FLAGS2)
4297 return -EINVAL;
4298 /* This is probably going to die very quickly, but do it anyway. */
4299 new_stack = g_malloc0 (NEW_STACK_SIZE);
4300 #ifdef __ia64__
4301 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4302 #else
4303 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4304 #endif
4305 #endif
4306 } else {
4307 /* if no CLONE_VM, we consider it is a fork */
4308 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4309 return -EINVAL;
4310 fork_start();
4311 ret = fork();
4312 if (ret == 0) {
4313 /* Child Process. */
4314 cpu_clone_regs(env, newsp);
4315 fork_end(1);
4316 #if defined(CONFIG_USE_NPTL)
4317 /* There is a race condition here. The parent process could
4318 theoretically read the TID in the child process before the child
4319 tid is set. This would require using either ptrace
4320 (not implemented) or having *_tidptr point at a shared memory
4321 mapping. We can't repeat the spinlock hack used above because
4322 the child process gets its own copy of the lock. */
4323 if (flags & CLONE_CHILD_SETTID)
4324 put_user_u32(gettid(), child_tidptr);
4325 if (flags & CLONE_PARENT_SETTID)
4326 put_user_u32(gettid(), parent_tidptr);
4327 ts = (TaskState *)env->opaque;
4328 if (flags & CLONE_SETTLS)
4329 cpu_set_tls (env, newtls);
4330 if (flags & CLONE_CHILD_CLEARTID)
4331 ts->child_tidptr = child_tidptr;
4332 #endif
4333 } else {
4334 fork_end(0);
4335 }
4336 }
4337 return ret;
4338 }
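
/*
 * Rough usage sketch (not built; argument names are illustrative and the
 * flag set shown is merely what glibc's pthread_create() typically passes):
 *
 *   flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *           CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *           CLONE_CHILD_CLEARTID;
 *   ret = do_fork(cpu_env, flags, child_sp, ptid_addr, tls_addr, ctid_addr);
 *
 * With CLONE_VM set, do_fork() creates a host pthread sharing this process's
 * address space; without it the request is treated as a plain fork().
 */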
4339
4340 /* warning: doesn't handle Linux-specific flags... */
4341 static int target_to_host_fcntl_cmd(int cmd)
4342 {
4343 switch(cmd) {
4344 case TARGET_F_DUPFD:
4345 case TARGET_F_GETFD:
4346 case TARGET_F_SETFD:
4347 case TARGET_F_GETFL:
4348 case TARGET_F_SETFL:
4349 return cmd;
4350 case TARGET_F_GETLK:
4351 return F_GETLK;
4352 case TARGET_F_SETLK:
4353 return F_SETLK;
4354 case TARGET_F_SETLKW:
4355 return F_SETLKW;
4356 case TARGET_F_GETOWN:
4357 return F_GETOWN;
4358 case TARGET_F_SETOWN:
4359 return F_SETOWN;
4360 case TARGET_F_GETSIG:
4361 return F_GETSIG;
4362 case TARGET_F_SETSIG:
4363 return F_SETSIG;
4364 #if TARGET_ABI_BITS == 32
4365 case TARGET_F_GETLK64:
4366 return F_GETLK64;
4367 case TARGET_F_SETLK64:
4368 return F_SETLK64;
4369 case TARGET_F_SETLKW64:
4370 return F_SETLKW64;
4371 #endif
4372 case TARGET_F_SETLEASE:
4373 return F_SETLEASE;
4374 case TARGET_F_GETLEASE:
4375 return F_GETLEASE;
4376 #ifdef F_DUPFD_CLOEXEC
4377 case TARGET_F_DUPFD_CLOEXEC:
4378 return F_DUPFD_CLOEXEC;
4379 #endif
4380 case TARGET_F_NOTIFY:
4381 return F_NOTIFY;
4382 default:
4383 return -TARGET_EINVAL;
4384 }
4385 return -TARGET_EINVAL;
4386 }
4387
4388 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4389 static const bitmask_transtbl flock_tbl[] = {
4390 TRANSTBL_CONVERT(F_RDLCK),
4391 TRANSTBL_CONVERT(F_WRLCK),
4392 TRANSTBL_CONVERT(F_UNLCK),
4393 TRANSTBL_CONVERT(F_EXLCK),
4394 TRANSTBL_CONVERT(F_SHLCK),
4395 { 0, 0, 0, 0 }
4396 };
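
/*
 * The -1 masks produced by TRANSTBL_CONVERT turn the bitmask lookup into an
 * exact-value match: l_type holds a small enumeration (F_RDLCK, F_WRLCK,
 * F_UNLCK, ...), not a set of flag bits, so at most one row can apply.
 */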
4397
4398 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4399 {
4400 struct flock fl;
4401 struct target_flock *target_fl;
4402 struct flock64 fl64;
4403 struct target_flock64 *target_fl64;
4404 abi_long ret;
4405 int host_cmd = target_to_host_fcntl_cmd(cmd);
4406
4407 if (host_cmd == -TARGET_EINVAL)
4408 return host_cmd;
4409
4410 switch(cmd) {
4411 case TARGET_F_GETLK:
4412 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4413 return -TARGET_EFAULT;
4414 fl.l_type =
4415 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4416 fl.l_whence = tswap16(target_fl->l_whence);
4417 fl.l_start = tswapal(target_fl->l_start);
4418 fl.l_len = tswapal(target_fl->l_len);
4419 fl.l_pid = tswap32(target_fl->l_pid);
4420 unlock_user_struct(target_fl, arg, 0);
4421 ret = get_errno(fcntl(fd, host_cmd, &fl));
4422 if (ret == 0) {
4423 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4424 return -TARGET_EFAULT;
4425 target_fl->l_type =
4426 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4427 target_fl->l_whence = tswap16(fl.l_whence);
4428 target_fl->l_start = tswapal(fl.l_start);
4429 target_fl->l_len = tswapal(fl.l_len);
4430 target_fl->l_pid = tswap32(fl.l_pid);
4431 unlock_user_struct(target_fl, arg, 1);
4432 }
4433 break;
4434
4435 case TARGET_F_SETLK:
4436 case TARGET_F_SETLKW:
4437 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4438 return -TARGET_EFAULT;
4439 fl.l_type =
4440 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4441 fl.l_whence = tswap16(target_fl->l_whence);
4442 fl.l_start = tswapal(target_fl->l_start);
4443 fl.l_len = tswapal(target_fl->l_len);
4444 fl.l_pid = tswap32(target_fl->l_pid);
4445 unlock_user_struct(target_fl, arg, 0);
4446 ret = get_errno(fcntl(fd, host_cmd, &fl));
4447 break;
4448
4449 case TARGET_F_GETLK64:
4450 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4451 return -TARGET_EFAULT;
4452 fl64.l_type =
4453 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4454 fl64.l_whence = tswap16(target_fl64->l_whence);
4455 fl64.l_start = tswap64(target_fl64->l_start);
4456 fl64.l_len = tswap64(target_fl64->l_len);
4457 fl64.l_pid = tswap32(target_fl64->l_pid);
4458 unlock_user_struct(target_fl64, arg, 0);
4459 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4460 if (ret == 0) {
4461 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4462 return -TARGET_EFAULT;
4463 target_fl64->l_type =
4464 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4465 target_fl64->l_whence = tswap16(fl64.l_whence);
4466 target_fl64->l_start = tswap64(fl64.l_start);
4467 target_fl64->l_len = tswap64(fl64.l_len);
4468 target_fl64->l_pid = tswap32(fl64.l_pid);
4469 unlock_user_struct(target_fl64, arg, 1);
4470 }
4471 break;
4472 case TARGET_F_SETLK64:
4473 case TARGET_F_SETLKW64:
4474 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4475 return -TARGET_EFAULT;
4476 fl64.l_type =
4477 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4478 fl64.l_whence = tswap16(target_fl64->l_whence);
4479 fl64.l_start = tswap64(target_fl64->l_start);
4480 fl64.l_len = tswap64(target_fl64->l_len);
4481 fl64.l_pid = tswap32(target_fl64->l_pid);
4482 unlock_user_struct(target_fl64, arg, 0);
4483 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4484 break;
4485
4486 case TARGET_F_GETFL:
4487 ret = get_errno(fcntl(fd, host_cmd, arg));
4488 if (ret >= 0) {
4489 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4490 }
4491 break;
4492
4493 case TARGET_F_SETFL:
4494 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4495 break;
4496
4497 case TARGET_F_SETOWN:
4498 case TARGET_F_GETOWN:
4499 case TARGET_F_SETSIG:
4500 case TARGET_F_GETSIG:
4501 case TARGET_F_SETLEASE:
4502 case TARGET_F_GETLEASE:
4503 ret = get_errno(fcntl(fd, host_cmd, arg));
4504 break;
4505
4506 default:
4507 ret = get_errno(fcntl(fd, cmd, arg));
4508 break;
4509 }
4510 return ret;
4511 }
4512
4513 #ifdef USE_UID16
4514
4515 static inline int high2lowuid(int uid)
4516 {
4517 if (uid > 65535)
4518 return 65534;
4519 else
4520 return uid;
4521 }
4522
4523 static inline int high2lowgid(int gid)
4524 {
4525 if (gid > 65535)
4526 return 65534;
4527 else
4528 return gid;
4529 }
4530
4531 static inline int low2highuid(int uid)
4532 {
4533 if ((int16_t)uid == -1)
4534 return -1;
4535 else
4536 return uid;
4537 }
4538
4539 static inline int low2highgid(int gid)
4540 {
4541 if ((int16_t)gid == -1)
4542 return -1;
4543 else
4544 return gid;
4545 }
4546 static inline int tswapid(int id)
4547 {
4548 return tswap16(id);
4549 }
4550 #else /* !USE_UID16 */
4551 static inline int high2lowuid(int uid)
4552 {
4553 return uid;
4554 }
4555 static inline int high2lowgid(int gid)
4556 {
4557 return gid;
4558 }
4559 static inline int low2highuid(int uid)
4560 {
4561 return uid;
4562 }
4563 static inline int low2highgid(int gid)
4564 {
4565 return gid;
4566 }
4567 static inline int tswapid(int id)
4568 {
4569 return tswap32(id);
4570 }
4571 #endif /* USE_UID16 */
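
/*
 * Note: 65534 is the traditional "overflow" uid/gid the kernel itself
 * reports through the legacy 16-bit *id16 syscalls when a 32-bit id does not
 * fit, so high2lowuid()/high2lowgid() mirror that behaviour.  -1 is passed
 * through unchanged because it means "do not change this id" to setreuid()
 * and friends.
 */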
4572
4573 void syscall_init(void)
4574 {
4575 IOCTLEntry *ie;
4576 const argtype *arg_type;
4577 int size;
4578 int i;
4579
4580 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4581 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4582 #include "syscall_types.h"
4583 #undef STRUCT
4584 #undef STRUCT_SPECIAL
4585
4586 /* Build target_to_host_errno_table[] table from
4587 * host_to_target_errno_table[]. */
4588 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4589 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4590 }
4591
4592 /* We patch the ioctl size if necessary. We rely on the fact that
4593 no ioctl has all the bits set to '1' in its size field. */
4594 ie = ioctl_entries;
4595 while (ie->target_cmd != 0) {
4596 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4597 TARGET_IOC_SIZEMASK) {
4598 arg_type = ie->arg_type;
4599 if (arg_type[0] != TYPE_PTR) {
4600 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4601 ie->target_cmd);
4602 exit(1);
4603 }
4604 arg_type++;
4605 size = thunk_type_size(arg_type, 0);
4606 ie->target_cmd = (ie->target_cmd &
4607 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4608 (size << TARGET_IOC_SIZESHIFT);
4609 }
4610
4611 /* automatic consistency check if same arch */
4612 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4613 (defined(__x86_64__) && defined(TARGET_X86_64))
4614 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4615 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4616 ie->name, ie->target_cmd, ie->host_cmd);
4617 }
4618 #endif
4619 ie++;
4620 }
4621 }
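
/*
 * The size-patching loop above exists because some target ioctl numbers are
 * declared with a placeholder size (all size bits set): the real size of the
 * argument structure is only known to the thunk type description, so it is
 * computed once at start-up and folded back into target_cmd.  The
 * host/target consistency check is compiled in only when both sides share
 * the same ABI, where the two command numbers must be identical.
 */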
4622
4623 #if TARGET_ABI_BITS == 32
4624 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4625 {
4626 #ifdef TARGET_WORDS_BIGENDIAN
4627 return ((uint64_t)word0 << 32) | word1;
4628 #else
4629 return ((uint64_t)word1 << 32) | word0;
4630 #endif
4631 }
4632 #else /* TARGET_ABI_BITS == 32 */
4633 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4634 {
4635 return word0;
4636 }
4637 #endif /* TARGET_ABI_BITS != 32 */
4638
4639 #ifdef TARGET_NR_truncate64
4640 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4641 abi_long arg2,
4642 abi_long arg3,
4643 abi_long arg4)
4644 {
4645 if (regpairs_aligned(cpu_env)) {
4646 arg2 = arg3;
4647 arg3 = arg4;
4648 }
4649 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4650 }
4651 #endif
4652
4653 #ifdef TARGET_NR_ftruncate64
4654 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4655 abi_long arg2,
4656 abi_long arg3,
4657 abi_long arg4)
4658 {
4659 if (regpairs_aligned(cpu_env)) {
4660 arg2 = arg3;
4661 arg3 = arg4;
4662 }
4663 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4664 }
4665 #endif
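
/*
 * Sketch of the 64-bit offset handling above, assuming a 32-bit target ABI:
 * the guest passes the offset as two 32-bit register halves whose order
 * depends on target endianness, and some ABIs (ARM EABI, for instance)
 * additionally require 64-bit arguments to start in an even-numbered
 * register, which regpairs_aligned() detects so the argument slots can be
 * shifted by one.  For example, on a little-endian 32-bit target
 * target_offset64(0x00000000, 0x00000001) yields 0x0000000100000000.
 */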
4666
4667 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4668 abi_ulong target_addr)
4669 {
4670 struct target_timespec *target_ts;
4671
4672 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4673 return -TARGET_EFAULT;
4674 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4675 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4676 unlock_user_struct(target_ts, target_addr, 0);
4677 return 0;
4678 }
4679
4680 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4681 struct timespec *host_ts)
4682 {
4683 struct target_timespec *target_ts;
4684
4685 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4686 return -TARGET_EFAULT;
4687 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4688 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4689 unlock_user_struct(target_ts, target_addr, 1);
4690 return 0;
4691 }
4692
4693 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4694 static inline abi_long host_to_target_stat64(void *cpu_env,
4695 abi_ulong target_addr,
4696 struct stat *host_st)
4697 {
4698 #ifdef TARGET_ARM
4699 if (((CPUARMState *)cpu_env)->eabi) {
4700 struct target_eabi_stat64 *target_st;
4701
4702 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4703 return -TARGET_EFAULT;
4704 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4705 __put_user(host_st->st_dev, &target_st->st_dev);
4706 __put_user(host_st->st_ino, &target_st->st_ino);
4707 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4708 __put_user(host_st->st_ino, &target_st->__st_ino);
4709 #endif
4710 __put_user(host_st->st_mode, &target_st->st_mode);
4711 __put_user(host_st->st_nlink, &target_st->st_nlink);
4712 __put_user(host_st->st_uid, &target_st->st_uid);
4713 __put_user(host_st->st_gid, &target_st->st_gid);
4714 __put_user(host_st->st_rdev, &target_st->st_rdev);
4715 __put_user(host_st->st_size, &target_st->st_size);
4716 __put_user(host_st->st_blksize, &target_st->st_blksize);
4717 __put_user(host_st->st_blocks, &target_st->st_blocks);
4718 __put_user(host_st->st_atime, &target_st->target_st_atime);
4719 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4720 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4721 unlock_user_struct(target_st, target_addr, 1);
4722 } else
4723 #endif
4724 {
4725 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4726 struct target_stat *target_st;
4727 #else
4728 struct target_stat64 *target_st;
4729 #endif
4730
4731 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4732 return -TARGET_EFAULT;
4733 memset(target_st, 0, sizeof(*target_st));
4734 __put_user(host_st->st_dev, &target_st->st_dev);
4735 __put_user(host_st->st_ino, &target_st->st_ino);
4736 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4737 __put_user(host_st->st_ino, &target_st->__st_ino);
4738 #endif
4739 __put_user(host_st->st_mode, &target_st->st_mode);
4740 __put_user(host_st->st_nlink, &target_st->st_nlink);
4741 __put_user(host_st->st_uid, &target_st->st_uid);
4742 __put_user(host_st->st_gid, &target_st->st_gid);
4743 __put_user(host_st->st_rdev, &target_st->st_rdev);
4744 /* XXX: better use of kernel struct */
4745 __put_user(host_st->st_size, &target_st->st_size);
4746 __put_user(host_st->st_blksize, &target_st->st_blksize);
4747 __put_user(host_st->st_blocks, &target_st->st_blocks);
4748 __put_user(host_st->st_atime, &target_st->target_st_atime);
4749 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4750 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4751 unlock_user_struct(target_st, target_addr, 1);
4752 }
4753
4754 return 0;
4755 }
4756 #endif
4757
4758 #if defined(CONFIG_USE_NPTL)
4759 /* ??? Using host futex calls even when target atomic operations
4760 are not really atomic probably breaks things. However, implementing
4761 futexes locally would make futexes shared between multiple processes
4762 tricky. Then again, they're probably useless anyway because guest
4763 atomic operations won't work either. */
4764 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4765 target_ulong uaddr2, int val3)
4766 {
4767 struct timespec ts, *pts;
4768 int base_op;
4769
4770 /* ??? We assume FUTEX_* constants are the same on both host
4771 and target. */
4772 #ifdef FUTEX_CMD_MASK
4773 base_op = op & FUTEX_CMD_MASK;
4774 #else
4775 base_op = op;
4776 #endif
4777 switch (base_op) {
4778 case FUTEX_WAIT:
4779 case FUTEX_WAIT_BITSET:
4780 if (timeout) {
4781 pts = &ts;
4782 target_to_host_timespec(pts, timeout);
4783 } else {
4784 pts = NULL;
4785 }
4786 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4787 pts, NULL, val3));
4788 case FUTEX_WAKE:
4789 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4790 case FUTEX_FD:
4791 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4792 case FUTEX_REQUEUE:
4793 case FUTEX_CMP_REQUEUE:
4794 case FUTEX_WAKE_OP:
4795 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4796 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4797 But the prototype takes a `struct timespec *'; insert casts
4798 to satisfy the compiler. We do not need to tswap TIMEOUT
4799 since it's not compared to guest memory. */
4800 pts = (struct timespec *)(uintptr_t) timeout;
4801 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4802 g2h(uaddr2),
4803 (base_op == FUTEX_CMP_REQUEUE
4804 ? tswap32(val3)
4805 : val3)));
4806 default:
4807 return -TARGET_ENOSYS;
4808 }
4809 }
4810 #endif
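
/*
 * Why val is byte-swapped only for the WAIT operations above: for FUTEX_WAIT
 * the kernel compares the 32-bit word at uaddr against val, and that word
 * lives in guest memory in target byte order, so val has to be converted to
 * match.  For FUTEX_WAKE/FUTEX_FD val is simply a count of waiters and is
 * used as-is.  Rough guest-side view (sketch, variable names illustrative):
 *
 *   futex(&lock_word, FUTEX_WAIT, expected_value, &timeout, NULL, 0);
 *
 * arrives here as sys_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3).
 */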
4811
4812 /* Map host to target signal numbers for the wait family of syscalls.
4813 Assume all other status bits are the same. */
4814 int host_to_target_waitstatus(int status)
4815 {
4816 if (WIFSIGNALED(status)) {
4817 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4818 }
4819 if (WIFSTOPPED(status)) {
4820 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4821 | (status & 0xff);
4822 }
4823 return status;
4824 }
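
/*
 * Reminder of the wait status layout this relies on: for a signalled child
 * the low 7 bits hold the terminating signal (with the core-dump flag in bit
 * 7), and for a stopped child the low byte is 0x7f with the stop signal in
 * bits 8-15.  Only the signal numbers differ between host and target, so
 * everything else is passed through untouched.
 */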
4825
4826 int get_osversion(void)
4827 {
4828 static int osversion;
4829 struct new_utsname buf;
4830 const char *s;
4831 int i, n, tmp;
4832 if (osversion)
4833 return osversion;
4834 if (qemu_uname_release && *qemu_uname_release) {
4835 s = qemu_uname_release;
4836 } else {
4837 if (sys_uname(&buf))
4838 return 0;
4839 s = buf.release;
4840 }
4841 tmp = 0;
4842 for (i = 0; i < 3; i++) {
4843 n = 0;
4844 while (*s >= '0' && *s <= '9') {
4845 n *= 10;
4846 n += *s - '0';
4847 s++;
4848 }
4849 tmp = (tmp << 8) + n;
4850 if (*s == '.')
4851 s++;
4852 }
4853 osversion = tmp;
4854 return osversion;
4855 }
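
/*
 * The result is packed KERNEL_VERSION-style, eight bits per component; e.g.
 * a release string of "2.6.32-foo" becomes (((2 << 8) + 6) << 8) + 32 =
 * 0x020620.  Parsing of each component stops at the first non-digit, so any
 * "-rcN" or distribution suffix is ignored.
 */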
4856
4857
4858 static int open_self_maps(void *cpu_env, int fd)
4859 {
4860 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4861 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4862 #endif
4863 FILE *fp;
4864 char *line = NULL;
4865 size_t len = 0;
4866 ssize_t read;
4867
4868 fp = fopen("/proc/self/maps", "r");
4869 if (fp == NULL) {
4870 return -EACCES;
4871 }
4872
4873 while ((read = getline(&line, &len, fp)) != -1) {
4874 int fields, dev_maj, dev_min, inode;
4875 uint64_t min, max, offset;
4876 char flag_r, flag_w, flag_x, flag_p;
4877 char path[512] = "";
4878 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4879 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4880 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4881
4882 if ((fields < 10) || (fields > 11)) {
4883 continue;
4884 }
4885 if (!strncmp(path, "[stack]", 7)) {
4886 continue;
4887 }
4888 if (h2g_valid(min) && h2g_valid(max)) {
4889 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4890 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
4891 h2g(min), h2g(max), flag_r, flag_w,
4892 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4893 path[0] ? " " : "", path);
4894 }
4895 }
4896
4897 free(line);
4898 fclose(fp);
4899
4900 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4901 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4902 (unsigned long long)ts->info->stack_limit,
4903 (unsigned long long)(ts->info->start_stack +
4904 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
4905 (unsigned long long)0);
4906 #endif
4907
4908 return 0;
4909 }
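
/*
 * The real /proc/self/maps describes the host address space, so each line is
 * rewritten through h2g() so the guest sees its own virtual addresses, and
 * mappings that lie outside the guest address range are dropped.  The host's
 * "[stack]" line is filtered out above and, for the targets that track this
 * information, replaced by a synthetic line built from the guest's actual
 * stack limits.
 */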
4910
4911 static int open_self_stat(void *cpu_env, int fd)
4912 {
4913 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4914 abi_ulong start_stack = ts->info->start_stack;
4915 int i;
4916
4917 for (i = 0; i < 44; i++) {
4918 char buf[128];
4919 int len;
4920 uint64_t val = 0;
4921
4922 if (i == 0) {
4923 /* pid */
4924 val = getpid();
4925 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4926 } else if (i == 1) {
4927 /* app name */
4928 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4929 } else if (i == 27) {
4930 /* stack bottom */
4931 val = start_stack;
4932 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4933 } else {
4934 /* for the rest, there is MasterCard */
4935 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
4936 }
4937
4938 len = strlen(buf);
4939 if (write(fd, buf, len) != len) {
4940 return -1;
4941 }
4942 }
4943
4944 return 0;
4945 }
4946
4947 static int open_self_auxv(void *cpu_env, int fd)
4948 {
4949 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4950 abi_ulong auxv = ts->info->saved_auxv;
4951 abi_ulong len = ts->info->auxv_len;
4952 char *ptr;
4953
4954 /*
4955 * The auxiliary vector is stored on the target process stack.
4956 * Read the whole auxv vector and copy it to the file.
4957 */
4958 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4959 if (ptr != NULL) {
4960 while (len > 0) {
4961 ssize_t r;
4962 r = write(fd, ptr, len);
4963 if (r <= 0) {
4964 break;
4965 }
4966 len -= r;
4967 ptr += r;
4968 }
4969 lseek(fd, 0, SEEK_SET);
4970 unlock_user(ptr, auxv, len);
4971 }
4972
4973 return 0;
4974 }
4975
4976 static int is_proc_myself(const char *filename, const char *entry)
4977 {
4978 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
4979 filename += strlen("/proc/");
4980 if (!strncmp(filename, "self/", strlen("self/"))) {
4981 filename += strlen("self/");
4982 } else if (*filename >= '1' && *filename <= '9') {
4983 char myself[80];
4984 snprintf(myself, sizeof(myself), "%d/", getpid());
4985 if (!strncmp(filename, myself, strlen(myself))) {
4986 filename += strlen(myself);
4987 } else {
4988 return 0;
4989 }
4990 } else {
4991 return 0;
4992 }
4993 if (!strcmp(filename, entry)) {
4994 return 1;
4995 }
4996 }
4997 return 0;
4998 }
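
/*
 * is_proc_myself() accepts both the "self" spelling and the emulator's own
 * numeric pid: e.g. is_proc_myself("/proc/self/maps", "maps") returns 1, as
 * does is_proc_myself("/proc/1234/maps", "maps") when getpid() == 1234;
 * anything else returns 0 and the path is opened normally.
 */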
4999
5000 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5001 {
5002 struct fake_open {
5003 const char *filename;
5004 int (*fill)(void *cpu_env, int fd);
5005 };
5006 const struct fake_open *fake_open;
5007 static const struct fake_open fakes[] = {
5008 { "maps", open_self_maps },
5009 { "stat", open_self_stat },
5010 { "auxv", open_self_auxv },
5011 { NULL, NULL }
5012 };
5013
5014 for (fake_open = fakes; fake_open->filename; fake_open++) {
5015 if (is_proc_myself(pathname, fake_open->filename)) {
5016 break;
5017 }
5018 }
5019
5020 if (fake_open->filename) {
5021 const char *tmpdir;
5022 char filename[PATH_MAX];
5023 int fd, r;
5024
5025 /* create a temporary file to hold the faked /proc entry */
5026 tmpdir = getenv("TMPDIR");
5027 if (!tmpdir)
5028 tmpdir = "/tmp";
5029 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5030 fd = mkstemp(filename);
5031 if (fd < 0) {
5032 return fd;
5033 }
5034 unlink(filename);
5035
5036 if ((r = fake_open->fill(cpu_env, fd))) {
5037 close(fd);
5038 return r;
5039 }
5040 lseek(fd, 0, SEEK_SET);
5041
5042 return fd;
5043 }
5044
5045 return get_errno(open(path(pathname), flags, mode));
5046 }
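
/*
 * Faked /proc entries are materialised into an anonymous temporary file
 * (mkstemp() followed by an immediate unlink()), so the guest receives an
 * ordinary file descriptor with normal read()/lseek() semantics rather than
 * anything special-cased.  TMPDIR is honoured, with /tmp as the fallback.
 */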
5047
5048 /* do_syscall() should always have a single exit point at the end so
5049 that actions, such as logging of syscall results, can be performed.
5050 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5051 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5052 abi_long arg2, abi_long arg3, abi_long arg4,
5053 abi_long arg5, abi_long arg6, abi_long arg7,
5054 abi_long arg8)
5055 {
5056 abi_long ret;
5057 struct stat st;
5058 struct statfs stfs;
5059 void *p;
5060
5061 #ifdef DEBUG
5062 gemu_log("syscall %d", num);
5063 #endif
5064 if(do_strace)
5065 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5066
5067 switch(num) {
5068 case TARGET_NR_exit:
5069 #ifdef CONFIG_USE_NPTL
5070 /* In old applications this may be used to implement _exit(2).
5071 However in threaded applications it is used for thread termination,
5072 and _exit_group is used for application termination.
5073 Do thread termination if we have more than one thread. */
5074 /* FIXME: This probably breaks if a signal arrives. We should probably
5075 be disabling signals. */
5076 if (first_cpu->next_cpu) {
5077 TaskState *ts;
5078 CPUArchState **lastp;
5079 CPUArchState *p;
5080
5081 cpu_list_lock();
5082 lastp = &first_cpu;
5083 p = first_cpu;
5084 while (p && p != (CPUArchState *)cpu_env) {
5085 lastp = &p->next_cpu;
5086 p = p->next_cpu;
5087 }
5088 /* If we didn't find the CPU for this thread then something is
5089 horribly wrong. */
5090 if (!p)
5091 abort();
5092 /* Remove the CPU from the list. */
5093 *lastp = p->next_cpu;
5094 cpu_list_unlock();
5095 ts = ((CPUArchState *)cpu_env)->opaque;
5096 if (ts->child_tidptr) {
5097 put_user_u32(0, ts->child_tidptr);
5098 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5099 NULL, NULL, 0);
5100 }
5101 thread_env = NULL;
5102 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5103 g_free(ts);
5104 pthread_exit(NULL);
5105 }
5106 #endif
5107 #ifdef TARGET_GPROF
5108 _mcleanup();
5109 #endif
5110 gdb_exit(cpu_env, arg1);
5111 _exit(arg1);
5112 ret = 0; /* avoid warning */
5113 break;
5114 case TARGET_NR_read:
5115 if (arg3 == 0)
5116 ret = 0;
5117 else {
5118 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5119 goto efault;
5120 ret = get_errno(read(arg1, p, arg3));
5121 unlock_user(p, arg2, ret);
5122 }
5123 break;
5124 case TARGET_NR_write:
5125 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5126 goto efault;
5127 ret = get_errno(write(arg1, p, arg3));
5128 unlock_user(p, arg2, 0);
5129 break;
5130 case TARGET_NR_open:
5131 if (!(p = lock_user_string(arg1)))
5132 goto efault;
5133 ret = get_errno(do_open(cpu_env, p,
5134 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5135 arg3));
5136 unlock_user(p, arg1, 0);
5137 break;
5138 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5139 case TARGET_NR_openat:
5140 if (!(p = lock_user_string(arg2)))
5141 goto efault;
5142 ret = get_errno(sys_openat(arg1,
5143 path(p),
5144 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5145 arg4));
5146 unlock_user(p, arg2, 0);
5147 break;
5148 #endif
5149 case TARGET_NR_close:
5150 ret = get_errno(close(arg1));
5151 break;
5152 case TARGET_NR_brk:
5153 ret = do_brk(arg1);
5154 break;
5155 case TARGET_NR_fork:
5156 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5157 break;
5158 #ifdef TARGET_NR_waitpid
5159 case TARGET_NR_waitpid:
5160 {
5161 int status;
5162 ret = get_errno(waitpid(arg1, &status, arg3));
5163 if (!is_error(ret) && arg2 && ret
5164 && put_user_s32(host_to_target_waitstatus(status), arg2))
5165 goto efault;
5166 }
5167 break;
5168 #endif
5169 #ifdef TARGET_NR_waitid
5170 case TARGET_NR_waitid:
5171 {
5172 siginfo_t info;
5173 info.si_pid = 0;
5174 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5175 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5176 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5177 goto efault;
5178 host_to_target_siginfo(p, &info);
5179 unlock_user(p, arg3, sizeof(target_siginfo_t));
5180 }
5181 }
5182 break;
5183 #endif
5184 #ifdef TARGET_NR_creat /* not on alpha */
5185 case TARGET_NR_creat:
5186 if (!(p = lock_user_string(arg1)))
5187 goto efault;
5188 ret = get_errno(creat(p, arg2));
5189 unlock_user(p, arg1, 0);
5190 break;
5191 #endif
5192 case TARGET_NR_link:
5193 {
5194 void * p2;
5195 p = lock_user_string(arg1);
5196 p2 = lock_user_string(arg2);
5197 if (!p || !p2)
5198 ret = -TARGET_EFAULT;
5199 else
5200 ret = get_errno(link(p, p2));
5201 unlock_user(p2, arg2, 0);
5202 unlock_user(p, arg1, 0);
5203 }
5204 break;
5205 #if defined(TARGET_NR_linkat)
5206 case TARGET_NR_linkat:
5207 {
5208 void * p2 = NULL;
5209 if (!arg2 || !arg4)
5210 goto efault;
5211 p = lock_user_string(arg2);
5212 p2 = lock_user_string(arg4);
5213 if (!p || !p2)
5214 ret = -TARGET_EFAULT;
5215 else
5216 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5217 unlock_user(p, arg2, 0);
5218 unlock_user(p2, arg4, 0);
5219 }
5220 break;
5221 #endif
5222 case TARGET_NR_unlink:
5223 if (!(p = lock_user_string(arg1)))
5224 goto efault;
5225 ret = get_errno(unlink(p));
5226 unlock_user(p, arg1, 0);
5227 break;
5228 #if defined(TARGET_NR_unlinkat)
5229 case TARGET_NR_unlinkat:
5230 if (!(p = lock_user_string(arg2)))
5231 goto efault;
5232 ret = get_errno(unlinkat(arg1, p, arg3));
5233 unlock_user(p, arg2, 0);
5234 break;
5235 #endif
5236 case TARGET_NR_execve:
5237 {
5238 char **argp, **envp;
5239 int argc, envc;
5240 abi_ulong gp;
5241 abi_ulong guest_argp;
5242 abi_ulong guest_envp;
5243 abi_ulong addr;
5244 char **q;
5245 int total_size = 0;
5246
5247 argc = 0;
5248 guest_argp = arg2;
5249 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5250 if (get_user_ual(addr, gp))
5251 goto efault;
5252 if (!addr)
5253 break;
5254 argc++;
5255 }
5256 envc = 0;
5257 guest_envp = arg3;
5258 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5259 if (get_user_ual(addr, gp))
5260 goto efault;
5261 if (!addr)
5262 break;
5263 envc++;
5264 }
5265
5266 argp = alloca((argc + 1) * sizeof(void *));
5267 envp = alloca((envc + 1) * sizeof(void *));
5268
5269 for (gp = guest_argp, q = argp; gp;
5270 gp += sizeof(abi_ulong), q++) {
5271 if (get_user_ual(addr, gp))
5272 goto execve_efault;
5273 if (!addr)
5274 break;
5275 if (!(*q = lock_user_string(addr)))
5276 goto execve_efault;
5277 total_size += strlen(*q) + 1;
5278 }
5279 *q = NULL;
5280
5281 for (gp = guest_envp, q = envp; gp;
5282 gp += sizeof(abi_ulong), q++) {
5283 if (get_user_ual(addr, gp))
5284 goto execve_efault;
5285 if (!addr)
5286 break;
5287 if (!(*q = lock_user_string(addr)))
5288 goto execve_efault;
5289 total_size += strlen(*q) + 1;
5290 }
5291 *q = NULL;
5292
5293 /* This case will not be caught by the host's execve() if its
5294 page size is bigger than the target's. */
5295 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5296 ret = -TARGET_E2BIG;
5297 goto execve_end;
5298 }
5299 if (!(p = lock_user_string(arg1)))
5300 goto execve_efault;
5301 ret = get_errno(execve(p, argp, envp));
5302 unlock_user(p, arg1, 0);
5303
5304 goto execve_end;
5305
5306 execve_efault:
5307 ret = -TARGET_EFAULT;
5308
5309 execve_end:
5310 for (gp = guest_argp, q = argp; *q;
5311 gp += sizeof(abi_ulong), q++) {
5312 if (get_user_ual(addr, gp)
5313 || !addr)
5314 break;
5315 unlock_user(*q, addr, 0);
5316 }
5317 for (gp = guest_envp, q = envp; *q;
5318 gp += sizeof(abi_ulong), q++) {
5319 if (get_user_ual(addr, gp)
5320 || !addr)
5321 break;
5322 unlock_user(*q, addr, 0);
5323 }
5324 }
5325 break;
5326 case TARGET_NR_chdir:
5327 if (!(p = lock_user_string(arg1)))
5328 goto efault;
5329 ret = get_errno(chdir(p));
5330 unlock_user(p, arg1, 0);
5331 break;
5332 #ifdef TARGET_NR_time
5333 case TARGET_NR_time:
5334 {
5335 time_t host_time;
5336 ret = get_errno(time(&host_time));
5337 if (!is_error(ret)
5338 && arg1
5339 && put_user_sal(host_time, arg1))
5340 goto efault;
5341 }
5342 break;
5343 #endif
5344 case TARGET_NR_mknod:
5345 if (!(p = lock_user_string(arg1)))
5346 goto efault;
5347 ret = get_errno(mknod(p, arg2, arg3));
5348 unlock_user(p, arg1, 0);
5349 break;
5350 #if defined(TARGET_NR_mknodat)
5351 case TARGET_NR_mknodat:
5352 if (!(p = lock_user_string(arg2)))
5353 goto efault;
5354 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5355 unlock_user(p, arg2, 0);
5356 break;
5357 #endif
5358 case TARGET_NR_chmod:
5359 if (!(p = lock_user_string(arg1)))
5360 goto efault;
5361 ret = get_errno(chmod(p, arg2));
5362 unlock_user(p, arg1, 0);
5363 break;
5364 #ifdef TARGET_NR_break
5365 case TARGET_NR_break:
5366 goto unimplemented;
5367 #endif
5368 #ifdef TARGET_NR_oldstat
5369 case TARGET_NR_oldstat:
5370 goto unimplemented;
5371 #endif
5372 case TARGET_NR_lseek:
5373 ret = get_errno(lseek(arg1, arg2, arg3));
5374 break;
5375 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5376 /* Alpha specific */
5377 case TARGET_NR_getxpid:
5378 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5379 ret = get_errno(getpid());
5380 break;
5381 #endif
5382 #ifdef TARGET_NR_getpid
5383 case TARGET_NR_getpid:
5384 ret = get_errno(getpid());
5385 break;
5386 #endif
5387 case TARGET_NR_mount:
5388 {
5389 /* need to look at the data field */
5390 void *p2, *p3;
5391 p = lock_user_string(arg1);
5392 p2 = lock_user_string(arg2);
5393 p3 = lock_user_string(arg3);
5394 if (!p || !p2 || !p3)
5395 ret = -TARGET_EFAULT;
5396 else {
5397 /* FIXME - arg5 should be locked, but it isn't clear how to
5398 * do that since it's not guaranteed to be a NULL-terminated
5399 * string.
5400 */
5401 if ( ! arg5 )
5402 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5403 else
5404 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5405 }
5406 unlock_user(p, arg1, 0);
5407 unlock_user(p2, arg2, 0);
5408 unlock_user(p3, arg3, 0);
5409 break;
5410 }
5411 #ifdef TARGET_NR_umount
5412 case TARGET_NR_umount:
5413 if (!(p = lock_user_string(arg1)))
5414 goto efault;
5415 ret = get_errno(umount(p));
5416 unlock_user(p, arg1, 0);
5417 break;
5418 #endif
5419 #ifdef TARGET_NR_stime /* not on alpha */
5420 case TARGET_NR_stime:
5421 {
5422 time_t host_time;
5423 if (get_user_sal(host_time, arg1))
5424 goto efault;
5425 ret = get_errno(stime(&host_time));
5426 }
5427 break;
5428 #endif
5429 case TARGET_NR_ptrace:
5430 goto unimplemented;
5431 #ifdef TARGET_NR_alarm /* not on alpha */
5432 case TARGET_NR_alarm:
5433 ret = alarm(arg1);
5434 break;
5435 #endif
5436 #ifdef TARGET_NR_oldfstat
5437 case TARGET_NR_oldfstat:
5438 goto unimplemented;
5439 #endif
5440 #ifdef TARGET_NR_pause /* not on alpha */
5441 case TARGET_NR_pause:
5442 ret = get_errno(pause());
5443 break;
5444 #endif
5445 #ifdef TARGET_NR_utime
5446 case TARGET_NR_utime:
5447 {
5448 struct utimbuf tbuf, *host_tbuf;
5449 struct target_utimbuf *target_tbuf;
5450 if (arg2) {
5451 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5452 goto efault;
5453 tbuf.actime = tswapal(target_tbuf->actime);
5454 tbuf.modtime = tswapal(target_tbuf->modtime);
5455 unlock_user_struct(target_tbuf, arg2, 0);
5456 host_tbuf = &tbuf;
5457 } else {
5458 host_tbuf = NULL;
5459 }
5460 if (!(p = lock_user_string(arg1)))
5461 goto efault;
5462 ret = get_errno(utime(p, host_tbuf));
5463 unlock_user(p, arg1, 0);
5464 }
5465 break;
5466 #endif
5467 case TARGET_NR_utimes:
5468 {
5469 struct timeval *tvp, tv[2];
5470 if (arg2) {
5471 if (copy_from_user_timeval(&tv[0], arg2)
5472 || copy_from_user_timeval(&tv[1],
5473 arg2 + sizeof(struct target_timeval)))
5474 goto efault;
5475 tvp = tv;
5476 } else {
5477 tvp = NULL;
5478 }
5479 if (!(p = lock_user_string(arg1)))
5480 goto efault;
5481 ret = get_errno(utimes(p, tvp));
5482 unlock_user(p, arg1, 0);
5483 }
5484 break;
5485 #if defined(TARGET_NR_futimesat)
5486 case TARGET_NR_futimesat:
5487 {
5488 struct timeval *tvp, tv[2];
5489 if (arg3) {
5490 if (copy_from_user_timeval(&tv[0], arg3)
5491 || copy_from_user_timeval(&tv[1],
5492 arg3 + sizeof(struct target_timeval)))
5493 goto efault;
5494 tvp = tv;
5495 } else {
5496 tvp = NULL;
5497 }
5498 if (!(p = lock_user_string(arg2)))
5499 goto efault;
5500 ret = get_errno(futimesat(arg1, path(p), tvp));
5501 unlock_user(p, arg2, 0);
5502 }
5503 break;
5504 #endif
5505 #ifdef TARGET_NR_stty
5506 case TARGET_NR_stty:
5507 goto unimplemented;
5508 #endif
5509 #ifdef TARGET_NR_gtty
5510 case TARGET_NR_gtty:
5511 goto unimplemented;
5512 #endif
5513 case TARGET_NR_access:
5514 if (!(p = lock_user_string(arg1)))
5515 goto efault;
5516 ret = get_errno(access(path(p), arg2));
5517 unlock_user(p, arg1, 0);
5518 break;
5519 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5520 case TARGET_NR_faccessat:
5521 if (!(p = lock_user_string(arg2)))
5522 goto efault;
5523 ret = get_errno(faccessat(arg1, p, arg3, 0));
5524 unlock_user(p, arg2, 0);
5525 break;
5526 #endif
5527 #ifdef TARGET_NR_nice /* not on alpha */
5528 case TARGET_NR_nice:
5529 ret = get_errno(nice(arg1));
5530 break;
5531 #endif
5532 #ifdef TARGET_NR_ftime
5533 case TARGET_NR_ftime:
5534 goto unimplemented;
5535 #endif
5536 case TARGET_NR_sync:
5537 sync();
5538 ret = 0;
5539 break;
5540 case TARGET_NR_kill:
5541 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5542 break;
5543 case TARGET_NR_rename:
5544 {
5545 void *p2;
5546 p = lock_user_string(arg1);
5547 p2 = lock_user_string(arg2);
5548 if (!p || !p2)
5549 ret = -TARGET_EFAULT;
5550 else
5551 ret = get_errno(rename(p, p2));
5552 unlock_user(p2, arg2, 0);
5553 unlock_user(p, arg1, 0);
5554 }
5555 break;
5556 #if defined(TARGET_NR_renameat)
5557 case TARGET_NR_renameat:
5558 {
5559 void *p2;
5560 p = lock_user_string(arg2);
5561 p2 = lock_user_string(arg4);
5562 if (!p || !p2)
5563 ret = -TARGET_EFAULT;
5564 else
5565 ret = get_errno(renameat(arg1, p, arg3, p2));
5566 unlock_user(p2, arg4, 0);
5567 unlock_user(p, arg2, 0);
5568 }
5569 break;
5570 #endif
5571 case TARGET_NR_mkdir:
5572 if (!(p = lock_user_string(arg1)))
5573 goto efault;
5574 ret = get_errno(mkdir(p, arg2));
5575 unlock_user(p, arg1, 0);
5576 break;
5577 #if defined(TARGET_NR_mkdirat)
5578 case TARGET_NR_mkdirat:
5579 if (!(p = lock_user_string(arg2)))
5580 goto efault;
5581 ret = get_errno(mkdirat(arg1, p, arg3));
5582 unlock_user(p, arg2, 0);
5583 break;
5584 #endif
5585 case TARGET_NR_rmdir:
5586 if (!(p = lock_user_string(arg1)))
5587 goto efault;
5588 ret = get_errno(rmdir(p));
5589 unlock_user(p, arg1, 0);
5590 break;
5591 case TARGET_NR_dup:
5592 ret = get_errno(dup(arg1));
5593 break;
5594 case TARGET_NR_pipe:
5595 ret = do_pipe(cpu_env, arg1, 0, 0);
5596 break;
5597 #ifdef TARGET_NR_pipe2
5598 case TARGET_NR_pipe2:
5599 ret = do_pipe(cpu_env, arg1,
5600 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5601 break;
5602 #endif
5603 case TARGET_NR_times:
5604 {
5605 struct target_tms *tmsp;
5606 struct tms tms;
5607 ret = get_errno(times(&tms));
5608 if (arg1) {
5609 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5610 if (!tmsp)
5611 goto efault;
5612 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5613 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5614 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5615 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5616 }
5617 if (!is_error(ret))
5618 ret = host_to_target_clock_t(ret);
5619 }
5620 break;
5621 #ifdef TARGET_NR_prof
5622 case TARGET_NR_prof:
5623 goto unimplemented;
5624 #endif
5625 #ifdef TARGET_NR_signal
5626 case TARGET_NR_signal:
5627 goto unimplemented;
5628 #endif
5629 case TARGET_NR_acct:
5630 if (arg1 == 0) {
5631 ret = get_errno(acct(NULL));
5632 } else {
5633 if (!(p = lock_user_string(arg1)))
5634 goto efault;
5635 ret = get_errno(acct(path(p)));
5636 unlock_user(p, arg1, 0);
5637 }
5638 break;
5639 #ifdef TARGET_NR_umount2 /* not on alpha */
5640 case TARGET_NR_umount2:
5641 if (!(p = lock_user_string(arg1)))
5642 goto efault;
5643 ret = get_errno(umount2(p, arg2));
5644 unlock_user(p, arg1, 0);
5645 break;
5646 #endif
5647 #ifdef TARGET_NR_lock
5648 case TARGET_NR_lock:
5649 goto unimplemented;
5650 #endif
5651 case TARGET_NR_ioctl:
5652 ret = do_ioctl(arg1, arg2, arg3);
5653 break;
5654 case TARGET_NR_fcntl:
5655 ret = do_fcntl(arg1, arg2, arg3);
5656 break;
5657 #ifdef TARGET_NR_mpx
5658 case TARGET_NR_mpx:
5659 goto unimplemented;
5660 #endif
5661 case TARGET_NR_setpgid:
5662 ret = get_errno(setpgid(arg1, arg2));
5663 break;
5664 #ifdef TARGET_NR_ulimit
5665 case TARGET_NR_ulimit:
5666 goto unimplemented;
5667 #endif
5668 #ifdef TARGET_NR_oldolduname
5669 case TARGET_NR_oldolduname:
5670 goto unimplemented;
5671 #endif
5672 case TARGET_NR_umask:
5673 ret = get_errno(umask(arg1));
5674 break;
5675 case TARGET_NR_chroot:
5676 if (!(p = lock_user_string(arg1)))
5677 goto efault;
5678 ret = get_errno(chroot(p));
5679 unlock_user(p, arg1, 0);
5680 break;
5681 case TARGET_NR_ustat:
5682 goto unimplemented;
5683 case TARGET_NR_dup2:
5684 ret = get_errno(dup2(arg1, arg2));
5685 break;
5686 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5687 case TARGET_NR_dup3:
5688 ret = get_errno(dup3(arg1, arg2, arg3));
5689 break;
5690 #endif
5691 #ifdef TARGET_NR_getppid /* not on alpha */
5692 case TARGET_NR_getppid:
5693 ret = get_errno(getppid());
5694 break;
5695 #endif
5696 case TARGET_NR_getpgrp:
5697 ret = get_errno(getpgrp());
5698 break;
5699 case TARGET_NR_setsid:
5700 ret = get_errno(setsid());
5701 break;
5702 #ifdef TARGET_NR_sigaction
5703 case TARGET_NR_sigaction:
5704 {
5705 #if defined(TARGET_ALPHA)
5706 struct target_sigaction act, oact, *pact = 0;
5707 struct target_old_sigaction *old_act;
5708 if (arg2) {
5709 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5710 goto efault;
5711 act._sa_handler = old_act->_sa_handler;
5712 target_siginitset(&act.sa_mask, old_act->sa_mask);
5713 act.sa_flags = old_act->sa_flags;
5714 act.sa_restorer = 0;
5715 unlock_user_struct(old_act, arg2, 0);
5716 pact = &act;
5717 }
5718 ret = get_errno(do_sigaction(arg1, pact, &oact));
5719 if (!is_error(ret) && arg3) {
5720 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5721 goto efault;
5722 old_act->_sa_handler = oact._sa_handler;
5723 old_act->sa_mask = oact.sa_mask.sig[0];
5724 old_act->sa_flags = oact.sa_flags;
5725 unlock_user_struct(old_act, arg3, 1);
5726 }
5727 #elif defined(TARGET_MIPS)
5728 struct target_sigaction act, oact, *pact, *old_act;
5729
5730 if (arg2) {
5731 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5732 goto efault;
5733 act._sa_handler = old_act->_sa_handler;
5734 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5735 act.sa_flags = old_act->sa_flags;
5736 unlock_user_struct(old_act, arg2, 0);
5737 pact = &act;
5738 } else {
5739 pact = NULL;
5740 }
5741
5742 ret = get_errno(do_sigaction(arg1, pact, &oact));
5743
5744 if (!is_error(ret) && arg3) {
5745 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5746 goto efault;
5747 old_act->_sa_handler = oact._sa_handler;
5748 old_act->sa_flags = oact.sa_flags;
5749 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5750 old_act->sa_mask.sig[1] = 0;
5751 old_act->sa_mask.sig[2] = 0;
5752 old_act->sa_mask.sig[3] = 0;
5753 unlock_user_struct(old_act, arg3, 1);
5754 }
5755 #else
5756 struct target_old_sigaction *old_act;
5757 struct target_sigaction act, oact, *pact;
5758 if (arg2) {
5759 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5760 goto efault;
5761 act._sa_handler = old_act->_sa_handler;
5762 target_siginitset(&act.sa_mask, old_act->sa_mask);
5763 act.sa_flags = old_act->sa_flags;
5764 act.sa_restorer = old_act->sa_restorer;
5765 unlock_user_struct(old_act, arg2, 0);
5766 pact = &act;
5767 } else {
5768 pact = NULL;
5769 }
5770 ret = get_errno(do_sigaction(arg1, pact, &oact));
5771 if (!is_error(ret) && arg3) {
5772 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5773 goto efault;
5774 old_act->_sa_handler = oact._sa_handler;
5775 old_act->sa_mask = oact.sa_mask.sig[0];
5776 old_act->sa_flags = oact.sa_flags;
5777 old_act->sa_restorer = oact.sa_restorer;
5778 unlock_user_struct(old_act, arg3, 1);
5779 }
5780 #endif
5781 }
5782 break;
5783 #endif
5784 case TARGET_NR_rt_sigaction:
5785 {
5786 #if defined(TARGET_ALPHA)
5787 struct target_sigaction act, oact, *pact = 0;
5788 struct target_rt_sigaction *rt_act;
5789 /* ??? arg4 == sizeof(sigset_t). */
5790 if (arg2) {
5791 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5792 goto efault;
5793 act._sa_handler = rt_act->_sa_handler;
5794 act.sa_mask = rt_act->sa_mask;
5795 act.sa_flags = rt_act->sa_flags;
5796 act.sa_restorer = arg5;
5797 unlock_user_struct(rt_act, arg2, 0);
5798 pact = &act;
5799 }
5800 ret = get_errno(do_sigaction(arg1, pact, &oact));
5801 if (!is_error(ret) && arg3) {
5802 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5803 goto efault;
5804 rt_act->_sa_handler = oact._sa_handler;
5805 rt_act->sa_mask = oact.sa_mask;
5806 rt_act->sa_flags = oact.sa_flags;
5807 unlock_user_struct(rt_act, arg3, 1);
5808 }
5809 #else
5810 struct target_sigaction *act;
5811 struct target_sigaction *oact;
5812
5813 if (arg2) {
5814 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5815 goto efault;
5816 } else
5817 act = NULL;
5818 if (arg3) {
5819 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5820 ret = -TARGET_EFAULT;
5821 goto rt_sigaction_fail;
5822 }
5823 } else
5824 oact = NULL;
5825 ret = get_errno(do_sigaction(arg1, act, oact));
5826 rt_sigaction_fail:
5827 if (act)
5828 unlock_user_struct(act, arg2, 0);
5829 if (oact)
5830 unlock_user_struct(oact, arg3, 1);
5831 #endif
5832 }
5833 break;
5834 #ifdef TARGET_NR_sgetmask /* not on alpha */
5835 case TARGET_NR_sgetmask:
5836 {
5837 sigset_t cur_set;
5838 abi_ulong target_set;
5839 sigprocmask(0, NULL, &cur_set);
5840 host_to_target_old_sigset(&target_set, &cur_set);
5841 ret = target_set;
5842 }
5843 break;
5844 #endif
5845 #ifdef TARGET_NR_ssetmask /* not on alpha */
5846 case TARGET_NR_ssetmask:
5847 {
5848 sigset_t set, oset, cur_set;
5849 abi_ulong target_set = arg1;
5850 sigprocmask(0, NULL, &cur_set);
5851 target_to_host_old_sigset(&set, &target_set);
5852 sigorset(&set, &set, &cur_set);
5853 sigprocmask(SIG_SETMASK, &set, &oset);
5854 host_to_target_old_sigset(&target_set, &oset);
5855 ret = target_set;
5856 }
5857 break;
5858 #endif
5859 #ifdef TARGET_NR_sigprocmask
5860 case TARGET_NR_sigprocmask:
5861 {
5862 #if defined(TARGET_ALPHA)
5863 sigset_t set, oldset;
5864 abi_ulong mask;
5865 int how;
5866
5867 switch (arg1) {
5868 case TARGET_SIG_BLOCK:
5869 how = SIG_BLOCK;
5870 break;
5871 case TARGET_SIG_UNBLOCK:
5872 how = SIG_UNBLOCK;
5873 break;
5874 case TARGET_SIG_SETMASK:
5875 how = SIG_SETMASK;
5876 break;
5877 default:
5878 ret = -TARGET_EINVAL;
5879 goto fail;
5880 }
5881 mask = arg2;
5882 target_to_host_old_sigset(&set, &mask);
5883
5884 ret = get_errno(sigprocmask(how, &set, &oldset));
5885 if (!is_error(ret)) {
5886 host_to_target_old_sigset(&mask, &oldset);
5887 ret = mask;
5888 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5889 }
5890 #else
5891 sigset_t set, oldset, *set_ptr;
5892 int how;
5893
5894 if (arg2) {
5895 switch (arg1) {
5896 case TARGET_SIG_BLOCK:
5897 how = SIG_BLOCK;
5898 break;
5899 case TARGET_SIG_UNBLOCK:
5900 how = SIG_UNBLOCK;
5901 break;
5902 case TARGET_SIG_SETMASK:
5903 how = SIG_SETMASK;
5904 break;
5905 default:
5906 ret = -TARGET_EINVAL;
5907 goto fail;
5908 }
5909 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5910 goto efault;
5911 target_to_host_old_sigset(&set, p);
5912 unlock_user(p, arg2, 0);
5913 set_ptr = &set;
5914 } else {
5915 how = 0;
5916 set_ptr = NULL;
5917 }
5918 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5919 if (!is_error(ret) && arg3) {
5920 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5921 goto efault;
5922 host_to_target_old_sigset(p, &oldset);
5923 unlock_user(p, arg3, sizeof(target_sigset_t));
5924 }
5925 #endif
5926 }
5927 break;
5928 #endif
5929 case TARGET_NR_rt_sigprocmask:
5930 {
5931 int how = arg1;
5932 sigset_t set, oldset, *set_ptr;
5933
5934 if (arg2) {
5935 switch(how) {
5936 case TARGET_SIG_BLOCK:
5937 how = SIG_BLOCK;
5938 break;
5939 case TARGET_SIG_UNBLOCK:
5940 how = SIG_UNBLOCK;
5941 break;
5942 case TARGET_SIG_SETMASK:
5943 how = SIG_SETMASK;
5944 break;
5945 default:
5946 ret = -TARGET_EINVAL;
5947 goto fail;
5948 }
5949 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5950 goto efault;
5951 target_to_host_sigset(&set, p);
5952 unlock_user(p, arg2, 0);
5953 set_ptr = &set;
5954 } else {
5955 how = 0;
5956 set_ptr = NULL;
5957 }
5958 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5959 if (!is_error(ret) && arg3) {
5960 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5961 goto efault;
5962 host_to_target_sigset(p, &oldset);
5963 unlock_user(p, arg3, sizeof(target_sigset_t));
5964 }
5965 }
5966 break;
5967 #ifdef TARGET_NR_sigpending
5968 case TARGET_NR_sigpending:
5969 {
5970 sigset_t set;
5971 ret = get_errno(sigpending(&set));
5972 if (!is_error(ret)) {
5973 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5974 goto efault;
5975 host_to_target_old_sigset(p, &set);
5976 unlock_user(p, arg1, sizeof(target_sigset_t));
5977 }
5978 }
5979 break;
5980 #endif
5981 case TARGET_NR_rt_sigpending:
5982 {
5983 sigset_t set;
5984 ret = get_errno(sigpending(&set));
5985 if (!is_error(ret)) {
5986 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5987 goto efault;
5988 host_to_target_sigset(p, &set);
5989 unlock_user(p, arg1, sizeof(target_sigset_t));
5990 }
5991 }
5992 break;
5993 #ifdef TARGET_NR_sigsuspend
5994 case TARGET_NR_sigsuspend:
5995 {
5996 sigset_t set;
5997 #if defined(TARGET_ALPHA)
5998 abi_ulong mask = arg1;
5999 target_to_host_old_sigset(&set, &mask);
6000 #else
6001 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6002 goto efault;
6003 target_to_host_old_sigset(&set, p);
6004 unlock_user(p, arg1, 0);
6005 #endif
6006 ret = get_errno(sigsuspend(&set));
6007 }
6008 break;
6009 #endif
6010 case TARGET_NR_rt_sigsuspend:
6011 {
6012 sigset_t set;
6013 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6014 goto efault;
6015 target_to_host_sigset(&set, p);
6016 unlock_user(p, arg1, 0);
6017 ret = get_errno(sigsuspend(&set));
6018 }
6019 break;
6020 case TARGET_NR_rt_sigtimedwait:
6021 {
6022 sigset_t set;
6023 struct timespec uts, *puts;
6024 siginfo_t uinfo;
6025
6026 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6027 goto efault;
6028 target_to_host_sigset(&set, p);
6029 unlock_user(p, arg1, 0);
6030 if (arg3) {
6031 puts = &uts;
6032 target_to_host_timespec(puts, arg3);
6033 } else {
6034 puts = NULL;
6035 }
6036 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6037 if (!is_error(ret) && arg2) {
6038 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6039 goto efault;
6040 host_to_target_siginfo(p, &uinfo);
6041 unlock_user(p, arg2, sizeof(target_siginfo_t));
6042 }
6043 }
6044 break;
6045 case TARGET_NR_rt_sigqueueinfo:
6046 {
6047 siginfo_t uinfo;
6048 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6049 goto efault;
6050 target_to_host_siginfo(&uinfo, p);
6051 unlock_user(p, arg3, 0);
6052 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6053 }
6054 break;
6055 #ifdef TARGET_NR_sigreturn
6056 case TARGET_NR_sigreturn:
6057 /* NOTE: ret is eax, so no transcoding needs to be done */
6058 ret = do_sigreturn(cpu_env);
6059 break;
6060 #endif
6061 case TARGET_NR_rt_sigreturn:
6062 /* NOTE: ret is eax, so no transcoding needs to be done */
6063 ret = do_rt_sigreturn(cpu_env);
6064 break;
6065 case TARGET_NR_sethostname:
6066 if (!(p = lock_user_string(arg1)))
6067 goto efault;
6068 ret = get_errno(sethostname(p, arg2));
6069 unlock_user(p, arg1, 0);
6070 break;
6071 case TARGET_NR_setrlimit:
6072 {
6073 int resource = target_to_host_resource(arg1);
6074 struct target_rlimit *target_rlim;
6075 struct rlimit rlim;
6076 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6077 goto efault;
6078 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6079 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6080 unlock_user_struct(target_rlim, arg2, 0);
6081 ret = get_errno(setrlimit(resource, &rlim));
6082 }
6083 break;
6084 case TARGET_NR_getrlimit:
6085 {
6086 int resource = target_to_host_resource(arg1);
6087 struct target_rlimit *target_rlim;
6088 struct rlimit rlim;
6089
6090 ret = get_errno(getrlimit(resource, &rlim));
6091 if (!is_error(ret)) {
6092 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6093 goto efault;
6094 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6095 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6096 unlock_user_struct(target_rlim, arg2, 1);
6097 }
6098 }
6099 break;
6100 case TARGET_NR_getrusage:
6101 {
6102 struct rusage rusage;
6103 ret = get_errno(getrusage(arg1, &rusage));
6104 if (!is_error(ret)) {
6105 host_to_target_rusage(arg2, &rusage);
6106 }
6107 }
6108 break;
6109 case TARGET_NR_gettimeofday:
6110 {
6111 struct timeval tv;
6112 ret = get_errno(gettimeofday(&tv, NULL));
6113 if (!is_error(ret)) {
6114 if (copy_to_user_timeval(arg1, &tv))
6115 goto efault;
6116 }
6117 }
6118 break;
6119 case TARGET_NR_settimeofday:
6120 {
6121 struct timeval tv;
6122 if (copy_from_user_timeval(&tv, arg1))
6123 goto efault;
6124 ret = get_errno(settimeofday(&tv, NULL));
6125 }
6126 break;
6127 #if defined(TARGET_NR_select)
6128 case TARGET_NR_select:
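/* On Alpha and S390X the five select arguments arrive as ordinary
   syscall arguments; other targets pass a single pointer to a
   sel_arg_struct holding all five. */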
6129 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6130 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6131 #else
6132 {
6133 struct target_sel_arg_struct *sel;
6134 abi_ulong inp, outp, exp, tvp;
6135 long nsel;
6136
6137 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6138 goto efault;
6139 nsel = tswapal(sel->n);
6140 inp = tswapal(sel->inp);
6141 outp = tswapal(sel->outp);
6142 exp = tswapal(sel->exp);
6143 tvp = tswapal(sel->tvp);
6144 unlock_user_struct(sel, arg1, 0);
6145 ret = do_select(nsel, inp, outp, exp, tvp);
6146 }
6147 #endif
6148 break;
6149 #endif
6150 #ifdef TARGET_NR_pselect6
6151 case TARGET_NR_pselect6:
6152 {
6153 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6154 fd_set rfds, wfds, efds;
6155 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6156 struct timespec ts, *ts_ptr;
6157
6158 /*
6159 * The 6th arg is actually two args smashed together,
6160 * so we cannot use the C library.
6161 */
6162 sigset_t set;
6163 struct {
6164 sigset_t *set;
6165 size_t size;
6166 } sig, *sig_ptr;
6167
6168 abi_ulong arg_sigset, arg_sigsize, *arg7;
6169 target_sigset_t *target_sigset;
6170
6171 n = arg1;
6172 rfd_addr = arg2;
6173 wfd_addr = arg3;
6174 efd_addr = arg4;
6175 ts_addr = arg5;
6176
6177 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6178 if (ret) {
6179 goto fail;
6180 }
6181 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6182 if (ret) {
6183 goto fail;
6184 }
6185 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6186 if (ret) {
6187 goto fail;
6188 }
6189
6190 /*
6191 * This takes a timespec, and not a timeval, so we cannot
6192 * use the do_select() helper ...
6193 */
6194 if (ts_addr) {
6195 if (target_to_host_timespec(&ts, ts_addr)) {
6196 goto efault;
6197 }
6198 ts_ptr = &ts;
6199 } else {
6200 ts_ptr = NULL;
6201 }
6202
6203 /* Extract the two packed args for the sigset */
6204 if (arg6) {
6205 sig_ptr = &sig;
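/* The kernel expects the sigset size in bytes; use the host sigset size. */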
6206 sig.size = _NSIG / 8;
6207
6208 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6209 if (!arg7) {
6210 goto efault;
6211 }
6212 arg_sigset = tswapal(arg7[0]);
6213 arg_sigsize = tswapal(arg7[1]);
6214 unlock_user(arg7, arg6, 0);
6215
6216 if (arg_sigset) {
6217 sig.set = &set;
6218 if (arg_sigsize != sizeof(*target_sigset)) {
6219 /* Like the kernel, we enforce correct size sigsets */
6220 ret = -TARGET_EINVAL;
6221 goto fail;
6222 }
6223 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6224 sizeof(*target_sigset), 1);
6225 if (!target_sigset) {
6226 goto efault;
6227 }
6228 target_to_host_sigset(&set, target_sigset);
6229 unlock_user(target_sigset, arg_sigset, 0);
6230 } else {
6231 sig.set = NULL;
6232 }
6233 } else {
6234 sig_ptr = NULL;
6235 }
6236
6237 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6238 ts_ptr, sig_ptr));
6239
6240 if (!is_error(ret)) {
6241 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6242 goto efault;
6243 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6244 goto efault;
6245 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6246 goto efault;
6247
6248 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6249 goto efault;
6250 }
6251 }
6252 break;
6253 #endif
6254 case TARGET_NR_symlink:
6255 {
6256 void *p2;
6257 p = lock_user_string(arg1);
6258 p2 = lock_user_string(arg2);
6259 if (!p || !p2)
6260 ret = -TARGET_EFAULT;
6261 else
6262 ret = get_errno(symlink(p, p2));
6263 unlock_user(p2, arg2, 0);
6264 unlock_user(p, arg1, 0);
6265 }
6266 break;
6267 #if defined(TARGET_NR_symlinkat)
6268 case TARGET_NR_symlinkat:
6269 {
6270 void *p2;
6271 p = lock_user_string(arg1);
6272 p2 = lock_user_string(arg3);
6273 if (!p || !p2)
6274 ret = -TARGET_EFAULT;
6275 else
6276 ret = get_errno(symlinkat(p, arg2, p2));
6277 unlock_user(p2, arg3, 0);
6278 unlock_user(p, arg1, 0);
6279 }
6280 break;
6281 #endif
6282 #ifdef TARGET_NR_oldlstat
6283 case TARGET_NR_oldlstat:
6284 goto unimplemented;
6285 #endif
6286 case TARGET_NR_readlink:
6287 {
6288 void *p2;
6289 p = lock_user_string(arg1);
6290 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6291 if (!p || !p2) {
6292 ret = -TARGET_EFAULT;
6293 } else if (is_proc_myself((const char *)p, "exe")) {
6294 char real[PATH_MAX], *temp;
6295 temp = realpath(exec_path, real);
6296 ret = temp == NULL ? get_errno(-1) : strlen(real);
6297 snprintf((char *)p2, arg3, "%s", temp ? real : "");
6298 } else {
6299 ret = get_errno(readlink(path(p), p2, arg3));
6300 }
6301 unlock_user(p2, arg2, ret);
6302 unlock_user(p, arg1, 0);
6303 }
6304 break;
6305 #if defined(TARGET_NR_readlinkat)
6306 case TARGET_NR_readlinkat:
6307 {
6308 void *p2;
6309 p = lock_user_string(arg2);
6310 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6311 if (!p || !p2) {
6312 ret = -TARGET_EFAULT;
6313 } else if (is_proc_myself((const char *)p, "exe")) {
6314 char real[PATH_MAX], *temp;
6315 temp = realpath(exec_path, real);
6316 ret = temp == NULL ? get_errno(-1) : strlen(real);
6317 snprintf((char *)p2, arg4, "%s", temp ? real : "");
6318 } else {
6319 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6320 }
6321 unlock_user(p2, arg3, ret);
6322 unlock_user(p, arg2, 0);
6323 }
6324 break;
6325 #endif
6326 #ifdef TARGET_NR_uselib
6327 case TARGET_NR_uselib:
6328 goto unimplemented;
6329 #endif
6330 #ifdef TARGET_NR_swapon
6331 case TARGET_NR_swapon:
6332 if (!(p = lock_user_string(arg1)))
6333 goto efault;
6334 ret = get_errno(swapon(p, arg2));
6335 unlock_user(p, arg1, 0);
6336 break;
6337 #endif
6338 case TARGET_NR_reboot:
6339 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6340 /* arg4 is a command string, used only by RESTART2; it must be ignored in all other cases */
6341 p = lock_user_string(arg4);
6342 if (!p) {
6343 goto efault;
6344 }
6345 ret = get_errno(reboot(arg1, arg2, arg3, p));
6346 unlock_user(p, arg4, 0);
6347 } else {
6348 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6349 }
6350 break;
6351 #ifdef TARGET_NR_readdir
6352 case TARGET_NR_readdir:
6353 goto unimplemented;
6354 #endif
6355 #ifdef TARGET_NR_mmap
6356 case TARGET_NR_mmap:
6357 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6358 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6359 || defined(TARGET_S390X)
6360 {
6361 abi_ulong *v;
6362 abi_ulong v1, v2, v3, v4, v5, v6;
6363 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6364 goto efault;
6365 v1 = tswapal(v[0]);
6366 v2 = tswapal(v[1]);
6367 v3 = tswapal(v[2]);
6368 v4 = tswapal(v[3]);
6369 v5 = tswapal(v[4]);
6370 v6 = tswapal(v[5]);
6371 unlock_user(v, arg1, 0);
6372 ret = get_errno(target_mmap(v1, v2, v3,
6373 target_to_host_bitmask(v4, mmap_flags_tbl),
6374 v5, v6));
6375 }
6376 #else
6377 ret = get_errno(target_mmap(arg1, arg2, arg3,
6378 target_to_host_bitmask(arg4, mmap_flags_tbl),
6379 arg5,
6380 arg6));
6381 #endif
6382 break;
6383 #endif
6384 #ifdef TARGET_NR_mmap2
6385 case TARGET_NR_mmap2:
6386 #ifndef MMAP_SHIFT
6387 #define MMAP_SHIFT 12
6388 #endif
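/* mmap2 takes the file offset in 4096-byte (1 << MMAP_SHIFT) units,
   so shift it up to get the byte offset target_mmap() expects. */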
6389 ret = get_errno(target_mmap(arg1, arg2, arg3,
6390 target_to_host_bitmask(arg4, mmap_flags_tbl),
6391 arg5,
6392 arg6 << MMAP_SHIFT));
6393 break;
6394 #endif
6395 case TARGET_NR_munmap:
6396 ret = get_errno(target_munmap(arg1, arg2));
6397 break;
6398 case TARGET_NR_mprotect:
6399 {
6400 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6401 /* Special hack to detect libc making the stack executable. */
6402 if ((arg3 & PROT_GROWSDOWN)
6403 && arg1 >= ts->info->stack_limit
6404 && arg1 <= ts->info->start_stack) {
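/* Drop PROT_GROWSDOWN and extend the range down to the guest stack
   limit so that the whole stack receives the new protection. */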
6405 arg3 &= ~PROT_GROWSDOWN;
6406 arg2 = arg2 + arg1 - ts->info->stack_limit;
6407 arg1 = ts->info->stack_limit;
6408 }
6409 }
6410 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6411 break;
6412 #ifdef TARGET_NR_mremap
6413 case TARGET_NR_mremap:
6414 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6415 break;
6416 #endif
6417 /* ??? msync/mlock/munlock are broken for softmmu. */
6418 #ifdef TARGET_NR_msync
6419 case TARGET_NR_msync:
6420 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6421 break;
6422 #endif
6423 #ifdef TARGET_NR_mlock
6424 case TARGET_NR_mlock:
6425 ret = get_errno(mlock(g2h(arg1), arg2));
6426 break;
6427 #endif
6428 #ifdef TARGET_NR_munlock
6429 case TARGET_NR_munlock:
6430 ret = get_errno(munlock(g2h(arg1), arg2));
6431 break;
6432 #endif
6433 #ifdef TARGET_NR_mlockall
6434 case TARGET_NR_mlockall:
6435 ret = get_errno(mlockall(arg1));
6436 break;
6437 #endif
6438 #ifdef TARGET_NR_munlockall
6439 case TARGET_NR_munlockall:
6440 ret = get_errno(munlockall());
6441 break;
6442 #endif
6443 case TARGET_NR_truncate:
6444 if (!(p = lock_user_string(arg1)))
6445 goto efault;
6446 ret = get_errno(truncate(p, arg2));
6447 unlock_user(p, arg1, 0);
6448 break;
6449 case TARGET_NR_ftruncate:
6450 ret = get_errno(ftruncate(arg1, arg2));
6451 break;
6452 case TARGET_NR_fchmod:
6453 ret = get_errno(fchmod(arg1, arg2));
6454 break;
6455 #if defined(TARGET_NR_fchmodat)
6456 case TARGET_NR_fchmodat:
6457 if (!(p = lock_user_string(arg2)))
6458 goto efault;
6459 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6460 unlock_user(p, arg2, 0);
6461 break;
6462 #endif
6463 case TARGET_NR_getpriority:
6464 /* Note that negative values are valid for getpriority, so we must
6465 differentiate based on errno settings. */
6466 errno = 0;
6467 ret = getpriority(arg1, arg2);
6468 if (ret == -1 && errno != 0) {
6469 ret = -host_to_target_errno(errno);
6470 break;
6471 }
6472 #ifdef TARGET_ALPHA
6473 /* Return value is the unbiased priority. Signal no error. */
6474 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6475 #else
6476 /* Return value is a biased priority to avoid negative numbers. */
6477 ret = 20 - ret;
6478 #endif
6479 break;
6480 case TARGET_NR_setpriority:
6481 ret = get_errno(setpriority(arg1, arg2, arg3));
6482 break;
6483 #ifdef TARGET_NR_profil
6484 case TARGET_NR_profil:
6485 goto unimplemented;
6486 #endif
6487 case TARGET_NR_statfs:
6488 if (!(p = lock_user_string(arg1)))
6489 goto efault;
6490 ret = get_errno(statfs(path(p), &stfs));
6491 unlock_user(p, arg1, 0);
6492 convert_statfs:
6493 if (!is_error(ret)) {
6494 struct target_statfs *target_stfs;
6495
6496 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6497 goto efault;
6498 __put_user(stfs.f_type, &target_stfs->f_type);
6499 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6500 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6501 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6502 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6503 __put_user(stfs.f_files, &target_stfs->f_files);
6504 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6505 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6506 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6507 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6508 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6509 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6510 unlock_user_struct(target_stfs, arg2, 1);
6511 }
6512 break;
6513 case TARGET_NR_fstatfs:
6514 ret = get_errno(fstatfs(arg1, &stfs));
6515 goto convert_statfs;
6516 #ifdef TARGET_NR_statfs64
6517 case TARGET_NR_statfs64:
6518 if (!(p = lock_user_string(arg1)))
6519 goto efault;
6520 ret = get_errno(statfs(path(p), &stfs));
6521 unlock_user(p, arg1, 0);
6522 convert_statfs64:
6523 if (!is_error(ret)) {
6524 struct target_statfs64 *target_stfs;
6525
6526 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6527 goto efault;
6528 __put_user(stfs.f_type, &target_stfs->f_type);
6529 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6530 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6531 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6532 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6533 __put_user(stfs.f_files, &target_stfs->f_files);
6534 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6535 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6536 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6537 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6538 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6539 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6540 unlock_user_struct(target_stfs, arg3, 1);
6541 }
6542 break;
6543 case TARGET_NR_fstatfs64:
6544 ret = get_errno(fstatfs(arg1, &stfs));
6545 goto convert_statfs64;
6546 #endif
6547 #ifdef TARGET_NR_ioperm
6548 case TARGET_NR_ioperm:
6549 goto unimplemented;
6550 #endif
6551 #ifdef TARGET_NR_socketcall
6552 case TARGET_NR_socketcall:
6553 ret = do_socketcall(arg1, arg2);
6554 break;
6555 #endif
6556 #ifdef TARGET_NR_accept
6557 case TARGET_NR_accept:
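/* Plain accept() is just accept4() with no flags. */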
6558 ret = do_accept4(arg1, arg2, arg3, 0);
6559 break;
6560 #endif
6561 #ifdef TARGET_NR_accept4
6562 case TARGET_NR_accept4:
6563 #ifdef CONFIG_ACCEPT4
6564 ret = do_accept4(arg1, arg2, arg3, arg4);
6565 #else
6566 goto unimplemented;
6567 #endif
6568 break;
6569 #endif
6570 #ifdef TARGET_NR_bind
6571 case TARGET_NR_bind:
6572 ret = do_bind(arg1, arg2, arg3);
6573 break;
6574 #endif
6575 #ifdef TARGET_NR_connect
6576 case TARGET_NR_connect:
6577 ret = do_connect(arg1, arg2, arg3);
6578 break;
6579 #endif
6580 #ifdef TARGET_NR_getpeername
6581 case TARGET_NR_getpeername:
6582 ret = do_getpeername(arg1, arg2, arg3);
6583 break;
6584 #endif
6585 #ifdef TARGET_NR_getsockname
6586 case TARGET_NR_getsockname:
6587 ret = do_getsockname(arg1, arg2, arg3);
6588 break;
6589 #endif
6590 #ifdef TARGET_NR_getsockopt
6591 case TARGET_NR_getsockopt:
6592 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6593 break;
6594 #endif
6595 #ifdef TARGET_NR_listen
6596 case TARGET_NR_listen:
6597 ret = get_errno(listen(arg1, arg2));
6598 break;
6599 #endif
6600 #ifdef TARGET_NR_recv
6601 case TARGET_NR_recv:
6602 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6603 break;
6604 #endif
6605 #ifdef TARGET_NR_recvfrom
6606 case TARGET_NR_recvfrom:
6607 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6608 break;
6609 #endif
6610 #ifdef TARGET_NR_recvmsg
6611 case TARGET_NR_recvmsg:
6612 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6613 break;
6614 #endif
6615 #ifdef TARGET_NR_send
6616 case TARGET_NR_send:
6617 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6618 break;
6619 #endif
6620 #ifdef TARGET_NR_sendmsg
6621 case TARGET_NR_sendmsg:
6622 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6623 break;
6624 #endif
6625 #ifdef TARGET_NR_sendto
6626 case TARGET_NR_sendto:
6627 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6628 break;
6629 #endif
6630 #ifdef TARGET_NR_shutdown
6631 case TARGET_NR_shutdown:
6632 ret = get_errno(shutdown(arg1, arg2));
6633 break;
6634 #endif
6635 #ifdef TARGET_NR_socket
6636 case TARGET_NR_socket:
6637 ret = do_socket(arg1, arg2, arg3);
6638 break;
6639 #endif
6640 #ifdef TARGET_NR_socketpair
6641 case TARGET_NR_socketpair:
6642 ret = do_socketpair(arg1, arg2, arg3, arg4);
6643 break;
6644 #endif
6645 #ifdef TARGET_NR_setsockopt
6646 case TARGET_NR_setsockopt:
6647 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6648 break;
6649 #endif
6650
6651 case TARGET_NR_syslog:
6652 if (!(p = lock_user_string(arg2)))
6653 goto efault;
6654 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6655 unlock_user(p, arg2, 0);
6656 break;
6657
6658 case TARGET_NR_setitimer:
6659 {
6660 struct itimerval value, ovalue, *pvalue;
6661
6662 if (arg2) {
6663 pvalue = &value;
6664 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6665 || copy_from_user_timeval(&pvalue->it_value,
6666 arg2 + sizeof(struct target_timeval)))
6667 goto efault;
6668 } else {
6669 pvalue = NULL;
6670 }
6671 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6672 if (!is_error(ret) && arg3) {
6673 if (copy_to_user_timeval(arg3,
6674 &ovalue.it_interval)
6675 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6676 &ovalue.it_value))
6677 goto efault;
6678 }
6679 }
6680 break;
6681 case TARGET_NR_getitimer:
6682 {
6683 struct itimerval value;
6684
6685 ret = get_errno(getitimer(arg1, &value));
6686 if (!is_error(ret) && arg2) {
6687 if (copy_to_user_timeval(arg2,
6688 &value.it_interval)
6689 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6690 &value.it_value))
6691 goto efault;
6692 }
6693 }
6694 break;
6695 case TARGET_NR_stat:
6696 if (!(p = lock_user_string(arg1)))
6697 goto efault;
6698 ret = get_errno(stat(path(p), &st));
6699 unlock_user(p, arg1, 0);
6700 goto do_stat;
6701 case TARGET_NR_lstat:
6702 if (!(p = lock_user_string(arg1)))
6703 goto efault;
6704 ret = get_errno(lstat(path(p), &st));
6705 unlock_user(p, arg1, 0);
6706 goto do_stat;
6707 case TARGET_NR_fstat:
6708 {
6709 ret = get_errno(fstat(arg1, &st));
6710 do_stat:
6711 if (!is_error(ret)) {
6712 struct target_stat *target_st;
6713
6714 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6715 goto efault;
6716 memset(target_st, 0, sizeof(*target_st));
6717 __put_user(st.st_dev, &target_st->st_dev);
6718 __put_user(st.st_ino, &target_st->st_ino);
6719 __put_user(st.st_mode, &target_st->st_mode);
6720 __put_user(st.st_uid, &target_st->st_uid);
6721 __put_user(st.st_gid, &target_st->st_gid);
6722 __put_user(st.st_nlink, &target_st->st_nlink);
6723 __put_user(st.st_rdev, &target_st->st_rdev);
6724 __put_user(st.st_size, &target_st->st_size);
6725 __put_user(st.st_blksize, &target_st->st_blksize);
6726 __put_user(st.st_blocks, &target_st->st_blocks);
6727 __put_user(st.st_atime, &target_st->target_st_atime);
6728 __put_user(st.st_mtime, &target_st->target_st_mtime);
6729 __put_user(st.st_ctime, &target_st->target_st_ctime);
6730 unlock_user_struct(target_st, arg2, 1);
6731 }
6732 }
6733 break;
6734 #ifdef TARGET_NR_olduname
6735 case TARGET_NR_olduname:
6736 goto unimplemented;
6737 #endif
6738 #ifdef TARGET_NR_iopl
6739 case TARGET_NR_iopl:
6740 goto unimplemented;
6741 #endif
6742 case TARGET_NR_vhangup:
6743 ret = get_errno(vhangup());
6744 break;
6745 #ifdef TARGET_NR_idle
6746 case TARGET_NR_idle:
6747 goto unimplemented;
6748 #endif
6749 #ifdef TARGET_NR_syscall
6750 case TARGET_NR_syscall:
6751 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6752 arg6, arg7, arg8, 0);
6753 break;
6754 #endif
6755 case TARGET_NR_wait4:
6756 {
6757 int status;
6758 abi_long status_ptr = arg2;
6759 struct rusage rusage, *rusage_ptr;
6760 abi_ulong target_rusage = arg4;
6761 if (target_rusage)
6762 rusage_ptr = &rusage;
6763 else
6764 rusage_ptr = NULL;
6765 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6766 if (!is_error(ret)) {
6767 if (status_ptr && ret) {
6768 status = host_to_target_waitstatus(status);
6769 if (put_user_s32(status, status_ptr))
6770 goto efault;
6771 }
6772 if (target_rusage)
6773 host_to_target_rusage(target_rusage, &rusage);
6774 }
6775 }
6776 break;
6777 #ifdef TARGET_NR_swapoff
6778 case TARGET_NR_swapoff:
6779 if (!(p = lock_user_string(arg1)))
6780 goto efault;
6781 ret = get_errno(swapoff(p));
6782 unlock_user(p, arg1, 0);
6783 break;
6784 #endif
6785 case TARGET_NR_sysinfo:
6786 {
6787 struct target_sysinfo *target_value;
6788 struct sysinfo value;
6789 ret = get_errno(sysinfo(&value));
6790 if (!is_error(ret) && arg1)
6791 {
6792 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6793 goto efault;
6794 __put_user(value.uptime, &target_value->uptime);
6795 __put_user(value.loads[0], &target_value->loads[0]);
6796 __put_user(value.loads[1], &target_value->loads[1]);
6797 __put_user(value.loads[2], &target_value->loads[2]);
6798 __put_user(value.totalram, &target_value->totalram);
6799 __put_user(value.freeram, &target_value->freeram);
6800 __put_user(value.sharedram, &target_value->sharedram);
6801 __put_user(value.bufferram, &target_value->bufferram);
6802 __put_user(value.totalswap, &target_value->totalswap);
6803 __put_user(value.freeswap, &target_value->freeswap);
6804 __put_user(value.procs, &target_value->procs);
6805 __put_user(value.totalhigh, &target_value->totalhigh);
6806 __put_user(value.freehigh, &target_value->freehigh);
6807 __put_user(value.mem_unit, &target_value->mem_unit);
6808 unlock_user_struct(target_value, arg1, 1);
6809 }
6810 }
6811 break;
6812 #ifdef TARGET_NR_ipc
6813 case TARGET_NR_ipc:
6814 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6815 break;
6816 #endif
6817 #ifdef TARGET_NR_semget
6818 case TARGET_NR_semget:
6819 ret = get_errno(semget(arg1, arg2, arg3));
6820 break;
6821 #endif
6822 #ifdef TARGET_NR_semop
6823 case TARGET_NR_semop:
6824 ret = do_semop(arg1, arg2, arg3);
6825 break;
6826 #endif
6827 #ifdef TARGET_NR_semctl
6828 case TARGET_NR_semctl:
6829 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6830 break;
6831 #endif
6832 #ifdef TARGET_NR_msgctl
6833 case TARGET_NR_msgctl:
6834 ret = do_msgctl(arg1, arg2, arg3);
6835 break;
6836 #endif
6837 #ifdef TARGET_NR_msgget
6838 case TARGET_NR_msgget:
6839 ret = get_errno(msgget(arg1, arg2));
6840 break;
6841 #endif
6842 #ifdef TARGET_NR_msgrcv
6843 case TARGET_NR_msgrcv:
6844 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6845 break;
6846 #endif
6847 #ifdef TARGET_NR_msgsnd
6848 case TARGET_NR_msgsnd:
6849 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6850 break;
6851 #endif
6852 #ifdef TARGET_NR_shmget
6853 case TARGET_NR_shmget:
6854 ret = get_errno(shmget(arg1, arg2, arg3));
6855 break;
6856 #endif
6857 #ifdef TARGET_NR_shmctl
6858 case TARGET_NR_shmctl:
6859 ret = do_shmctl(arg1, arg2, arg3);
6860 break;
6861 #endif
6862 #ifdef TARGET_NR_shmat
6863 case TARGET_NR_shmat:
6864 ret = do_shmat(arg1, arg2, arg3);
6865 break;
6866 #endif
6867 #ifdef TARGET_NR_shmdt
6868 case TARGET_NR_shmdt:
6869 ret = do_shmdt(arg1);
6870 break;
6871 #endif
6872 case TARGET_NR_fsync:
6873 ret = get_errno(fsync(arg1));
6874 break;
6875 case TARGET_NR_clone:
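/* The guest clone() argument order differs between architectures,
   so shuffle the arguments into the order do_fork() expects. */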
6876 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6877 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6878 #elif defined(TARGET_CRIS)
6879 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6880 #elif defined(TARGET_MICROBLAZE)
6881 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6882 #elif defined(TARGET_S390X)
6883 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6884 #else
6885 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6886 #endif
6887 break;
6888 #ifdef __NR_exit_group
6889 /* new thread calls */
6890 case TARGET_NR_exit_group:
6891 #ifdef TARGET_GPROF
6892 _mcleanup();
6893 #endif
6894 gdb_exit(cpu_env, arg1);
6895 ret = get_errno(exit_group(arg1));
6896 break;
6897 #endif
6898 case TARGET_NR_setdomainname:
6899 if (!(p = lock_user_string(arg1)))
6900 goto efault;
6901 ret = get_errno(setdomainname(p, arg2));
6902 unlock_user(p, arg1, 0);
6903 break;
6904 case TARGET_NR_uname:
6905 /* no need to transcode because we use the linux syscall */
6906 {
6907 struct new_utsname * buf;
6908
6909 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6910 goto efault;
6911 ret = get_errno(sys_uname(buf));
6912 if (!is_error(ret)) {
6913 /* Overwrite the native machine name with whatever is being
6914 emulated. */
6915 strcpy(buf->machine, cpu_to_uname_machine(cpu_env));
6916 /* Allow the user to override the reported release. */
6917 if (qemu_uname_release && *qemu_uname_release)
6918 strcpy(buf->release, qemu_uname_release);
6919 }
6920 unlock_user_struct(buf, arg1, 1);
6921 }
6922 break;
6923 #ifdef TARGET_I386
6924 case TARGET_NR_modify_ldt:
6925 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6926 break;
6927 #if !defined(TARGET_X86_64)
6928 case TARGET_NR_vm86old:
6929 goto unimplemented;
6930 case TARGET_NR_vm86:
6931 ret = do_vm86(cpu_env, arg1, arg2);
6932 break;
6933 #endif
6934 #endif
6935 case TARGET_NR_adjtimex:
6936 goto unimplemented;
6937 #ifdef TARGET_NR_create_module
6938 case TARGET_NR_create_module:
6939 #endif
6940 case TARGET_NR_init_module:
6941 case TARGET_NR_delete_module:
6942 #ifdef TARGET_NR_get_kernel_syms
6943 case TARGET_NR_get_kernel_syms:
6944 #endif
6945 goto unimplemented;
6946 case TARGET_NR_quotactl:
6947 goto unimplemented;
6948 case TARGET_NR_getpgid:
6949 ret = get_errno(getpgid(arg1));
6950 break;
6951 case TARGET_NR_fchdir:
6952 ret = get_errno(fchdir(arg1));
6953 break;
6954 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6955 case TARGET_NR_bdflush:
6956 goto unimplemented;
6957 #endif
6958 #ifdef TARGET_NR_sysfs
6959 case TARGET_NR_sysfs:
6960 goto unimplemented;
6961 #endif
6962 case TARGET_NR_personality:
6963 ret = get_errno(personality(arg1));
6964 break;
6965 #ifdef TARGET_NR_afs_syscall
6966 case TARGET_NR_afs_syscall:
6967 goto unimplemented;
6968 #endif
6969 #ifdef TARGET_NR__llseek /* Not on alpha */
6970 case TARGET_NR__llseek:
6971 {
6972 int64_t res;
6973 #if !defined(__NR_llseek)
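/* No llseek syscall on the host: combine the two 32-bit halves into
   a 64-bit offset and use lseek() directly. */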
6974 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6975 if (res == -1) {
6976 ret = get_errno(res);
6977 } else {
6978 ret = 0;
6979 }
6980 #else
6981 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6982 #endif
6983 if ((ret == 0) && put_user_s64(res, arg4)) {
6984 goto efault;
6985 }
6986 }
6987 break;
6988 #endif
6989 case TARGET_NR_getdents:
6990 #ifdef __NR_getdents
6991 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6992 {
6993 struct target_dirent *target_dirp;
6994 struct linux_dirent *dirp;
6995 abi_long count = arg3;
6996
6997 dirp = malloc(count);
6998 if (!dirp) {
6999 ret = -TARGET_ENOMEM;
7000 goto fail;
7001 }
7002
7003 ret = get_errno(sys_getdents(arg1, dirp, count));
7004 if (!is_error(ret)) {
7005 struct linux_dirent *de;
7006 struct target_dirent *tde;
7007 int len = ret;
7008 int reclen, treclen;
7009 int count1, tnamelen;
7010
7011 count1 = 0;
7012 de = dirp;
7013 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7014 goto efault;
7015 tde = target_dirp;
7016 while (len > 0) {
7017 reclen = de->d_reclen;
7018 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7019 assert(tnamelen >= 0);
7020 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7021 assert(count1 + treclen <= count);
7022 tde->d_reclen = tswap16(treclen);
7023 tde->d_ino = tswapal(de->d_ino);
7024 tde->d_off = tswapal(de->d_off);
7025 memcpy(tde->d_name, de->d_name, tnamelen);
7026 de = (struct linux_dirent *)((char *)de + reclen);
7027 len -= reclen;
7028 tde = (struct target_dirent *)((char *)tde + treclen);
7029 count1 += treclen;
7030 }
7031 ret = count1;
7032 unlock_user(target_dirp, arg2, ret);
7033 }
7034 free(dirp);
7035 }
7036 #else
7037 {
7038 struct linux_dirent *dirp;
7039 abi_long count = arg3;
7040
7041 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7042 goto efault;
7043 ret = get_errno(sys_getdents(arg1, dirp, count));
7044 if (!is_error(ret)) {
7045 struct linux_dirent *de;
7046 int len = ret;
7047 int reclen;
7048 de = dirp;
7049 while (len > 0) {
7050 reclen = de->d_reclen;
7051 if (reclen > len)
7052 break;
7053 de->d_reclen = tswap16(reclen);
7054 tswapls(&de->d_ino);
7055 tswapls(&de->d_off);
7056 de = (struct linux_dirent *)((char *)de + reclen);
7057 len -= reclen;
7058 }
7059 }
7060 unlock_user(dirp, arg2, ret);
7061 }
7062 #endif
7063 #else
7064 /* Implement getdents in terms of getdents64 */
7065 {
7066 struct linux_dirent64 *dirp;
7067 abi_long count = arg3;
7068
7069 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7070 if (!dirp) {
7071 goto efault;
7072 }
7073 ret = get_errno(sys_getdents64(arg1, dirp, count));
7074 if (!is_error(ret)) {
7075 /* Convert the dirent64 structs to target dirent. We do this
7076 * in-place, since we can guarantee that a target_dirent is no
7077 * larger than a dirent64; however this means we have to be
7078 * careful to read everything before writing in the new format.
7079 */
7080 struct linux_dirent64 *de;
7081 struct target_dirent *tde;
7082 int len = ret;
7083 int tlen = 0;
7084
7085 de = dirp;
7086 tde = (struct target_dirent *)dirp;
7087 while (len > 0) {
7088 int namelen, treclen;
7089 int reclen = de->d_reclen;
7090 uint64_t ino = de->d_ino;
7091 int64_t off = de->d_off;
7092 uint8_t type = de->d_type;
7093
7094 namelen = strlen(de->d_name);
7095 treclen = offsetof(struct target_dirent, d_name)
7096 + namelen + 2;
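/* The extra two bytes hold the trailing NUL of the name and the
   d_type byte stored in the last byte of the record (see below). */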
7097 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7098
7099 memmove(tde->d_name, de->d_name, namelen + 1);
7100 tde->d_ino = tswapal(ino);
7101 tde->d_off = tswapal(off);
7102 tde->d_reclen = tswap16(treclen);
7103 /* The target_dirent type is in what was formerly a padding
7104 * byte at the end of the structure:
7105 */
7106 *(((char *)tde) + treclen - 1) = type;
7107
7108 de = (struct linux_dirent64 *)((char *)de + reclen);
7109 tde = (struct target_dirent *)((char *)tde + treclen);
7110 len -= reclen;
7111 tlen += treclen;
7112 }
7113 ret = tlen;
7114 }
7115 unlock_user(dirp, arg2, ret);
7116 }
7117 #endif
7118 break;
7119 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7120 case TARGET_NR_getdents64:
7121 {
7122 struct linux_dirent64 *dirp;
7123 abi_long count = arg3;
7124 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7125 goto efault;
7126 ret = get_errno(sys_getdents64(arg1, dirp, count));
7127 if (!is_error(ret)) {
7128 struct linux_dirent64 *de;
7129 int len = ret;
7130 int reclen;
7131 de = dirp;
7132 while (len > 0) {
7133 reclen = de->d_reclen;
7134 if (reclen > len)
7135 break;
7136 de->d_reclen = tswap16(reclen);
7137 tswap64s((uint64_t *)&de->d_ino);
7138 tswap64s((uint64_t *)&de->d_off);
7139 de = (struct linux_dirent64 *)((char *)de + reclen);
7140 len -= reclen;
7141 }
7142 }
7143 unlock_user(dirp, arg2, ret);
7144 }
7145 break;
7146 #endif /* TARGET_NR_getdents64 */
7147 #if defined(TARGET_NR__newselect)
7148 case TARGET_NR__newselect:
7149 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7150 break;
7151 #endif
7152 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7153 # ifdef TARGET_NR_poll
7154 case TARGET_NR_poll:
7155 # endif
7156 # ifdef TARGET_NR_ppoll
7157 case TARGET_NR_ppoll:
7158 # endif
7159 {
7160 struct target_pollfd *target_pfd;
7161 unsigned int nfds = arg2;
7162 int timeout = arg3;
7163 struct pollfd *pfd;
7164 unsigned int i;
7165
7166 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7167 if (!target_pfd)
7168 goto efault;
7169
7170 pfd = alloca(sizeof(struct pollfd) * nfds);
7171 for(i = 0; i < nfds; i++) {
7172 pfd[i].fd = tswap32(target_pfd[i].fd);
7173 pfd[i].events = tswap16(target_pfd[i].events);
7174 }
7175
7176 # ifdef TARGET_NR_ppoll
7177 if (num == TARGET_NR_ppoll) {
7178 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7179 target_sigset_t *target_set;
7180 sigset_t _set, *set = &_set;
7181
7182 if (arg3) {
7183 if (target_to_host_timespec(timeout_ts, arg3)) {
7184 unlock_user(target_pfd, arg1, 0);
7185 goto efault;
7186 }
7187 } else {
7188 timeout_ts = NULL;
7189 }
7190
7191 if (arg4) {
7192 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7193 if (!target_set) {
7194 unlock_user(target_pfd, arg1, 0);
7195 goto efault;
7196 }
7197 target_to_host_sigset(set, target_set);
7198 } else {
7199 set = NULL;
7200 }
7201
7202 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7203
7204 if (!is_error(ret) && arg3) {
7205 host_to_target_timespec(arg3, timeout_ts);
7206 }
7207 if (arg4) {
7208 unlock_user(target_set, arg4, 0);
7209 }
7210 } else
7211 # endif
7212 ret = get_errno(poll(pfd, nfds, timeout));
7213
7214 if (!is_error(ret)) {
7215 for(i = 0; i < nfds; i++) {
7216 target_pfd[i].revents = tswap16(pfd[i].revents);
7217 }
7218 }
7219 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7220 }
7221 break;
7222 #endif
7223 case TARGET_NR_flock:
7224 /* NOTE: the flock constant seems to be the same for every
7225 Linux platform */
7226 ret = get_errno(flock(arg1, arg2));
7227 break;
7228 case TARGET_NR_readv:
7229 {
7230 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7231 if (vec != NULL) {
7232 ret = get_errno(readv(arg1, vec, arg3));
7233 unlock_iovec(vec, arg2, arg3, 1);
7234 } else {
7235 ret = -host_to_target_errno(errno);
7236 }
7237 }
7238 break;
7239 case TARGET_NR_writev:
7240 {
7241 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7242 if (vec != NULL) {
7243 ret = get_errno(writev(arg1, vec, arg3));
7244 unlock_iovec(vec, arg2, arg3, 0);
7245 } else {
7246 ret = -host_to_target_errno(errno);
7247 }
7248 }
7249 break;
7250 case TARGET_NR_getsid:
7251 ret = get_errno(getsid(arg1));
7252 break;
7253 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7254 case TARGET_NR_fdatasync:
7255 ret = get_errno(fdatasync(arg1));
7256 break;
7257 #endif
7258 case TARGET_NR__sysctl:
7259 /* We don't implement this, but ENOTDIR is always a safe
7260 return value. */
7261 ret = -TARGET_ENOTDIR;
7262 break;
7263 case TARGET_NR_sched_getaffinity:
7264 {
7265 unsigned int mask_size;
7266 unsigned long *mask;
7267
7268 /*
7269 * sched_getaffinity needs multiples of ulong, so need to take
7270 * care of mismatches between target ulong and host ulong sizes.
7271 */
7272 if (arg2 & (sizeof(abi_ulong) - 1)) {
7273 ret = -TARGET_EINVAL;
7274 break;
7275 }
7276 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7277
7278 mask = alloca(mask_size);
7279 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7280
7281 if (!is_error(ret)) {
7282 if (copy_to_user(arg3, mask, ret)) {
7283 goto efault;
7284 }
7285 }
7286 }
7287 break;
7288 case TARGET_NR_sched_setaffinity:
7289 {
7290 unsigned int mask_size;
7291 unsigned long *mask;
7292
7293 /*
7294 * sched_setaffinity needs multiples of ulong, so need to take
7295 * care of mismatches between target ulong and host ulong sizes.
7296 */
7297 if (arg2 & (sizeof(abi_ulong) - 1)) {
7298 ret = -TARGET_EINVAL;
7299 break;
7300 }
7301 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7302
7303 mask = alloca(mask_size);
7304 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7305 goto efault;
7306 }
7307 memcpy(mask, p, arg2);
7308 unlock_user_struct(p, arg3, 0);
7309
7310 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7311 }
7312 break;
7313 case TARGET_NR_sched_setparam:
7314 {
7315 struct sched_param *target_schp;
7316 struct sched_param schp;
7317
7318 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7319 goto efault;
7320 schp.sched_priority = tswap32(target_schp->sched_priority);
7321 unlock_user_struct(target_schp, arg2, 0);
7322 ret = get_errno(sched_setparam(arg1, &schp));
7323 }
7324 break;
7325 case TARGET_NR_sched_getparam:
7326 {
7327 struct sched_param *target_schp;
7328 struct sched_param schp;
7329 ret = get_errno(sched_getparam(arg1, &schp));
7330 if (!is_error(ret)) {
7331 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7332 goto efault;
7333 target_schp->sched_priority = tswap32(schp.sched_priority);
7334 unlock_user_struct(target_schp, arg2, 1);
7335 }
7336 }
7337 break;
7338 case TARGET_NR_sched_setscheduler:
7339 {
7340 struct sched_param *target_schp;
7341 struct sched_param schp;
7342 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7343 goto efault;
7344 schp.sched_priority = tswap32(target_schp->sched_priority);
7345 unlock_user_struct(target_schp, arg3, 0);
7346 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7347 }
7348 break;
7349 case TARGET_NR_sched_getscheduler:
7350 ret = get_errno(sched_getscheduler(arg1));
7351 break;
7352 case TARGET_NR_sched_yield:
7353 ret = get_errno(sched_yield());
7354 break;
7355 case TARGET_NR_sched_get_priority_max:
7356 ret = get_errno(sched_get_priority_max(arg1));
7357 break;
7358 case TARGET_NR_sched_get_priority_min:
7359 ret = get_errno(sched_get_priority_min(arg1));
7360 break;
7361 case TARGET_NR_sched_rr_get_interval:
7362 {
7363 struct timespec ts;
7364 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7365 if (!is_error(ret)) {
7366 host_to_target_timespec(arg2, &ts);
7367 }
7368 }
7369 break;
7370 case TARGET_NR_nanosleep:
7371 {
7372 struct timespec req, rem;
7373 target_to_host_timespec(&req, arg1);
7374 ret = get_errno(nanosleep(&req, &rem));
7375 if (is_error(ret) && arg2) {
7376 host_to_target_timespec(arg2, &rem);
7377 }
7378 }
7379 break;
7380 #ifdef TARGET_NR_query_module
7381 case TARGET_NR_query_module:
7382 goto unimplemented;
7383 #endif
7384 #ifdef TARGET_NR_nfsservctl
7385 case TARGET_NR_nfsservctl:
7386 goto unimplemented;
7387 #endif
7388 case TARGET_NR_prctl:
7389 switch (arg1) {
7390 case PR_GET_PDEATHSIG:
7391 {
7392 int deathsig;
7393 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7394 if (!is_error(ret) && arg2
7395 && put_user_ual(deathsig, arg2)) {
7396 goto efault;
7397 }
7398 break;
7399 }
7400 #ifdef PR_GET_NAME
7401 case PR_GET_NAME:
7402 {
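/* The kernel task name (comm) is at most 16 bytes, including the
   trailing NUL. */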
7403 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7404 if (!name) {
7405 goto efault;
7406 }
7407 ret = get_errno(prctl(arg1, (unsigned long)name,
7408 arg3, arg4, arg5));
7409 unlock_user(name, arg2, 16);
7410 break;
7411 }
7412 case PR_SET_NAME:
7413 {
7414 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7415 if (!name) {
7416 goto efault;
7417 }
7418 ret = get_errno(prctl(arg1, (unsigned long)name,
7419 arg3, arg4, arg5));
7420 unlock_user(name, arg2, 0);
7421 break;
7422 }
7423 #endif
7424 default:
7425 /* Most prctl options have no pointer arguments */
7426 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7427 break;
7428 }
7429 break;
7430 #ifdef TARGET_NR_arch_prctl
7431 case TARGET_NR_arch_prctl:
7432 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7433 ret = do_arch_prctl(cpu_env, arg1, arg2);
7434 break;
7435 #else
7436 goto unimplemented;
7437 #endif
7438 #endif
7439 #ifdef TARGET_NR_pread64
7440 case TARGET_NR_pread64:
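/* ABIs that align 64-bit syscall arguments to even register pairs
   insert a padding slot, so the offset halves arrive one argument
   later. */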
7441 if (regpairs_aligned(cpu_env)) {
7442 arg4 = arg5;
7443 arg5 = arg6;
7444 }
7445 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7446 goto efault;
7447 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7448 unlock_user(p, arg2, ret);
7449 break;
7450 case TARGET_NR_pwrite64:
7451 if (regpairs_aligned(cpu_env)) {
7452 arg4 = arg5;
7453 arg5 = arg6;
7454 }
7455 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7456 goto efault;
7457 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7458 unlock_user(p, arg2, 0);
7459 break;
7460 #endif
7461 case TARGET_NR_getcwd:
7462 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7463 goto efault;
7464 ret = get_errno(sys_getcwd1(p, arg2));
7465 unlock_user(p, arg1, ret);
7466 break;
7467 case TARGET_NR_capget:
7468 goto unimplemented;
7469 case TARGET_NR_capset:
7470 goto unimplemented;
7471 case TARGET_NR_sigaltstack:
7472 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7473 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7474 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7475 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7476 break;
7477 #else
7478 goto unimplemented;
7479 #endif
7480
7481 #ifdef CONFIG_SENDFILE
7482 case TARGET_NR_sendfile:
7483 {
7484 off_t *offp = NULL;
7485 off_t off;
7486 if (arg3) {
7487 ret = get_user_sal(off, arg3);
7488 if (is_error(ret)) {
7489 break;
7490 }
7491 offp = &off;
7492 }
7493 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7494 if (!is_error(ret) && arg3) {
7495 abi_long ret2 = put_user_sal(off, arg3);
7496 if (is_error(ret2)) {
7497 ret = ret2;
7498 }
7499 }
7500 break;
7501 }
7502 #ifdef TARGET_NR_sendfile64
7503 case TARGET_NR_sendfile64:
7504 {
7505 off_t *offp = NULL;
7506 off_t off;
7507 if (arg3) {
7508 ret = get_user_s64(off, arg3);
7509 if (is_error(ret)) {
7510 break;
7511 }
7512 offp = &off;
7513 }
7514 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7515 if (!is_error(ret) && arg3) {
7516 abi_long ret2 = put_user_s64(off, arg3);
7517 if (is_error(ret2)) {
7518 ret = ret2;
7519 }
7520 }
7521 break;
7522 }
7523 #endif
7524 #else
7525 case TARGET_NR_sendfile:
7526 #ifdef TARGET_NR_sendfile64
7527 case TARGET_NR_sendfile64:
7528 #endif
7529 goto unimplemented;
7530 #endif
7531
7532 #ifdef TARGET_NR_getpmsg
7533 case TARGET_NR_getpmsg:
7534 goto unimplemented;
7535 #endif
7536 #ifdef TARGET_NR_putpmsg
7537 case TARGET_NR_putpmsg:
7538 goto unimplemented;
7539 #endif
7540 #ifdef TARGET_NR_vfork
7541 case TARGET_NR_vfork:
7542 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7543 0, 0, 0, 0));
7544 break;
7545 #endif
7546 #ifdef TARGET_NR_ugetrlimit
7547 case TARGET_NR_ugetrlimit:
7548 {
7549 struct rlimit rlim;
7550 int resource = target_to_host_resource(arg1);
7551 ret = get_errno(getrlimit(resource, &rlim));
7552 if (!is_error(ret)) {
7553 struct target_rlimit *target_rlim;
7554 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7555 goto efault;
7556 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7557 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7558 unlock_user_struct(target_rlim, arg2, 1);
7559 }
7560 break;
7561 }
7562 #endif
7563 #ifdef TARGET_NR_truncate64
7564 case TARGET_NR_truncate64:
7565 if (!(p = lock_user_string(arg1)))
7566 goto efault;
7567 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7568 unlock_user(p, arg1, 0);
7569 break;
7570 #endif
7571 #ifdef TARGET_NR_ftruncate64
7572 case TARGET_NR_ftruncate64:
7573 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7574 break;
7575 #endif
7576 #ifdef TARGET_NR_stat64
7577 case TARGET_NR_stat64:
7578 if (!(p = lock_user_string(arg1)))
7579 goto efault;
7580 ret = get_errno(stat(path(p), &st));
7581 unlock_user(p, arg1, 0);
7582 if (!is_error(ret))
7583 ret = host_to_target_stat64(cpu_env, arg2, &st);
7584 break;
7585 #endif
7586 #ifdef TARGET_NR_lstat64
7587 case TARGET_NR_lstat64:
7588 if (!(p = lock_user_string(arg1)))
7589 goto efault;
7590 ret = get_errno(lstat(path(p), &st));
7591 unlock_user(p, arg1, 0);
7592 if (!is_error(ret))
7593 ret = host_to_target_stat64(cpu_env, arg2, &st);
7594 break;
7595 #endif
7596 #ifdef TARGET_NR_fstat64
7597 case TARGET_NR_fstat64:
7598 ret = get_errno(fstat(arg1, &st));
7599 if (!is_error(ret))
7600 ret = host_to_target_stat64(cpu_env, arg2, &st);
7601 break;
7602 #endif
7603 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7604 #ifdef TARGET_NR_fstatat64
7605 case TARGET_NR_fstatat64:
7606 #endif
7607 #ifdef TARGET_NR_newfstatat
7608 case TARGET_NR_newfstatat:
7609 #endif
7610 if (!(p = lock_user_string(arg2)))
7611 goto efault;
7612 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7613 if (!is_error(ret))
7614 ret = host_to_target_stat64(cpu_env, arg3, &st);
7615 break;
7616 #endif
7617 case TARGET_NR_lchown:
7618 if (!(p = lock_user_string(arg1)))
7619 goto efault;
7620 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7621 unlock_user(p, arg1, 0);
7622 break;
7623 #ifdef TARGET_NR_getuid
7624 case TARGET_NR_getuid:
7625 ret = get_errno(high2lowuid(getuid()));
7626 break;
7627 #endif
7628 #ifdef TARGET_NR_getgid
7629 case TARGET_NR_getgid:
7630 ret = get_errno(high2lowgid(getgid()));
7631 break;
7632 #endif
7633 #ifdef TARGET_NR_geteuid
7634 case TARGET_NR_geteuid:
7635 ret = get_errno(high2lowuid(geteuid()));
7636 break;
7637 #endif
7638 #ifdef TARGET_NR_getegid
7639 case TARGET_NR_getegid:
7640 ret = get_errno(high2lowgid(getegid()));
7641 break;
7642 #endif
7643 case TARGET_NR_setreuid:
7644 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7645 break;
7646 case TARGET_NR_setregid:
7647 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7648 break;
7649 case TARGET_NR_getgroups:
7650 {
7651 int gidsetsize = arg1;
7652 target_id *target_grouplist;
7653 gid_t *grouplist;
7654 int i;
7655
7656 grouplist = alloca(gidsetsize * sizeof(gid_t));
7657 ret = get_errno(getgroups(gidsetsize, grouplist));
7658 if (gidsetsize == 0)
7659 break;
7660 if (!is_error(ret)) {
7661 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7662 if (!target_grouplist)
7663 goto efault;
7664 for(i = 0;i < ret; i++)
7665 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7666 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7667 }
7668 }
7669 break;
7670 case TARGET_NR_setgroups:
7671 {
7672 int gidsetsize = arg1;
7673 target_id *target_grouplist;
7674 gid_t *grouplist = NULL;
7675 int i;
7676 if (gidsetsize) {
7677 grouplist = alloca(gidsetsize * sizeof(gid_t));
7678 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7679 if (!target_grouplist) {
7680 ret = -TARGET_EFAULT;
7681 goto fail;
7682 }
7683 for (i = 0; i < gidsetsize; i++) {
7684 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7685 }
7686 unlock_user(target_grouplist, arg2, 0);
7687 }
7688 ret = get_errno(setgroups(gidsetsize, grouplist));
7689 }
7690 break;
7691 case TARGET_NR_fchown:
7692 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7693 break;
7694 #if defined(TARGET_NR_fchownat)
7695 case TARGET_NR_fchownat:
7696 if (!(p = lock_user_string(arg2)))
7697 goto efault;
7698 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7699 low2highgid(arg4), arg5));
7700 unlock_user(p, arg2, 0);
7701 break;
7702 #endif
7703 #ifdef TARGET_NR_setresuid
7704 case TARGET_NR_setresuid:
7705 ret = get_errno(setresuid(low2highuid(arg1),
7706 low2highuid(arg2),
7707 low2highuid(arg3)));
7708 break;
7709 #endif
7710 #ifdef TARGET_NR_getresuid
7711 case TARGET_NR_getresuid:
7712 {
7713 uid_t ruid, euid, suid;
7714 ret = get_errno(getresuid(&ruid, &euid, &suid));
7715 if (!is_error(ret)) {
7716 if (put_user_u16(high2lowuid(ruid), arg1)
7717 || put_user_u16(high2lowuid(euid), arg2)
7718 || put_user_u16(high2lowuid(suid), arg3))
7719 goto efault;
7720 }
7721 }
7722 break;
7723 #endif
7724 #ifdef TARGET_NR_getresgid
7725 case TARGET_NR_setresgid:
7726 ret = get_errno(setresgid(low2highgid(arg1),
7727 low2highgid(arg2),
7728 low2highgid(arg3)));
7729 break;
7730 #endif
7731 #ifdef TARGET_NR_getresgid
7732 case TARGET_NR_getresgid:
7733 {
7734 gid_t rgid, egid, sgid;
7735 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7736 if (!is_error(ret)) {
7737 if (put_user_u16(high2lowgid(rgid), arg1)
7738 || put_user_u16(high2lowgid(egid), arg2)
7739 || put_user_u16(high2lowgid(sgid), arg3))
7740 goto efault;
7741 }
7742 }
7743 break;
7744 #endif
7745 case TARGET_NR_chown:
7746 if (!(p = lock_user_string(arg1)))
7747 goto efault;
7748 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7749 unlock_user(p, arg1, 0);
7750 break;
7751 case TARGET_NR_setuid:
7752 ret = get_errno(setuid(low2highuid(arg1)));
7753 break;
7754 case TARGET_NR_setgid:
7755 ret = get_errno(setgid(low2highgid(arg1)));
7756 break;
7757 case TARGET_NR_setfsuid:
7758 ret = get_errno(setfsuid(arg1));
7759 break;
7760 case TARGET_NR_setfsgid:
7761 ret = get_errno(setfsgid(arg1));
7762 break;
7763
7764 #ifdef TARGET_NR_lchown32
7765 case TARGET_NR_lchown32:
7766 if (!(p = lock_user_string(arg1)))
7767 goto efault;
7768 ret = get_errno(lchown(p, arg2, arg3));
7769 unlock_user(p, arg1, 0);
7770 break;
7771 #endif
7772 #ifdef TARGET_NR_getuid32
7773 case TARGET_NR_getuid32:
7774 ret = get_errno(getuid());
7775 break;
7776 #endif
7777
7778 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7779 /* Alpha specific */
7780 case TARGET_NR_getxuid:
7781 {
7782 uid_t euid;
7783 euid = geteuid();
7784 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7785 }
7786 ret = get_errno(getuid());
7787 break;
7788 #endif
7789 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7790 /* Alpha specific */
7791 case TARGET_NR_getxgid:
7792 {
7793 gid_t egid;
7794 egid=getegid();
7795 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7796 }
7797 ret = get_errno(getgid());
7798 break;
7799 #endif
7800 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7801 /* Alpha specific */
7802 case TARGET_NR_osf_getsysinfo:
7803 ret = -TARGET_EOPNOTSUPP;
7804 switch (arg1) {
7805 case TARGET_GSI_IEEE_FP_CONTROL:
7806 {
7807 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7808
7809 /* Copied from linux ieee_fpcr_to_swcr. */
7810 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7811 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7812 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7813 | SWCR_TRAP_ENABLE_DZE
7814 | SWCR_TRAP_ENABLE_OVF);
7815 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7816 | SWCR_TRAP_ENABLE_INE);
7817 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7818 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7819
7820 if (put_user_u64 (swcr, arg2))
7821 goto efault;
7822 ret = 0;
7823 }
7824 break;
7825
7826 /* case GSI_IEEE_STATE_AT_SIGNAL:
7827 -- Not implemented in linux kernel.
7828 case GSI_UACPROC:
7829 -- Retrieves current unaligned access state; not much used.
7830 case GSI_PROC_TYPE:
7831 -- Retrieves implver information; surely not used.
7832 case GSI_GET_HWRPB:
7833 -- Grabs a copy of the HWRPB; surely not used.
7834 */
7835 }
7836 break;
7837 #endif
7838 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7839 /* Alpha specific */
7840 case TARGET_NR_osf_setsysinfo:
7841 ret = -TARGET_EOPNOTSUPP;
7842 switch (arg1) {
7843 case TARGET_SSI_IEEE_FP_CONTROL:
7844 {
7845 uint64_t swcr, fpcr, orig_fpcr;
7846
7847 if (get_user_u64 (swcr, arg2)) {
7848 goto efault;
7849 }
7850 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7851 fpcr = orig_fpcr & FPCR_DYN_MASK;
7852
7853 /* Copied from linux ieee_swcr_to_fpcr. */
7854 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7855 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7856 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7857 | SWCR_TRAP_ENABLE_DZE
7858 | SWCR_TRAP_ENABLE_OVF)) << 48;
7859 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7860 | SWCR_TRAP_ENABLE_INE)) << 57;
7861 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7862 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7863
7864 cpu_alpha_store_fpcr(cpu_env, fpcr);
7865 ret = 0;
7866 }
7867 break;
7868
7869 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7870 {
7871 uint64_t exc, fpcr, orig_fpcr;
7872 int si_code;
7873
7874 if (get_user_u64(exc, arg2)) {
7875 goto efault;
7876 }
7877
7878 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7879
7880 /* We only add to the exception status here. */
7881 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7882
7883 cpu_alpha_store_fpcr(cpu_env, fpcr);
7884 ret = 0;
7885
7886 /* Old exceptions are not signaled. */
7887 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7888
7889 /* If any exceptions were set by this call
7890 and are unmasked, send a signal. */
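/* The tests below each overwrite si_code, so if several exceptions are
 * raised at once the last matching one (invalid operation, FLTINV) is
 * the one that gets reported. */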
7891 si_code = 0;
7892 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7893 si_code = TARGET_FPE_FLTRES;
7894 }
7895 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7896 si_code = TARGET_FPE_FLTUND;
7897 }
7898 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7899 si_code = TARGET_FPE_FLTOVF;
7900 }
7901 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7902 si_code = TARGET_FPE_FLTDIV;
7903 }
7904 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7905 si_code = TARGET_FPE_FLTINV;
7906 }
7907 if (si_code != 0) {
7908 target_siginfo_t info;
7909 info.si_signo = SIGFPE;
7910 info.si_errno = 0;
7911 info.si_code = si_code;
7912 info._sifields._sigfault._addr
7913 = ((CPUArchState *)cpu_env)->pc;
7914 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7915 }
7916 }
7917 break;
7918
7919 /* case SSI_NVPAIRS:
7920 -- Used with SSIN_UACPROC to enable unaligned accesses.
7921 case SSI_IEEE_STATE_AT_SIGNAL:
7922 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7923 -- Not implemented in linux kernel
7924 */
7925 }
7926 break;
7927 #endif
7928 #ifdef TARGET_NR_osf_sigprocmask
7929 /* Alpha specific. */
7930 case TARGET_NR_osf_sigprocmask:
7931 {
7932 abi_ulong mask;
7933 int how;
7934 sigset_t set, oldset;
7935
7936 switch(arg1) {
7937 case TARGET_SIG_BLOCK:
7938 how = SIG_BLOCK;
7939 break;
7940 case TARGET_SIG_UNBLOCK:
7941 how = SIG_UNBLOCK;
7942 break;
7943 case TARGET_SIG_SETMASK:
7944 how = SIG_SETMASK;
7945 break;
7946 default:
7947 ret = -TARGET_EINVAL;
7948 goto fail;
7949 }
7950 mask = arg2;
7951 target_to_host_old_sigset(&set, &mask);
7952 sigprocmask(how, &set, &oldset);
7953 host_to_target_old_sigset(&mask, &oldset);
7954 ret = mask;
7955 }
7956 break;
7957 #endif
7958
7959 #ifdef TARGET_NR_getgid32
7960 case TARGET_NR_getgid32:
7961 ret = get_errno(getgid());
7962 break;
7963 #endif
7964 #ifdef TARGET_NR_geteuid32
7965 case TARGET_NR_geteuid32:
7966 ret = get_errno(geteuid());
7967 break;
7968 #endif
7969 #ifdef TARGET_NR_getegid32
7970 case TARGET_NR_getegid32:
7971 ret = get_errno(getegid());
7972 break;
7973 #endif
7974 #ifdef TARGET_NR_setreuid32
7975 case TARGET_NR_setreuid32:
7976 ret = get_errno(setreuid(arg1, arg2));
7977 break;
7978 #endif
7979 #ifdef TARGET_NR_setregid32
7980 case TARGET_NR_setregid32:
7981 ret = get_errno(setregid(arg1, arg2));
7982 break;
7983 #endif
7984 #ifdef TARGET_NR_getgroups32
7985 case TARGET_NR_getgroups32:
7986 {
7987 int gidsetsize = arg1;
7988 uint32_t *target_grouplist;
7989 gid_t *grouplist;
7990 int i;
7991
7992 grouplist = alloca(gidsetsize * sizeof(gid_t));
7993 ret = get_errno(getgroups(gidsetsize, grouplist));
7994 if (gidsetsize == 0)
7995 break;
7996 if (!is_error(ret)) {
7997 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7998 if (!target_grouplist) {
7999 ret = -TARGET_EFAULT;
8000 goto fail;
8001 }
8002 for(i = 0;i < ret; i++)
8003 target_grouplist[i] = tswap32(grouplist[i]);
8004 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8005 }
8006 }
8007 break;
8008 #endif
8009 #ifdef TARGET_NR_setgroups32
8010 case TARGET_NR_setgroups32:
8011 {
8012 int gidsetsize = arg1;
8013 uint32_t *target_grouplist;
8014 gid_t *grouplist;
8015 int i;
8016
8017 grouplist = alloca(gidsetsize * sizeof(gid_t));
8018 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8019 if (!target_grouplist) {
8020 ret = -TARGET_EFAULT;
8021 goto fail;
8022 }
8023 for(i = 0;i < gidsetsize; i++)
8024 grouplist[i] = tswap32(target_grouplist[i]);
8025 unlock_user(target_grouplist, arg2, 0);
8026 ret = get_errno(setgroups(gidsetsize, grouplist));
8027 }
8028 break;
8029 #endif
8030 #ifdef TARGET_NR_fchown32
8031 case TARGET_NR_fchown32:
8032 ret = get_errno(fchown(arg1, arg2, arg3));
8033 break;
8034 #endif
8035 #ifdef TARGET_NR_setresuid32
8036 case TARGET_NR_setresuid32:
8037 ret = get_errno(setresuid(arg1, arg2, arg3));
8038 break;
8039 #endif
8040 #ifdef TARGET_NR_getresuid32
8041 case TARGET_NR_getresuid32:
8042 {
8043 uid_t ruid, euid, suid;
8044 ret = get_errno(getresuid(&ruid, &euid, &suid));
8045 if (!is_error(ret)) {
8046 if (put_user_u32(ruid, arg1)
8047 || put_user_u32(euid, arg2)
8048 || put_user_u32(suid, arg3))
8049 goto efault;
8050 }
8051 }
8052 break;
8053 #endif
8054 #ifdef TARGET_NR_setresgid32
8055 case TARGET_NR_setresgid32:
8056 ret = get_errno(setresgid(arg1, arg2, arg3));
8057 break;
8058 #endif
8059 #ifdef TARGET_NR_getresgid32
8060 case TARGET_NR_getresgid32:
8061 {
8062 gid_t rgid, egid, sgid;
8063 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8064 if (!is_error(ret)) {
8065 if (put_user_u32(rgid, arg1)
8066 || put_user_u32(egid, arg2)
8067 || put_user_u32(sgid, arg3))
8068 goto efault;
8069 }
8070 }
8071 break;
8072 #endif
8073 #ifdef TARGET_NR_chown32
8074 case TARGET_NR_chown32:
8075 if (!(p = lock_user_string(arg1)))
8076 goto efault;
8077 ret = get_errno(chown(p, arg2, arg3));
8078 unlock_user(p, arg1, 0);
8079 break;
8080 #endif
8081 #ifdef TARGET_NR_setuid32
8082 case TARGET_NR_setuid32:
8083 ret = get_errno(setuid(arg1));
8084 break;
8085 #endif
8086 #ifdef TARGET_NR_setgid32
8087 case TARGET_NR_setgid32:
8088 ret = get_errno(setgid(arg1));
8089 break;
8090 #endif
8091 #ifdef TARGET_NR_setfsuid32
8092 case TARGET_NR_setfsuid32:
8093 ret = get_errno(setfsuid(arg1));
8094 break;
8095 #endif
8096 #ifdef TARGET_NR_setfsgid32
8097 case TARGET_NR_setfsgid32:
8098 ret = get_errno(setfsgid(arg1));
8099 break;
8100 #endif
8101
8102 case TARGET_NR_pivot_root:
8103 goto unimplemented;
8104 #ifdef TARGET_NR_mincore
8105 case TARGET_NR_mincore:
8106 {
8107 void *a;
8108 ret = -TARGET_EFAULT;
8109 if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
8110 goto efault;
8111 if (!(p = lock_user_string(arg3)))
8112 goto mincore_fail;
8113 ret = get_errno(mincore(a, arg2, p));
8114 unlock_user(p, arg3, ret);
8115 mincore_fail:
8116 unlock_user(a, arg1, 0);
8117 }
8118 break;
8119 #endif
8120 #ifdef TARGET_NR_arm_fadvise64_64
8121 case TARGET_NR_arm_fadvise64_64:
8122 {
8123 /*
8124 * arm_fadvise64_64 looks like fadvise64_64 but
8125 * with different argument order
8126 */
8127 abi_long temp;
8128 temp = arg3;
8129 arg3 = arg4;
8130 arg4 = temp;
8131 }
8132 #endif
8133 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8134 #ifdef TARGET_NR_fadvise64_64
8135 case TARGET_NR_fadvise64_64:
8136 #endif
8137 #ifdef TARGET_NR_fadvise64
8138 case TARGET_NR_fadvise64:
8139 #endif
8140 #ifdef TARGET_S390X
8141 switch (arg4) {
8142 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8143 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8144 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8145 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8146 default: break;
8147 }
8148 #endif
8149 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
8150 break;
8151 #endif
8152 #ifdef TARGET_NR_madvise
8153 case TARGET_NR_madvise:
8154 /* A straight passthrough may not be safe because qemu sometimes
8155 turns private file-backed mappings into anonymous mappings.
8156 This will break MADV_DONTNEED.
8157 This is a hint, so ignoring and returning success is ok. */
8158 ret = get_errno(0);
8159 break;
8160 #endif
8161 #if TARGET_ABI_BITS == 32
8162 case TARGET_NR_fcntl64:
8163 {
8164 int cmd;
8165 struct flock64 fl;
8166 struct target_flock64 *target_fl;
8167 #ifdef TARGET_ARM
8168 struct target_eabi_flock64 *target_efl;
8169 #endif
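/* A separate EABI layout is needed here because ARM EABI aligns the
 * 64-bit l_start/l_len members of flock64 to 8 bytes, so the guest
 * structure is padded differently from the old-ABI target_flock64. */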
8170
8171 cmd = target_to_host_fcntl_cmd(arg2);
8172 if (cmd == -TARGET_EINVAL) {
8173 ret = cmd;
8174 break;
8175 }
8176
8177 switch(arg2) {
8178 case TARGET_F_GETLK64:
8179 #ifdef TARGET_ARM
8180 if (((CPUARMState *)cpu_env)->eabi) {
8181 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8182 goto efault;
8183 fl.l_type = tswap16(target_efl->l_type);
8184 fl.l_whence = tswap16(target_efl->l_whence);
8185 fl.l_start = tswap64(target_efl->l_start);
8186 fl.l_len = tswap64(target_efl->l_len);
8187 fl.l_pid = tswap32(target_efl->l_pid);
8188 unlock_user_struct(target_efl, arg3, 0);
8189 } else
8190 #endif
8191 {
8192 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8193 goto efault;
8194 fl.l_type = tswap16(target_fl->l_type);
8195 fl.l_whence = tswap16(target_fl->l_whence);
8196 fl.l_start = tswap64(target_fl->l_start);
8197 fl.l_len = tswap64(target_fl->l_len);
8198 fl.l_pid = tswap32(target_fl->l_pid);
8199 unlock_user_struct(target_fl, arg3, 0);
8200 }
8201 ret = get_errno(fcntl(arg1, cmd, &fl));
8202 if (ret == 0) {
8203 #ifdef TARGET_ARM
8204 if (((CPUARMState *)cpu_env)->eabi) {
8205 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8206 goto efault;
8207 target_efl->l_type = tswap16(fl.l_type);
8208 target_efl->l_whence = tswap16(fl.l_whence);
8209 target_efl->l_start = tswap64(fl.l_start);
8210 target_efl->l_len = tswap64(fl.l_len);
8211 target_efl->l_pid = tswap32(fl.l_pid);
8212 unlock_user_struct(target_efl, arg3, 1);
8213 } else
8214 #endif
8215 {
8216 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8217 goto efault;
8218 target_fl->l_type = tswap16(fl.l_type);
8219 target_fl->l_whence = tswap16(fl.l_whence);
8220 target_fl->l_start = tswap64(fl.l_start);
8221 target_fl->l_len = tswap64(fl.l_len);
8222 target_fl->l_pid = tswap32(fl.l_pid);
8223 unlock_user_struct(target_fl, arg3, 1);
8224 }
8225 }
8226 break;
8227
8228 case TARGET_F_SETLK64:
8229 case TARGET_F_SETLKW64:
8230 #ifdef TARGET_ARM
8231 if (((CPUARMState *)cpu_env)->eabi) {
8232 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8233 goto efault;
8234 fl.l_type = tswap16(target_efl->l_type);
8235 fl.l_whence = tswap16(target_efl->l_whence);
8236 fl.l_start = tswap64(target_efl->l_start);
8237 fl.l_len = tswap64(target_efl->l_len);
8238 fl.l_pid = tswap32(target_efl->l_pid);
8239 unlock_user_struct(target_efl, arg3, 0);
8240 } else
8241 #endif
8242 {
8243 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8244 goto efault;
8245 fl.l_type = tswap16(target_fl->l_type);
8246 fl.l_whence = tswap16(target_fl->l_whence);
8247 fl.l_start = tswap64(target_fl->l_start);
8248 fl.l_len = tswap64(target_fl->l_len);
8249 fl.l_pid = tswap32(target_fl->l_pid);
8250 unlock_user_struct(target_fl, arg3, 0);
8251 }
8252 ret = get_errno(fcntl(arg1, cmd, &fl));
8253 break;
8254 default:
8255 ret = do_fcntl(arg1, arg2, arg3);
8256 break;
8257 }
8258 break;
8259 }
8260 #endif
8261 #ifdef TARGET_NR_cacheflush
8262 case TARGET_NR_cacheflush:
8263 /* self-modifying code is handled automatically, so nothing needed */
8264 ret = 0;
8265 break;
8266 #endif
8267 #ifdef TARGET_NR_security
8268 case TARGET_NR_security:
8269 goto unimplemented;
8270 #endif
8271 #ifdef TARGET_NR_getpagesize
8272 case TARGET_NR_getpagesize:
8273 ret = TARGET_PAGE_SIZE;
8274 break;
8275 #endif
8276 case TARGET_NR_gettid:
8277 ret = get_errno(gettid());
8278 break;
8279 #ifdef TARGET_NR_readahead
8280 case TARGET_NR_readahead:
8281 #if TARGET_ABI_BITS == 32
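/* On 32-bit ABIs the 64-bit offset is passed as a register pair; targets
 * where regpairs_aligned() is true require the pair to start on an
 * even-numbered register, so the arguments are shifted down by one
 * before the offset is reassembled below. */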
8282 if (regpairs_aligned(cpu_env)) {
8283 arg2 = arg3;
8284 arg3 = arg4;
8285 arg4 = arg5;
8286 }
8287 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8288 #else
8289 ret = get_errno(readahead(arg1, arg2, arg3));
8290 #endif
8291 break;
8292 #endif
8293 #ifdef CONFIG_ATTR
8294 #ifdef TARGET_NR_setxattr
8295 case TARGET_NR_listxattr:
8296 case TARGET_NR_llistxattr:
8297 {
8298 void *p, *b = 0;
8299 if (arg2) {
8300 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8301 if (!b) {
8302 ret = -TARGET_EFAULT;
8303 break;
8304 }
8305 }
8306 p = lock_user_string(arg1);
8307 if (p) {
8308 if (num == TARGET_NR_listxattr) {
8309 ret = get_errno(listxattr(p, b, arg3));
8310 } else {
8311 ret = get_errno(llistxattr(p, b, arg3));
8312 }
8313 } else {
8314 ret = -TARGET_EFAULT;
8315 }
8316 unlock_user(p, arg1, 0);
8317 unlock_user(b, arg2, arg3);
8318 break;
8319 }
8320 case TARGET_NR_flistxattr:
8321 {
8322 void *b = 0;
8323 if (arg2) {
8324 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8325 if (!b) {
8326 ret = -TARGET_EFAULT;
8327 break;
8328 }
8329 }
8330 ret = get_errno(flistxattr(arg1, b, arg3));
8331 unlock_user(b, arg2, arg3);
8332 break;
8333 }
8334 case TARGET_NR_setxattr:
8335 case TARGET_NR_lsetxattr:
8336 {
8337 void *p, *n, *v = 0;
8338 if (arg3) {
8339 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8340 if (!v) {
8341 ret = -TARGET_EFAULT;
8342 break;
8343 }
8344 }
8345 p = lock_user_string(arg1);
8346 n = lock_user_string(arg2);
8347 if (p && n) {
8348 if (num == TARGET_NR_setxattr) {
8349 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8350 } else {
8351 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8352 }
8353 } else {
8354 ret = -TARGET_EFAULT;
8355 }
8356 unlock_user(p, arg1, 0);
8357 unlock_user(n, arg2, 0);
8358 unlock_user(v, arg3, 0);
8359 }
8360 break;
8361 case TARGET_NR_fsetxattr:
8362 {
8363 void *n, *v = 0;
8364 if (arg3) {
8365 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8366 if (!v) {
8367 ret = -TARGET_EFAULT;
8368 break;
8369 }
8370 }
8371 n = lock_user_string(arg2);
8372 if (n) {
8373 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8374 } else {
8375 ret = -TARGET_EFAULT;
8376 }
8377 unlock_user(n, arg2, 0);
8378 unlock_user(v, arg3, 0);
8379 }
8380 break;
8381 case TARGET_NR_getxattr:
8382 case TARGET_NR_lgetxattr:
8383 {
8384 void *p, *n, *v = 0;
8385 if (arg3) {
8386 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8387 if (!v) {
8388 ret = -TARGET_EFAULT;
8389 break;
8390 }
8391 }
8392 p = lock_user_string(arg1);
8393 n = lock_user_string(arg2);
8394 if (p && n) {
8395 if (num == TARGET_NR_getxattr) {
8396 ret = get_errno(getxattr(p, n, v, arg4));
8397 } else {
8398 ret = get_errno(lgetxattr(p, n, v, arg4));
8399 }
8400 } else {
8401 ret = -TARGET_EFAULT;
8402 }
8403 unlock_user(p, arg1, 0);
8404 unlock_user(n, arg2, 0);
8405 unlock_user(v, arg3, arg4);
8406 }
8407 break;
8408 case TARGET_NR_fgetxattr:
8409 {
8410 void *n, *v = 0;
8411 if (arg3) {
8412 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8413 if (!v) {
8414 ret = -TARGET_EFAULT;
8415 break;
8416 }
8417 }
8418 n = lock_user_string(arg2);
8419 if (n) {
8420 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8421 } else {
8422 ret = -TARGET_EFAULT;
8423 }
8424 unlock_user(n, arg2, 0);
8425 unlock_user(v, arg3, arg4);
8426 }
8427 break;
8428 case TARGET_NR_removexattr:
8429 case TARGET_NR_lremovexattr:
8430 {
8431 void *p, *n;
8432 p = lock_user_string(arg1);
8433 n = lock_user_string(arg2);
8434 if (p && n) {
8435 if (num == TARGET_NR_removexattr) {
8436 ret = get_errno(removexattr(p, n));
8437 } else {
8438 ret = get_errno(lremovexattr(p, n));
8439 }
8440 } else {
8441 ret = -TARGET_EFAULT;
8442 }
8443 unlock_user(p, arg1, 0);
8444 unlock_user(n, arg2, 0);
8445 }
8446 break;
8447 case TARGET_NR_fremovexattr:
8448 {
8449 void *n;
8450 n = lock_user_string(arg2);
8451 if (n) {
8452 ret = get_errno(fremovexattr(arg1, n));
8453 } else {
8454 ret = -TARGET_EFAULT;
8455 }
8456 unlock_user(n, arg2, 0);
8457 }
8458 break;
8459 #endif
8460 #endif /* CONFIG_ATTR */
8461 #ifdef TARGET_NR_set_thread_area
8462 case TARGET_NR_set_thread_area:
8463 #if defined(TARGET_MIPS)
8464 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8465 ret = 0;
8466 break;
8467 #elif defined(TARGET_CRIS)
8468 if (arg1 & 0xff)
8469 ret = -TARGET_EINVAL;
8470 else {
8471 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8472 ret = 0;
8473 }
8474 break;
8475 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8476 ret = do_set_thread_area(cpu_env, arg1);
8477 break;
8478 #else
8479 goto unimplemented_nowarn;
8480 #endif
8481 #endif
8482 #ifdef TARGET_NR_get_thread_area
8483 case TARGET_NR_get_thread_area:
8484 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8485 ret = do_get_thread_area(cpu_env, arg1);
8486 #else
8487 goto unimplemented_nowarn;
8488 #endif
8489 #endif
8490 #ifdef TARGET_NR_getdomainname
8491 case TARGET_NR_getdomainname:
8492 goto unimplemented_nowarn;
8493 #endif
8494
8495 #ifdef TARGET_NR_clock_gettime
8496 case TARGET_NR_clock_gettime:
8497 {
8498 struct timespec ts;
8499 ret = get_errno(clock_gettime(arg1, &ts));
8500 if (!is_error(ret)) {
8501 host_to_target_timespec(arg2, &ts);
8502 }
8503 break;
8504 }
8505 #endif
8506 #ifdef TARGET_NR_clock_getres
8507 case TARGET_NR_clock_getres:
8508 {
8509 struct timespec ts;
8510 ret = get_errno(clock_getres(arg1, &ts));
8511 if (!is_error(ret)) {
8512 host_to_target_timespec(arg2, &ts);
8513 }
8514 break;
8515 }
8516 #endif
8517 #ifdef TARGET_NR_clock_nanosleep
8518 case TARGET_NR_clock_nanosleep:
8519 {
8520 struct timespec ts;
8521 target_to_host_timespec(&ts, arg3);
8522 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8523 if (arg4)
8524 host_to_target_timespec(arg4, &ts);
8525 break;
8526 }
8527 #endif
8528
8529 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8530 case TARGET_NR_set_tid_address:
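/* g2h() converts the guest address to the corresponding host pointer,
 * so the tid value the host kernel later writes or clears lands
 * directly in guest memory. */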
8531 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8532 break;
8533 #endif
8534
8535 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8536 case TARGET_NR_tkill:
8537 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8538 break;
8539 #endif
8540
8541 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8542 case TARGET_NR_tgkill:
8543 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8544 target_to_host_signal(arg3)));
8545 break;
8546 #endif
8547
8548 #ifdef TARGET_NR_set_robust_list
8549 case TARGET_NR_set_robust_list:
8550 case TARGET_NR_get_robust_list:
8551 /* The ABI for supporting robust futexes has userspace pass
8552 * the kernel a pointer to a linked list which is updated by
8553 * userspace after the syscall; the list is walked by the kernel
8554 * when the thread exits. Since the linked list in QEMU guest
8555 * memory isn't a valid linked list for the host and we have
8556 * no way to reliably intercept the thread-death event, we can't
8557 * support these. Silently return ENOSYS so that guest userspace
8558 * falls back to a non-robust futex implementation (which should
8559 * be OK except in the corner case of the guest crashing while
8560 * holding a mutex that is shared with another process via
8561 * shared memory).
8562 */
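/* For reference, the list head the kernel would otherwise walk looks
 * roughly like this (see <linux/futex.h>):
 *
 * struct robust_list { struct robust_list *next; };
 * struct robust_list_head {
 * struct robust_list list;
 * long futex_offset;
 * struct robust_list *list_op_pending;
 * };
 *
 * Every pointer in it is a guest address, which is why the host kernel
 * cannot follow it. */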
8563 goto unimplemented_nowarn;
8564 #endif
8565
8566 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8567 case TARGET_NR_utimensat:
8568 {
8569 struct timespec *tsp, ts[2];
8570 if (!arg3) {
8571 tsp = NULL;
8572 } else {
8573 target_to_host_timespec(ts, arg3);
8574 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8575 tsp = ts;
8576 }
8577 if (!arg2)
8578 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8579 else {
8580 if (!(p = lock_user_string(arg2))) {
8581 ret = -TARGET_EFAULT;
8582 goto fail;
8583 }
8584 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8585 unlock_user(p, arg2, 0);
8586 }
8587 }
8588 break;
8589 #endif
8590 #if defined(CONFIG_USE_NPTL)
8591 case TARGET_NR_futex:
8592 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8593 break;
8594 #endif
8595 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8596 case TARGET_NR_inotify_init:
8597 ret = get_errno(sys_inotify_init());
8598 break;
8599 #endif
8600 #ifdef CONFIG_INOTIFY1
8601 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8602 case TARGET_NR_inotify_init1:
8603 ret = get_errno(sys_inotify_init1(arg1));
8604 break;
8605 #endif
8606 #endif
8607 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8608 case TARGET_NR_inotify_add_watch:
8609 p = lock_user_string(arg2);
8610 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8611 unlock_user(p, arg2, 0);
8612 break;
8613 #endif
8614 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8615 case TARGET_NR_inotify_rm_watch:
8616 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8617 break;
8618 #endif
8619
8620 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8621 case TARGET_NR_mq_open:
8622 {
8623 struct mq_attr posix_mq_attr;
8624
8625 p = lock_user_string(arg1 - 1);
8626 if (arg4 != 0)
8627 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8628 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8629 unlock_user (p, arg1, 0);
8630 }
8631 break;
8632
8633 case TARGET_NR_mq_unlink:
8634 p = lock_user_string(arg1 - 1);
8635 ret = get_errno(mq_unlink(p));
8636 unlock_user (p, arg1, 0);
8637 break;
8638
8639 case TARGET_NR_mq_timedsend:
8640 {
8641 struct timespec ts;
8642
8643 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8644 if (arg5 != 0) {
8645 target_to_host_timespec(&ts, arg5);
8646 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8647 host_to_target_timespec(arg5, &ts);
8648 }
8649 else
8650 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8651 unlock_user (p, arg2, arg3);
8652 }
8653 break;
8654
8655 case TARGET_NR_mq_timedreceive:
8656 {
8657 struct timespec ts;
8658 unsigned int prio;
8659
8660 p = lock_user (VERIFY_WRITE, arg2, arg3, 1);
8661 if (arg5 != 0) {
8662 target_to_host_timespec(&ts, arg5);
8663 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8664 host_to_target_timespec(arg5, &ts);
8665 }
8666 else
8667 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8668 unlock_user (p, arg2, arg3);
8669 if (arg4 != 0)
8670 put_user_u32(prio, arg4);
8671 }
8672 break;
8673
8674 /* Not implemented for now... */
8675 /* case TARGET_NR_mq_notify: */
8676 /* break; */
8677
8678 case TARGET_NR_mq_getsetattr:
8679 {
8680 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8681 ret = 0;
8682 if (arg3 != 0) {
8683 ret = mq_getattr(arg1, &posix_mq_attr_out);
8684 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8685 }
8686 if (arg2 != 0) {
8687 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8688 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8689 }
8690
8691 }
8692 break;
8693 #endif
8694
8695 #ifdef CONFIG_SPLICE
8696 #ifdef TARGET_NR_tee
8697 case TARGET_NR_tee:
8698 {
8699 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8700 }
8701 break;
8702 #endif
8703 #ifdef TARGET_NR_splice
8704 case TARGET_NR_splice:
8705 {
8706 loff_t loff_in, loff_out;
8707 loff_t *ploff_in = NULL, *ploff_out = NULL;
8708 if(arg2) {
8709 get_user_u64(loff_in, arg2);
8710 ploff_in = &loff_in;
8711 }
8712 if(arg4) {
8713 get_user_u64(loff_out, arg4);
8714 ploff_out = &loff_out;
8715 }
8716 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8717 }
8718 break;
8719 #endif
8720 #ifdef TARGET_NR_vmsplice
8721 case TARGET_NR_vmsplice:
8722 {
8723 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8724 if (vec != NULL) {
8725 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8726 unlock_iovec(vec, arg2, arg3, 0);
8727 } else {
8728 ret = -host_to_target_errno(errno);
8729 }
8730 }
8731 break;
8732 #endif
8733 #endif /* CONFIG_SPLICE */
8734 #ifdef CONFIG_EVENTFD
8735 #if defined(TARGET_NR_eventfd)
8736 case TARGET_NR_eventfd:
8737 ret = get_errno(eventfd(arg1, 0));
8738 break;
8739 #endif
8740 #if defined(TARGET_NR_eventfd2)
8741 case TARGET_NR_eventfd2:
8742 {
8743 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
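/* Passing O_NONBLOCK/O_CLOEXEC straight to eventfd() relies on
 * EFD_NONBLOCK and EFD_CLOEXEC having the same values as the
 * corresponding O_* flags on the host, which holds on Linux. */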
8744 if (arg2 & TARGET_O_NONBLOCK) {
8745 host_flags |= O_NONBLOCK;
8746 }
8747 if (arg2 & TARGET_O_CLOEXEC) {
8748 host_flags |= O_CLOEXEC;
8749 }
8750 ret = get_errno(eventfd(arg1, host_flags));
8751 break;
8752 }
8753 #endif
8754 #endif /* CONFIG_EVENTFD */
8755 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8756 case TARGET_NR_fallocate:
8757 #if TARGET_ABI_BITS == 32
8758 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8759 target_offset64(arg5, arg6)));
8760 #else
8761 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8762 #endif
8763 break;
8764 #endif
8765 #if defined(CONFIG_SYNC_FILE_RANGE)
8766 #if defined(TARGET_NR_sync_file_range)
8767 case TARGET_NR_sync_file_range:
8768 #if TARGET_ABI_BITS == 32
8769 #if defined(TARGET_MIPS)
8770 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8771 target_offset64(arg5, arg6), arg7));
8772 #else
8773 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8774 target_offset64(arg4, arg5), arg6));
8775 #endif /* !TARGET_MIPS */
8776 #else
8777 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8778 #endif
8779 break;
8780 #endif
8781 #if defined(TARGET_NR_sync_file_range2)
8782 case TARGET_NR_sync_file_range2:
8783 /* This is like sync_file_range but the arguments are reordered */
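/* i.e. sync_file_range2(fd, flags, offset, nbytes) versus
 * sync_file_range(fd, offset, nbytes, flags), so arg2 moves to the end
 * of the host call. */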
8784 #if TARGET_ABI_BITS == 32
8785 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8786 target_offset64(arg5, arg6), arg2));
8787 #else
8788 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8789 #endif
8790 break;
8791 #endif
8792 #endif
8793 #if defined(CONFIG_EPOLL)
8794 #if defined(TARGET_NR_epoll_create)
8795 case TARGET_NR_epoll_create:
8796 ret = get_errno(epoll_create(arg1));
8797 break;
8798 #endif
8799 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8800 case TARGET_NR_epoll_create1:
8801 ret = get_errno(epoll_create1(arg1));
8802 break;
8803 #endif
8804 #if defined(TARGET_NR_epoll_ctl)
8805 case TARGET_NR_epoll_ctl:
8806 {
8807 struct epoll_event ep;
8808 struct epoll_event *epp = 0;
8809 if (arg4) {
8810 struct target_epoll_event *target_ep;
8811 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8812 goto efault;
8813 }
8814 ep.events = tswap32(target_ep->events);
8815 /* The epoll_data_t union is just opaque data to the kernel,
8816 * so we transfer all 64 bits across and need not worry what
8817 * actual data type it is.
8818 */
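/* (epoll_data_t is a union of void *ptr, int fd, uint32_t u32 and
 * uint64_t u64, so copying u64 covers every member.) */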
8819 ep.data.u64 = tswap64(target_ep->data.u64);
8820 unlock_user_struct(target_ep, arg4, 0);
8821 epp = &ep;
8822 }
8823 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8824 break;
8825 }
8826 #endif
8827
8828 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8829 #define IMPLEMENT_EPOLL_PWAIT
8830 #endif
8831 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8832 #if defined(TARGET_NR_epoll_wait)
8833 case TARGET_NR_epoll_wait:
8834 #endif
8835 #if defined(IMPLEMENT_EPOLL_PWAIT)
8836 case TARGET_NR_epoll_pwait:
8837 #endif
8838 {
8839 struct target_epoll_event *target_ep;
8840 struct epoll_event *ep;
8841 int epfd = arg1;
8842 int maxevents = arg3;
8843 int timeout = arg4;
8844
8845 target_ep = lock_user(VERIFY_WRITE, arg2,
8846 maxevents * sizeof(struct target_epoll_event), 1);
8847 if (!target_ep) {
8848 goto efault;
8849 }
8850
8851 ep = alloca(maxevents * sizeof(struct epoll_event));
8852
8853 switch (num) {
8854 #if defined(IMPLEMENT_EPOLL_PWAIT)
8855 case TARGET_NR_epoll_pwait:
8856 {
8857 target_sigset_t *target_set;
8858 sigset_t _set, *set = &_set;
8859
8860 if (arg5) {
8861 target_set = lock_user(VERIFY_READ, arg5,
8862 sizeof(target_sigset_t), 1);
8863 if (!target_set) {
8864 unlock_user(target_ep, arg2, 0);
8865 goto efault;
8866 }
8867 target_to_host_sigset(set, target_set);
8868 unlock_user(target_set, arg5, 0);
8869 } else {
8870 set = NULL;
8871 }
8872
8873 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8874 break;
8875 }
8876 #endif
8877 #if defined(TARGET_NR_epoll_wait)
8878 case TARGET_NR_epoll_wait:
8879 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8880 break;
8881 #endif
8882 default:
8883 ret = -TARGET_ENOSYS;
8884 }
8885 if (!is_error(ret)) {
8886 int i;
8887 for (i = 0; i < ret; i++) {
8888 target_ep[i].events = tswap32(ep[i].events);
8889 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8890 }
8891 }
8892 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8893 break;
8894 }
8895 #endif
8896 #endif
8897 #ifdef TARGET_NR_prlimit64
8898 case TARGET_NR_prlimit64:
8899 {
8900 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8901 struct target_rlimit64 *target_rnew, *target_rold;
8902 struct host_rlimit64 rnew, rold, *rnewp = 0;
8903 if (arg3) {
8904 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8905 goto efault;
8906 }
8907 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8908 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8909 unlock_user_struct(target_rnew, arg3, 0);
8910 rnewp = &rnew;
8911 }
8912
8913 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8914 if (!is_error(ret) && arg4) {
8915 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8916 goto efault;
8917 }
8918 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8919 target_rold->rlim_max = tswap64(rold.rlim_max);
8920 unlock_user_struct(target_rold, arg4, 1);
8921 }
8922 break;
8923 }
8924 #endif
8925 #ifdef TARGET_NR_gethostname
8926 case TARGET_NR_gethostname:
8927 {
8928 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8929 if (name) {
8930 ret = get_errno(gethostname(name, arg2));
8931 unlock_user(name, arg1, arg2);
8932 } else {
8933 ret = -TARGET_EFAULT;
8934 }
8935 break;
8936 }
8937 #endif
8938 default:
8939 unimplemented:
8940 gemu_log("qemu: Unsupported syscall: %d\n", num);
8941 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8942 unimplemented_nowarn:
8943 #endif
8944 ret = -TARGET_ENOSYS;
8945 break;
8946 }
8947 fail:
8948 #ifdef DEBUG
8949 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8950 #endif
8951 if(do_strace)
8952 print_syscall_ret(num, ret);
8953 return ret;
8954 efault:
8955 ret = -TARGET_EFAULT;
8956 goto fail;
8957 }