/* linux-user/syscall.c — QEMU user-mode Linux syscall emulation */
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include "linux_loop.h"
110 #include "cpu-uname.h"
111
112 #include "qemu.h"
113
/* clone() flags that the NPTL guest-thread support forwards to the host
 * clone(); zero when QEMU is built without NPTL support. */
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values. */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

/* Private copies of the VFAT readdir ioctl numbers; linux/msdos_fs.h is
 * not included directly to avoid header clashes. */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127
128
/* Undefine any libc-provided versions so we can supply our own wrappers
 * that always invoke the raw host syscall(2) interface directly. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/* Each _syscallN macro defines a static function 'name' taking N
 * arguments and invoking host syscall __NR_<name> via syscall(2). */
#define _syscall0(type,name) \
static type name (void) \
{ \
	return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
	return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
	return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
	return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
	return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
		  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
		  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
		  type6 arg6) \
{ \
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
182
183
/* Map the sys_* wrapper names used throughout this file onto the host
 * syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

/* On these 64-bit hosts there is no _llseek syscall; plain lseek already
 * covers the full offset range. */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#ifdef __NR_getdents
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
251
/* Bidirectional translation table between guest (TARGET_O_*) and host
 * (O_*) open/fcntl flag bits.  Each row is {target_mask, target_bits,
 * host_mask, host_bits}; flags whose host definition is optional are
 * conditionally included. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
284
/* Copy one utsname field, bounded to __NEW_UTS_LEN (which does not count
 * the terminating NUL), and guarantee NUL termination. */
static void copy_uts_field(char *dest, const char *src)
{
    strncpy(dest, src, __NEW_UTS_LEN);
    dest[__NEW_UTS_LEN] = '\0';
}

/*
 * uname(2) emulation helper.  Fills the kernel-layout struct new_utsname
 * from the host's libc struct utsname, translating field by field in case
 * the two layouts differ.  Returns 0 on success, -1 (with errno set) if
 * the host uname() call fails.
 */
static int sys_uname(struct new_utsname *buf)
{
    struct utsname host_info;

    if (uname(&host_info) < 0) {
        return -1;
    }

    memset(buf, 0, sizeof(*buf));
    copy_uts_field(buf->sysname, host_info.sysname);
    copy_uts_field(buf->nodename, host_info.nodename);
    copy_uts_field(buf->release, host_info.release);
    copy_uts_field(buf->version, host_info.version);
    copy_uts_field(buf->machine, host_info.machine);
#ifdef _GNU_SOURCE
    /* struct utsname only has a domainname member with _GNU_SOURCE */
    copy_uts_field(buf->domainname, host_info.domainname);
#endif
    return 0;
}
318
/*
 * getcwd() emulation helper.  On success returns the length of the path
 * written into buf including the terminating NUL (the kernel getcwd
 * convention); on failure returns -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        return -1;
    }
    return strlen(buf) + 1;
}
327
#ifdef TARGET_NR_openat
/*
 * openat(2) wrapper.  The variadic mode argument of openat() is only
 * meaningful when O_CREAT is present in flags, so it is forwarded only
 * in that case.
 */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    if (flags & O_CREAT) {
        return openat(dirfd, pathname, flags, mode);
    }
    return openat(dirfd, pathname, flags);
}
#endif
341
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc provides utimensat(): a NULL pathname means "operate on the
 * fd itself", which maps onto futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper, but the host kernel has the syscall: bind it raw. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support is available: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
365
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers over the host inotify API, defined only when both the
 * guest syscall number and the host syscall exist. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
402
/* Raw bindings for ppoll/pselect6/prlimit64.  If the host kernel headers
 * don't define the syscall number, bind to -1 so the call fails cleanly
 * at runtime (ENOSYS) instead of breaking the build. */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
452
/* Upper bound on errno values handled by the translation tables below;
 * a zero entry means "same value on host and target". */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
    /* The following errnos are not present on every host libc, so each
     * entry is guarded on the host-side symbol. */
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
};
571
572 static inline int host_to_target_errno(int err)
573 {
574 if(host_to_target_errno_table[err])
575 return host_to_target_errno_table[err];
576 return err;
577 }
578
579 static inline int target_to_host_errno(int err)
580 {
581 if (target_to_host_errno_table[err])
582 return target_to_host_errno_table[err];
583 return err;
584 }
585
586 static inline abi_long get_errno(abi_long ret)
587 {
588 if (ret == -1)
589 return -host_to_target_errno(errno);
590 else
591 return ret;
592 }
593
594 static inline int is_error(abi_long ret)
595 {
596 return (abi_ulong)ret >= (abi_ulong)(-4096);
597 }
598
599 char *target_strerror(int err)
600 {
601 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
602 return NULL;
603 }
604 return strerror(target_to_host_errno(err));
605 }
606
/* Guest program-break state: target_original_brk is the initial break,
 * target_brk the current one, and brk_page the end of the host pages
 * already reserved for the guest heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the guest's initial program break (called by the loader). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
616
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos.
 *
 * Emulates the brk(2) syscall for the guest: grows (or logically shrinks)
 * the guest heap, mapping new host pages as needed and zeroing newly
 * exposed heap bytes so the guest sees anonymous-mapping semantics. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) queries the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}
697
/* Copy a guest fd_set (an array of abi_ulong bit words covering n fds)
 * into a host fd_set.  Returns 0 on success or -TARGET_EFAULT if the
 * guest memory cannot be read. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* number of abi_ulong words needed to hold n bits */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
729
730 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
731 abi_ulong target_fds_addr,
732 int n)
733 {
734 if (target_fds_addr) {
735 if (copy_from_user_fdset(fds, target_fds_addr, n))
736 return -TARGET_EFAULT;
737 *fds_ptr = fds;
738 } else {
739 *fds_ptr = NULL;
740 }
741 return 0;
742 }
743
/* Copy a host fd_set back into the guest representation (an array of
 * abi_ulong bit words covering n fds).  Returns 0 on success or
 * -TARGET_EFAULT if the guest memory cannot be written. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    /* number of abi_ulong words needed to hold n bits */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
773
/* Host scheduler tick rate (USER_HZ): Alpha uses 1024, everything else
 * traditionally reports 100 through times(2)/clock_t values. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from the host's HZ to the target's. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* 64-bit intermediate to avoid overflow during the rescale */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
788
/* Copy a host struct rusage into the guest's layout at target_addr,
 * byte-swapping each field as needed.  Returns 0 on success or
 * -TARGET_EFAULT if the guest memory cannot be written. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
818
/* Convert a guest rlimit value (already in guest byte order in memory)
 * to a host rlim_t.  Guest "infinity" maps to host RLIM_INFINITY, and so
 * does any value that would not survive the conversion to rlim_t. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;
    
    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* detect truncation: if the round-trip changed the value, saturate */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;
    
    return result;
}
834
/* Convert a host rlim_t to the guest representation (byte-swapped).
 * Host infinity — or any value too large for the guest's abi_ulong —
 * becomes the guest's TARGET_RLIM_INFINITY. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;
    
    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);
    
    return result;
}
848
849 static inline int target_to_host_resource(int code)
850 {
851 switch (code) {
852 case TARGET_RLIMIT_AS:
853 return RLIMIT_AS;
854 case TARGET_RLIMIT_CORE:
855 return RLIMIT_CORE;
856 case TARGET_RLIMIT_CPU:
857 return RLIMIT_CPU;
858 case TARGET_RLIMIT_DATA:
859 return RLIMIT_DATA;
860 case TARGET_RLIMIT_FSIZE:
861 return RLIMIT_FSIZE;
862 case TARGET_RLIMIT_LOCKS:
863 return RLIMIT_LOCKS;
864 case TARGET_RLIMIT_MEMLOCK:
865 return RLIMIT_MEMLOCK;
866 case TARGET_RLIMIT_MSGQUEUE:
867 return RLIMIT_MSGQUEUE;
868 case TARGET_RLIMIT_NICE:
869 return RLIMIT_NICE;
870 case TARGET_RLIMIT_NOFILE:
871 return RLIMIT_NOFILE;
872 case TARGET_RLIMIT_NPROC:
873 return RLIMIT_NPROC;
874 case TARGET_RLIMIT_RSS:
875 return RLIMIT_RSS;
876 case TARGET_RLIMIT_RTPRIO:
877 return RLIMIT_RTPRIO;
878 case TARGET_RLIMIT_SIGPENDING:
879 return RLIMIT_SIGPENDING;
880 case TARGET_RLIMIT_STACK:
881 return RLIMIT_STACK;
882 default:
883 return code;
884 }
885 }
886
/* Read a guest struct timeval into a host one, byte-swapping the fields.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
902
/* Write a host struct timeval into guest memory, byte-swapping fields.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
918
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into a host one, byte-swapping fields.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr into guest memory, byte-swapping fields.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
960
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Emulates select(2): marshals the three optional guest fd_sets and the
 * optional timeout into host form, runs the host select(), and on
 * success copies the (modified) sets and residual timeout back to the
 * guest. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* A zero timeout address means "block indefinitely" (NULL timeout). */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
1010
/* Create a pipe with pipe2() semantics.  Returns the raw host result
 * (0 on success, -1 with errno on failure).  Without CONFIG_PIPE2 the
 * flags cannot be honoured, so -ENOSYS is returned directly.
 * NOTE(review): that -ENOSYS is a host errno value that the caller's
 * get_errno() passes through untranslated (ret != -1) — confirm the
 * host and guest ENOSYS values agree for the supported targets. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1019
/* Emulate pipe(2)/pipe2(2) for the guest.  Creates the host pipe, then
 * delivers the two fds either through target-specific register
 * conventions (legacy pipe on Alpha/MIPS/SH4) or by writing them to the
 * guest array at pipedes. */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* second fd goes in register a4; first is the return value */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1050
/* Convert a guest struct ip_mreq / ip_mreqn at 'target_addr' into the
 * host 'mreqn'.  'len' selects the variant: only the larger ip_mreqn
 * has the imr_ifindex field, which needs byte-swapping.  The two
 * s_addr fields are in network byte order on both sides and are copied
 * unchanged.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
1068
/* Convert a guest sockaddr ('len' bytes at 'target_addr') into the host
 * buffer 'addr', byte-swapping sa_family.  For AF_UNIX the sun_path is
 * NUL-terminated if the caller supplied a non-terminated one, which can
 * grow 'len' by one byte — so callers must provide a buffer of len+1
 * bytes (as do_bind() does).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* NOTE(review): cp[len] reads one byte beyond the region
             * locked above — confirm lock_user() guarantees that byte
             * is accessible. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    /* 'len' may now exceed the caller's value by one (AF_UNIX fix-up). */
    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1108
1109 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1110 struct sockaddr *addr,
1111 socklen_t len)
1112 {
1113 struct target_sockaddr *target_saddr;
1114
1115 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1116 if (!target_saddr)
1117 return -TARGET_EFAULT;
1118 memcpy(target_saddr, addr, len);
1119 target_saddr->sa_family = tswap16(addr->sa_family);
1120 unlock_user(target_saddr, target_addr, len);
1121
1122 return 0;
1123 }
1124
/* Convert guest ancillary data (control messages) into the host msghdr
 * 'msgh', walking both cmsg chains in lockstep.  Only SCM_RIGHTS (fd
 * passing) is converted properly; other types are byte-copied with a
 * warning.  On return msgh->msg_controllen holds the host cmsg space
 * actually used.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Not even room for one header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = total cmsg length minus the aligned target
         * header size. */
        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            /* Host control buffer exhausted: drop remaining messages. */
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        /* NOTE(review): cmsg_level keeps the (swapped) target value;
         * it is compared against TARGET_SOL_SOCKET below but passed to
         * the host uncoverted — confirm for targets where SOL_SOCKET
         * differs from the host value. */
        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            /* SCM_RIGHTS: the payload is an array of file descriptors;
             * byte-swap each one. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1180
/* Convert host ancillary data in 'msgh' back into the guest msghdr.
 * Handles SCM_RIGHTS (fd arrays) and SO_TIMESTAMP (struct timeval);
 * other types are byte-copied with a warning.  The guest
 * msg_controllen is updated with the target cmsg space consumed.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Not even room for one target header: nothing to deliver. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = host cmsg length minus the aligned host
         * header size. */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            /* Guest control buffer exhausted: drop remaining messages. */
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
            (cmsg->cmsg_type == SCM_RIGHTS)) {
            /* Passed file descriptors: swap each one to target order. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
                   (cmsg->cmsg_type == SO_TIMESTAMP) &&
                   (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1247
1248 /* do_setsockopt() Must return target values and target errnos. */
1249 static abi_long do_setsockopt(int sockfd, int level, int optname,
1250 abi_ulong optval_addr, socklen_t optlen)
1251 {
1252 abi_long ret;
1253 int val;
1254 struct ip_mreqn *ip_mreq;
1255 struct ip_mreq_source *ip_mreq_source;
1256
1257 switch(level) {
1258 case SOL_TCP:
1259 /* TCP options all take an 'int' value. */
1260 if (optlen < sizeof(uint32_t))
1261 return -TARGET_EINVAL;
1262
1263 if (get_user_u32(val, optval_addr))
1264 return -TARGET_EFAULT;
1265 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1266 break;
1267 case SOL_IP:
1268 switch(optname) {
1269 case IP_TOS:
1270 case IP_TTL:
1271 case IP_HDRINCL:
1272 case IP_ROUTER_ALERT:
1273 case IP_RECVOPTS:
1274 case IP_RETOPTS:
1275 case IP_PKTINFO:
1276 case IP_MTU_DISCOVER:
1277 case IP_RECVERR:
1278 case IP_RECVTOS:
1279 #ifdef IP_FREEBIND
1280 case IP_FREEBIND:
1281 #endif
1282 case IP_MULTICAST_TTL:
1283 case IP_MULTICAST_LOOP:
1284 val = 0;
1285 if (optlen >= sizeof(uint32_t)) {
1286 if (get_user_u32(val, optval_addr))
1287 return -TARGET_EFAULT;
1288 } else if (optlen >= 1) {
1289 if (get_user_u8(val, optval_addr))
1290 return -TARGET_EFAULT;
1291 }
1292 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1293 break;
1294 case IP_ADD_MEMBERSHIP:
1295 case IP_DROP_MEMBERSHIP:
1296 if (optlen < sizeof (struct target_ip_mreq) ||
1297 optlen > sizeof (struct target_ip_mreqn))
1298 return -TARGET_EINVAL;
1299
1300 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1301 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1302 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1303 break;
1304
1305 case IP_BLOCK_SOURCE:
1306 case IP_UNBLOCK_SOURCE:
1307 case IP_ADD_SOURCE_MEMBERSHIP:
1308 case IP_DROP_SOURCE_MEMBERSHIP:
1309 if (optlen != sizeof (struct target_ip_mreq_source))
1310 return -TARGET_EINVAL;
1311
1312 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1313 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1314 unlock_user (ip_mreq_source, optval_addr, 0);
1315 break;
1316
1317 default:
1318 goto unimplemented;
1319 }
1320 break;
1321 case SOL_RAW:
1322 switch (optname) {
1323 case ICMP_FILTER:
1324 /* struct icmp_filter takes an u32 value */
1325 if (optlen < sizeof(uint32_t)) {
1326 return -TARGET_EINVAL;
1327 }
1328
1329 if (get_user_u32(val, optval_addr)) {
1330 return -TARGET_EFAULT;
1331 }
1332 ret = get_errno(setsockopt(sockfd, level, optname,
1333 &val, sizeof(val)));
1334 break;
1335
1336 default:
1337 goto unimplemented;
1338 }
1339 break;
1340 case TARGET_SOL_SOCKET:
1341 switch (optname) {
1342 case TARGET_SO_RCVTIMEO:
1343 {
1344 struct timeval tv;
1345
1346 optname = SO_RCVTIMEO;
1347
1348 set_timeout:
1349 if (optlen != sizeof(struct target_timeval)) {
1350 return -TARGET_EINVAL;
1351 }
1352
1353 if (copy_from_user_timeval(&tv, optval_addr)) {
1354 return -TARGET_EFAULT;
1355 }
1356
1357 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1358 &tv, sizeof(tv)));
1359 return ret;
1360 }
1361 case TARGET_SO_SNDTIMEO:
1362 optname = SO_SNDTIMEO;
1363 goto set_timeout;
1364 /* Options with 'int' argument. */
1365 case TARGET_SO_DEBUG:
1366 optname = SO_DEBUG;
1367 break;
1368 case TARGET_SO_REUSEADDR:
1369 optname = SO_REUSEADDR;
1370 break;
1371 case TARGET_SO_TYPE:
1372 optname = SO_TYPE;
1373 break;
1374 case TARGET_SO_ERROR:
1375 optname = SO_ERROR;
1376 break;
1377 case TARGET_SO_DONTROUTE:
1378 optname = SO_DONTROUTE;
1379 break;
1380 case TARGET_SO_BROADCAST:
1381 optname = SO_BROADCAST;
1382 break;
1383 case TARGET_SO_SNDBUF:
1384 optname = SO_SNDBUF;
1385 break;
1386 case TARGET_SO_RCVBUF:
1387 optname = SO_RCVBUF;
1388 break;
1389 case TARGET_SO_KEEPALIVE:
1390 optname = SO_KEEPALIVE;
1391 break;
1392 case TARGET_SO_OOBINLINE:
1393 optname = SO_OOBINLINE;
1394 break;
1395 case TARGET_SO_NO_CHECK:
1396 optname = SO_NO_CHECK;
1397 break;
1398 case TARGET_SO_PRIORITY:
1399 optname = SO_PRIORITY;
1400 break;
1401 #ifdef SO_BSDCOMPAT
1402 case TARGET_SO_BSDCOMPAT:
1403 optname = SO_BSDCOMPAT;
1404 break;
1405 #endif
1406 case TARGET_SO_PASSCRED:
1407 optname = SO_PASSCRED;
1408 break;
1409 case TARGET_SO_TIMESTAMP:
1410 optname = SO_TIMESTAMP;
1411 break;
1412 case TARGET_SO_RCVLOWAT:
1413 optname = SO_RCVLOWAT;
1414 break;
1415 break;
1416 default:
1417 goto unimplemented;
1418 }
1419 if (optlen < sizeof(uint32_t))
1420 return -TARGET_EINVAL;
1421
1422 if (get_user_u32(val, optval_addr))
1423 return -TARGET_EFAULT;
1424 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1425 break;
1426 default:
1427 unimplemented:
1428 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1429 ret = -TARGET_ENOPROTOOPT;
1430 }
1431 return ret;
1432 }
1433
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates the level/option name to host values, performs the host
 * getsockopt(), and copies the result back in the width the guest
 * asked for.  'optlen' is a guest pointer to the in/out length word. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* Returns a struct ucred; converted field by field into the
             * target layout. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Report no more than the guest buffer can hold. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            /* NOTE(review): unknown SOL_SOCKET names fall through to
             * the generic int handler with the *target* optname value
             * unmapped — confirm this is intentional. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): 'sizeof(lv)' relies on socklen_t and int having
         * the same size; presumably 'sizeof(val)' was meant — confirm. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        /* Write the value back at the width the guest requested. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* The kernel lets these options be read as a single byte
             * when the value fits; mirror that behaviour. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
1615
/* Lock a guest iovec array into host memory and build a malloc'd host
 * struct iovec array from it.  On failure returns NULL with errno set
 * (EINVAL, ENOMEM or EFAULT); a zero count returns NULL with errno
 * cleared to 0 so callers can distinguish it.  Release with
 * unlock_iovec(). */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        errno = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            errno = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                errno = EFAULT;
                goto fail;
            }
            /* Clamp so the running total never exceeds max_len, as the
             * kernel does for readv/writev. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* NOTE(review): entries locked in earlier iterations are not
     * unlocked on this path — confirm whether that leaks lock_user
     * state. */
    free(vec);
 fail2:
    unlock_user(target_vec, target_addr, 0);
    return NULL;
}
1684
1685 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1686 int count, int copy)
1687 {
1688 struct target_iovec *target_vec;
1689 int i;
1690
1691 target_vec = lock_user(VERIFY_READ, target_addr,
1692 count * sizeof(struct target_iovec), 1);
1693 if (target_vec) {
1694 for (i = 0; i < count; i++) {
1695 abi_ulong base = tswapal(target_vec[i].iov_base);
1696 abi_long len = tswapal(target_vec[i].iov_base);
1697 if (len < 0) {
1698 break;
1699 }
1700 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1701 }
1702 unlock_user(target_vec, target_addr, 0);
1703 }
1704
1705 free(vec);
1706 }
1707
1708 static inline void target_to_host_sock_type(int *type)
1709 {
1710 int host_type = 0;
1711 int target_type = *type;
1712
1713 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1714 case TARGET_SOCK_DGRAM:
1715 host_type = SOCK_DGRAM;
1716 break;
1717 case TARGET_SOCK_STREAM:
1718 host_type = SOCK_STREAM;
1719 break;
1720 default:
1721 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1722 break;
1723 }
1724 if (target_type & TARGET_SOCK_CLOEXEC) {
1725 host_type |= SOCK_CLOEXEC;
1726 }
1727 if (target_type & TARGET_SOCK_NONBLOCK) {
1728 host_type |= SOCK_NONBLOCK;
1729 }
1730 *type = host_type;
1731 }
1732
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    target_to_host_sock_type(&type);

    /* Netlink sockets are not supported; reject them up front.
     * NOTE(review): this returns the host EAFNOSUPPORT constant, not
     * TARGET_EAFNOSUPPORT — confirm the values match for all targets. */
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* netlink socket connections not possible */
    return get_errno(socket(domain, type, protocol));
}
1742
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    /* addrlen arrives as an unsigned guest value; a huge value would
     * look negative here and is rejected as the kernel does. */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 byte: target_to_host_sockaddr() may NUL-terminate an AF_UNIX
     * sun_path, growing the copied address by one byte. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
1762
1763 /* do_connect() Must return target values and target errnos. */
1764 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1765 socklen_t addrlen)
1766 {
1767 void *addr;
1768 abi_long ret;
1769
1770 if ((int)addrlen < 0) {
1771 return -TARGET_EINVAL;
1772 }
1773
1774 addr = alloca(addrlen);
1775
1776 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1777 if (ret)
1778 return ret;
1779
1780 return get_errno(connect(sockfd, addr, addrlen));
1781 }
1782
/* do_sendrecvmsg() Must return target values and target errnos.
 *
 * Shared implementation of sendmsg/recvmsg ('send' selects direction).
 * Converts the guest msghdr — name, iovec array and control data —
 * into host form, performs the host call, and for recvmsg converts the
 * results back to the guest.  Returns the byte count on success. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* FIXME */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* 2x guest controllen — presumably headroom for the host's larger
     * cmsg header/alignment when converting control data; confirm. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        /* lock_iovec() reports failures through errno. */
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count; the conversions below reuse
             * 'ret' for their own status. */
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
1857
1858 /* If we don't have a system accept4() then just call accept.
1859 * The callsites to do_accept4() will ensure that they don't
1860 * pass a non-zero flags argument in this config.
1861 */
1862 #ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Fallback when the host lacks accept4(); per the comment above,
     * callers never pass flags in this configuration. */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
1869 #endif
1870
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    /* A NULL guest address means the caller doesn't want the peer
     * address at all. */
    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, flags));
    if (!is_error(ret)) {
        /* NOTE(review): host_to_target_sockaddr()'s result is ignored,
         * and accept() may have grown addrlen beyond the length checked
         * by access_ok() above — confirm both are acceptable. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
1904
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    /* Fetch the guest's in/out buffer-length word. */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        /* Copy the peer address and the updated length back out. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
1933
1934 /* do_getsockname() Must return target values and target errnos. */
1935 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1936 abi_ulong target_addrlen_addr)
1937 {
1938 socklen_t addrlen;
1939 void *addr;
1940 abi_long ret;
1941
1942 if (get_user_u32(addrlen, target_addrlen_addr))
1943 return -TARGET_EFAULT;
1944
1945 if ((int)addrlen < 0) {
1946 return -TARGET_EINVAL;
1947 }
1948
1949 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1950 return -TARGET_EFAULT;
1951
1952 addr = alloca(addrlen);
1953
1954 ret = get_errno(getsockname(fd, addr, &addrlen));
1955 if (!is_error(ret)) {
1956 host_to_target_sockaddr(target_addr, addr, addrlen);
1957 if (put_user_u32(addrlen, target_addrlen_addr))
1958 ret = -TARGET_EFAULT;
1959 }
1960 return ret;
1961 }
1962
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    /* Translate SOCK_* type and flag bits to host values. */
    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        /* Store both fds into the guest's two-element int array. */
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
1980
1981 /* do_sendto() Must return target values and target errnos. */
1982 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1983 abi_ulong target_addr, socklen_t addrlen)
1984 {
1985 void *addr;
1986 void *host_msg;
1987 abi_long ret;
1988
1989 if ((int)addrlen < 0) {
1990 return -TARGET_EINVAL;
1991 }
1992
1993 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1994 if (!host_msg)
1995 return -TARGET_EFAULT;
1996 if (target_addr) {
1997 addr = alloca(addrlen);
1998 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1999 if (ret) {
2000 unlock_user(host_msg, msg, 0);
2001 return ret;
2002 }
2003 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2004 } else {
2005 ret = get_errno(send(fd, host_msg, len, flags));
2006 }
2007 unlock_user(host_msg, msg, 0);
2008 return ret;
2009 }
2010
/* do_recvfrom() Must return target values and target errnos.
 * Implements both recvfrom() (target_addr != 0) and recv(). */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* Caller wants the sender's address: read the guest's buffer
         * length and use recvfrom(). */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            /* NOTE(review): host_to_target_sockaddr()'s result is
             * ignored here — confirm whether a fault should propagate. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: flush the received bytes back to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
    fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
2054
2055 #ifdef TARGET_NR_socketcall
2056 /* do_socketcall() Must return target values and target errnos. */
2057 static abi_long do_socketcall(int num, abi_ulong vptr)
2058 {
2059 abi_long ret;
2060 const int n = sizeof(abi_ulong);
2061
2062 switch(num) {
2063 case SOCKOP_socket:
2064 {
2065 abi_ulong domain, type, protocol;
2066
2067 if (get_user_ual(domain, vptr)
2068 || get_user_ual(type, vptr + n)
2069 || get_user_ual(protocol, vptr + 2 * n))
2070 return -TARGET_EFAULT;
2071
2072 ret = do_socket(domain, type, protocol);
2073 }
2074 break;
2075 case SOCKOP_bind:
2076 {
2077 abi_ulong sockfd;
2078 abi_ulong target_addr;
2079 socklen_t addrlen;
2080
2081 if (get_user_ual(sockfd, vptr)
2082 || get_user_ual(target_addr, vptr + n)
2083 || get_user_ual(addrlen, vptr + 2 * n))
2084 return -TARGET_EFAULT;
2085
2086 ret = do_bind(sockfd, target_addr, addrlen);
2087 }
2088 break;
2089 case SOCKOP_connect:
2090 {
2091 abi_ulong sockfd;
2092 abi_ulong target_addr;
2093 socklen_t addrlen;
2094
2095 if (get_user_ual(sockfd, vptr)
2096 || get_user_ual(target_addr, vptr + n)
2097 || get_user_ual(addrlen, vptr + 2 * n))
2098 return -TARGET_EFAULT;
2099
2100 ret = do_connect(sockfd, target_addr, addrlen);
2101 }
2102 break;
2103 case SOCKOP_listen:
2104 {
2105 abi_ulong sockfd, backlog;
2106
2107 if (get_user_ual(sockfd, vptr)
2108 || get_user_ual(backlog, vptr + n))
2109 return -TARGET_EFAULT;
2110
2111 ret = get_errno(listen(sockfd, backlog));
2112 }
2113 break;
2114 case SOCKOP_accept:
2115 {
2116 abi_ulong sockfd;
2117 abi_ulong target_addr, target_addrlen;
2118
2119 if (get_user_ual(sockfd, vptr)
2120 || get_user_ual(target_addr, vptr + n)
2121 || get_user_ual(target_addrlen, vptr + 2 * n))
2122 return -TARGET_EFAULT;
2123
2124 ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2125 }
2126 break;
2127 case SOCKOP_getsockname:
2128 {
2129 abi_ulong sockfd;
2130 abi_ulong target_addr, target_addrlen;
2131
2132 if (get_user_ual(sockfd, vptr)
2133 || get_user_ual(target_addr, vptr + n)
2134 || get_user_ual(target_addrlen, vptr + 2 * n))
2135 return -TARGET_EFAULT;
2136
2137 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2138 }
2139 break;
2140 case SOCKOP_getpeername:
2141 {
2142 abi_ulong sockfd;
2143 abi_ulong target_addr, target_addrlen;
2144
2145 if (get_user_ual(sockfd, vptr)
2146 || get_user_ual(target_addr, vptr + n)
2147 || get_user_ual(target_addrlen, vptr + 2 * n))
2148 return -TARGET_EFAULT;
2149
2150 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2151 }
2152 break;
2153 case SOCKOP_socketpair:
2154 {
2155 abi_ulong domain, type, protocol;
2156 abi_ulong tab;
2157
2158 if (get_user_ual(domain, vptr)
2159 || get_user_ual(type, vptr + n)
2160 || get_user_ual(protocol, vptr + 2 * n)
2161 || get_user_ual(tab, vptr + 3 * n))
2162 return -TARGET_EFAULT;
2163
2164 ret = do_socketpair(domain, type, protocol, tab);
2165 }
2166 break;
2167 case SOCKOP_send:
2168 {
2169 abi_ulong sockfd;
2170 abi_ulong msg;
2171 size_t len;
2172 abi_ulong flags;
2173
2174 if (get_user_ual(sockfd, vptr)
2175 || get_user_ual(msg, vptr + n)
2176 || get_user_ual(len, vptr + 2 * n)
2177 || get_user_ual(flags, vptr + 3 * n))
2178 return -TARGET_EFAULT;
2179
2180 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2181 }
2182 break;
2183 case SOCKOP_recv:
2184 {
2185 abi_ulong sockfd;
2186 abi_ulong msg;
2187 size_t len;
2188 abi_ulong flags;
2189
2190 if (get_user_ual(sockfd, vptr)
2191 || get_user_ual(msg, vptr + n)
2192 || get_user_ual(len, vptr + 2 * n)
2193 || get_user_ual(flags, vptr + 3 * n))
2194 return -TARGET_EFAULT;
2195
2196 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2197 }
2198 break;
2199 case SOCKOP_sendto:
2200 {
2201 abi_ulong sockfd;
2202 abi_ulong msg;
2203 size_t len;
2204 abi_ulong flags;
2205 abi_ulong addr;
2206 socklen_t addrlen;
2207
2208 if (get_user_ual(sockfd, vptr)
2209 || get_user_ual(msg, vptr + n)
2210 || get_user_ual(len, vptr + 2 * n)
2211 || get_user_ual(flags, vptr + 3 * n)
2212 || get_user_ual(addr, vptr + 4 * n)
2213 || get_user_ual(addrlen, vptr + 5 * n))
2214 return -TARGET_EFAULT;
2215
2216 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2217 }
2218 break;
2219 case SOCKOP_recvfrom:
2220 {
2221 abi_ulong sockfd;
2222 abi_ulong msg;
2223 size_t len;
2224 abi_ulong flags;
2225 abi_ulong addr;
2226 socklen_t addrlen;
2227
2228 if (get_user_ual(sockfd, vptr)
2229 || get_user_ual(msg, vptr + n)
2230 || get_user_ual(len, vptr + 2 * n)
2231 || get_user_ual(flags, vptr + 3 * n)
2232 || get_user_ual(addr, vptr + 4 * n)
2233 || get_user_ual(addrlen, vptr + 5 * n))
2234 return -TARGET_EFAULT;
2235
2236 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2237 }
2238 break;
2239 case SOCKOP_shutdown:
2240 {
2241 abi_ulong sockfd, how;
2242
2243 if (get_user_ual(sockfd, vptr)
2244 || get_user_ual(how, vptr + n))
2245 return -TARGET_EFAULT;
2246
2247 ret = get_errno(shutdown(sockfd, how));
2248 }
2249 break;
2250 case SOCKOP_sendmsg:
2251 case SOCKOP_recvmsg:
2252 {
2253 abi_ulong fd;
2254 abi_ulong target_msg;
2255 abi_ulong flags;
2256
2257 if (get_user_ual(fd, vptr)
2258 || get_user_ual(target_msg, vptr + n)
2259 || get_user_ual(flags, vptr + 2 * n))
2260 return -TARGET_EFAULT;
2261
2262 ret = do_sendrecvmsg(fd, target_msg, flags,
2263 (num == SOCKOP_sendmsg));
2264 }
2265 break;
2266 case SOCKOP_setsockopt:
2267 {
2268 abi_ulong sockfd;
2269 abi_ulong level;
2270 abi_ulong optname;
2271 abi_ulong optval;
2272 socklen_t optlen;
2273
2274 if (get_user_ual(sockfd, vptr)
2275 || get_user_ual(level, vptr + n)
2276 || get_user_ual(optname, vptr + 2 * n)
2277 || get_user_ual(optval, vptr + 3 * n)
2278 || get_user_ual(optlen, vptr + 4 * n))
2279 return -TARGET_EFAULT;
2280
2281 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2282 }
2283 break;
2284 case SOCKOP_getsockopt:
2285 {
2286 abi_ulong sockfd;
2287 abi_ulong level;
2288 abi_ulong optname;
2289 abi_ulong optval;
2290 socklen_t optlen;
2291
2292 if (get_user_ual(sockfd, vptr)
2293 || get_user_ual(level, vptr + n)
2294 || get_user_ual(optname, vptr + 2 * n)
2295 || get_user_ual(optval, vptr + 3 * n)
2296 || get_user_ual(optlen, vptr + 4 * n))
2297 return -TARGET_EFAULT;
2298
2299 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2300 }
2301 break;
2302 default:
2303 gemu_log("Unsupported socketcall: %d\n", num);
2304 ret = -TARGET_ENOSYS;
2305 break;
2306 }
2307 return ret;
2308 }
2309 #endif
2310
#define N_SHM_REGIONS 32

/* Book-keeping for guest shmat() attaches: do_shmat() records each mapped
 * segment here (start == 0 marks a free slot) so do_shmdt() can later clear
 * the page flags for the correct address range. */
static struct shm_region {
    abi_ulong start;    /* guest address the segment is attached at */
    abi_ulong size;     /* segment size in bytes (from shm_segsz) */
} shm_regions[N_SHM_REGIONS];
2317
/* Guest-layout image of the kernel's struct ipc_perm as embedded in the
 * SysV IPC *id_ds structures; fields are guest-endian and guest word size. */
struct target_ipc_perm
{
    abi_long __key;                 /* key passed to semget/msgget/shmget */
    abi_ulong uid;                  /* owner */
    abi_ulong gid;
    abi_ulong cuid;                 /* creator */
    abi_ulong cgid;
    unsigned short int mode;        /* access modes */
    unsigned short int __pad1;
    unsigned short int __seq;       /* kernel sequence number */
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};
2332
/* Guest-layout image of struct semid_ds (semctl IPC_STAT/IPC_SET).
 * The __unusedN padding mirrors the 32-bit kernel ABI layout. */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;    /* ownership and permissions */
    abi_ulong sem_otime;                /* last semop() time */
    abi_ulong __unused1;
    abi_ulong sem_ctime;                /* last change time */
    abi_ulong __unused2;
    abi_ulong sem_nsems;                /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
2344
2345 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2346 abi_ulong target_addr)
2347 {
2348 struct target_ipc_perm *target_ip;
2349 struct target_semid_ds *target_sd;
2350
2351 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2352 return -TARGET_EFAULT;
2353 target_ip = &(target_sd->sem_perm);
2354 host_ip->__key = tswapal(target_ip->__key);
2355 host_ip->uid = tswapal(target_ip->uid);
2356 host_ip->gid = tswapal(target_ip->gid);
2357 host_ip->cuid = tswapal(target_ip->cuid);
2358 host_ip->cgid = tswapal(target_ip->cgid);
2359 host_ip->mode = tswap16(target_ip->mode);
2360 unlock_user_struct(target_sd, target_addr, 0);
2361 return 0;
2362 }
2363
2364 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2365 struct ipc_perm *host_ip)
2366 {
2367 struct target_ipc_perm *target_ip;
2368 struct target_semid_ds *target_sd;
2369
2370 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2371 return -TARGET_EFAULT;
2372 target_ip = &(target_sd->sem_perm);
2373 target_ip->__key = tswapal(host_ip->__key);
2374 target_ip->uid = tswapal(host_ip->uid);
2375 target_ip->gid = tswapal(host_ip->gid);
2376 target_ip->cuid = tswapal(host_ip->cuid);
2377 target_ip->cgid = tswapal(host_ip->cgid);
2378 target_ip->mode = tswap16(host_ip->mode);
2379 unlock_user_struct(target_sd, target_addr, 1);
2380 return 0;
2381 }
2382
2383 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2384 abi_ulong target_addr)
2385 {
2386 struct target_semid_ds *target_sd;
2387
2388 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2389 return -TARGET_EFAULT;
2390 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2391 return -TARGET_EFAULT;
2392 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2393 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2394 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2395 unlock_user_struct(target_sd, target_addr, 0);
2396 return 0;
2397 }
2398
2399 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2400 struct semid_ds *host_sd)
2401 {
2402 struct target_semid_ds *target_sd;
2403
2404 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2405 return -TARGET_EFAULT;
2406 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2407 return -TARGET_EFAULT;
2408 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2409 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2410 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2411 unlock_user_struct(target_sd, target_addr, 1);
2412 return 0;
2413 }
2414
/* Guest-layout image of struct seminfo (semctl IPC_INFO / SEM_INFO).
 * All fields are plain ints, matching the kernel's seminfo layout. */
struct target_seminfo {
    int semmap;
    int semmni;     /* max number of semaphore sets */
    int semmns;     /* max semaphores system-wide */
    int semmnu;
    int semmsl;     /* max semaphores per set */
    int semopm;     /* max ops per semop() call */
    int semume;
    int semusz;
    int semvmx;     /* max semaphore value */
    int semaem;
};
2427
2428 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2429 struct seminfo *host_seminfo)
2430 {
2431 struct target_seminfo *target_seminfo;
2432 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2433 return -TARGET_EFAULT;
2434 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2435 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2436 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2437 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2438 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2439 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2440 __put_user(host_seminfo->semume, &target_seminfo->semume);
2441 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2442 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2443 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2444 unlock_user_struct(target_seminfo, target_addr, 1);
2445 return 0;
2446 }
2447
/* Host-side semctl() argument union (glibc requires the caller to define
 * this; see semctl(2)). */
union semun {
    int val;                    /* SETVAL/GETVAL */
    struct semid_ds *buf;       /* IPC_STAT/IPC_SET */
    unsigned short *array;      /* GETALL/SETALL */
    struct seminfo *__buf;      /* IPC_INFO/SEM_INFO */
};
2454
/* Guest-side semctl() argument union; the pointer members are guest
 * addresses, hence abi_ulong rather than host pointers. */
union target_semun {
    int val;            /* SETVAL/GETVAL */
    abi_ulong buf;      /* guest address of a target_semid_ds */
    abi_ulong array;    /* guest address of an unsigned short array */
    abi_ulong __buf;    /* guest address of a target_seminfo */
};
2461
2462 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2463 abi_ulong target_addr)
2464 {
2465 int nsems;
2466 unsigned short *array;
2467 union semun semun;
2468 struct semid_ds semid_ds;
2469 int i, ret;
2470
2471 semun.buf = &semid_ds;
2472
2473 ret = semctl(semid, 0, IPC_STAT, semun);
2474 if (ret == -1)
2475 return get_errno(ret);
2476
2477 nsems = semid_ds.sem_nsems;
2478
2479 *host_array = malloc(nsems*sizeof(unsigned short));
2480 array = lock_user(VERIFY_READ, target_addr,
2481 nsems*sizeof(unsigned short), 1);
2482 if (!array)
2483 return -TARGET_EFAULT;
2484
2485 for(i=0; i<nsems; i++) {
2486 __get_user((*host_array)[i], &array[i]);
2487 }
2488 unlock_user(array, target_addr, 0);
2489
2490 return 0;
2491 }
2492
2493 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2494 unsigned short **host_array)
2495 {
2496 int nsems;
2497 unsigned short *array;
2498 union semun semun;
2499 struct semid_ds semid_ds;
2500 int i, ret;
2501
2502 semun.buf = &semid_ds;
2503
2504 ret = semctl(semid, 0, IPC_STAT, semun);
2505 if (ret == -1)
2506 return get_errno(ret);
2507
2508 nsems = semid_ds.sem_nsems;
2509
2510 array = lock_user(VERIFY_WRITE, target_addr,
2511 nsems*sizeof(unsigned short), 0);
2512 if (!array)
2513 return -TARGET_EFAULT;
2514
2515 for(i=0; i<nsems; i++) {
2516 __put_user((*host_array)[i], &array[i]);
2517 }
2518 free(*host_array);
2519 unlock_user(array, target_addr, 1);
2520
2521 return 0;
2522 }
2523
/* Emulate semctl(2): convert the guest argument union per sub-command,
 * run the host semctl, and convert results back.  Returns the host result
 * (via get_errno) or a -TARGET_* error from the conversion helpers.
 * NOTE(review): commands not listed fall through and return the initial
 * -TARGET_EINVAL. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Mask off IPC_64 and similar version bits in the command. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* The union member is a plain int, so swap 32 bits regardless of
         * the target ABI word size. */
        arg.val = tswap32(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* NOTE(review): target_su is passed by value, so this write-back
         * only updates the local copy — confirm whether it is needed. */
        target_su.val = tswap32(arg.val);
        break;
    case GETALL:
    case SETALL:
        /* Round-trip the whole semaphore value array through a host copy. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Round-trip the semid_ds structure. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        /* Output-only: kernel fills seminfo, we copy it out. */
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* No argument conversion needed for these commands. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
2583
/* Guest-layout image of struct sembuf (one semop() operation).
 * Same field widths as the host structure; only byte order differs. */
struct target_sembuf {
    unsigned short sem_num;     /* semaphore index in the set */
    short sem_op;               /* operation (+/-/0) */
    short sem_flg;              /* IPC_NOWAIT, SEM_UNDO */
};
2589
2590 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2591 abi_ulong target_addr,
2592 unsigned nsops)
2593 {
2594 struct target_sembuf *target_sembuf;
2595 int i;
2596
2597 target_sembuf = lock_user(VERIFY_READ, target_addr,
2598 nsops*sizeof(struct target_sembuf), 1);
2599 if (!target_sembuf)
2600 return -TARGET_EFAULT;
2601
2602 for(i=0; i<nsops; i++) {
2603 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2604 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2605 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2606 }
2607
2608 unlock_user(target_sembuf, target_addr, 0);
2609
2610 return 0;
2611 }
2612
/* Emulate semop(2): copy the guest sembuf array in and perform the ops.
 * NOTE(review): sops is a VLA sized by the guest-supplied nsops with no
 * upper bound check — a huge nsops could overflow the stack; the kernel
 * itself caps this at SEMOPM.  Confirm whether a limit should be added. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}
2622
/* Guest-layout image of struct msqid_ds (msgctl IPC_STAT/IPC_SET).
 * On 32-bit targets the time fields carry an extra padding word to match
 * the kernel's 32-bit layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;    /* ownership and permissions */
    abi_ulong msg_stime;                /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;                /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;                /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;             /* bytes currently on queue */
    abi_ulong msg_qnum;                 /* messages currently on queue */
    abi_ulong msg_qbytes;               /* max bytes allowed on queue */
    abi_ulong msg_lspid;                /* pid of last msgsnd() */
    abi_ulong msg_lrpid;                /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
2646
2647 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2648 abi_ulong target_addr)
2649 {
2650 struct target_msqid_ds *target_md;
2651
2652 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2653 return -TARGET_EFAULT;
2654 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2655 return -TARGET_EFAULT;
2656 host_md->msg_stime = tswapal(target_md->msg_stime);
2657 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2658 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2659 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2660 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2661 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2662 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2663 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2664 unlock_user_struct(target_md, target_addr, 0);
2665 return 0;
2666 }
2667
2668 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2669 struct msqid_ds *host_md)
2670 {
2671 struct target_msqid_ds *target_md;
2672
2673 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2674 return -TARGET_EFAULT;
2675 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2676 return -TARGET_EFAULT;
2677 target_md->msg_stime = tswapal(host_md->msg_stime);
2678 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2679 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2680 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2681 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2682 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2683 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2684 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2685 unlock_user_struct(target_md, target_addr, 1);
2686 return 0;
2687 }
2688
/* Guest-layout image of struct msginfo (msgctl IPC_INFO / MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;     /* max message size in bytes */
    int msgmnb;     /* default max bytes per queue */
    int msgmni;     /* max number of queues */
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2699
2700 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2701 struct msginfo *host_msginfo)
2702 {
2703 struct target_msginfo *target_msginfo;
2704 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2705 return -TARGET_EFAULT;
2706 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2707 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2708 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2709 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2710 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2711 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2712 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2713 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2714 unlock_user_struct(target_msginfo, target_addr, 1);
2715 return 0;
2716 }
2717
/* Emulate msgctl(2): convert the guest buffer per sub-command, run the
 * host msgctl, and convert results back.  Commands not listed fall through
 * and return the initial -TARGET_EINVAL. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Mask off IPC_64 and similar version bits in the command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip the msqid_ds structure through a host copy. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        /* No buffer involved. */
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel writes a struct msginfo through the msqid_ds pointer;
         * the cast mirrors the kernel ABI for these commands. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
2749
/* Guest-layout image of struct msgbuf: a type tag followed by the message
 * payload (mtext is a variable-length trailer, declared [1] in pre-C99
 * style). */
struct target_msgbuf {
    abi_long mtype;     /* message type, must be > 0 */
    char mtext[1];      /* message payload (actually msgsz bytes) */
};
2754
2755 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2756 unsigned int msgsz, int msgflg)
2757 {
2758 struct target_msgbuf *target_mb;
2759 struct msgbuf *host_mb;
2760 abi_long ret = 0;
2761
2762 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2763 return -TARGET_EFAULT;
2764 host_mb = malloc(msgsz+sizeof(long));
2765 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2766 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2767 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2768 free(host_mb);
2769 unlock_user_struct(target_mb, msgp, 0);
2770
2771 return ret;
2772 }
2773
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the payload and
 * swapped mtype back to the guest buffer.  Returns the number of bytes
 * received, a host-errno conversion, or -TARGET_EFAULT. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* +sizeof(long) covers the host mtype field preceding the payload. */
    host_mb = g_malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* The payload begins one abi_ulong (the mtype field) into the
         * guest struct. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        /* NOTE(review): the copy goes through target_mb->mtext rather than
         * the freshly locked target_mtext mapping — presumably both alias
         * the same guest memory here; confirm this is intentional. */
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
2808
/* Guest-layout image of struct shmid_ds (shmctl IPC_STAT/IPC_SET).
 * On 32-bit targets the time fields carry an extra padding word to match
 * the kernel's 32-bit layout. */
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;    /* ownership and permissions */
    abi_ulong shm_segsz;                /* segment size in bytes */
    abi_ulong shm_atime;                /* last shmat() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;                /* last shmdt() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;                /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    int shm_cpid;                       /* creator pid */
    int shm_lpid;                       /* pid of last shmat/shmdt */
    abi_ulong shm_nattch;               /* current attach count */
    unsigned long int __unused4;
    unsigned long int __unused5;
};
2831
2832 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2833 abi_ulong target_addr)
2834 {
2835 struct target_shmid_ds *target_sd;
2836
2837 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2838 return -TARGET_EFAULT;
2839 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2840 return -TARGET_EFAULT;
2841 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2842 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2843 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2844 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2845 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2846 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2847 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2848 unlock_user_struct(target_sd, target_addr, 0);
2849 return 0;
2850 }
2851
2852 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2853 struct shmid_ds *host_sd)
2854 {
2855 struct target_shmid_ds *target_sd;
2856
2857 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2858 return -TARGET_EFAULT;
2859 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2860 return -TARGET_EFAULT;
2861 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2862 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2863 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2864 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2865 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2866 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2867 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2868 unlock_user_struct(target_sd, target_addr, 1);
2869 return 0;
2870 }
2871
/* Guest-layout image of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
2879
2880 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2881 struct shminfo *host_shminfo)
2882 {
2883 struct target_shminfo *target_shminfo;
2884 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2885 return -TARGET_EFAULT;
2886 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2887 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2888 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2889 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2890 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2891 unlock_user_struct(target_shminfo, target_addr, 1);
2892 return 0;
2893 }
2894
/* Guest-layout image of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* currently existing segments */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
2903
2904 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2905 struct shm_info *host_shm_info)
2906 {
2907 struct target_shm_info *target_shm_info;
2908 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2909 return -TARGET_EFAULT;
2910 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2911 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2912 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2913 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2914 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2915 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2916 unlock_user_struct(target_shm_info, target_addr, 1);
2917 return 0;
2918 }
2919
/* Emulate shmctl(2): convert the guest buffer per sub-command, run the
 * host shmctl, and convert results back.  Commands not listed fall through
 * and return the initial -TARGET_EINVAL. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Mask off IPC_64 and similar version bits in the command. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip the shmid_ds structure through a host copy. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel writes a struct shminfo through the shmid_ds pointer;
         * the cast mirrors the kernel ABI for this command. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise for struct shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No buffer involved. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
2958
/* Emulate shmat(2): attach the segment either at the guest-requested
 * address or at a hole found by mmap_find_vma(), update the guest page
 * flags, and record the attach in shm_regions[] for later do_shmdt().
 * Runs under mmap_lock to keep the address-space bookkeeping consistent.
 * Returns the guest attach address or a host-errno conversion. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: pick a free guest VMA range ourselves. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP lets shmat replace the reservation we just found. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the attached range readable (and writable unless SHM_RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the attach so do_shmdt() can clear the flags later.
     * NOTE(review): if all N_SHM_REGIONS slots are in use the attach is
     * silently not recorded. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
3011
3012 static inline abi_long do_shmdt(abi_ulong shmaddr)
3013 {
3014 int i;
3015
3016 for (i = 0; i < N_SHM_REGIONS; ++i) {
3017 if (shm_regions[i].start == shmaddr) {
3018 shm_regions[i].start = 0;
3019 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3020 break;
3021 }
3022 }
3023
3024 return get_errno(shmdt(g2h(shmaddr)));
3025 }
3026
3027 #ifdef TARGET_NR_ipc
3028 /* ??? This only works with linear mappings. */
3029 /* do_ipc() must return target values and target errnos. */
/* Dispatch the multiplexed ipc(2) syscall to the individual SysV IPC
 * emulation helpers.  The high 16 bits of 'call' carry the ABI version
 * (version 0 uses an indirection struct for msgrcv), the low 16 bits the
 * operation.  Must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        /* 'ptr' holds the guest semun value/address directly. */
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-ABI msgrcv passes {msgp, msgtyp} indirectly through
                 * a small struct in guest memory. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* Old-ABI iBCS2 shmat is not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
3126 #endif
3127
3128 /* kernel structure types definitions */
3129
3130 #define STRUCT(name, ...) STRUCT_ ## name,
3131 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3132 enum {
3133 #include "syscall_types.h"
3134 };
3135 #undef STRUCT
3136 #undef STRUCT_SPECIAL
3137
3138 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3139 #define STRUCT_SPECIAL(name)
3140 #include "syscall_types.h"
3141 #undef STRUCT
3142 #undef STRUCT_SPECIAL
3143
typedef struct IOCTLEntry IOCTLEntry;

/* Custom conversion handler for ioctls whose arguments the generic thunk
 * machinery cannot describe; receives the table entry, a scratch buffer,
 * and the raw fd/cmd/arg of the guest call. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    unsigned int target_cmd;        /* guest ioctl request number */
    unsigned int host_cmd;          /* corresponding host request number */
    const char *name;               /* for logging/strace */
    int access;                     /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;          /* custom handler, or NULL for generic */
    const argtype arg_type[5];      /* thunk description of the argument */
};
3157
3158 #define IOC_R 0x0001
3159 #define IOC_W 0x0002
3160 #define IOC_RW (IOC_R | IOC_W)
3161
3162 #define MAX_STRUCT_SIZE 4096
3163
3164 #ifdef CONFIG_FIEMAP
3165 /* So fiemap access checks don't overflow on 32 bit systems.
3166 * This is very slightly smaller than the limit imposed by
3167 * the underlying kernel.
3168 */
3169 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3170 / sizeof(struct fiemap_extent))
3171
/* Custom handler for the FS_IOC_FIEMAP ioctl.  Returns the host ioctl
 * result or a -TARGET_* error. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;    /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size fiemap header from guest to host layout. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Cap the guest-supplied extent count so the size math below cannot
     * overflow on 32-bit hosts. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
3251 #endif
3252
3253 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3254 int fd, abi_long cmd, abi_long arg)
3255 {
3256 const argtype *arg_type = ie->arg_type;
3257 int target_size;
3258 void *argptr;
3259 int ret;
3260 struct ifconf *host_ifconf;
3261 uint32_t outbufsz;
3262 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3263 int target_ifreq_size;
3264 int nb_ifreq;
3265 int free_buf = 0;
3266 int i;
3267 int target_ifc_len;
3268 abi_long target_ifc_buf;
3269 int host_ifc_len;
3270 char *host_ifc_buf;
3271
3272 assert(arg_type[0] == TYPE_PTR);
3273 assert(ie->access == IOC_RW);
3274
3275 arg_type++;
3276 target_size = thunk_type_size(arg_type, 0);
3277
3278 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3279 if (!argptr)
3280 return -TARGET_EFAULT;
3281 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3282 unlock_user(argptr, arg, 0);
3283
3284 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3285 target_ifc_len = host_ifconf->ifc_len;
3286 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3287
3288 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3289 nb_ifreq = target_ifc_len / target_ifreq_size;
3290 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3291
3292 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3293 if (outbufsz > MAX_STRUCT_SIZE) {
3294 /* We can't fit all the extents into the fixed size buffer.
3295 * Allocate one that is large enough and use it instead.
3296 */
3297 host_ifconf = malloc(outbufsz);
3298 if (!host_ifconf) {
3299 return -TARGET_ENOMEM;
3300 }
3301 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3302 free_buf = 1;
3303 }
3304 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3305
3306 host_ifconf->ifc_len = host_ifc_len;
3307 host_ifconf->ifc_buf = host_ifc_buf;
3308
3309 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3310 if (!is_error(ret)) {
3311 /* convert host ifc_len to target ifc_len */
3312
3313 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3314 target_ifc_len = nb_ifreq * target_ifreq_size;
3315 host_ifconf->ifc_len = target_ifc_len;
3316
3317 /* restore target ifc_buf */
3318
3319 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3320
3321 /* copy struct ifconf to target user */
3322
3323 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3324 if (!argptr)
3325 return -TARGET_EFAULT;
3326 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3327 unlock_user(argptr, arg, target_size);
3328
3329 /* copy ifreq[] to target user */
3330
3331 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3332 for (i = 0; i < nb_ifreq ; i++) {
3333 thunk_convert(argptr + i * target_ifreq_size,
3334 host_ifc_buf + i * sizeof(struct ifreq),
3335 ifreq_arg_type, THUNK_TARGET);
3336 }
3337 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3338 }
3339
3340 if (free_buf) {
3341 free(host_ifconf);
3342 }
3343
3344 return ret;
3345 }
3346
3347 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3348 abi_long cmd, abi_long arg)
3349 {
3350 void *argptr;
3351 struct dm_ioctl *host_dm;
3352 abi_long guest_data;
3353 uint32_t guest_data_size;
3354 int target_size;
3355 const argtype *arg_type = ie->arg_type;
3356 abi_long ret;
3357 void *big_buf = NULL;
3358 char *host_data;
3359
3360 arg_type++;
3361 target_size = thunk_type_size(arg_type, 0);
3362 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3363 if (!argptr) {
3364 ret = -TARGET_EFAULT;
3365 goto out;
3366 }
3367 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3368 unlock_user(argptr, arg, 0);
3369
3370 /* buf_temp is too small, so fetch things into a bigger buffer */
3371 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3372 memcpy(big_buf, buf_temp, target_size);
3373 buf_temp = big_buf;
3374 host_dm = big_buf;
3375
3376 guest_data = arg + host_dm->data_start;
3377 if ((guest_data - arg) < 0) {
3378 ret = -EINVAL;
3379 goto out;
3380 }
3381 guest_data_size = host_dm->data_size - host_dm->data_start;
3382 host_data = (char*)host_dm + host_dm->data_start;
3383
3384 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3385 switch (ie->host_cmd) {
3386 case DM_REMOVE_ALL:
3387 case DM_LIST_DEVICES:
3388 case DM_DEV_CREATE:
3389 case DM_DEV_REMOVE:
3390 case DM_DEV_SUSPEND:
3391 case DM_DEV_STATUS:
3392 case DM_DEV_WAIT:
3393 case DM_TABLE_STATUS:
3394 case DM_TABLE_CLEAR:
3395 case DM_TABLE_DEPS:
3396 case DM_LIST_VERSIONS:
3397 /* no input data */
3398 break;
3399 case DM_DEV_RENAME:
3400 case DM_DEV_SET_GEOMETRY:
3401 /* data contains only strings */
3402 memcpy(host_data, argptr, guest_data_size);
3403 break;
3404 case DM_TARGET_MSG:
3405 memcpy(host_data, argptr, guest_data_size);
3406 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3407 break;
3408 case DM_TABLE_LOAD:
3409 {
3410 void *gspec = argptr;
3411 void *cur_data = host_data;
3412 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3413 int spec_size = thunk_type_size(arg_type, 0);
3414 int i;
3415
3416 for (i = 0; i < host_dm->target_count; i++) {
3417 struct dm_target_spec *spec = cur_data;
3418 uint32_t next;
3419 int slen;
3420
3421 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3422 slen = strlen((char*)gspec + spec_size) + 1;
3423 next = spec->next;
3424 spec->next = sizeof(*spec) + slen;
3425 strcpy((char*)&spec[1], gspec + spec_size);
3426 gspec += next;
3427 cur_data += spec->next;
3428 }
3429 break;
3430 }
3431 default:
3432 ret = -TARGET_EINVAL;
3433 goto out;
3434 }
3435 unlock_user(argptr, guest_data, 0);
3436
3437 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3438 if (!is_error(ret)) {
3439 guest_data = arg + host_dm->data_start;
3440 guest_data_size = host_dm->data_size - host_dm->data_start;
3441 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3442 switch (ie->host_cmd) {
3443 case DM_REMOVE_ALL:
3444 case DM_DEV_CREATE:
3445 case DM_DEV_REMOVE:
3446 case DM_DEV_RENAME:
3447 case DM_DEV_SUSPEND:
3448 case DM_DEV_STATUS:
3449 case DM_TABLE_LOAD:
3450 case DM_TABLE_CLEAR:
3451 case DM_TARGET_MSG:
3452 case DM_DEV_SET_GEOMETRY:
3453 /* no return data */
3454 break;
3455 case DM_LIST_DEVICES:
3456 {
3457 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3458 uint32_t remaining_data = guest_data_size;
3459 void *cur_data = argptr;
3460 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3461 int nl_size = 12; /* can't use thunk_size due to alignment */
3462
3463 while (1) {
3464 uint32_t next = nl->next;
3465 if (next) {
3466 nl->next = nl_size + (strlen(nl->name) + 1);
3467 }
3468 if (remaining_data < nl->next) {
3469 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3470 break;
3471 }
3472 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3473 strcpy(cur_data + nl_size, nl->name);
3474 cur_data += nl->next;
3475 remaining_data -= nl->next;
3476 if (!next) {
3477 break;
3478 }
3479 nl = (void*)nl + next;
3480 }
3481 break;
3482 }
3483 case DM_DEV_WAIT:
3484 case DM_TABLE_STATUS:
3485 {
3486 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3487 void *cur_data = argptr;
3488 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3489 int spec_size = thunk_type_size(arg_type, 0);
3490 int i;
3491
3492 for (i = 0; i < host_dm->target_count; i++) {
3493 uint32_t next = spec->next;
3494 int slen = strlen((char*)&spec[1]) + 1;
3495 spec->next = (cur_data - argptr) + spec_size + slen;
3496 if (guest_data_size < spec->next) {
3497 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3498 break;
3499 }
3500 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3501 strcpy(cur_data + spec_size, (char*)&spec[1]);
3502 cur_data = argptr + spec->next;
3503 spec = (void*)host_dm + host_dm->data_start + next;
3504 }
3505 break;
3506 }
3507 case DM_TABLE_DEPS:
3508 {
3509 void *hdata = (void*)host_dm + host_dm->data_start;
3510 int count = *(uint32_t*)hdata;
3511 uint64_t *hdev = hdata + 8;
3512 uint64_t *gdev = argptr + 8;
3513 int i;
3514
3515 *(uint32_t*)argptr = tswap32(count);
3516 for (i = 0; i < count; i++) {
3517 *gdev = tswap64(*hdev);
3518 gdev++;
3519 hdev++;
3520 }
3521 break;
3522 }
3523 case DM_LIST_VERSIONS:
3524 {
3525 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3526 uint32_t remaining_data = guest_data_size;
3527 void *cur_data = argptr;
3528 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3529 int vers_size = thunk_type_size(arg_type, 0);
3530
3531 while (1) {
3532 uint32_t next = vers->next;
3533 if (next) {
3534 vers->next = vers_size + (strlen(vers->name) + 1);
3535 }
3536 if (remaining_data < vers->next) {
3537 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3538 break;
3539 }
3540 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3541 strcpy(cur_data + vers_size, vers->name);
3542 cur_data += vers->next;
3543 remaining_data -= vers->next;
3544 if (!next) {
3545 break;
3546 }
3547 vers = (void*)vers + next;
3548 }
3549 break;
3550 }
3551 default:
3552 ret = -TARGET_EINVAL;
3553 goto out;
3554 }
3555 unlock_user(argptr, guest_data, guest_data_size);
3556
3557 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3558 if (!argptr) {
3559 ret = -TARGET_EFAULT;
3560 goto out;
3561 }
3562 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3563 unlock_user(argptr, arg, target_size);
3564 }
3565 out:
3566 g_free(big_buf);
3567 return ret;
3568 }
3569
/*
 * Special-case handler for routing-table ioctls (struct rtentry): the
 * rt_dev field is a pointer to a device-name string in guest memory, so
 * the struct is converted field by field here and rt_dev is translated
 * into a locked host string pointer before the host ioctl is issued.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, abi_long cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr;
    unsigned long *host_rt_dev_ptr;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Pin the guest device-name string for the host call. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    /* NOTE(review): host_rt_dev_ptr/target_rt_dev_ptr are only assigned
     * inside the loop; this relies on STRUCT_rtentry always containing an
     * rt_dev field -- confirm if the struct description ever changes. */
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
3632
/* Table of all supported ioctls, expanded from ioctls.h via the two
 * macros below.  A zero target_cmd terminates the list.  Entries
 * declared with IOCTL_SPECIAL carry a do_ioctl callback that handles
 * argument conversion itself (do_ioctl_ifconf, do_ioctl_dm, ...). */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
3641
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan of the generated table; zero target_cmd terminates. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Entry has a custom conversion handler; delegate entirely. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    /* Generic path: convert the argument according to its thunk type
     * and the declared access direction. */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes only: run ioctl, then copy result out */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads only: copy argument in, then run ioctl */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        /* unknown access modes are treated as read-write */
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
3729
/* target <-> host translation of termios input-mode (c_iflag) bits. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
3747
/* target <-> host translation of termios output-mode (c_oflag) bits;
 * the multi-bit delay fields (NLDLY/CRDLY/...) list one entry per value. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
3775
/* target <-> host translation of termios control-mode (c_cflag) bits;
 * CBAUD and CSIZE are multi-bit fields with one entry per value. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
3810
/* target <-> host translation of termios local-mode (c_lflag) bits. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
3829
3830 static void target_to_host_termios (void *dst, const void *src)
3831 {
3832 struct host_termios *host = dst;
3833 const struct target_termios *target = src;
3834
3835 host->c_iflag =
3836 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3837 host->c_oflag =
3838 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3839 host->c_cflag =
3840 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3841 host->c_lflag =
3842 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3843 host->c_line = target->c_line;
3844
3845 memset(host->c_cc, 0, sizeof(host->c_cc));
3846 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3847 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3848 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3849 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3850 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3851 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3852 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3853 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3854 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3855 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3856 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3857 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3858 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3859 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3860 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3861 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3862 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3863 }
3864
3865 static void host_to_target_termios (void *dst, const void *src)
3866 {
3867 struct target_termios *target = dst;
3868 const struct host_termios *host = src;
3869
3870 target->c_iflag =
3871 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3872 target->c_oflag =
3873 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3874 target->c_cflag =
3875 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3876 target->c_lflag =
3877 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3878 target->c_line = host->c_line;
3879
3880 memset(target->c_cc, 0, sizeof(target->c_cc));
3881 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3882 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3883 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3884 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3885 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3886 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3887 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3888 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3889 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3890 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3891 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3892 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3893 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3894 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3895 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3896 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3897 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3898 }
3899
/* Thunk descriptor for termios: converts between the guest and host
 * termios layouts using the two functions above. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
3905
3906 static bitmask_transtbl mmap_flags_tbl[] = {
3907 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3908 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3909 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3910 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3911 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3912 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3913 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3914 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3915 { 0, 0, 0, 0 }
3916 };
3917
3918 #if defined(TARGET_I386)
3919
/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;  /* lazily allocated by write_ldt() */
3922
3923 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3924 {
3925 int size;
3926 void *p;
3927
3928 if (!ldt_table)
3929 return 0;
3930 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3931 if (size > bytecount)
3932 size = bytecount;
3933 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3934 if (!p)
3935 return -TARGET_EFAULT;
3936 /* ??? Should this by byteswapped? */
3937 memcpy(p, ldt_table, size);
3938 unlock_user(p, ptr, size);
3939 return size;
3940 }
3941
/* XXX: add locking support */
/* Install or clear one LDT descriptor from a guest modify_ldt() call.
 * oldmode selects the legacy (func == 1) semantics: no 'useable' bit
 * and no 64-bit ('lm') segments.  Returns 0 or a target errno. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 would be a conforming code segment; only allowed in
     * new mode and only when marked not-present. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit words of an x86 segment descriptor;
     * 0x7000 sets S=1 (code/data) and DPL=3 (user). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
              (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
              ((ldt_info.base_addr & 0x00ff0000) >> 16) |
              (ldt_info.limit & 0xf0000) |
              ((read_exec_only ^ 1) << 9) |
              (contents << 10) |
              ((seg_not_present ^ 1) << 15) |
              (seg_32bit << 22) |
              (limit_in_pages << 23) |
              (lm << 21) |
              0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
4033
4034 /* specific and weird i386 syscalls */
4035 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4036 unsigned long bytecount)
4037 {
4038 abi_long ret;
4039
4040 switch (func) {
4041 case 0:
4042 ret = read_ldt(ptr, bytecount);
4043 break;
4044 case 1:
4045 ret = write_ldt(env, ptr, bytecount, 1);
4046 break;
4047 case 0x11:
4048 ret = write_ldt(env, ptr, bytecount, 0);
4049 break;
4050 default:
4051 ret = -TARGET_ENOSYS;
4052 break;
4053 }
4054 return ret;
4055 }
4056
4057 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into the guest's
 * GDT.  If the guest passes entry_number == -1, a free TLS slot is
 * picked and written back to the guest's user_desc.  Returns 0 or a
 * target errno. */
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Pick the first unused TLS slot and report it to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    /* contents == 3 (conforming code) is only allowed not-present. */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit words of an x86 segment descriptor;
     * 0x7000 sets S=1 (code/data) and DPL=3 (user). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
              (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
              ((ldt_info.base_addr & 0x00ff0000) >> 16) |
              (ldt_info.limit & 0xf0000) |
              ((read_exec_only ^ 1) << 9) |
              (contents << 10) |
              ((seg_not_present ^ 1) << 15) |
              (seg_32bit << 22) |
              (limit_in_pages << 23) |
              (useable << 20) |
              (lm << 21) |
              0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
4142
/* Emulate get_thread_area(2): read the TLS descriptor selected by the
 * guest's entry_number back out of the GDT, unpacking the descriptor
 * words into user_desc fields.  Returns 0 or a target errno. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor bits back into user_desc flag fields
     * (inverse of the packing done in do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
4189 #endif /* TARGET_I386 && TARGET_ABI32 */
4190
4191 #ifndef TARGET_ABI32
4192 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4193 {
4194 abi_long ret = 0;
4195 abi_ulong val;
4196 int idx;
4197
4198 switch(code) {
4199 case TARGET_ARCH_SET_GS:
4200 case TARGET_ARCH_SET_FS:
4201 if (code == TARGET_ARCH_SET_GS)
4202 idx = R_GS;
4203 else
4204 idx = R_FS;
4205 cpu_x86_load_seg(env, idx, 0);
4206 env->segs[idx].base = addr;
4207 break;
4208 case TARGET_ARCH_GET_GS:
4209 case TARGET_ARCH_GET_FS:
4210 if (code == TARGET_ARCH_GET_GS)
4211 idx = R_GS;
4212 else
4213 idx = R_FS;
4214 val = env->segs[idx].base;
4215 if (put_user(val, addr, abi_ulong))
4216 ret = -TARGET_EFAULT;
4217 break;
4218 default:
4219 ret = -TARGET_EINVAL;
4220 break;
4221 }
4222 return ret;
4223 }
4224 #endif
4225
4226 #endif /* defined(TARGET_I386) */
4227
4228 #define NEW_STACK_SIZE 0x40000
4229
4230 #if defined(CONFIG_USE_NPTL)
4231
/* Held by the parent in do_fork() so the child (in clone_func) blocks
 * until the parent has finished its thread setup. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Parameters handed from do_fork() to the newly created thread. */
typedef struct {
    CPUArchState *env;          /* child's CPU state (copy of parent's) */
    pthread_mutex_t mutex;      /* with cond: child's ready handshake */
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;               /* child tid, filled in by clone_func() */
    abi_ulong child_tidptr;     /* CLONE_CHILD_SETTID target, or 0 */
    abi_ulong parent_tidptr;    /* CLONE_PARENT_SETTID target, or 0 */
    sigset_t sigmask;           /* signal mask the child restores */
} new_thread_info;
4243
/* Thread entry point for guest threads created with CLONE_VM (NPTL
 * path): publish the tid, signal readiness to the parent, wait for the
 * parent's setup to finish, then run the guest CPU loop forever. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_env = env;
    ts = (TaskState *)thread_env->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Store the tid where CLONE_CHILD_SETTID / CLONE_PARENT_SETTID asked. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
4275 #else
4276
4277 static int clone_func(void *arg)
4278 {
4279 CPUArchState *env = arg;
4280 cpu_loop(env);
4281 /* never exits */
4282 return 0;
4283 }
4284 #endif
4285
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).

   Emulates clone()/fork()/vfork().  With CLONE_VM a new host thread
   (NPTL) or a raw clone() child is created sharing this address space;
   without CLONE_VM a plain fork() is performed.  newsp, newtls and the
   tid pointers are guest values. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    int ret;
    TaskState *ts;
    CPUArchState *new_env;
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;
    sigset_t sigmask;
#else
    uint8_t *new_stack;
#endif

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;
#endif
        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        cpu_reset(ENV_GET_CPU(new_env));
#endif
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        /* Keep the NPTL-specific flag bits for our own bookkeeping but
           strip them from what is visible below. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        /* Restore our own mask; the child restores its copy itself. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed into cpu_loop(). */
        pthread_mutex_unlock(&clone_lock);
#else
        if (flags & CLONE_NPTL_FLAGS2)
            return -EINVAL;
        /* This is probably going to die very quickly, but do it anyway. */
        new_stack = g_malloc0 (NEW_STACK_SIZE);
#ifdef __ia64__
        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
#else
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#endif
#endif
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            return -EINVAL;
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs(env, newsp);
            fork_end(1);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
#endif
        } else {
            fork_end(0);
        }
    }
    return ret;
}
4418
4419 /* warning : doesn't handle linux specific flags... */
4420 static int target_to_host_fcntl_cmd(int cmd)
4421 {
4422 switch(cmd) {
4423 case TARGET_F_DUPFD:
4424 case TARGET_F_GETFD:
4425 case TARGET_F_SETFD:
4426 case TARGET_F_GETFL:
4427 case TARGET_F_SETFL:
4428 return cmd;
4429 case TARGET_F_GETLK:
4430 return F_GETLK;
4431 case TARGET_F_SETLK:
4432 return F_SETLK;
4433 case TARGET_F_SETLKW:
4434 return F_SETLKW;
4435 case TARGET_F_GETOWN:
4436 return F_GETOWN;
4437 case TARGET_F_SETOWN:
4438 return F_SETOWN;
4439 case TARGET_F_GETSIG:
4440 return F_GETSIG;
4441 case TARGET_F_SETSIG:
4442 return F_SETSIG;
4443 #if TARGET_ABI_BITS == 32
4444 case TARGET_F_GETLK64:
4445 return F_GETLK64;
4446 case TARGET_F_SETLK64:
4447 return F_SETLK64;
4448 case TARGET_F_SETLKW64:
4449 return F_SETLKW64;
4450 #endif
4451 case TARGET_F_SETLEASE:
4452 return F_SETLEASE;
4453 case TARGET_F_GETLEASE:
4454 return F_GETLEASE;
4455 #ifdef F_DUPFD_CLOEXEC
4456 case TARGET_F_DUPFD_CLOEXEC:
4457 return F_DUPFD_CLOEXEC;
4458 #endif
4459 case TARGET_F_NOTIFY:
4460 return F_NOTIFY;
4461 default:
4462 return -TARGET_EINVAL;
4463 }
4464 return -TARGET_EINVAL;
4465 }
4466
/* Conversion table for struct flock l_type values.  The mask columns
   are -1 (all bits set), so each entry matches the exact value rather
   than a bit subset — l_type is an enumeration, not a bitmask. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }   /* terminator */
};
4476
/* Emulate fcntl(2).  struct flock layouts, l_type values and the O_*
   status flags differ between target and host, so they are converted
   through flock_tbl / fcntl_flags_tbl.  Returns a target errno on
   failure. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy in the guest struct flock, query the host, and on
           success copy the (possibly updated) lock back out. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        /* Input-only: convert the guest struct flock and apply it. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        /* Same as F_GETLK but with the 64-bit struct flock64 layout. */
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the >> 1 applied to the converted l_type in the
           64-bit cases (and on the way back out) presumably adjusts
           between the 32- and 64-bit lock-type encodings — confirm
           against the target headers before touching. */
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Status flags come back in host encoding; convert them. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

        /* Integer-argument commands need no conversion beyond the
           command number itself. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Pass unknown commands straight through with the target number. */
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
4591
#ifdef USE_UID16

/* 16-bit uid/gid syscall variants: ids that do not fit in 16 bits are
   reported as the overflow id 65534, and a 16-bit -1 is widened back to
   -1 (the "leave unchanged" marker used by chown() and friends). */

static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}

static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}

/* Byte-swap a 16-bit id between target and host order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}
#else /* !USE_UID16 */
/* Full-width uid/gid syscalls: values pass through unmodified. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

/* Byte-swap a 32-bit id between target and host order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
#endif /* USE_UID16 */
4651
/* One-time initialization of the syscall layer: registers thunk struct
   descriptions, builds the target->host errno table and patches target
   ioctl numbers whose size field depends on target struct layout. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

/* Register every struct listed in syscall_types.h with the thunk
   layer so ioctl arguments can be converted automatically. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* Compute the pointed-to type's size in the target layout
               and splice it into the command's size field. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
4701
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit syscall argument
   into one value, honouring the target's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the value already fits in a single register;
   word1 is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
4717
#ifdef TARGET_NR_truncate64
/* truncate64(path, len): the 64-bit length arrives split across two
   argument registers.  If regpairs_aligned() reports that this target
   passes 64-bit register pairs aligned, the real halves sit one
   register later (arg2 is padding), so shift them down first. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
4731
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(fd, len): same register-pair handling as
   target_truncate64() above — skip the alignment padding register
   when the target aligns 64-bit register pairs. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
4745
4746 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4747 abi_ulong target_addr)
4748 {
4749 struct target_timespec *target_ts;
4750
4751 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4752 return -TARGET_EFAULT;
4753 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4754 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4755 unlock_user_struct(target_ts, target_addr, 0);
4756 return 0;
4757 }
4758
4759 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4760 struct timespec *host_ts)
4761 {
4762 struct target_timespec *target_ts;
4763
4764 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4765 return -TARGET_EFAULT;
4766 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4767 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4768 unlock_user_struct(target_ts, target_addr, 1);
4769 return 0;
4770 }
4771
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Write a host struct stat to guest memory at target_addr using the
   target's 64-bit stat layout.  On ARM the EABI variant of the struct
   is selected when the CPU is running in EABI mode.  Returns 0 or
   -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Targets with the "broken" layout duplicate the inode in a
           second field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
4836
4837 #if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
/* Emulate futex(2) by forwarding to the host futex syscall on the host
   address backing uaddr.  val/val3 are byte-swapped only for operations
   where the kernel compares them against guest memory. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against *uaddr, which holds a guest-order
           value, so swap it to match. */
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
4889 #endif
4890
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    int sig;

    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        sig = host_to_target_signal(WTERMSIG(status));
        return (status & ~0x7f) | sig;
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }
    return status;
}
4904
4905 int get_osversion(void)
4906 {
4907 static int osversion;
4908 struct new_utsname buf;
4909 const char *s;
4910 int i, n, tmp;
4911 if (osversion)
4912 return osversion;
4913 if (qemu_uname_release && *qemu_uname_release) {
4914 s = qemu_uname_release;
4915 } else {
4916 if (sys_uname(&buf))
4917 return 0;
4918 s = buf.release;
4919 }
4920 tmp = 0;
4921 for (i = 0; i < 3; i++) {
4922 n = 0;
4923 while (*s >= '0' && *s <= '9') {
4924 n *= 10;
4925 n += *s - '0';
4926 s++;
4927 }
4928 tmp = (tmp << 8) + n;
4929 if (*s == '.')
4930 s++;
4931 }
4932 osversion = tmp;
4933 return osversion;
4934 }
4935
4936
4937 static int open_self_maps(void *cpu_env, int fd)
4938 {
4939 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4940 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4941 #endif
4942 FILE *fp;
4943 char *line = NULL;
4944 size_t len = 0;
4945 ssize_t read;
4946
4947 fp = fopen("/proc/self/maps", "r");
4948 if (fp == NULL) {
4949 return -EACCES;
4950 }
4951
4952 while ((read = getline(&line, &len, fp)) != -1) {
4953 int fields, dev_maj, dev_min, inode;
4954 uint64_t min, max, offset;
4955 char flag_r, flag_w, flag_x, flag_p;
4956 char path[512] = "";
4957 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4958 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4959 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4960
4961 if ((fields < 10) || (fields > 11)) {
4962 continue;
4963 }
4964 if (!strncmp(path, "[stack]", 7)) {
4965 continue;
4966 }
4967 if (h2g_valid(min) && h2g_valid(max)) {
4968 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4969 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
4970 h2g(min), h2g(max), flag_r, flag_w,
4971 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4972 path[0] ? " " : "", path);
4973 }
4974 }
4975
4976 free(line);
4977 fclose(fp);
4978
4979 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4980 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4981 (unsigned long long)ts->info->stack_limit,
4982 (unsigned long long)(ts->info->start_stack +
4983 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
4984 (unsigned long long)0);
4985 #endif
4986
4987 return 0;
4988 }
4989
/* Emulate /proc/self/stat: write 44 space-separated fields to fd, with
   real values only for the pid (field 1), the command name (field 2)
   and the start-of-stack address (field 28); everything else is 0. */
static int open_self_stat(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        /* Short write: report failure to the caller. */
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
5025
/* Emulate /proc/self/auxv: copy the guest's auxiliary vector (saved on
   the target stack at load time) into fd, then rewind the fd. */
static int open_self_auxv(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                /* Best effort: stop on write error or EOF. */
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced/decremented by the
           loop, so this unlock does not see the original values.  For a
           VERIFY_READ lock the length argument only controls copy-back,
           so this looks harmless — confirm against lock_user()'s
           contract. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
5054
/* Return 1 if filename names the given entry of this process's /proc
   directory — either "/proc/self/<entry>" or "/proc/<our-pid>/<entry>"
   — and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* Numeric pid directory: only match our own pid. */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(p, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        p += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
5078
/* open(2) replacement that intercepts a few /proc/self (or
   /proc/<our-pid>) entries and synthesizes target-view contents for
   them in an unlinked temporary file; everything else is forwarded to
   the host open(). */
static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;               /* entry under /proc/self/ */
        int (*fill)(void *cpu_env, int fd); /* writes fake contents to fd */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps },
        { "stat", open_self_stat },
        { "auxv", open_self_auxv },
        { NULL, NULL }
    };

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (is_proc_myself(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd stays usable and the file
           disappears on close. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            close(fd);
            return r;
        }
        /* Rewind so the guest reads the file from the start. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return get_errno(open(path(pathname), flags, mode));
}
5126
5127 /* do_syscall() should always have a single exit point at the end so
5128 that actions, such as logging of syscall results, can be performed.
5129 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5130 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5131 abi_long arg2, abi_long arg3, abi_long arg4,
5132 abi_long arg5, abi_long arg6, abi_long arg7,
5133 abi_long arg8)
5134 {
5135 abi_long ret;
5136 struct stat st;
5137 struct statfs stfs;
5138 void *p;
5139
5140 #ifdef DEBUG
5141 gemu_log("syscall %d", num);
5142 #endif
5143 if(do_strace)
5144 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5145
5146 switch(num) {
5147 case TARGET_NR_exit:
5148 #ifdef CONFIG_USE_NPTL
5149 /* In old applications this may be used to implement _exit(2).
5150 However in threaded applictions it is used for thread termination,
5151 and _exit_group is used for application termination.
5152 Do thread termination if we have more then one thread. */
5153 /* FIXME: This probably breaks if a signal arrives. We should probably
5154 be disabling signals. */
5155 if (first_cpu->next_cpu) {
5156 TaskState *ts;
5157 CPUArchState **lastp;
5158 CPUArchState *p;
5159
5160 cpu_list_lock();
5161 lastp = &first_cpu;
5162 p = first_cpu;
5163 while (p && p != (CPUArchState *)cpu_env) {
5164 lastp = &p->next_cpu;
5165 p = p->next_cpu;
5166 }
5167 /* If we didn't find the CPU for this thread then something is
5168 horribly wrong. */
5169 if (!p)
5170 abort();
5171 /* Remove the CPU from the list. */
5172 *lastp = p->next_cpu;
5173 cpu_list_unlock();
5174 ts = ((CPUArchState *)cpu_env)->opaque;
5175 if (ts->child_tidptr) {
5176 put_user_u32(0, ts->child_tidptr);
5177 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5178 NULL, NULL, 0);
5179 }
5180 thread_env = NULL;
5181 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5182 g_free(ts);
5183 pthread_exit(NULL);
5184 }
5185 #endif
5186 #ifdef TARGET_GPROF
5187 _mcleanup();
5188 #endif
5189 gdb_exit(cpu_env, arg1);
5190 _exit(arg1);
5191 ret = 0; /* avoid warning */
5192 break;
5193 case TARGET_NR_read:
5194 if (arg3 == 0)
5195 ret = 0;
5196 else {
5197 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5198 goto efault;
5199 ret = get_errno(read(arg1, p, arg3));
5200 unlock_user(p, arg2, ret);
5201 }
5202 break;
5203 case TARGET_NR_write:
5204 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5205 goto efault;
5206 ret = get_errno(write(arg1, p, arg3));
5207 unlock_user(p, arg2, 0);
5208 break;
5209 case TARGET_NR_open:
5210 if (!(p = lock_user_string(arg1)))
5211 goto efault;
5212 ret = get_errno(do_open(cpu_env, p,
5213 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5214 arg3));
5215 unlock_user(p, arg1, 0);
5216 break;
5217 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5218 case TARGET_NR_openat:
5219 if (!(p = lock_user_string(arg2)))
5220 goto efault;
5221 ret = get_errno(sys_openat(arg1,
5222 path(p),
5223 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5224 arg4));
5225 unlock_user(p, arg2, 0);
5226 break;
5227 #endif
5228 case TARGET_NR_close:
5229 ret = get_errno(close(arg1));
5230 break;
5231 case TARGET_NR_brk:
5232 ret = do_brk(arg1);
5233 break;
5234 case TARGET_NR_fork:
5235 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5236 break;
5237 #ifdef TARGET_NR_waitpid
5238 case TARGET_NR_waitpid:
5239 {
5240 int status;
5241 ret = get_errno(waitpid(arg1, &status, arg3));
5242 if (!is_error(ret) && arg2 && ret
5243 && put_user_s32(host_to_target_waitstatus(status), arg2))
5244 goto efault;
5245 }
5246 break;
5247 #endif
5248 #ifdef TARGET_NR_waitid
5249 case TARGET_NR_waitid:
5250 {
5251 siginfo_t info;
5252 info.si_pid = 0;
5253 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5254 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5255 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5256 goto efault;
5257 host_to_target_siginfo(p, &info);
5258 unlock_user(p, arg3, sizeof(target_siginfo_t));
5259 }
5260 }
5261 break;
5262 #endif
5263 #ifdef TARGET_NR_creat /* not on alpha */
5264 case TARGET_NR_creat:
5265 if (!(p = lock_user_string(arg1)))
5266 goto efault;
5267 ret = get_errno(creat(p, arg2));
5268 unlock_user(p, arg1, 0);
5269 break;
5270 #endif
5271 case TARGET_NR_link:
5272 {
5273 void * p2;
5274 p = lock_user_string(arg1);
5275 p2 = lock_user_string(arg2);
5276 if (!p || !p2)
5277 ret = -TARGET_EFAULT;
5278 else
5279 ret = get_errno(link(p, p2));
5280 unlock_user(p2, arg2, 0);
5281 unlock_user(p, arg1, 0);
5282 }
5283 break;
5284 #if defined(TARGET_NR_linkat)
5285 case TARGET_NR_linkat:
5286 {
5287 void * p2 = NULL;
5288 if (!arg2 || !arg4)
5289 goto efault;
5290 p = lock_user_string(arg2);
5291 p2 = lock_user_string(arg4);
5292 if (!p || !p2)
5293 ret = -TARGET_EFAULT;
5294 else
5295 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5296 unlock_user(p, arg2, 0);
5297 unlock_user(p2, arg4, 0);
5298 }
5299 break;
5300 #endif
5301 case TARGET_NR_unlink:
5302 if (!(p = lock_user_string(arg1)))
5303 goto efault;
5304 ret = get_errno(unlink(p));
5305 unlock_user(p, arg1, 0);
5306 break;
5307 #if defined(TARGET_NR_unlinkat)
5308 case TARGET_NR_unlinkat:
5309 if (!(p = lock_user_string(arg2)))
5310 goto efault;
5311 ret = get_errno(unlinkat(arg1, p, arg3));
5312 unlock_user(p, arg2, 0);
5313 break;
5314 #endif
5315 case TARGET_NR_execve:
5316 {
5317 char **argp, **envp;
5318 int argc, envc;
5319 abi_ulong gp;
5320 abi_ulong guest_argp;
5321 abi_ulong guest_envp;
5322 abi_ulong addr;
5323 char **q;
5324 int total_size = 0;
5325
5326 argc = 0;
5327 guest_argp = arg2;
5328 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5329 if (get_user_ual(addr, gp))
5330 goto efault;
5331 if (!addr)
5332 break;
5333 argc++;
5334 }
5335 envc = 0;
5336 guest_envp = arg3;
5337 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5338 if (get_user_ual(addr, gp))
5339 goto efault;
5340 if (!addr)
5341 break;
5342 envc++;
5343 }
5344
5345 argp = alloca((argc + 1) * sizeof(void *));
5346 envp = alloca((envc + 1) * sizeof(void *));
5347
5348 for (gp = guest_argp, q = argp; gp;
5349 gp += sizeof(abi_ulong), q++) {
5350 if (get_user_ual(addr, gp))
5351 goto execve_efault;
5352 if (!addr)
5353 break;
5354 if (!(*q = lock_user_string(addr)))
5355 goto execve_efault;
5356 total_size += strlen(*q) + 1;
5357 }
5358 *q = NULL;
5359
5360 for (gp = guest_envp, q = envp; gp;
5361 gp += sizeof(abi_ulong), q++) {
5362 if (get_user_ual(addr, gp))
5363 goto execve_efault;
5364 if (!addr)
5365 break;
5366 if (!(*q = lock_user_string(addr)))
5367 goto execve_efault;
5368 total_size += strlen(*q) + 1;
5369 }
5370 *q = NULL;
5371
5372 /* This case will not be caught by the host's execve() if its
5373 page size is bigger than the target's. */
5374 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5375 ret = -TARGET_E2BIG;
5376 goto execve_end;
5377 }
5378 if (!(p = lock_user_string(arg1)))
5379 goto execve_efault;
5380 ret = get_errno(execve(p, argp, envp));
5381 unlock_user(p, arg1, 0);
5382
5383 goto execve_end;
5384
5385 execve_efault:
5386 ret = -TARGET_EFAULT;
5387
5388 execve_end:
5389 for (gp = guest_argp, q = argp; *q;
5390 gp += sizeof(abi_ulong), q++) {
5391 if (get_user_ual(addr, gp)
5392 || !addr)
5393 break;
5394 unlock_user(*q, addr, 0);
5395 }
5396 for (gp = guest_envp, q = envp; *q;
5397 gp += sizeof(abi_ulong), q++) {
5398 if (get_user_ual(addr, gp)
5399 || !addr)
5400 break;
5401 unlock_user(*q, addr, 0);
5402 }
5403 }
5404 break;
5405 case TARGET_NR_chdir:
5406 if (!(p = lock_user_string(arg1)))
5407 goto efault;
5408 ret = get_errno(chdir(p));
5409 unlock_user(p, arg1, 0);
5410 break;
5411 #ifdef TARGET_NR_time
5412 case TARGET_NR_time:
5413 {
5414 time_t host_time;
5415 ret = get_errno(time(&host_time));
5416 if (!is_error(ret)
5417 && arg1
5418 && put_user_sal(host_time, arg1))
5419 goto efault;
5420 }
5421 break;
5422 #endif
5423 case TARGET_NR_mknod:
5424 if (!(p = lock_user_string(arg1)))
5425 goto efault;
5426 ret = get_errno(mknod(p, arg2, arg3));
5427 unlock_user(p, arg1, 0);
5428 break;
5429 #if defined(TARGET_NR_mknodat)
5430 case TARGET_NR_mknodat:
5431 if (!(p = lock_user_string(arg2)))
5432 goto efault;
5433 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5434 unlock_user(p, arg2, 0);
5435 break;
5436 #endif
5437 case TARGET_NR_chmod:
5438 if (!(p = lock_user_string(arg1)))
5439 goto efault;
5440 ret = get_errno(chmod(p, arg2));
5441 unlock_user(p, arg1, 0);
5442 break;
5443 #ifdef TARGET_NR_break
5444 case TARGET_NR_break:
5445 goto unimplemented;
5446 #endif
5447 #ifdef TARGET_NR_oldstat
5448 case TARGET_NR_oldstat:
5449 goto unimplemented;
5450 #endif
5451 case TARGET_NR_lseek:
5452 ret = get_errno(lseek(arg1, arg2, arg3));
5453 break;
5454 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5455 /* Alpha specific */
5456 case TARGET_NR_getxpid:
5457 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5458 ret = get_errno(getpid());
5459 break;
5460 #endif
5461 #ifdef TARGET_NR_getpid
5462 case TARGET_NR_getpid:
5463 ret = get_errno(getpid());
5464 break;
5465 #endif
5466 case TARGET_NR_mount:
5467 {
5468 /* need to look at the data field */
5469 void *p2, *p3;
5470 p = lock_user_string(arg1);
5471 p2 = lock_user_string(arg2);
5472 p3 = lock_user_string(arg3);
5473 if (!p || !p2 || !p3)
5474 ret = -TARGET_EFAULT;
5475 else {
5476 /* FIXME - arg5 should be locked, but it isn't clear how to
5477 * do that since it's not guaranteed to be a NULL-terminated
5478 * string.
5479 */
5480 if ( ! arg5 )
5481 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5482 else
5483 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5484 }
5485 unlock_user(p, arg1, 0);
5486 unlock_user(p2, arg2, 0);
5487 unlock_user(p3, arg3, 0);
5488 break;
5489 }
5490 #ifdef TARGET_NR_umount
5491 case TARGET_NR_umount:
5492 if (!(p = lock_user_string(arg1)))
5493 goto efault;
5494 ret = get_errno(umount(p));
5495 unlock_user(p, arg1, 0);
5496 break;
5497 #endif
5498 #ifdef TARGET_NR_stime /* not on alpha */
5499 case TARGET_NR_stime:
5500 {
5501 time_t host_time;
5502 if (get_user_sal(host_time, arg1))
5503 goto efault;
5504 ret = get_errno(stime(&host_time));
5505 }
5506 break;
5507 #endif
5508 case TARGET_NR_ptrace:
5509 goto unimplemented;
5510 #ifdef TARGET_NR_alarm /* not on alpha */
5511 case TARGET_NR_alarm:
5512 ret = alarm(arg1);
5513 break;
5514 #endif
5515 #ifdef TARGET_NR_oldfstat
5516 case TARGET_NR_oldfstat:
5517 goto unimplemented;
5518 #endif
5519 #ifdef TARGET_NR_pause /* not on alpha */
5520 case TARGET_NR_pause:
5521 ret = get_errno(pause());
5522 break;
5523 #endif
5524 #ifdef TARGET_NR_utime
5525 case TARGET_NR_utime:
5526 {
5527 struct utimbuf tbuf, *host_tbuf;
5528 struct target_utimbuf *target_tbuf;
5529 if (arg2) {
5530 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5531 goto efault;
5532 tbuf.actime = tswapal(target_tbuf->actime);
5533 tbuf.modtime = tswapal(target_tbuf->modtime);
5534 unlock_user_struct(target_tbuf, arg2, 0);
5535 host_tbuf = &tbuf;
5536 } else {
5537 host_tbuf = NULL;
5538 }
5539 if (!(p = lock_user_string(arg1)))
5540 goto efault;
5541 ret = get_errno(utime(p, host_tbuf));
5542 unlock_user(p, arg1, 0);
5543 }
5544 break;
5545 #endif
5546 case TARGET_NR_utimes:
5547 {
5548 struct timeval *tvp, tv[2];
5549 if (arg2) {
5550 if (copy_from_user_timeval(&tv[0], arg2)
5551 || copy_from_user_timeval(&tv[1],
5552 arg2 + sizeof(struct target_timeval)))
5553 goto efault;
5554 tvp = tv;
5555 } else {
5556 tvp = NULL;
5557 }
5558 if (!(p = lock_user_string(arg1)))
5559 goto efault;
5560 ret = get_errno(utimes(p, tvp));
5561 unlock_user(p, arg1, 0);
5562 }
5563 break;
5564 #if defined(TARGET_NR_futimesat)
5565 case TARGET_NR_futimesat:
5566 {
5567 struct timeval *tvp, tv[2];
5568 if (arg3) {
5569 if (copy_from_user_timeval(&tv[0], arg3)
5570 || copy_from_user_timeval(&tv[1],
5571 arg3 + sizeof(struct target_timeval)))
5572 goto efault;
5573 tvp = tv;
5574 } else {
5575 tvp = NULL;
5576 }
5577 if (!(p = lock_user_string(arg2)))
5578 goto efault;
5579 ret = get_errno(futimesat(arg1, path(p), tvp));
5580 unlock_user(p, arg2, 0);
5581 }
5582 break;
5583 #endif
5584 #ifdef TARGET_NR_stty
5585 case TARGET_NR_stty:
5586 goto unimplemented;
5587 #endif
5588 #ifdef TARGET_NR_gtty
5589 case TARGET_NR_gtty:
5590 goto unimplemented;
5591 #endif
5592 case TARGET_NR_access:
5593 if (!(p = lock_user_string(arg1)))
5594 goto efault;
5595 ret = get_errno(access(path(p), arg2));
5596 unlock_user(p, arg1, 0);
5597 break;
5598 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5599 case TARGET_NR_faccessat:
5600 if (!(p = lock_user_string(arg2)))
5601 goto efault;
5602 ret = get_errno(faccessat(arg1, p, arg3, 0));
5603 unlock_user(p, arg2, 0);
5604 break;
5605 #endif
5606 #ifdef TARGET_NR_nice /* not on alpha */
5607 case TARGET_NR_nice:
5608 ret = get_errno(nice(arg1));
5609 break;
5610 #endif
5611 #ifdef TARGET_NR_ftime
5612 case TARGET_NR_ftime:
5613 goto unimplemented;
5614 #endif
5615 case TARGET_NR_sync:
5616 sync();
5617 ret = 0;
5618 break;
5619 case TARGET_NR_kill:
5620 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5621 break;
5622 case TARGET_NR_rename:
5623 {
5624 void *p2;
5625 p = lock_user_string(arg1);
5626 p2 = lock_user_string(arg2);
5627 if (!p || !p2)
5628 ret = -TARGET_EFAULT;
5629 else
5630 ret = get_errno(rename(p, p2));
5631 unlock_user(p2, arg2, 0);
5632 unlock_user(p, arg1, 0);
5633 }
5634 break;
5635 #if defined(TARGET_NR_renameat)
5636 case TARGET_NR_renameat:
5637 {
5638 void *p2;
5639 p = lock_user_string(arg2);
5640 p2 = lock_user_string(arg4);
5641 if (!p || !p2)
5642 ret = -TARGET_EFAULT;
5643 else
5644 ret = get_errno(renameat(arg1, p, arg3, p2));
5645 unlock_user(p2, arg4, 0);
5646 unlock_user(p, arg2, 0);
5647 }
5648 break;
5649 #endif
5650 case TARGET_NR_mkdir:
5651 if (!(p = lock_user_string(arg1)))
5652 goto efault;
5653 ret = get_errno(mkdir(p, arg2));
5654 unlock_user(p, arg1, 0);
5655 break;
5656 #if defined(TARGET_NR_mkdirat)
5657 case TARGET_NR_mkdirat:
5658 if (!(p = lock_user_string(arg2)))
5659 goto efault;
5660 ret = get_errno(mkdirat(arg1, p, arg3));
5661 unlock_user(p, arg2, 0);
5662 break;
5663 #endif
5664 case TARGET_NR_rmdir:
5665 if (!(p = lock_user_string(arg1)))
5666 goto efault;
5667 ret = get_errno(rmdir(p));
5668 unlock_user(p, arg1, 0);
5669 break;
5670 case TARGET_NR_dup:
5671 ret = get_errno(dup(arg1));
5672 break;
5673 case TARGET_NR_pipe:
5674 ret = do_pipe(cpu_env, arg1, 0, 0);
5675 break;
5676 #ifdef TARGET_NR_pipe2
5677 case TARGET_NR_pipe2:
5678 ret = do_pipe(cpu_env, arg1,
5679 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5680 break;
5681 #endif
5682 case TARGET_NR_times:
5683 {
5684 struct target_tms *tmsp;
5685 struct tms tms;
5686 ret = get_errno(times(&tms));
5687 if (arg1) {
5688 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5689 if (!tmsp)
5690 goto efault;
5691 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5692 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5693 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5694 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5695 }
5696 if (!is_error(ret))
5697 ret = host_to_target_clock_t(ret);
5698 }
5699 break;
5700 #ifdef TARGET_NR_prof
5701 case TARGET_NR_prof:
5702 goto unimplemented;
5703 #endif
5704 #ifdef TARGET_NR_signal
5705 case TARGET_NR_signal:
5706 goto unimplemented;
5707 #endif
5708 case TARGET_NR_acct:
5709 if (arg1 == 0) {
5710 ret = get_errno(acct(NULL));
5711 } else {
5712 if (!(p = lock_user_string(arg1)))
5713 goto efault;
5714 ret = get_errno(acct(path(p)));
5715 unlock_user(p, arg1, 0);
5716 }
5717 break;
5718 #ifdef TARGET_NR_umount2 /* not on alpha */
5719 case TARGET_NR_umount2:
5720 if (!(p = lock_user_string(arg1)))
5721 goto efault;
5722 ret = get_errno(umount2(p, arg2));
5723 unlock_user(p, arg1, 0);
5724 break;
5725 #endif
5726 #ifdef TARGET_NR_lock
5727 case TARGET_NR_lock:
5728 goto unimplemented;
5729 #endif
5730 case TARGET_NR_ioctl:
5731 ret = do_ioctl(arg1, arg2, arg3);
5732 break;
5733 case TARGET_NR_fcntl:
5734 ret = do_fcntl(arg1, arg2, arg3);
5735 break;
5736 #ifdef TARGET_NR_mpx
5737 case TARGET_NR_mpx:
5738 goto unimplemented;
5739 #endif
5740 case TARGET_NR_setpgid:
5741 ret = get_errno(setpgid(arg1, arg2));
5742 break;
5743 #ifdef TARGET_NR_ulimit
5744 case TARGET_NR_ulimit:
5745 goto unimplemented;
5746 #endif
5747 #ifdef TARGET_NR_oldolduname
5748 case TARGET_NR_oldolduname:
5749 goto unimplemented;
5750 #endif
5751 case TARGET_NR_umask:
5752 ret = get_errno(umask(arg1));
5753 break;
5754 case TARGET_NR_chroot:
5755 if (!(p = lock_user_string(arg1)))
5756 goto efault;
5757 ret = get_errno(chroot(p));
5758 unlock_user(p, arg1, 0);
5759 break;
5760 case TARGET_NR_ustat:
5761 goto unimplemented;
5762 case TARGET_NR_dup2:
5763 ret = get_errno(dup2(arg1, arg2));
5764 break;
5765 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5766 case TARGET_NR_dup3:
5767 ret = get_errno(dup3(arg1, arg2, arg3));
5768 break;
5769 #endif
5770 #ifdef TARGET_NR_getppid /* not on alpha */
5771 case TARGET_NR_getppid:
5772 ret = get_errno(getppid());
5773 break;
5774 #endif
5775 case TARGET_NR_getpgrp:
5776 ret = get_errno(getpgrp());
5777 break;
5778 case TARGET_NR_setsid:
5779 ret = get_errno(setsid());
5780 break;
5781 #ifdef TARGET_NR_sigaction
5782 case TARGET_NR_sigaction:
5783 {
5784 #if defined(TARGET_ALPHA)
5785 struct target_sigaction act, oact, *pact = 0;
5786 struct target_old_sigaction *old_act;
5787 if (arg2) {
5788 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5789 goto efault;
5790 act._sa_handler = old_act->_sa_handler;
5791 target_siginitset(&act.sa_mask, old_act->sa_mask);
5792 act.sa_flags = old_act->sa_flags;
5793 act.sa_restorer = 0;
5794 unlock_user_struct(old_act, arg2, 0);
5795 pact = &act;
5796 }
5797 ret = get_errno(do_sigaction(arg1, pact, &oact));
5798 if (!is_error(ret) && arg3) {
5799 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5800 goto efault;
5801 old_act->_sa_handler = oact._sa_handler;
5802 old_act->sa_mask = oact.sa_mask.sig[0];
5803 old_act->sa_flags = oact.sa_flags;
5804 unlock_user_struct(old_act, arg3, 1);
5805 }
5806 #elif defined(TARGET_MIPS)
5807 struct target_sigaction act, oact, *pact, *old_act;
5808
5809 if (arg2) {
5810 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5811 goto efault;
5812 act._sa_handler = old_act->_sa_handler;
5813 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5814 act.sa_flags = old_act->sa_flags;
5815 unlock_user_struct(old_act, arg2, 0);
5816 pact = &act;
5817 } else {
5818 pact = NULL;
5819 }
5820
5821 ret = get_errno(do_sigaction(arg1, pact, &oact));
5822
5823 if (!is_error(ret) && arg3) {
5824 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5825 goto efault;
5826 old_act->_sa_handler = oact._sa_handler;
5827 old_act->sa_flags = oact.sa_flags;
5828 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5829 old_act->sa_mask.sig[1] = 0;
5830 old_act->sa_mask.sig[2] = 0;
5831 old_act->sa_mask.sig[3] = 0;
5832 unlock_user_struct(old_act, arg3, 1);
5833 }
5834 #else
5835 struct target_old_sigaction *old_act;
5836 struct target_sigaction act, oact, *pact;
5837 if (arg2) {
5838 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5839 goto efault;
5840 act._sa_handler = old_act->_sa_handler;
5841 target_siginitset(&act.sa_mask, old_act->sa_mask);
5842 act.sa_flags = old_act->sa_flags;
5843 act.sa_restorer = old_act->sa_restorer;
5844 unlock_user_struct(old_act, arg2, 0);
5845 pact = &act;
5846 } else {
5847 pact = NULL;
5848 }
5849 ret = get_errno(do_sigaction(arg1, pact, &oact));
5850 if (!is_error(ret) && arg3) {
5851 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5852 goto efault;
5853 old_act->_sa_handler = oact._sa_handler;
5854 old_act->sa_mask = oact.sa_mask.sig[0];
5855 old_act->sa_flags = oact.sa_flags;
5856 old_act->sa_restorer = oact.sa_restorer;
5857 unlock_user_struct(old_act, arg3, 1);
5858 }
5859 #endif
5860 }
5861 break;
5862 #endif
5863 case TARGET_NR_rt_sigaction:
5864 {
5865 #if defined(TARGET_ALPHA)
5866 struct target_sigaction act, oact, *pact = 0;
5867 struct target_rt_sigaction *rt_act;
5868 /* ??? arg4 == sizeof(sigset_t). */
5869 if (arg2) {
5870 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5871 goto efault;
5872 act._sa_handler = rt_act->_sa_handler;
5873 act.sa_mask = rt_act->sa_mask;
5874 act.sa_flags = rt_act->sa_flags;
5875 act.sa_restorer = arg5;
5876 unlock_user_struct(rt_act, arg2, 0);
5877 pact = &act;
5878 }
5879 ret = get_errno(do_sigaction(arg1, pact, &oact));
5880 if (!is_error(ret) && arg3) {
5881 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5882 goto efault;
5883 rt_act->_sa_handler = oact._sa_handler;
5884 rt_act->sa_mask = oact.sa_mask;
5885 rt_act->sa_flags = oact.sa_flags;
5886 unlock_user_struct(rt_act, arg3, 1);
5887 }
5888 #else
5889 struct target_sigaction *act;
5890 struct target_sigaction *oact;
5891
5892 if (arg2) {
5893 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5894 goto efault;
5895 } else
5896 act = NULL;
5897 if (arg3) {
5898 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5899 ret = -TARGET_EFAULT;
5900 goto rt_sigaction_fail;
5901 }
5902 } else
5903 oact = NULL;
5904 ret = get_errno(do_sigaction(arg1, act, oact));
5905 rt_sigaction_fail:
5906 if (act)
5907 unlock_user_struct(act, arg2, 0);
5908 if (oact)
5909 unlock_user_struct(oact, arg3, 1);
5910 #endif
5911 }
5912 break;
5913 #ifdef TARGET_NR_sgetmask /* not on alpha */
5914 case TARGET_NR_sgetmask:
5915 {
5916 sigset_t cur_set;
5917 abi_ulong target_set;
5918 sigprocmask(0, NULL, &cur_set);
5919 host_to_target_old_sigset(&target_set, &cur_set);
5920 ret = target_set;
5921 }
5922 break;
5923 #endif
5924 #ifdef TARGET_NR_ssetmask /* not on alpha */
5925 case TARGET_NR_ssetmask:
5926 {
5927 sigset_t set, oset, cur_set;
5928 abi_ulong target_set = arg1;
5929 sigprocmask(0, NULL, &cur_set);
5930 target_to_host_old_sigset(&set, &target_set);
5931 sigorset(&set, &set, &cur_set);
5932 sigprocmask(SIG_SETMASK, &set, &oset);
5933 host_to_target_old_sigset(&target_set, &oset);
5934 ret = target_set;
5935 }
5936 break;
5937 #endif
5938 #ifdef TARGET_NR_sigprocmask
5939 case TARGET_NR_sigprocmask:
5940 {
5941 #if defined(TARGET_ALPHA)
5942 sigset_t set, oldset;
5943 abi_ulong mask;
5944 int how;
5945
5946 switch (arg1) {
5947 case TARGET_SIG_BLOCK:
5948 how = SIG_BLOCK;
5949 break;
5950 case TARGET_SIG_UNBLOCK:
5951 how = SIG_UNBLOCK;
5952 break;
5953 case TARGET_SIG_SETMASK:
5954 how = SIG_SETMASK;
5955 break;
5956 default:
5957 ret = -TARGET_EINVAL;
5958 goto fail;
5959 }
5960 mask = arg2;
5961 target_to_host_old_sigset(&set, &mask);
5962
5963 ret = get_errno(sigprocmask(how, &set, &oldset));
5964 if (!is_error(ret)) {
5965 host_to_target_old_sigset(&mask, &oldset);
5966 ret = mask;
5967 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5968 }
5969 #else
5970 sigset_t set, oldset, *set_ptr;
5971 int how;
5972
5973 if (arg2) {
5974 switch (arg1) {
5975 case TARGET_SIG_BLOCK:
5976 how = SIG_BLOCK;
5977 break;
5978 case TARGET_SIG_UNBLOCK:
5979 how = SIG_UNBLOCK;
5980 break;
5981 case TARGET_SIG_SETMASK:
5982 how = SIG_SETMASK;
5983 break;
5984 default:
5985 ret = -TARGET_EINVAL;
5986 goto fail;
5987 }
5988 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5989 goto efault;
5990 target_to_host_old_sigset(&set, p);
5991 unlock_user(p, arg2, 0);
5992 set_ptr = &set;
5993 } else {
5994 how = 0;
5995 set_ptr = NULL;
5996 }
5997 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5998 if (!is_error(ret) && arg3) {
5999 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6000 goto efault;
6001 host_to_target_old_sigset(p, &oldset);
6002 unlock_user(p, arg3, sizeof(target_sigset_t));
6003 }
6004 #endif
6005 }
6006 break;
6007 #endif
6008 case TARGET_NR_rt_sigprocmask:
6009 {
6010 int how = arg1;
6011 sigset_t set, oldset, *set_ptr;
6012
6013 if (arg2) {
6014 switch(how) {
6015 case TARGET_SIG_BLOCK:
6016 how = SIG_BLOCK;
6017 break;
6018 case TARGET_SIG_UNBLOCK:
6019 how = SIG_UNBLOCK;
6020 break;
6021 case TARGET_SIG_SETMASK:
6022 how = SIG_SETMASK;
6023 break;
6024 default:
6025 ret = -TARGET_EINVAL;
6026 goto fail;
6027 }
6028 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6029 goto efault;
6030 target_to_host_sigset(&set, p);
6031 unlock_user(p, arg2, 0);
6032 set_ptr = &set;
6033 } else {
6034 how = 0;
6035 set_ptr = NULL;
6036 }
6037 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6038 if (!is_error(ret) && arg3) {
6039 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6040 goto efault;
6041 host_to_target_sigset(p, &oldset);
6042 unlock_user(p, arg3, sizeof(target_sigset_t));
6043 }
6044 }
6045 break;
6046 #ifdef TARGET_NR_sigpending
6047 case TARGET_NR_sigpending:
6048 {
6049 sigset_t set;
6050 ret = get_errno(sigpending(&set));
6051 if (!is_error(ret)) {
6052 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6053 goto efault;
6054 host_to_target_old_sigset(p, &set);
6055 unlock_user(p, arg1, sizeof(target_sigset_t));
6056 }
6057 }
6058 break;
6059 #endif
6060 case TARGET_NR_rt_sigpending:
6061 {
6062 sigset_t set;
6063 ret = get_errno(sigpending(&set));
6064 if (!is_error(ret)) {
6065 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6066 goto efault;
6067 host_to_target_sigset(p, &set);
6068 unlock_user(p, arg1, sizeof(target_sigset_t));
6069 }
6070 }
6071 break;
6072 #ifdef TARGET_NR_sigsuspend
6073 case TARGET_NR_sigsuspend:
6074 {
6075 sigset_t set;
6076 #if defined(TARGET_ALPHA)
6077 abi_ulong mask = arg1;
6078 target_to_host_old_sigset(&set, &mask);
6079 #else
6080 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6081 goto efault;
6082 target_to_host_old_sigset(&set, p);
6083 unlock_user(p, arg1, 0);
6084 #endif
6085 ret = get_errno(sigsuspend(&set));
6086 }
6087 break;
6088 #endif
6089 case TARGET_NR_rt_sigsuspend:
6090 {
6091 sigset_t set;
6092 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6093 goto efault;
6094 target_to_host_sigset(&set, p);
6095 unlock_user(p, arg1, 0);
6096 ret = get_errno(sigsuspend(&set));
6097 }
6098 break;
6099 case TARGET_NR_rt_sigtimedwait:
6100 {
6101 sigset_t set;
6102 struct timespec uts, *puts;
6103 siginfo_t uinfo;
6104
6105 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6106 goto efault;
6107 target_to_host_sigset(&set, p);
6108 unlock_user(p, arg1, 0);
6109 if (arg3) {
6110 puts = &uts;
6111 target_to_host_timespec(puts, arg3);
6112 } else {
6113 puts = NULL;
6114 }
6115 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6116 if (!is_error(ret) && arg2) {
6117 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6118 goto efault;
6119 host_to_target_siginfo(p, &uinfo);
6120 unlock_user(p, arg2, sizeof(target_siginfo_t));
6121 }
6122 }
6123 break;
6124 case TARGET_NR_rt_sigqueueinfo:
6125 {
6126 siginfo_t uinfo;
6127 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6128 goto efault;
6129 target_to_host_siginfo(&uinfo, p);
6130 unlock_user(p, arg1, 0);
6131 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6132 }
6133 break;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
        break;
6144 case TARGET_NR_sethostname:
6145 if (!(p = lock_user_string(arg1)))
6146 goto efault;
6147 ret = get_errno(sethostname(p, arg2));
6148 unlock_user(p, arg1, 0);
6149 break;
6150 case TARGET_NR_setrlimit:
6151 {
6152 int resource = target_to_host_resource(arg1);
6153 struct target_rlimit *target_rlim;
6154 struct rlimit rlim;
6155 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6156 goto efault;
6157 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6158 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6159 unlock_user_struct(target_rlim, arg2, 0);
6160 ret = get_errno(setrlimit(resource, &rlim));
6161 }
6162 break;
6163 case TARGET_NR_getrlimit:
6164 {
6165 int resource = target_to_host_resource(arg1);
6166 struct target_rlimit *target_rlim;
6167 struct rlimit rlim;
6168
6169 ret = get_errno(getrlimit(resource, &rlim));
6170 if (!is_error(ret)) {
6171 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6172 goto efault;
6173 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6174 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6175 unlock_user_struct(target_rlim, arg2, 1);
6176 }
6177 }
6178 break;
6179 case TARGET_NR_getrusage:
6180 {
6181 struct rusage rusage;
6182 ret = get_errno(getrusage(arg1, &rusage));
6183 if (!is_error(ret)) {
6184 host_to_target_rusage(arg2, &rusage);
6185 }
6186 }
6187 break;
6188 case TARGET_NR_gettimeofday:
6189 {
6190 struct timeval tv;
6191 ret = get_errno(gettimeofday(&tv, NULL));
6192 if (!is_error(ret)) {
6193 if (copy_to_user_timeval(arg1, &tv))
6194 goto efault;
6195 }
6196 }
6197 break;
6198 case TARGET_NR_settimeofday:
6199 {
6200 struct timeval tv;
6201 if (copy_from_user_timeval(&tv, arg1))
6202 goto efault;
6203 ret = get_errno(settimeofday(&tv, NULL));
6204 }
6205 break;
6206 #if defined(TARGET_NR_select)
6207 case TARGET_NR_select:
6208 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6209 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6210 #else
6211 {
6212 struct target_sel_arg_struct *sel;
6213 abi_ulong inp, outp, exp, tvp;
6214 long nsel;
6215
6216 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6217 goto efault;
6218 nsel = tswapal(sel->n);
6219 inp = tswapal(sel->inp);
6220 outp = tswapal(sel->outp);
6221 exp = tswapal(sel->exp);
6222 tvp = tswapal(sel->tvp);
6223 unlock_user_struct(sel, arg1, 0);
6224 ret = do_select(nsel, inp, outp, exp, tvp);
6225 }
6226 #endif
6227 break;
6228 #endif
6229 #ifdef TARGET_NR_pselect6
6230 case TARGET_NR_pselect6:
6231 {
6232 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6233 fd_set rfds, wfds, efds;
6234 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6235 struct timespec ts, *ts_ptr;
6236
6237 /*
6238 * The 6th arg is actually two args smashed together,
6239 * so we cannot use the C library.
6240 */
6241 sigset_t set;
6242 struct {
6243 sigset_t *set;
6244 size_t size;
6245 } sig, *sig_ptr;
6246
6247 abi_ulong arg_sigset, arg_sigsize, *arg7;
6248 target_sigset_t *target_sigset;
6249
6250 n = arg1;
6251 rfd_addr = arg2;
6252 wfd_addr = arg3;
6253 efd_addr = arg4;
6254 ts_addr = arg5;
6255
6256 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6257 if (ret) {
6258 goto fail;
6259 }
6260 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6261 if (ret) {
6262 goto fail;
6263 }
6264 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6265 if (ret) {
6266 goto fail;
6267 }
6268
6269 /*
6270 * This takes a timespec, and not a timeval, so we cannot
6271 * use the do_select() helper ...
6272 */
6273 if (ts_addr) {
6274 if (target_to_host_timespec(&ts, ts_addr)) {
6275 goto efault;
6276 }
6277 ts_ptr = &ts;
6278 } else {
6279 ts_ptr = NULL;
6280 }
6281
6282 /* Extract the two packed args for the sigset */
6283 if (arg6) {
6284 sig_ptr = &sig;
6285 sig.size = _NSIG / 8;
6286
6287 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6288 if (!arg7) {
6289 goto efault;
6290 }
6291 arg_sigset = tswapal(arg7[0]);
6292 arg_sigsize = tswapal(arg7[1]);
6293 unlock_user(arg7, arg6, 0);
6294
6295 if (arg_sigset) {
6296 sig.set = &set;
6297 if (arg_sigsize != sizeof(*target_sigset)) {
6298 /* Like the kernel, we enforce correct size sigsets */
6299 ret = -TARGET_EINVAL;
6300 goto fail;
6301 }
6302 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6303 sizeof(*target_sigset), 1);
6304 if (!target_sigset) {
6305 goto efault;
6306 }
6307 target_to_host_sigset(&set, target_sigset);
6308 unlock_user(target_sigset, arg_sigset, 0);
6309 } else {
6310 sig.set = NULL;
6311 }
6312 } else {
6313 sig_ptr = NULL;
6314 }
6315
6316 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6317 ts_ptr, sig_ptr));
6318
6319 if (!is_error(ret)) {
6320 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6321 goto efault;
6322 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6323 goto efault;
6324 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6325 goto efault;
6326
6327 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6328 goto efault;
6329 }
6330 }
6331 break;
6332 #endif
6333 case TARGET_NR_symlink:
6334 {
6335 void *p2;
6336 p = lock_user_string(arg1);
6337 p2 = lock_user_string(arg2);
6338 if (!p || !p2)
6339 ret = -TARGET_EFAULT;
6340 else
6341 ret = get_errno(symlink(p, p2));
6342 unlock_user(p2, arg2, 0);
6343 unlock_user(p, arg1, 0);
6344 }
6345 break;
6346 #if defined(TARGET_NR_symlinkat)
6347 case TARGET_NR_symlinkat:
6348 {
6349 void *p2;
6350 p = lock_user_string(arg1);
6351 p2 = lock_user_string(arg3);
6352 if (!p || !p2)
6353 ret = -TARGET_EFAULT;
6354 else
6355 ret = get_errno(symlinkat(p, arg2, p2));
6356 unlock_user(p2, arg3, 0);
6357 unlock_user(p, arg1, 0);
6358 }
6359 break;
6360 #endif
6361 #ifdef TARGET_NR_oldlstat
6362 case TARGET_NR_oldlstat:
6363 goto unimplemented;
6364 #endif
6365 case TARGET_NR_readlink:
6366 {
6367 void *p2;
6368 p = lock_user_string(arg1);
6369 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6370 if (!p || !p2) {
6371 ret = -TARGET_EFAULT;
6372 } else if (is_proc_myself((const char *)p, "exe")) {
6373 char real[PATH_MAX], *temp;
6374 temp = realpath(exec_path, real);
6375 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6376 snprintf((char *)p2, arg3, "%s", real);
6377 } else {
6378 ret = get_errno(readlink(path(p), p2, arg3));
6379 }
6380 unlock_user(p2, arg2, ret);
6381 unlock_user(p, arg1, 0);
6382 }
6383 break;
6384 #if defined(TARGET_NR_readlinkat)
6385 case TARGET_NR_readlinkat:
6386 {
6387 void *p2;
6388 p = lock_user_string(arg2);
6389 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6390 if (!p || !p2) {
6391 ret = -TARGET_EFAULT;
6392 } else if (is_proc_myself((const char *)p, "exe")) {
6393 char real[PATH_MAX], *temp;
6394 temp = realpath(exec_path, real);
6395 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6396 snprintf((char *)p2, arg4, "%s", real);
6397 } else {
6398 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6399 }
6400 unlock_user(p2, arg3, ret);
6401 unlock_user(p, arg2, 0);
6402 }
6403 break;
6404 #endif
6405 #ifdef TARGET_NR_uselib
6406 case TARGET_NR_uselib:
6407 goto unimplemented;
6408 #endif
6409 #ifdef TARGET_NR_swapon
6410 case TARGET_NR_swapon:
6411 if (!(p = lock_user_string(arg1)))
6412 goto efault;
6413 ret = get_errno(swapon(p, arg2));
6414 unlock_user(p, arg1, 0);
6415 break;
6416 #endif
6417 case TARGET_NR_reboot:
6418 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6419 /* arg4 must be ignored in all other cases */
6420 p = lock_user_string(arg4);
6421 if (!p) {
6422 goto efault;
6423 }
6424 ret = get_errno(reboot(arg1, arg2, arg3, p));
6425 unlock_user(p, arg4, 0);
6426 } else {
6427 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6428 }
6429 break;
6430 #ifdef TARGET_NR_readdir
6431 case TARGET_NR_readdir:
6432 goto unimplemented;
6433 #endif
6434 #ifdef TARGET_NR_mmap
6435 case TARGET_NR_mmap:
6436 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6437 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6438 || defined(TARGET_S390X)
6439 {
6440 abi_ulong *v;
6441 abi_ulong v1, v2, v3, v4, v5, v6;
6442 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6443 goto efault;
6444 v1 = tswapal(v[0]);
6445 v2 = tswapal(v[1]);
6446 v3 = tswapal(v[2]);
6447 v4 = tswapal(v[3]);
6448 v5 = tswapal(v[4]);
6449 v6 = tswapal(v[5]);
6450 unlock_user(v, arg1, 0);
6451 ret = get_errno(target_mmap(v1, v2, v3,
6452 target_to_host_bitmask(v4, mmap_flags_tbl),
6453 v5, v6));
6454 }
6455 #else
6456 ret = get_errno(target_mmap(arg1, arg2, arg3,
6457 target_to_host_bitmask(arg4, mmap_flags_tbl),
6458 arg5,
6459 arg6));
6460 #endif
6461 break;
6462 #endif
6463 #ifdef TARGET_NR_mmap2
6464 case TARGET_NR_mmap2:
6465 #ifndef MMAP_SHIFT
6466 #define MMAP_SHIFT 12
6467 #endif
6468 ret = get_errno(target_mmap(arg1, arg2, arg3,
6469 target_to_host_bitmask(arg4, mmap_flags_tbl),
6470 arg5,
6471 arg6 << MMAP_SHIFT));
6472 break;
6473 #endif
6474 case TARGET_NR_munmap:
6475 ret = get_errno(target_munmap(arg1, arg2));
6476 break;
6477 case TARGET_NR_mprotect:
6478 {
6479 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6480 /* Special hack to detect libc making the stack executable. */
6481 if ((arg3 & PROT_GROWSDOWN)
6482 && arg1 >= ts->info->stack_limit
6483 && arg1 <= ts->info->start_stack) {
6484 arg3 &= ~PROT_GROWSDOWN;
6485 arg2 = arg2 + arg1 - ts->info->stack_limit;
6486 arg1 = ts->info->stack_limit;
6487 }
6488 }
6489 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6490 break;
6491 #ifdef TARGET_NR_mremap
6492 case TARGET_NR_mremap:
6493 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6494 break;
6495 #endif
6496 /* ??? msync/mlock/munlock are broken for softmmu. */
6497 #ifdef TARGET_NR_msync
6498 case TARGET_NR_msync:
6499 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6500 break;
6501 #endif
6502 #ifdef TARGET_NR_mlock
6503 case TARGET_NR_mlock:
6504 ret = get_errno(mlock(g2h(arg1), arg2));
6505 break;
6506 #endif
6507 #ifdef TARGET_NR_munlock
6508 case TARGET_NR_munlock:
6509 ret = get_errno(munlock(g2h(arg1), arg2));
6510 break;
6511 #endif
6512 #ifdef TARGET_NR_mlockall
6513 case TARGET_NR_mlockall:
6514 ret = get_errno(mlockall(arg1));
6515 break;
6516 #endif
6517 #ifdef TARGET_NR_munlockall
6518 case TARGET_NR_munlockall:
6519 ret = get_errno(munlockall());
6520 break;
6521 #endif
6522 case TARGET_NR_truncate:
6523 if (!(p = lock_user_string(arg1)))
6524 goto efault;
6525 ret = get_errno(truncate(p, arg2));
6526 unlock_user(p, arg1, 0);
6527 break;
6528 case TARGET_NR_ftruncate:
6529 ret = get_errno(ftruncate(arg1, arg2));
6530 break;
6531 case TARGET_NR_fchmod:
6532 ret = get_errno(fchmod(arg1, arg2));
6533 break;
6534 #if defined(TARGET_NR_fchmodat)
6535 case TARGET_NR_fchmodat:
6536 if (!(p = lock_user_string(arg2)))
6537 goto efault;
6538 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6539 unlock_user(p, arg2, 0);
6540 break;
6541 #endif
6542 case TARGET_NR_getpriority:
6543 /* Note that negative values are valid for getpriority, so we must
6544 differentiate based on errno settings. */
6545 errno = 0;
6546 ret = getpriority(arg1, arg2);
6547 if (ret == -1 && errno != 0) {
6548 ret = -host_to_target_errno(errno);
6549 break;
6550 }
6551 #ifdef TARGET_ALPHA
6552 /* Return value is the unbiased priority. Signal no error. */
6553 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6554 #else
6555 /* Return value is a biased priority to avoid negative numbers. */
6556 ret = 20 - ret;
6557 #endif
6558 break;
6559 case TARGET_NR_setpriority:
6560 ret = get_errno(setpriority(arg1, arg2, arg3));
6561 break;
6562 #ifdef TARGET_NR_profil
6563 case TARGET_NR_profil:
6564 goto unimplemented;
6565 #endif
6566 case TARGET_NR_statfs:
6567 if (!(p = lock_user_string(arg1)))
6568 goto efault;
6569 ret = get_errno(statfs(path(p), &stfs));
6570 unlock_user(p, arg1, 0);
6571 convert_statfs:
6572 if (!is_error(ret)) {
6573 struct target_statfs *target_stfs;
6574
6575 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6576 goto efault;
6577 __put_user(stfs.f_type, &target_stfs->f_type);
6578 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6579 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6580 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6581 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6582 __put_user(stfs.f_files, &target_stfs->f_files);
6583 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6584 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6585 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6586 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6587 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6588 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6589 unlock_user_struct(target_stfs, arg2, 1);
6590 }
6591 break;
6592 case TARGET_NR_fstatfs:
6593 ret = get_errno(fstatfs(arg1, &stfs));
6594 goto convert_statfs;
6595 #ifdef TARGET_NR_statfs64
6596 case TARGET_NR_statfs64:
6597 if (!(p = lock_user_string(arg1)))
6598 goto efault;
6599 ret = get_errno(statfs(path(p), &stfs));
6600 unlock_user(p, arg1, 0);
6601 convert_statfs64:
6602 if (!is_error(ret)) {
6603 struct target_statfs64 *target_stfs;
6604
6605 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6606 goto efault;
6607 __put_user(stfs.f_type, &target_stfs->f_type);
6608 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6609 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6610 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6611 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6612 __put_user(stfs.f_files, &target_stfs->f_files);
6613 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6614 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6615 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6616 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6617 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6618 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6619 unlock_user_struct(target_stfs, arg3, 1);
6620 }
6621 break;
6622 case TARGET_NR_fstatfs64:
6623 ret = get_errno(fstatfs(arg1, &stfs));
6624 goto convert_statfs64;
6625 #endif
6626 #ifdef TARGET_NR_ioperm
6627 case TARGET_NR_ioperm:
6628 goto unimplemented;
6629 #endif
6630 #ifdef TARGET_NR_socketcall
6631 case TARGET_NR_socketcall:
6632 ret = do_socketcall(arg1, arg2);
6633 break;
6634 #endif
6635 #ifdef TARGET_NR_accept
6636 case TARGET_NR_accept:
6637 ret = do_accept4(arg1, arg2, arg3, 0);
6638 break;
6639 #endif
6640 #ifdef TARGET_NR_accept4
6641 case TARGET_NR_accept4:
6642 #ifdef CONFIG_ACCEPT4
6643 ret = do_accept4(arg1, arg2, arg3, arg4);
6644 #else
6645 goto unimplemented;
6646 #endif
6647 break;
6648 #endif
6649 #ifdef TARGET_NR_bind
6650 case TARGET_NR_bind:
6651 ret = do_bind(arg1, arg2, arg3);
6652 break;
6653 #endif
6654 #ifdef TARGET_NR_connect
6655 case TARGET_NR_connect:
6656 ret = do_connect(arg1, arg2, arg3);
6657 break;
6658 #endif
6659 #ifdef TARGET_NR_getpeername
6660 case TARGET_NR_getpeername:
6661 ret = do_getpeername(arg1, arg2, arg3);
6662 break;
6663 #endif
6664 #ifdef TARGET_NR_getsockname
6665 case TARGET_NR_getsockname:
6666 ret = do_getsockname(arg1, arg2, arg3);
6667 break;
6668 #endif
6669 #ifdef TARGET_NR_getsockopt
6670 case TARGET_NR_getsockopt:
6671 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6672 break;
6673 #endif
6674 #ifdef TARGET_NR_listen
6675 case TARGET_NR_listen:
6676 ret = get_errno(listen(arg1, arg2));
6677 break;
6678 #endif
6679 #ifdef TARGET_NR_recv
6680 case TARGET_NR_recv:
6681 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6682 break;
6683 #endif
6684 #ifdef TARGET_NR_recvfrom
6685 case TARGET_NR_recvfrom:
6686 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6687 break;
6688 #endif
6689 #ifdef TARGET_NR_recvmsg
6690 case TARGET_NR_recvmsg:
6691 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6692 break;
6693 #endif
6694 #ifdef TARGET_NR_send
6695 case TARGET_NR_send:
6696 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6697 break;
6698 #endif
6699 #ifdef TARGET_NR_sendmsg
6700 case TARGET_NR_sendmsg:
6701 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6702 break;
6703 #endif
6704 #ifdef TARGET_NR_sendto
6705 case TARGET_NR_sendto:
6706 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6707 break;
6708 #endif
6709 #ifdef TARGET_NR_shutdown
6710 case TARGET_NR_shutdown:
6711 ret = get_errno(shutdown(arg1, arg2));
6712 break;
6713 #endif
6714 #ifdef TARGET_NR_socket
6715 case TARGET_NR_socket:
6716 ret = do_socket(arg1, arg2, arg3);
6717 break;
6718 #endif
6719 #ifdef TARGET_NR_socketpair
6720 case TARGET_NR_socketpair:
6721 ret = do_socketpair(arg1, arg2, arg3, arg4);
6722 break;
6723 #endif
6724 #ifdef TARGET_NR_setsockopt
6725 case TARGET_NR_setsockopt:
6726 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6727 break;
6728 #endif
6729
6730 case TARGET_NR_syslog:
6731 if (!(p = lock_user_string(arg2)))
6732 goto efault;
6733 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6734 unlock_user(p, arg2, 0);
6735 break;
6736
6737 case TARGET_NR_setitimer:
6738 {
6739 struct itimerval value, ovalue, *pvalue;
6740
6741 if (arg2) {
6742 pvalue = &value;
6743 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6744 || copy_from_user_timeval(&pvalue->it_value,
6745 arg2 + sizeof(struct target_timeval)))
6746 goto efault;
6747 } else {
6748 pvalue = NULL;
6749 }
6750 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6751 if (!is_error(ret) && arg3) {
6752 if (copy_to_user_timeval(arg3,
6753 &ovalue.it_interval)
6754 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6755 &ovalue.it_value))
6756 goto efault;
6757 }
6758 }
6759 break;
6760 case TARGET_NR_getitimer:
6761 {
6762 struct itimerval value;
6763
6764 ret = get_errno(getitimer(arg1, &value));
6765 if (!is_error(ret) && arg2) {
6766 if (copy_to_user_timeval(arg2,
6767 &value.it_interval)
6768 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6769 &value.it_value))
6770 goto efault;
6771 }
6772 }
6773 break;
6774 case TARGET_NR_stat:
6775 if (!(p = lock_user_string(arg1)))
6776 goto efault;
6777 ret = get_errno(stat(path(p), &st));
6778 unlock_user(p, arg1, 0);
6779 goto do_stat;
6780 case TARGET_NR_lstat:
6781 if (!(p = lock_user_string(arg1)))
6782 goto efault;
6783 ret = get_errno(lstat(path(p), &st));
6784 unlock_user(p, arg1, 0);
6785 goto do_stat;
6786 case TARGET_NR_fstat:
6787 {
6788 ret = get_errno(fstat(arg1, &st));
6789 do_stat:
6790 if (!is_error(ret)) {
6791 struct target_stat *target_st;
6792
6793 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6794 goto efault;
6795 memset(target_st, 0, sizeof(*target_st));
6796 __put_user(st.st_dev, &target_st->st_dev);
6797 __put_user(st.st_ino, &target_st->st_ino);
6798 __put_user(st.st_mode, &target_st->st_mode);
6799 __put_user(st.st_uid, &target_st->st_uid);
6800 __put_user(st.st_gid, &target_st->st_gid);
6801 __put_user(st.st_nlink, &target_st->st_nlink);
6802 __put_user(st.st_rdev, &target_st->st_rdev);
6803 __put_user(st.st_size, &target_st->st_size);
6804 __put_user(st.st_blksize, &target_st->st_blksize);
6805 __put_user(st.st_blocks, &target_st->st_blocks);
6806 __put_user(st.st_atime, &target_st->target_st_atime);
6807 __put_user(st.st_mtime, &target_st->target_st_mtime);
6808 __put_user(st.st_ctime, &target_st->target_st_ctime);
6809 unlock_user_struct(target_st, arg2, 1);
6810 }
6811 }
6812 break;
6813 #ifdef TARGET_NR_olduname
6814 case TARGET_NR_olduname:
6815 goto unimplemented;
6816 #endif
6817 #ifdef TARGET_NR_iopl
6818 case TARGET_NR_iopl:
6819 goto unimplemented;
6820 #endif
6821 case TARGET_NR_vhangup:
6822 ret = get_errno(vhangup());
6823 break;
6824 #ifdef TARGET_NR_idle
6825 case TARGET_NR_idle:
6826 goto unimplemented;
6827 #endif
6828 #ifdef TARGET_NR_syscall
6829 case TARGET_NR_syscall:
6830 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6831 arg6, arg7, arg8, 0);
6832 break;
6833 #endif
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;      /* guest address for the status word, may be 0 */
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;  /* guest address for struct rusage, may be 0 */
            /* Only ask the host for rusage if the guest supplied a buffer. */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* ret == 0 means "no child changed state" (WNOHANG), in
                   which case the host status word is meaningless and is
                   not copied back. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        goto efault;
                }
                /* NOTE(review): the return value of host_to_target_rusage
                   is ignored here, so a guest fault while writing the
                   rusage buffer is silently dropped — consider
                   propagating it. */
                if (target_rusage)
                    host_to_target_rusage(target_rusage, &rusage);
            }
        }
        break;
6856 #ifdef TARGET_NR_swapoff
6857 case TARGET_NR_swapoff:
6858 if (!(p = lock_user_string(arg1)))
6859 goto efault;
6860 ret = get_errno(swapoff(p));
6861 unlock_user(p, arg1, 0);
6862 break;
6863 #endif
6864 case TARGET_NR_sysinfo:
6865 {
6866 struct target_sysinfo *target_value;
6867 struct sysinfo value;
6868 ret = get_errno(sysinfo(&value));
6869 if (!is_error(ret) && arg1)
6870 {
6871 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6872 goto efault;
6873 __put_user(value.uptime, &target_value->uptime);
6874 __put_user(value.loads[0], &target_value->loads[0]);
6875 __put_user(value.loads[1], &target_value->loads[1]);
6876 __put_user(value.loads[2], &target_value->loads[2]);
6877 __put_user(value.totalram, &target_value->totalram);
6878 __put_user(value.freeram, &target_value->freeram);
6879 __put_user(value.sharedram, &target_value->sharedram);
6880 __put_user(value.bufferram, &target_value->bufferram);
6881 __put_user(value.totalswap, &target_value->totalswap);
6882 __put_user(value.freeswap, &target_value->freeswap);
6883 __put_user(value.procs, &target_value->procs);
6884 __put_user(value.totalhigh, &target_value->totalhigh);
6885 __put_user(value.freehigh, &target_value->freehigh);
6886 __put_user(value.mem_unit, &target_value->mem_unit);
6887 unlock_user_struct(target_value, arg1, 1);
6888 }
6889 }
6890 break;
6891 #ifdef TARGET_NR_ipc
6892 case TARGET_NR_ipc:
6893 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6894 break;
6895 #endif
6896 #ifdef TARGET_NR_semget
6897 case TARGET_NR_semget:
6898 ret = get_errno(semget(arg1, arg2, arg3));
6899 break;
6900 #endif
6901 #ifdef TARGET_NR_semop
6902 case TARGET_NR_semop:
6903 ret = do_semop(arg1, arg2, arg3);
6904 break;
6905 #endif
6906 #ifdef TARGET_NR_semctl
6907 case TARGET_NR_semctl:
6908 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6909 break;
6910 #endif
6911 #ifdef TARGET_NR_msgctl
6912 case TARGET_NR_msgctl:
6913 ret = do_msgctl(arg1, arg2, arg3);
6914 break;
6915 #endif
6916 #ifdef TARGET_NR_msgget
6917 case TARGET_NR_msgget:
6918 ret = get_errno(msgget(arg1, arg2));
6919 break;
6920 #endif
6921 #ifdef TARGET_NR_msgrcv
6922 case TARGET_NR_msgrcv:
6923 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6924 break;
6925 #endif
6926 #ifdef TARGET_NR_msgsnd
6927 case TARGET_NR_msgsnd:
6928 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6929 break;
6930 #endif
6931 #ifdef TARGET_NR_shmget
6932 case TARGET_NR_shmget:
6933 ret = get_errno(shmget(arg1, arg2, arg3));
6934 break;
6935 #endif
6936 #ifdef TARGET_NR_shmctl
6937 case TARGET_NR_shmctl:
6938 ret = do_shmctl(arg1, arg2, arg3);
6939 break;
6940 #endif
6941 #ifdef TARGET_NR_shmat
6942 case TARGET_NR_shmat:
6943 ret = do_shmat(arg1, arg2, arg3);
6944 break;
6945 #endif
6946 #ifdef TARGET_NR_shmdt
6947 case TARGET_NR_shmdt:
6948 ret = do_shmdt(arg1);
6949 break;
6950 #endif
6951 case TARGET_NR_fsync:
6952 ret = get_errno(fsync(arg1));
6953 break;
6954 case TARGET_NR_clone:
6955 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6956 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6957 #elif defined(TARGET_CRIS)
6958 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6959 #elif defined(TARGET_MICROBLAZE)
6960 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6961 #elif defined(TARGET_S390X)
6962 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6963 #else
6964 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6965 #endif
6966 break;
6967 #ifdef __NR_exit_group
6968 /* new thread calls */
6969 case TARGET_NR_exit_group:
6970 #ifdef TARGET_GPROF
6971 _mcleanup();
6972 #endif
6973 gdb_exit(cpu_env, arg1);
6974 ret = get_errno(exit_group(arg1));
6975 break;
6976 #endif
6977 case TARGET_NR_setdomainname:
6978 if (!(p = lock_user_string(arg1)))
6979 goto efault;
6980 ret = get_errno(setdomainname(p, arg2));
6981 unlock_user(p, arg1, 0);
6982 break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release
                   (e.g. via the -r command line option / QEMU_UNAME
                   environment). */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy (buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
7002 #ifdef TARGET_I386
7003 case TARGET_NR_modify_ldt:
7004 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7005 break;
7006 #if !defined(TARGET_X86_64)
7007 case TARGET_NR_vm86old:
7008 goto unimplemented;
7009 case TARGET_NR_vm86:
7010 ret = do_vm86(cpu_env, arg1, arg2);
7011 break;
7012 #endif
7013 #endif
7014 case TARGET_NR_adjtimex:
7015 goto unimplemented;
7016 #ifdef TARGET_NR_create_module
7017 case TARGET_NR_create_module:
7018 #endif
7019 case TARGET_NR_init_module:
7020 case TARGET_NR_delete_module:
7021 #ifdef TARGET_NR_get_kernel_syms
7022 case TARGET_NR_get_kernel_syms:
7023 #endif
7024 goto unimplemented;
7025 case TARGET_NR_quotactl:
7026 goto unimplemented;
7027 case TARGET_NR_getpgid:
7028 ret = get_errno(getpgid(arg1));
7029 break;
7030 case TARGET_NR_fchdir:
7031 ret = get_errno(fchdir(arg1));
7032 break;
7033 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7034 case TARGET_NR_bdflush:
7035 goto unimplemented;
7036 #endif
7037 #ifdef TARGET_NR_sysfs
7038 case TARGET_NR_sysfs:
7039 goto unimplemented;
7040 #endif
7041 case TARGET_NR_personality:
7042 ret = get_errno(personality(arg1));
7043 break;
7044 #ifdef TARGET_NR_afs_syscall
7045 case TARGET_NR_afs_syscall:
7046 goto unimplemented;
7047 #endif
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* 64-bit host without _llseek: emulate it with plain lseek.
               The guest passes the offset split as arg2 = high 32 bits,
               arg3 = low 32 bits; arg5 is the whence value. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success write the 64-bit result to the guest pointer
               in arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                goto efault;
            }
        }
        break;
#endif
7068 case TARGET_NR_getdents:
7069 #ifdef __NR_getdents
7070 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7071 {
7072 struct target_dirent *target_dirp;
7073 struct linux_dirent *dirp;
7074 abi_long count = arg3;
7075
7076 dirp = malloc(count);
7077 if (!dirp) {
7078 ret = -TARGET_ENOMEM;
7079 goto fail;
7080 }
7081
7082 ret = get_errno(sys_getdents(arg1, dirp, count));
7083 if (!is_error(ret)) {
7084 struct linux_dirent *de;
7085 struct target_dirent *tde;
7086 int len = ret;
7087 int reclen, treclen;
7088 int count1, tnamelen;
7089
7090 count1 = 0;
7091 de = dirp;
7092 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7093 goto efault;
7094 tde = target_dirp;
7095 while (len > 0) {
7096 reclen = de->d_reclen;
7097 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7098 assert(tnamelen >= 0);
7099 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7100 assert(count1 + treclen <= count);
7101 tde->d_reclen = tswap16(treclen);
7102 tde->d_ino = tswapal(de->d_ino);
7103 tde->d_off = tswapal(de->d_off);
7104 memcpy(tde->d_name, de->d_name, tnamelen);
7105 de = (struct linux_dirent *)((char *)de + reclen);
7106 len -= reclen;
7107 tde = (struct target_dirent *)((char *)tde + treclen);
7108 count1 += treclen;
7109 }
7110 ret = count1;
7111 unlock_user(target_dirp, arg2, ret);
7112 }
7113 free(dirp);
7114 }
7115 #else
7116 {
7117 struct linux_dirent *dirp;
7118 abi_long count = arg3;
7119
7120 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7121 goto efault;
7122 ret = get_errno(sys_getdents(arg1, dirp, count));
7123 if (!is_error(ret)) {
7124 struct linux_dirent *de;
7125 int len = ret;
7126 int reclen;
7127 de = dirp;
7128 while (len > 0) {
7129 reclen = de->d_reclen;
7130 if (reclen > len)
7131 break;
7132 de->d_reclen = tswap16(reclen);
7133 tswapls(&de->d_ino);
7134 tswapls(&de->d_off);
7135 de = (struct linux_dirent *)((char *)de + reclen);
7136 len -= reclen;
7137 }
7138 }
7139 unlock_user(dirp, arg2, ret);
7140 }
7141 #endif
7142 #else
7143 /* Implement getdents in terms of getdents64 */
7144 {
7145 struct linux_dirent64 *dirp;
7146 abi_long count = arg3;
7147
7148 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7149 if (!dirp) {
7150 goto efault;
7151 }
7152 ret = get_errno(sys_getdents64(arg1, dirp, count));
7153 if (!is_error(ret)) {
7154 /* Convert the dirent64 structs to target dirent. We do this
7155 * in-place, since we can guarantee that a target_dirent is no
7156 * larger than a dirent64; however this means we have to be
7157 * careful to read everything before writing in the new format.
7158 */
7159 struct linux_dirent64 *de;
7160 struct target_dirent *tde;
7161 int len = ret;
7162 int tlen = 0;
7163
7164 de = dirp;
7165 tde = (struct target_dirent *)dirp;
7166 while (len > 0) {
7167 int namelen, treclen;
7168 int reclen = de->d_reclen;
7169 uint64_t ino = de->d_ino;
7170 int64_t off = de->d_off;
7171 uint8_t type = de->d_type;
7172
7173 namelen = strlen(de->d_name);
7174 treclen = offsetof(struct target_dirent, d_name)
7175 + namelen + 2;
7176 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7177
7178 memmove(tde->d_name, de->d_name, namelen + 1);
7179 tde->d_ino = tswapal(ino);
7180 tde->d_off = tswapal(off);
7181 tde->d_reclen = tswap16(treclen);
7182 /* The target_dirent type is in what was formerly a padding
7183 * byte at the end of the structure:
7184 */
7185 *(((char *)tde) + treclen - 1) = type;
7186
7187 de = (struct linux_dirent64 *)((char *)de + reclen);
7188 tde = (struct target_dirent *)((char *)tde + treclen);
7189 len -= reclen;
7190 tlen += treclen;
7191 }
7192 ret = tlen;
7193 }
7194 unlock_user(dirp, arg2, ret);
7195 }
7196 #endif
7197 break;
7198 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7199 case TARGET_NR_getdents64:
7200 {
7201 struct linux_dirent64 *dirp;
7202 abi_long count = arg3;
7203 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7204 goto efault;
7205 ret = get_errno(sys_getdents64(arg1, dirp, count));
7206 if (!is_error(ret)) {
7207 struct linux_dirent64 *de;
7208 int len = ret;
7209 int reclen;
7210 de = dirp;
7211 while (len > 0) {
7212 reclen = de->d_reclen;
7213 if (reclen > len)
7214 break;
7215 de->d_reclen = tswap16(reclen);
7216 tswap64s((uint64_t *)&de->d_ino);
7217 tswap64s((uint64_t *)&de->d_off);
7218 de = (struct linux_dirent64 *)((char *)de + reclen);
7219 len -= reclen;
7220 }
7221 }
7222 unlock_user(dirp, arg2, ret);
7223 }
7224 break;
7225 #endif /* TARGET_NR_getdents64 */
7226 #if defined(TARGET_NR__newselect)
7227 case TARGET_NR__newselect:
7228 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7229 break;
7230 #endif
7231 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7232 # ifdef TARGET_NR_poll
7233 case TARGET_NR_poll:
7234 # endif
7235 # ifdef TARGET_NR_ppoll
7236 case TARGET_NR_ppoll:
7237 # endif
7238 {
7239 struct target_pollfd *target_pfd;
7240 unsigned int nfds = arg2;
7241 int timeout = arg3;
7242 struct pollfd *pfd;
7243 unsigned int i;
7244
7245 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7246 if (!target_pfd)
7247 goto efault;
7248
7249 pfd = alloca(sizeof(struct pollfd) * nfds);
7250 for(i = 0; i < nfds; i++) {
7251 pfd[i].fd = tswap32(target_pfd[i].fd);
7252 pfd[i].events = tswap16(target_pfd[i].events);
7253 }
7254
7255 # ifdef TARGET_NR_ppoll
7256 if (num == TARGET_NR_ppoll) {
7257 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7258 target_sigset_t *target_set;
7259 sigset_t _set, *set = &_set;
7260
7261 if (arg3) {
7262 if (target_to_host_timespec(timeout_ts, arg3)) {
7263 unlock_user(target_pfd, arg1, 0);
7264 goto efault;
7265 }
7266 } else {
7267 timeout_ts = NULL;
7268 }
7269
7270 if (arg4) {
7271 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7272 if (!target_set) {
7273 unlock_user(target_pfd, arg1, 0);
7274 goto efault;
7275 }
7276 target_to_host_sigset(set, target_set);
7277 } else {
7278 set = NULL;
7279 }
7280
7281 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7282
7283 if (!is_error(ret) && arg3) {
7284 host_to_target_timespec(arg3, timeout_ts);
7285 }
7286 if (arg4) {
7287 unlock_user(target_set, arg4, 0);
7288 }
7289 } else
7290 # endif
7291 ret = get_errno(poll(pfd, nfds, timeout));
7292
7293 if (!is_error(ret)) {
7294 for(i = 0; i < nfds; i++) {
7295 target_pfd[i].revents = tswap16(pfd[i].revents);
7296 }
7297 }
7298 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7299 }
7300 break;
7301 #endif
7302 case TARGET_NR_flock:
7303 /* NOTE: the flock constant seems to be the same for every
7304 Linux platform */
7305 ret = get_errno(flock(arg1, arg2));
7306 break;
7307 case TARGET_NR_readv:
7308 {
7309 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7310 if (vec != NULL) {
7311 ret = get_errno(readv(arg1, vec, arg3));
7312 unlock_iovec(vec, arg2, arg3, 1);
7313 } else {
7314 ret = -host_to_target_errno(errno);
7315 }
7316 }
7317 break;
7318 case TARGET_NR_writev:
7319 {
7320 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7321 if (vec != NULL) {
7322 ret = get_errno(writev(arg1, vec, arg3));
7323 unlock_iovec(vec, arg2, arg3, 0);
7324 } else {
7325 ret = -host_to_target_errno(errno);
7326 }
7327 }
7328 break;
7329 case TARGET_NR_getsid:
7330 ret = get_errno(getsid(arg1));
7331 break;
7332 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7333 case TARGET_NR_fdatasync:
7334 ret = get_errno(fdatasync(arg1));
7335 break;
7336 #endif
7337 case TARGET_NR__sysctl:
7338 /* We don't implement this, but ENOTDIR is always a safe
7339 return value. */
7340 ret = -TARGET_ENOTDIR;
7341 break;
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            /* Round the guest-supplied size up to a host-ulong multiple. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                /* On success the kernel returns the number of bytes it
                   wrote into mask, so ret bounds the copy-out. */
                if (copy_to_user(arg3, mask, ret)) {
                    goto efault;
                }
            }
        }
        break;
7367 case TARGET_NR_sched_setaffinity:
7368 {
7369 unsigned int mask_size;
7370 unsigned long *mask;
7371
7372 /*
7373 * sched_setaffinity needs multiples of ulong, so need to take
7374 * care of mismatches between target ulong and host ulong sizes.
7375 */
7376 if (arg2 & (sizeof(abi_ulong) - 1)) {
7377 ret = -TARGET_EINVAL;
7378 break;
7379 }
7380 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7381
7382 mask = alloca(mask_size);
7383 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7384 goto efault;
7385 }
7386 memcpy(mask, p, arg2);
7387 unlock_user_struct(p, arg2, 0);
7388
7389 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7390 }
7391 break;
7392 case TARGET_NR_sched_setparam:
7393 {
7394 struct sched_param *target_schp;
7395 struct sched_param schp;
7396
7397 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7398 goto efault;
7399 schp.sched_priority = tswap32(target_schp->sched_priority);
7400 unlock_user_struct(target_schp, arg2, 0);
7401 ret = get_errno(sched_setparam(arg1, &schp));
7402 }
7403 break;
7404 case TARGET_NR_sched_getparam:
7405 {
7406 struct sched_param *target_schp;
7407 struct sched_param schp;
7408 ret = get_errno(sched_getparam(arg1, &schp));
7409 if (!is_error(ret)) {
7410 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7411 goto efault;
7412 target_schp->sched_priority = tswap32(schp.sched_priority);
7413 unlock_user_struct(target_schp, arg2, 1);
7414 }
7415 }
7416 break;
7417 case TARGET_NR_sched_setscheduler:
7418 {
7419 struct sched_param *target_schp;
7420 struct sched_param schp;
7421 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7422 goto efault;
7423 schp.sched_priority = tswap32(target_schp->sched_priority);
7424 unlock_user_struct(target_schp, arg3, 0);
7425 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7426 }
7427 break;
7428 case TARGET_NR_sched_getscheduler:
7429 ret = get_errno(sched_getscheduler(arg1));
7430 break;
7431 case TARGET_NR_sched_yield:
7432 ret = get_errno(sched_yield());
7433 break;
7434 case TARGET_NR_sched_get_priority_max:
7435 ret = get_errno(sched_get_priority_max(arg1));
7436 break;
7437 case TARGET_NR_sched_get_priority_min:
7438 ret = get_errno(sched_get_priority_min(arg1));
7439 break;
7440 case TARGET_NR_sched_rr_get_interval:
7441 {
7442 struct timespec ts;
7443 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7444 if (!is_error(ret)) {
7445 host_to_target_timespec(arg2, &ts);
7446 }
7447 }
7448 break;
7449 case TARGET_NR_nanosleep:
7450 {
7451 struct timespec req, rem;
7452 target_to_host_timespec(&req, arg1);
7453 ret = get_errno(nanosleep(&req, &rem));
7454 if (is_error(ret) && arg2) {
7455 host_to_target_timespec(arg2, &rem);
7456 }
7457 }
7458 break;
7459 #ifdef TARGET_NR_query_module
7460 case TARGET_NR_query_module:
7461 goto unimplemented;
7462 #endif
7463 #ifdef TARGET_NR_nfsservctl
7464 case TARGET_NR_nfsservctl:
7465 goto unimplemented;
7466 #endif
7467 case TARGET_NR_prctl:
7468 switch (arg1) {
7469 case PR_GET_PDEATHSIG:
7470 {
7471 int deathsig;
7472 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7473 if (!is_error(ret) && arg2
7474 && put_user_ual(deathsig, arg2)) {
7475 goto efault;
7476 }
7477 break;
7478 }
7479 #ifdef PR_GET_NAME
7480 case PR_GET_NAME:
7481 {
7482 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7483 if (!name) {
7484 goto efault;
7485 }
7486 ret = get_errno(prctl(arg1, (unsigned long)name,
7487 arg3, arg4, arg5));
7488 unlock_user(name, arg2, 16);
7489 break;
7490 }
7491 case PR_SET_NAME:
7492 {
7493 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7494 if (!name) {
7495 goto efault;
7496 }
7497 ret = get_errno(prctl(arg1, (unsigned long)name,
7498 arg3, arg4, arg5));
7499 unlock_user(name, arg2, 0);
7500 break;
7501 }
7502 #endif
7503 default:
7504 /* Most prctl options have no pointer arguments */
7505 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7506 break;
7507 }
7508 break;
7509 #ifdef TARGET_NR_arch_prctl
7510 case TARGET_NR_arch_prctl:
7511 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7512 ret = do_arch_prctl(cpu_env, arg1, arg2);
7513 break;
7514 #else
7515 goto unimplemented;
7516 #endif
7517 #endif
7518 #ifdef TARGET_NR_pread64
7519 case TARGET_NR_pread64:
7520 if (regpairs_aligned(cpu_env)) {
7521 arg4 = arg5;
7522 arg5 = arg6;
7523 }
7524 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7525 goto efault;
7526 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7527 unlock_user(p, arg2, ret);
7528 break;
7529 case TARGET_NR_pwrite64:
7530 if (regpairs_aligned(cpu_env)) {
7531 arg4 = arg5;
7532 arg5 = arg6;
7533 }
7534 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7535 goto efault;
7536 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7537 unlock_user(p, arg2, 0);
7538 break;
7539 #endif
7540 case TARGET_NR_getcwd:
7541 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7542 goto efault;
7543 ret = get_errno(sys_getcwd1(p, arg2));
7544 unlock_user(p, arg1, ret);
7545 break;
7546 case TARGET_NR_capget:
7547 goto unimplemented;
7548 case TARGET_NR_capset:
7549 goto unimplemented;
7550 case TARGET_NR_sigaltstack:
7551 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7552 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7553 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7554 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7555 break;
7556 #else
7557 goto unimplemented;
7558 #endif
7559
7560 #ifdef CONFIG_SENDFILE
7561 case TARGET_NR_sendfile:
7562 {
7563 off_t *offp = NULL;
7564 off_t off;
7565 if (arg3) {
7566 ret = get_user_sal(off, arg3);
7567 if (is_error(ret)) {
7568 break;
7569 }
7570 offp = &off;
7571 }
7572 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7573 if (!is_error(ret) && arg3) {
7574 abi_long ret2 = put_user_sal(off, arg3);
7575 if (is_error(ret2)) {
7576 ret = ret2;
7577 }
7578 }
7579 break;
7580 }
7581 #ifdef TARGET_NR_sendfile64
7582 case TARGET_NR_sendfile64:
7583 {
7584 off_t *offp = NULL;
7585 off_t off;
7586 if (arg3) {
7587 ret = get_user_s64(off, arg3);
7588 if (is_error(ret)) {
7589 break;
7590 }
7591 offp = &off;
7592 }
7593 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7594 if (!is_error(ret) && arg3) {
7595 abi_long ret2 = put_user_s64(off, arg3);
7596 if (is_error(ret2)) {
7597 ret = ret2;
7598 }
7599 }
7600 break;
7601 }
7602 #endif
7603 #else
7604 case TARGET_NR_sendfile:
7605 #ifdef TARGET_NR_sendfile64
7606 case TARGET_NR_sendfile64:
7607 #endif
7608 goto unimplemented;
7609 #endif
7610
7611 #ifdef TARGET_NR_getpmsg
7612 case TARGET_NR_getpmsg:
7613 goto unimplemented;
7614 #endif
7615 #ifdef TARGET_NR_putpmsg
7616 case TARGET_NR_putpmsg:
7617 goto unimplemented;
7618 #endif
7619 #ifdef TARGET_NR_vfork
7620 case TARGET_NR_vfork:
7621 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7622 0, 0, 0, 0));
7623 break;
7624 #endif
7625 #ifdef TARGET_NR_ugetrlimit
7626 case TARGET_NR_ugetrlimit:
7627 {
7628 struct rlimit rlim;
7629 int resource = target_to_host_resource(arg1);
7630 ret = get_errno(getrlimit(resource, &rlim));
7631 if (!is_error(ret)) {
7632 struct target_rlimit *target_rlim;
7633 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7634 goto efault;
7635 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7636 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7637 unlock_user_struct(target_rlim, arg2, 1);
7638 }
7639 break;
7640 }
7641 #endif
7642 #ifdef TARGET_NR_truncate64
7643 case TARGET_NR_truncate64:
7644 if (!(p = lock_user_string(arg1)))
7645 goto efault;
7646 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7647 unlock_user(p, arg1, 0);
7648 break;
7649 #endif
7650 #ifdef TARGET_NR_ftruncate64
7651 case TARGET_NR_ftruncate64:
7652 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7653 break;
7654 #endif
7655 #ifdef TARGET_NR_stat64
7656 case TARGET_NR_stat64:
7657 if (!(p = lock_user_string(arg1)))
7658 goto efault;
7659 ret = get_errno(stat(path(p), &st));
7660 unlock_user(p, arg1, 0);
7661 if (!is_error(ret))
7662 ret = host_to_target_stat64(cpu_env, arg2, &st);
7663 break;
7664 #endif
7665 #ifdef TARGET_NR_lstat64
7666 case TARGET_NR_lstat64:
7667 if (!(p = lock_user_string(arg1)))
7668 goto efault;
7669 ret = get_errno(lstat(path(p), &st));
7670 unlock_user(p, arg1, 0);
7671 if (!is_error(ret))
7672 ret = host_to_target_stat64(cpu_env, arg2, &st);
7673 break;
7674 #endif
7675 #ifdef TARGET_NR_fstat64
7676 case TARGET_NR_fstat64:
7677 ret = get_errno(fstat(arg1, &st));
7678 if (!is_error(ret))
7679 ret = host_to_target_stat64(cpu_env, arg2, &st);
7680 break;
7681 #endif
7682 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7683 #ifdef TARGET_NR_fstatat64
7684 case TARGET_NR_fstatat64:
7685 #endif
7686 #ifdef TARGET_NR_newfstatat
7687 case TARGET_NR_newfstatat:
7688 #endif
7689 if (!(p = lock_user_string(arg2)))
7690 goto efault;
7691 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7692 if (!is_error(ret))
7693 ret = host_to_target_stat64(cpu_env, arg3, &st);
7694 break;
7695 #endif
7696 case TARGET_NR_lchown:
7697 if (!(p = lock_user_string(arg1)))
7698 goto efault;
7699 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7700 unlock_user(p, arg1, 0);
7701 break;
7702 #ifdef TARGET_NR_getuid
7703 case TARGET_NR_getuid:
7704 ret = get_errno(high2lowuid(getuid()));
7705 break;
7706 #endif
7707 #ifdef TARGET_NR_getgid
7708 case TARGET_NR_getgid:
7709 ret = get_errno(high2lowgid(getgid()));
7710 break;
7711 #endif
7712 #ifdef TARGET_NR_geteuid
7713 case TARGET_NR_geteuid:
7714 ret = get_errno(high2lowuid(geteuid()));
7715 break;
7716 #endif
7717 #ifdef TARGET_NR_getegid
7718 case TARGET_NR_getegid:
7719 ret = get_errno(high2lowgid(getegid()));
7720 break;
7721 #endif
7722 case TARGET_NR_setreuid:
7723 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7724 break;
7725 case TARGET_NR_setregid:
7726 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7727 break;
    case TARGET_NR_getgroups:
        {
            /* Legacy 16-bit-id getgroups: fetch the supplementary group
               list from the host, then narrow each gid to the target's
               old id width (target_id) and swap to target byte order.  */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            /* NOTE(review): gidsetsize comes straight from the guest; a
               negative or very large value makes this alloca() unsafe --
               TODO confirm whether a range check is needed here.  */
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;  /* size query: return the count, copy nothing back */
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    goto efault;
                /* ret is the number of groups actually returned.  */
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
            }
        }
        break;
7749 case TARGET_NR_setgroups:
7750 {
7751 int gidsetsize = arg1;
7752 target_id *target_grouplist;
7753 gid_t *grouplist = NULL;
7754 int i;
7755 if (gidsetsize) {
7756 grouplist = alloca(gidsetsize * sizeof(gid_t));
7757 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7758 if (!target_grouplist) {
7759 ret = -TARGET_EFAULT;
7760 goto fail;
7761 }
7762 for (i = 0; i < gidsetsize; i++) {
7763 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7764 }
7765 unlock_user(target_grouplist, arg2, 0);
7766 }
7767 ret = get_errno(setgroups(gidsetsize, grouplist));
7768 }
7769 break;
7770 case TARGET_NR_fchown:
7771 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7772 break;
7773 #if defined(TARGET_NR_fchownat)
7774 case TARGET_NR_fchownat:
7775 if (!(p = lock_user_string(arg2)))
7776 goto efault;
7777 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7778 low2highgid(arg4), arg5));
7779 unlock_user(p, arg2, 0);
7780 break;
7781 #endif
7782 #ifdef TARGET_NR_setresuid
7783 case TARGET_NR_setresuid:
7784 ret = get_errno(setresuid(low2highuid(arg1),
7785 low2highuid(arg2),
7786 low2highuid(arg3)));
7787 break;
7788 #endif
7789 #ifdef TARGET_NR_getresuid
7790 case TARGET_NR_getresuid:
7791 {
7792 uid_t ruid, euid, suid;
7793 ret = get_errno(getresuid(&ruid, &euid, &suid));
7794 if (!is_error(ret)) {
7795 if (put_user_u16(high2lowuid(ruid), arg1)
7796 || put_user_u16(high2lowuid(euid), arg2)
7797 || put_user_u16(high2lowuid(suid), arg3))
7798 goto efault;
7799 }
7800 }
7801 break;
7802 #endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* Bug fix: the guard previously tested TARGET_NR_getresgid, so a
           target defining setresgid but not getresgid would silently lose
           this syscall (and vice versa compiled dead code).  Map the
           16-bit target gids up to host gid_t, as setresuid does above.  */
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
7810 #ifdef TARGET_NR_getresgid
7811 case TARGET_NR_getresgid:
7812 {
7813 gid_t rgid, egid, sgid;
7814 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7815 if (!is_error(ret)) {
7816 if (put_user_u16(high2lowgid(rgid), arg1)
7817 || put_user_u16(high2lowgid(egid), arg2)
7818 || put_user_u16(high2lowgid(sgid), arg3))
7819 goto efault;
7820 }
7821 }
7822 break;
7823 #endif
7824 case TARGET_NR_chown:
7825 if (!(p = lock_user_string(arg1)))
7826 goto efault;
7827 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7828 unlock_user(p, arg1, 0);
7829 break;
7830 case TARGET_NR_setuid:
7831 ret = get_errno(setuid(low2highuid(arg1)));
7832 break;
7833 case TARGET_NR_setgid:
7834 ret = get_errno(setgid(low2highgid(arg1)));
7835 break;
7836 case TARGET_NR_setfsuid:
7837 ret = get_errno(setfsuid(arg1));
7838 break;
7839 case TARGET_NR_setfsgid:
7840 ret = get_errno(setfsgid(arg1));
7841 break;
7842
7843 #ifdef TARGET_NR_lchown32
7844 case TARGET_NR_lchown32:
7845 if (!(p = lock_user_string(arg1)))
7846 goto efault;
7847 ret = get_errno(lchown(p, arg2, arg3));
7848 unlock_user(p, arg1, 0);
7849 break;
7850 #endif
7851 #ifdef TARGET_NR_getuid32
7852 case TARGET_NR_getuid32:
7853 ret = get_errno(getuid());
7854 break;
7855 #endif
7856
7857 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7858 /* Alpha specific */
7859 case TARGET_NR_getxuid:
7860 {
7861 uid_t euid;
7862 euid=geteuid();
7863 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7864 }
7865 ret = get_errno(getuid());
7866 break;
7867 #endif
7868 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7869 /* Alpha specific */
7870 case TARGET_NR_getxgid:
7871 {
7872 uid_t egid;
7873 egid=getegid();
7874 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7875 }
7876 ret = get_errno(getgid());
7877 break;
7878 #endif
7879 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7880 /* Alpha specific */
7881 case TARGET_NR_osf_getsysinfo:
7882 ret = -TARGET_EOPNOTSUPP;
7883 switch (arg1) {
7884 case TARGET_GSI_IEEE_FP_CONTROL:
7885 {
7886 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7887
7888 /* Copied from linux ieee_fpcr_to_swcr. */
7889 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7890 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7891 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7892 | SWCR_TRAP_ENABLE_DZE
7893 | SWCR_TRAP_ENABLE_OVF);
7894 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7895 | SWCR_TRAP_ENABLE_INE);
7896 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7897 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7898
7899 if (put_user_u64 (swcr, arg2))
7900 goto efault;
7901 ret = 0;
7902 }
7903 break;
7904
7905 /* case GSI_IEEE_STATE_AT_SIGNAL:
7906 -- Not implemented in linux kernel.
7907 case GSI_UACPROC:
7908 -- Retrieves current unaligned access state; not much used.
7909 case GSI_PROC_TYPE:
7910 -- Retrieves implver information; surely not used.
7911 case GSI_GET_HWRPB:
7912 -- Grabs a copy of the HWRPB; surely not used.
7913 */
7914 }
7915 break;
7916 #endif
7917 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7918 /* Alpha specific */
7919 case TARGET_NR_osf_setsysinfo:
7920 ret = -TARGET_EOPNOTSUPP;
7921 switch (arg1) {
7922 case TARGET_SSI_IEEE_FP_CONTROL:
7923 {
7924 uint64_t swcr, fpcr, orig_fpcr;
7925
7926 if (get_user_u64 (swcr, arg2)) {
7927 goto efault;
7928 }
7929 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7930 fpcr = orig_fpcr & FPCR_DYN_MASK;
7931
7932 /* Copied from linux ieee_swcr_to_fpcr. */
7933 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7934 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7935 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7936 | SWCR_TRAP_ENABLE_DZE
7937 | SWCR_TRAP_ENABLE_OVF)) << 48;
7938 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7939 | SWCR_TRAP_ENABLE_INE)) << 57;
7940 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7941 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7942
7943 cpu_alpha_store_fpcr(cpu_env, fpcr);
7944 ret = 0;
7945 }
7946 break;
7947
7948 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7949 {
7950 uint64_t exc, fpcr, orig_fpcr;
7951 int si_code;
7952
7953 if (get_user_u64(exc, arg2)) {
7954 goto efault;
7955 }
7956
7957 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7958
7959 /* We only add to the exception status here. */
7960 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7961
7962 cpu_alpha_store_fpcr(cpu_env, fpcr);
7963 ret = 0;
7964
7965 /* Old exceptions are not signaled. */
7966 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7967
7968 /* If any exceptions set by this call,
7969 and are unmasked, send a signal. */
7970 si_code = 0;
7971 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7972 si_code = TARGET_FPE_FLTRES;
7973 }
7974 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7975 si_code = TARGET_FPE_FLTUND;
7976 }
7977 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7978 si_code = TARGET_FPE_FLTOVF;
7979 }
7980 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7981 si_code = TARGET_FPE_FLTDIV;
7982 }
7983 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7984 si_code = TARGET_FPE_FLTINV;
7985 }
7986 if (si_code != 0) {
7987 target_siginfo_t info;
7988 info.si_signo = SIGFPE;
7989 info.si_errno = 0;
7990 info.si_code = si_code;
7991 info._sifields._sigfault._addr
7992 = ((CPUArchState *)cpu_env)->pc;
7993 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7994 }
7995 }
7996 break;
7997
7998 /* case SSI_NVPAIRS:
7999 -- Used with SSIN_UACPROC to enable unaligned accesses.
8000 case SSI_IEEE_STATE_AT_SIGNAL:
8001 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8002 -- Not implemented in linux kernel
8003 */
8004 }
8005 break;
8006 #endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            /* OSF/1-style sigprocmask: unlike the Linux variant, the
               mask is passed and returned by value (in registers), not
               through user-memory pointers.  */
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Translate the target "how" constant to the host one.  */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            /* NOTE(review): the sigprocmask() return value is ignored;
               failure here would go unreported to the guest.  */
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            /* The previous signal mask is the syscall's return value.  */
            ret = mask;
        }
        break;
#endif
8037
8038 #ifdef TARGET_NR_getgid32
8039 case TARGET_NR_getgid32:
8040 ret = get_errno(getgid());
8041 break;
8042 #endif
8043 #ifdef TARGET_NR_geteuid32
8044 case TARGET_NR_geteuid32:
8045 ret = get_errno(geteuid());
8046 break;
8047 #endif
8048 #ifdef TARGET_NR_getegid32
8049 case TARGET_NR_getegid32:
8050 ret = get_errno(getegid());
8051 break;
8052 #endif
8053 #ifdef TARGET_NR_setreuid32
8054 case TARGET_NR_setreuid32:
8055 ret = get_errno(setreuid(arg1, arg2));
8056 break;
8057 #endif
8058 #ifdef TARGET_NR_setregid32
8059 case TARGET_NR_setregid32:
8060 ret = get_errno(setregid(arg1, arg2));
8061 break;
8062 #endif
8063 #ifdef TARGET_NR_getgroups32
8064 case TARGET_NR_getgroups32:
8065 {
8066 int gidsetsize = arg1;
8067 uint32_t *target_grouplist;
8068 gid_t *grouplist;
8069 int i;
8070
8071 grouplist = alloca(gidsetsize * sizeof(gid_t));
8072 ret = get_errno(getgroups(gidsetsize, grouplist));
8073 if (gidsetsize == 0)
8074 break;
8075 if (!is_error(ret)) {
8076 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8077 if (!target_grouplist) {
8078 ret = -TARGET_EFAULT;
8079 goto fail;
8080 }
8081 for(i = 0;i < ret; i++)
8082 target_grouplist[i] = tswap32(grouplist[i]);
8083 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8084 }
8085 }
8086 break;
8087 #endif
8088 #ifdef TARGET_NR_setgroups32
8089 case TARGET_NR_setgroups32:
8090 {
8091 int gidsetsize = arg1;
8092 uint32_t *target_grouplist;
8093 gid_t *grouplist;
8094 int i;
8095
8096 grouplist = alloca(gidsetsize * sizeof(gid_t));
8097 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8098 if (!target_grouplist) {
8099 ret = -TARGET_EFAULT;
8100 goto fail;
8101 }
8102 for(i = 0;i < gidsetsize; i++)
8103 grouplist[i] = tswap32(target_grouplist[i]);
8104 unlock_user(target_grouplist, arg2, 0);
8105 ret = get_errno(setgroups(gidsetsize, grouplist));
8106 }
8107 break;
8108 #endif
8109 #ifdef TARGET_NR_fchown32
8110 case TARGET_NR_fchown32:
8111 ret = get_errno(fchown(arg1, arg2, arg3));
8112 break;
8113 #endif
8114 #ifdef TARGET_NR_setresuid32
8115 case TARGET_NR_setresuid32:
8116 ret = get_errno(setresuid(arg1, arg2, arg3));
8117 break;
8118 #endif
8119 #ifdef TARGET_NR_getresuid32
8120 case TARGET_NR_getresuid32:
8121 {
8122 uid_t ruid, euid, suid;
8123 ret = get_errno(getresuid(&ruid, &euid, &suid));
8124 if (!is_error(ret)) {
8125 if (put_user_u32(ruid, arg1)
8126 || put_user_u32(euid, arg2)
8127 || put_user_u32(suid, arg3))
8128 goto efault;
8129 }
8130 }
8131 break;
8132 #endif
8133 #ifdef TARGET_NR_setresgid32
8134 case TARGET_NR_setresgid32:
8135 ret = get_errno(setresgid(arg1, arg2, arg3));
8136 break;
8137 #endif
8138 #ifdef TARGET_NR_getresgid32
8139 case TARGET_NR_getresgid32:
8140 {
8141 gid_t rgid, egid, sgid;
8142 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8143 if (!is_error(ret)) {
8144 if (put_user_u32(rgid, arg1)
8145 || put_user_u32(egid, arg2)
8146 || put_user_u32(sgid, arg3))
8147 goto efault;
8148 }
8149 }
8150 break;
8151 #endif
8152 #ifdef TARGET_NR_chown32
8153 case TARGET_NR_chown32:
8154 if (!(p = lock_user_string(arg1)))
8155 goto efault;
8156 ret = get_errno(chown(p, arg2, arg3));
8157 unlock_user(p, arg1, 0);
8158 break;
8159 #endif
8160 #ifdef TARGET_NR_setuid32
8161 case TARGET_NR_setuid32:
8162 ret = get_errno(setuid(arg1));
8163 break;
8164 #endif
8165 #ifdef TARGET_NR_setgid32
8166 case TARGET_NR_setgid32:
8167 ret = get_errno(setgid(arg1));
8168 break;
8169 #endif
8170 #ifdef TARGET_NR_setfsuid32
8171 case TARGET_NR_setfsuid32:
8172 ret = get_errno(setfsuid(arg1));
8173 break;
8174 #endif
8175 #ifdef TARGET_NR_setfsgid32
8176 case TARGET_NR_setfsgid32:
8177 ret = get_errno(setfsgid(arg1));
8178 break;
8179 #endif
8180
8181 case TARGET_NR_pivot_root:
8182 goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            /* mincore() fills one status byte per page of the queried
               range.  The old code locked the output vector with
               lock_user_string() (a read/NUL-scan of guest memory) and
               then unlocked with length 'ret', which is 0 on success, so
               the vector was never copied back to the guest.  */
            abi_ulong vec_len = (arg2 + TARGET_PAGE_SIZE - 1) / TARGET_PAGE_SIZE;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user(VERIFY_WRITE, arg3, vec_len, 0)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            /* Copy the per-page residency vector back only on success.  */
            unlock_user(p, arg3, is_error(ret) ? 0 : vec_len);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
8199 #ifdef TARGET_NR_arm_fadvise64_64
8200 case TARGET_NR_arm_fadvise64_64:
8201 {
8202 /*
8203 * arm_fadvise64_64 looks like fadvise64_64 but
8204 * with different argument order
8205 */
8206 abi_long temp;
8207 temp = arg3;
8208 arg3 = arg4;
8209 arg4 = temp;
8210 }
8211 #endif
8212 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8213 #ifdef TARGET_NR_fadvise64_64
8214 case TARGET_NR_fadvise64_64:
8215 #endif
8216 #ifdef TARGET_NR_fadvise64
8217 case TARGET_NR_fadvise64:
8218 #endif
8219 #ifdef TARGET_S390X
8220 switch (arg4) {
8221 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8222 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8223 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8224 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8225 default: break;
8226 }
8227 #endif
8228 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8229 break;
8230 #endif
8231 #ifdef TARGET_NR_madvise
8232 case TARGET_NR_madvise:
8233 /* A straight passthrough may not be safe because qemu sometimes
8234 turns private file-backed mappings into anonymous mappings.
8235 This will break MADV_DONTNEED.
8236 This is a hint, so ignoring and returning success is ok. */
8237 ret = get_errno(0);
8238 break;
8239 #endif
8240 #if TARGET_ABI_BITS == 32
8241 case TARGET_NR_fcntl64:
8242 {
8243 int cmd;
8244 struct flock64 fl;
8245 struct target_flock64 *target_fl;
8246 #ifdef TARGET_ARM
8247 struct target_eabi_flock64 *target_efl;
8248 #endif
8249
8250 cmd = target_to_host_fcntl_cmd(arg2);
8251 if (cmd == -TARGET_EINVAL) {
8252 ret = cmd;
8253 break;
8254 }
8255
8256 switch(arg2) {
8257 case TARGET_F_GETLK64:
8258 #ifdef TARGET_ARM
8259 if (((CPUARMState *)cpu_env)->eabi) {
8260 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8261 goto efault;
8262 fl.l_type = tswap16(target_efl->l_type);
8263 fl.l_whence = tswap16(target_efl->l_whence);
8264 fl.l_start = tswap64(target_efl->l_start);
8265 fl.l_len = tswap64(target_efl->l_len);
8266 fl.l_pid = tswap32(target_efl->l_pid);
8267 unlock_user_struct(target_efl, arg3, 0);
8268 } else
8269 #endif
8270 {
8271 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8272 goto efault;
8273 fl.l_type = tswap16(target_fl->l_type);
8274 fl.l_whence = tswap16(target_fl->l_whence);
8275 fl.l_start = tswap64(target_fl->l_start);
8276 fl.l_len = tswap64(target_fl->l_len);
8277 fl.l_pid = tswap32(target_fl->l_pid);
8278 unlock_user_struct(target_fl, arg3, 0);
8279 }
8280 ret = get_errno(fcntl(arg1, cmd, &fl));
8281 if (ret == 0) {
8282 #ifdef TARGET_ARM
8283 if (((CPUARMState *)cpu_env)->eabi) {
8284 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8285 goto efault;
8286 target_efl->l_type = tswap16(fl.l_type);
8287 target_efl->l_whence = tswap16(fl.l_whence);
8288 target_efl->l_start = tswap64(fl.l_start);
8289 target_efl->l_len = tswap64(fl.l_len);
8290 target_efl->l_pid = tswap32(fl.l_pid);
8291 unlock_user_struct(target_efl, arg3, 1);
8292 } else
8293 #endif
8294 {
8295 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8296 goto efault;
8297 target_fl->l_type = tswap16(fl.l_type);
8298 target_fl->l_whence = tswap16(fl.l_whence);
8299 target_fl->l_start = tswap64(fl.l_start);
8300 target_fl->l_len = tswap64(fl.l_len);
8301 target_fl->l_pid = tswap32(fl.l_pid);
8302 unlock_user_struct(target_fl, arg3, 1);
8303 }
8304 }
8305 break;
8306
8307 case TARGET_F_SETLK64:
8308 case TARGET_F_SETLKW64:
8309 #ifdef TARGET_ARM
8310 if (((CPUARMState *)cpu_env)->eabi) {
8311 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8312 goto efault;
8313 fl.l_type = tswap16(target_efl->l_type);
8314 fl.l_whence = tswap16(target_efl->l_whence);
8315 fl.l_start = tswap64(target_efl->l_start);
8316 fl.l_len = tswap64(target_efl->l_len);
8317 fl.l_pid = tswap32(target_efl->l_pid);
8318 unlock_user_struct(target_efl, arg3, 0);
8319 } else
8320 #endif
8321 {
8322 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8323 goto efault;
8324 fl.l_type = tswap16(target_fl->l_type);
8325 fl.l_whence = tswap16(target_fl->l_whence);
8326 fl.l_start = tswap64(target_fl->l_start);
8327 fl.l_len = tswap64(target_fl->l_len);
8328 fl.l_pid = tswap32(target_fl->l_pid);
8329 unlock_user_struct(target_fl, arg3, 0);
8330 }
8331 ret = get_errno(fcntl(arg1, cmd, &fl));
8332 break;
8333 default:
8334 ret = do_fcntl(arg1, arg2, arg3);
8335 break;
8336 }
8337 break;
8338 }
8339 #endif
8340 #ifdef TARGET_NR_cacheflush
8341 case TARGET_NR_cacheflush:
8342 /* self-modifying code is handled automatically, so nothing needed */
8343 ret = 0;
8344 break;
8345 #endif
8346 #ifdef TARGET_NR_security
8347 case TARGET_NR_security:
8348 goto unimplemented;
8349 #endif
8350 #ifdef TARGET_NR_getpagesize
8351 case TARGET_NR_getpagesize:
8352 ret = TARGET_PAGE_SIZE;
8353 break;
8354 #endif
8355 case TARGET_NR_gettid:
8356 ret = get_errno(gettid());
8357 break;
8358 #ifdef TARGET_NR_readahead
8359 case TARGET_NR_readahead:
8360 #if TARGET_ABI_BITS == 32
8361 if (regpairs_aligned(cpu_env)) {
8362 arg2 = arg3;
8363 arg3 = arg4;
8364 arg4 = arg5;
8365 }
8366 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8367 #else
8368 ret = get_errno(readahead(arg1, arg2, arg3));
8369 #endif
8370 break;
8371 #endif
8372 #ifdef CONFIG_ATTR
8373 #ifdef TARGET_NR_setxattr
8374 case TARGET_NR_listxattr:
8375 case TARGET_NR_llistxattr:
8376 {
8377 void *p, *b = 0;
8378 if (arg2) {
8379 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8380 if (!b) {
8381 ret = -TARGET_EFAULT;
8382 break;
8383 }
8384 }
8385 p = lock_user_string(arg1);
8386 if (p) {
8387 if (num == TARGET_NR_listxattr) {
8388 ret = get_errno(listxattr(p, b, arg3));
8389 } else {
8390 ret = get_errno(llistxattr(p, b, arg3));
8391 }
8392 } else {
8393 ret = -TARGET_EFAULT;
8394 }
8395 unlock_user(p, arg1, 0);
8396 unlock_user(b, arg2, arg3);
8397 break;
8398 }
8399 case TARGET_NR_flistxattr:
8400 {
8401 void *b = 0;
8402 if (arg2) {
8403 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8404 if (!b) {
8405 ret = -TARGET_EFAULT;
8406 break;
8407 }
8408 }
8409 ret = get_errno(flistxattr(arg1, b, arg3));
8410 unlock_user(b, arg2, arg3);
8411 break;
8412 }
8413 case TARGET_NR_setxattr:
8414 case TARGET_NR_lsetxattr:
8415 {
8416 void *p, *n, *v = 0;
8417 if (arg3) {
8418 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8419 if (!v) {
8420 ret = -TARGET_EFAULT;
8421 break;
8422 }
8423 }
8424 p = lock_user_string(arg1);
8425 n = lock_user_string(arg2);
8426 if (p && n) {
8427 if (num == TARGET_NR_setxattr) {
8428 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8429 } else {
8430 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8431 }
8432 } else {
8433 ret = -TARGET_EFAULT;
8434 }
8435 unlock_user(p, arg1, 0);
8436 unlock_user(n, arg2, 0);
8437 unlock_user(v, arg3, 0);
8438 }
8439 break;
8440 case TARGET_NR_fsetxattr:
8441 {
8442 void *n, *v = 0;
8443 if (arg3) {
8444 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8445 if (!v) {
8446 ret = -TARGET_EFAULT;
8447 break;
8448 }
8449 }
8450 n = lock_user_string(arg2);
8451 if (n) {
8452 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8453 } else {
8454 ret = -TARGET_EFAULT;
8455 }
8456 unlock_user(n, arg2, 0);
8457 unlock_user(v, arg3, 0);
8458 }
8459 break;
8460 case TARGET_NR_getxattr:
8461 case TARGET_NR_lgetxattr:
8462 {
8463 void *p, *n, *v = 0;
8464 if (arg3) {
8465 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8466 if (!v) {
8467 ret = -TARGET_EFAULT;
8468 break;
8469 }
8470 }
8471 p = lock_user_string(arg1);
8472 n = lock_user_string(arg2);
8473 if (p && n) {
8474 if (num == TARGET_NR_getxattr) {
8475 ret = get_errno(getxattr(p, n, v, arg4));
8476 } else {
8477 ret = get_errno(lgetxattr(p, n, v, arg4));
8478 }
8479 } else {
8480 ret = -TARGET_EFAULT;
8481 }
8482 unlock_user(p, arg1, 0);
8483 unlock_user(n, arg2, 0);
8484 unlock_user(v, arg3, arg4);
8485 }
8486 break;
8487 case TARGET_NR_fgetxattr:
8488 {
8489 void *n, *v = 0;
8490 if (arg3) {
8491 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8492 if (!v) {
8493 ret = -TARGET_EFAULT;
8494 break;
8495 }
8496 }
8497 n = lock_user_string(arg2);
8498 if (n) {
8499 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8500 } else {
8501 ret = -TARGET_EFAULT;
8502 }
8503 unlock_user(n, arg2, 0);
8504 unlock_user(v, arg3, arg4);
8505 }
8506 break;
8507 case TARGET_NR_removexattr:
8508 case TARGET_NR_lremovexattr:
8509 {
8510 void *p, *n;
8511 p = lock_user_string(arg1);
8512 n = lock_user_string(arg2);
8513 if (p && n) {
8514 if (num == TARGET_NR_removexattr) {
8515 ret = get_errno(removexattr(p, n));
8516 } else {
8517 ret = get_errno(lremovexattr(p, n));
8518 }
8519 } else {
8520 ret = -TARGET_EFAULT;
8521 }
8522 unlock_user(p, arg1, 0);
8523 unlock_user(n, arg2, 0);
8524 }
8525 break;
8526 case TARGET_NR_fremovexattr:
8527 {
8528 void *n;
8529 n = lock_user_string(arg2);
8530 if (n) {
8531 ret = get_errno(fremovexattr(arg1, n));
8532 } else {
8533 ret = -TARGET_EFAULT;
8534 }
8535 unlock_user(n, arg2, 0);
8536 }
8537 break;
8538 #endif
8539 #endif /* CONFIG_ATTR */
8540 #ifdef TARGET_NR_set_thread_area
8541 case TARGET_NR_set_thread_area:
8542 #if defined(TARGET_MIPS)
8543 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8544 ret = 0;
8545 break;
8546 #elif defined(TARGET_CRIS)
8547 if (arg1 & 0xff)
8548 ret = -TARGET_EINVAL;
8549 else {
8550 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8551 ret = 0;
8552 }
8553 break;
8554 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8555 ret = do_set_thread_area(cpu_env, arg1);
8556 break;
8557 #else
8558 goto unimplemented_nowarn;
8559 #endif
8560 #endif
8561 #ifdef TARGET_NR_get_thread_area
8562 case TARGET_NR_get_thread_area:
8563 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8564 ret = do_get_thread_area(cpu_env, arg1);
8565 #else
8566 goto unimplemented_nowarn;
8567 #endif
8568 #endif
8569 #ifdef TARGET_NR_getdomainname
8570 case TARGET_NR_getdomainname:
8571 goto unimplemented_nowarn;
8572 #endif
8573
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        /* Read the host clock, then convert the timespec to the target
           layout on success.  arg1 is passed through unmapped; host and
           target CLOCK_* ids are assumed to match -- TODO confirm.  */
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        /* Clock resolution query; same conversion scheme as
           clock_gettime above.  */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        /* Sleep on the given clock.  If the guest supplied a remainder
           pointer (arg4), the (possibly updated) timespec is copied
           back after the call.  NOTE(review): POSIX clock_nanosleep()
           returns the error number directly rather than setting errno,
           so get_errno() may not translate failures correctly here --
           confirm against the host libc behavior.  */
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
8607
8608 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8609 case TARGET_NR_set_tid_address:
8610 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8611 break;
8612 #endif
8613
8614 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8615 case TARGET_NR_tkill:
8616 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8617 break;
8618 #endif
8619
8620 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8621 case TARGET_NR_tgkill:
8622 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8623 target_to_host_signal(arg3)));
8624 break;
8625 #endif
8626
8627 #ifdef TARGET_NR_set_robust_list
8628 case TARGET_NR_set_robust_list:
8629 case TARGET_NR_get_robust_list:
8630 /* The ABI for supporting robust futexes has userspace pass
8631 * the kernel a pointer to a linked list which is updated by
8632 * userspace after the syscall; the list is walked by the kernel
8633 * when the thread exits. Since the linked list in QEMU guest
8634 * memory isn't a valid linked list for the host and we have
8635 * no way to reliably intercept the thread-death event, we can't
8636 * support these. Silently return ENOSYS so that guest userspace
8637 * falls back to a non-robust futex implementation (which should
8638 * be OK except in the corner case of the guest crashing while
8639 * holding a mutex that is shared with another process via
8640 * shared memory).
8641 */
8642 goto unimplemented_nowarn;
8643 #endif
8644
8645 #if defined(TARGET_NR_utimensat)
8646 case TARGET_NR_utimensat:
8647 {
8648 struct timespec *tsp, ts[2];
8649 if (!arg3) {
8650 tsp = NULL;
8651 } else {
8652 target_to_host_timespec(ts, arg3);
8653 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8654 tsp = ts;
8655 }
8656 if (!arg2)
8657 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8658 else {
8659 if (!(p = lock_user_string(arg2))) {
8660 ret = -TARGET_EFAULT;
8661 goto fail;
8662 }
8663 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8664 unlock_user(p, arg2, 0);
8665 }
8666 }
8667 break;
8668 #endif
8669 #if defined(CONFIG_USE_NPTL)
8670 case TARGET_NR_futex:
8671 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8672 break;
8673 #endif
8674 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8675 case TARGET_NR_inotify_init:
8676 ret = get_errno(sys_inotify_init());
8677 break;
8678 #endif
8679 #ifdef CONFIG_INOTIFY1
8680 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8681 case TARGET_NR_inotify_init1:
8682 ret = get_errno(sys_inotify_init1(arg1));
8683 break;
8684 #endif
8685 #endif
8686 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8687 case TARGET_NR_inotify_add_watch:
8688 p = lock_user_string(arg2);
8689 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8690 unlock_user(p, arg2, 0);
8691 break;
8692 #endif
8693 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8694 case TARGET_NR_inotify_rm_watch:
8695 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8696 break;
8697 #endif
8698
8699 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8700 case TARGET_NR_mq_open:
8701 {
8702 struct mq_attr posix_mq_attr;
8703
8704 p = lock_user_string(arg1 - 1);
8705 if (arg4 != 0)
8706 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8707 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8708 unlock_user (p, arg1, 0);
8709 }
8710 break;
8711
8712 case TARGET_NR_mq_unlink:
8713 p = lock_user_string(arg1 - 1);
8714 ret = get_errno(mq_unlink(p));
8715 unlock_user (p, arg1, 0);
8716 break;
8717
8718 case TARGET_NR_mq_timedsend:
8719 {
8720 struct timespec ts;
8721
8722 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8723 if (arg5 != 0) {
8724 target_to_host_timespec(&ts, arg5);
8725 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8726 host_to_target_timespec(arg5, &ts);
8727 }
8728 else
8729 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8730 unlock_user (p, arg2, arg3);
8731 }
8732 break;
8733
8734 case TARGET_NR_mq_timedreceive:
8735 {
8736 struct timespec ts;
8737 unsigned int prio;
8738
8739 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8740 if (arg5 != 0) {
8741 target_to_host_timespec(&ts, arg5);
8742 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8743 host_to_target_timespec(arg5, &ts);
8744 }
8745 else
8746 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8747 unlock_user (p, arg2, arg3);
8748 if (arg4 != 0)
8749 put_user_u32(prio, arg4);
8750 }
8751 break;
8752
8753 /* Not implemented for now... */
8754 /* case TARGET_NR_mq_notify: */
8755 /* break; */
8756
8757 case TARGET_NR_mq_getsetattr:
8758 {
8759 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8760 ret = 0;
8761 if (arg3 != 0) {
8762 ret = mq_getattr(arg1, &posix_mq_attr_out);
8763 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8764 }
8765 if (arg2 != 0) {
8766 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8767 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8768 }
8769
8770 }
8771 break;
8772 #endif
8773
8774 #ifdef CONFIG_SPLICE
8775 #ifdef TARGET_NR_tee
8776 case TARGET_NR_tee:
8777 {
8778 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8779 }
8780 break;
8781 #endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    goto efault;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                /* Bug fix: the output offset was read from arg2
                   (copy/paste error) instead of arg4.  */
                if (get_user_u64(loff_out, arg4)) {
                    goto efault;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* splice(2) advances *off_in / *off_out; propagate the
               updated offsets back to the guest.  */
            if (!is_error(ret)) {
                if (ploff_in && put_user_u64(loff_in, arg2)) {
                    goto efault;
                }
                if (ploff_out && put_user_u64(loff_out, arg4)) {
                    goto efault;
                }
            }
        }
        break;
#endif
8799 #ifdef TARGET_NR_vmsplice
8800 case TARGET_NR_vmsplice:
8801 {
8802 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8803 if (vec != NULL) {
8804 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8805 unlock_iovec(vec, arg2, arg3, 0);
8806 } else {
8807 ret = -host_to_target_errno(errno);
8808 }
8809 }
8810 break;
8811 #endif
8812 #endif /* CONFIG_SPLICE */
8813 #ifdef CONFIG_EVENTFD
8814 #if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Legacy eventfd(initval): no flags argument, pass 0 to the host. */
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate the target's O_NONBLOCK/O_CLOEXEC bits to the host
         * values (they can differ per architecture); any remaining bits
         * are passed through unchanged for the host kernel to vet. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        break;
    }
#endif
8832 #endif
8833 #endif /* CONFIG_EVENTFD */
8834 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* On 32-bit ABIs the 64-bit offset and length arrive as register
         * pairs and must be reassembled with target_offset64().
         * NOTE(review): targets that align 64-bit register pairs to even
         * registers may shift these argument positions — confirm. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
8843 #endif
8844 #if defined(CONFIG_SYNC_FILE_RANGE)
8845 #if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 presumably pads so the 64-bit (offset, nbytes) pairs
         * start at arg3, pushing the flags out to arg7 — confirm against
         * the MIPS syscall ABI. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered:
         * the flags come second (arg2) and the 64-bit pair follows. */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
8872 #if defined(CONFIG_EPOLL)
8873 #if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* arg1 is the historical "size" hint, forwarded as-is. */
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        /* NOTE(review): arg1 (EPOLL_CLOEXEC) is passed untranslated; this
         * assumes target and host flag values match — confirm for targets
         * whose O_CLOEXEC value differs from the host's. */
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            /* arg4 is the guest epoll_event; a zero pointer (as used by
             * EPOLL_CTL_DEL) passes NULL through to the host. */
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
8905 #endif
8906
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation for epoll_wait and epoll_pwait: lock the
         * guest event array for writing, run the host syscall into a
         * temporary host-layout array, then byte-swap the ready events
         * back into the guest buffer. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        /* NOTE(review): maxevents is guest-controlled, so this alloca()
         * can consume unbounded stack — verify whether a bound is needed. */
        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                /* Convert the guest sigset to the host representation. */
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                /* NULL sigset means "do not change the signal mask". */
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the count of ready events; convert each to guest
             * byte order.  The data union is opaque 64-bit payload. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        /* NOTE(review): when ret is a negative errno this length is bogus;
         * harmless only if unlock_user tolerates it — confirm. */
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
8976 #ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            /* Convert the guest's new limits into host byte order. */
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* On success, copy the previous limits back out to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
9003 #endif
9004 #ifdef TARGET_NR_gethostname
9005 case TARGET_NR_gethostname:
9006 {
9007 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9008 if (name) {
9009 ret = get_errno(gethostname(name, arg2));
9010 unlock_user(name, arg1, arg2);
9011 } else {
9012 ret = -TARGET_EFAULT;
9013 }
9014 break;
9015 }
9016 #endif
    default:
    unimplemented:
        /* Anything not handled above: log once per call and fail. */
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
        /* Syscalls jumping here are knowingly unimplemented; skip the log. */
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
9025 }
fail:
    /* Common exit path: optionally trace the return value, then return it
     * (a non-negative result or a negative -TARGET_* errno). */
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    /* A guest memory access failed: report EFAULT via the fail path. */
    ret = -TARGET_EFAULT;
    goto fail;
}