linux-user: Fix sys_utimensat (would not compile on old glibc)
[qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include "linux_loop.h"
110 #include "cpu-uname.h"
111
112 #include "qemu.h"
113
114 #if defined(CONFIG_USE_NPTL)
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 #else
118 /* XXX: Hardcode the above values. */
119 #define CLONE_NPTL_FLAGS2 0
120 #endif
121
122 //#define DEBUG
123
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127
128
129 #undef _syscall0
130 #undef _syscall1
131 #undef _syscall2
132 #undef _syscall3
133 #undef _syscall4
134 #undef _syscall5
135 #undef _syscall6
136
137 #define _syscall0(type,name) \
138 static type name (void) \
139 { \
140 return syscall(__NR_##name); \
141 }
142
143 #define _syscall1(type,name,type1,arg1) \
144 static type name (type1 arg1) \
145 { \
146 return syscall(__NR_##name, arg1); \
147 }
148
149 #define _syscall2(type,name,type1,arg1,type2,arg2) \
150 static type name (type1 arg1,type2 arg2) \
151 { \
152 return syscall(__NR_##name, arg1, arg2); \
153 }
154
155 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
156 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 { \
158 return syscall(__NR_##name, arg1, arg2, arg3); \
159 }
160
161 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
162 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 { \
164 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
165 }
166
167 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 type5,arg5) \
169 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 { \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
172 }
173
174
175 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
176 type5,arg5,type6,arg6) \
177 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
178 type6 arg6) \
179 { \
180 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
181 }
182
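/* For illustration (not from the original source): an invocation such as
 *   _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count)
 * expands to a thin wrapper of the form
 *   static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *   { return syscall(__NR_sys_getdents, fd, dirp, count); }
 * i.e. each macro simply forwards its arguments to the raw host syscall. */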
183
184 #define __NR_sys_uname __NR_uname
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
197
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
199 defined(__s390x__)
200 #define __NR__llseek __NR_lseek
201 #endif
202
203 #ifdef __NR_gettid
204 _syscall0(int, gettid)
205 #else
206 /* This is a replacement for the host gettid() and must return a host
207 errno. */
208 static int gettid(void) {
209 return -ENOSYS;
210 }
211 #endif
212 #ifdef __NR_getdents
213 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
214 #endif
215 #if !defined(__NR_getdents) || \
216 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
217 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
218 #endif
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #endif
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #endif
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #endif
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
250 void *, arg);
251
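/* Flag translation table for open(2)/fcntl(2): each row pairs a
 * (target mask, target bits) entry with the matching (host mask, host bits)
 * entry, and the generic bitmask translation helpers walk the table to
 * convert flag words in either direction.  The all-zero row terminates it. */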
252 static bitmask_transtbl fcntl_flags_tbl[] = {
253 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
254 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
255 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
256 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
257 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
258 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
259 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
260 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
261 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
262 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
263 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
264 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
265 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
266 #if defined(O_DIRECT)
267 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
268 #endif
269 #if defined(O_NOATIME)
270 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
271 #endif
272 #if defined(O_CLOEXEC)
273 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
274 #endif
275 #if defined(O_PATH)
276 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
277 #endif
278 /* Don't terminate the list prematurely on 64-bit host+guest. */
279 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
280 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
281 #endif
282 { 0, 0, 0, 0 }
283 };
284
285 #define COPY_UTSNAME_FIELD(dest, src) \
286 do { \
287 /* __NEW_UTS_LEN doesn't include terminating null */ \
288 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
289 (dest)[__NEW_UTS_LEN] = '\0'; \
290 } while (0)
291
292 static int sys_uname(struct new_utsname *buf)
293 {
294 struct utsname uts_buf;
295
296 if (uname(&uts_buf) < 0)
297 return (-1);
298
299 /*
300 * Just in case these have some differences, we
301 * translate utsname to new_utsname (which is the
302  * struct the Linux kernel uses).
303 */
304
305 memset(buf, 0, sizeof(*buf));
306 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
307 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
308 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
309 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
310 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
311 #ifdef _GNU_SOURCE
312 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
313 #endif
314 return (0);
315
316 #undef COPY_UTSNAME_FIELD
317 }
318
319 static int sys_getcwd1(char *buf, size_t size)
320 {
321 if (getcwd(buf, size) == NULL) {
322 /* getcwd() sets errno */
323 return (-1);
324 }
325 return strlen(buf)+1;
326 }
327
328 #ifdef TARGET_NR_openat
329 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
330 {
331 /*
332  * open(2) takes an extra 'mode' parameter when it is
333  * called with the O_CREAT flag.
334 */
335 if ((flags & O_CREAT) != 0) {
336 return (openat(dirfd, pathname, flags, mode));
337 }
338 return (openat(dirfd, pathname, flags));
339 }
340 #endif
341
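/* sys_utimensat() is provided in one of three ways depending on the build
 * host: a glibc wrapper when CONFIG_UTIMENSAT is set, a raw syscall via
 * __NR_utimensat on older glibc that lacks the wrapper, or an ENOSYS stub
 * when neither is available. */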
342 #ifdef TARGET_NR_utimensat
343 #ifdef CONFIG_UTIMENSAT
344 static int sys_utimensat(int dirfd, const char *pathname,
345 const struct timespec times[2], int flags)
346 {
347 if (pathname == NULL)
348 return futimens(dirfd, times);
349 else
350 return utimensat(dirfd, pathname, times, flags);
351 }
352 #elif defined(__NR_utimensat)
353 #define __NR_sys_utimensat __NR_utimensat
354 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
355 const struct timespec *,tsp,int,flags)
356 #else
357 static int sys_utimensat(int dirfd, const char *pathname,
358 const struct timespec times[2], int flags)
359 {
360 errno = ENOSYS;
361 return -1;
362 }
363 #endif
364 #endif /* TARGET_NR_utimensat */
365
366 #ifdef CONFIG_INOTIFY
367 #include <sys/inotify.h>
368
369 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
370 static int sys_inotify_init(void)
371 {
372 return (inotify_init());
373 }
374 #endif
375 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
376 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
377 {
378 return (inotify_add_watch(fd, pathname, mask));
379 }
380 #endif
381 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
382 static int sys_inotify_rm_watch(int fd, int32_t wd)
383 {
384 return (inotify_rm_watch(fd, wd));
385 }
386 #endif
387 #ifdef CONFIG_INOTIFY1
388 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
389 static int sys_inotify_init1(int flags)
390 {
391 return (inotify_init1(flags));
392 }
393 #endif
394 #endif
395 #else
396 /* Userspace can usually survive runtime without inotify */
397 #undef TARGET_NR_inotify_init
398 #undef TARGET_NR_inotify_init1
399 #undef TARGET_NR_inotify_add_watch
400 #undef TARGET_NR_inotify_rm_watch
401 #endif /* CONFIG_INOTIFY */
402
403 #if defined(TARGET_NR_ppoll)
404 #ifndef __NR_ppoll
405 # define __NR_ppoll -1
406 #endif
407 #define __NR_sys_ppoll __NR_ppoll
408 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
409 struct timespec *, timeout, const __sigset_t *, sigmask,
410 size_t, sigsetsize)
411 #endif
412
413 #if defined(TARGET_NR_pselect6)
414 #ifndef __NR_pselect6
415 # define __NR_pselect6 -1
416 #endif
417 #define __NR_sys_pselect6 __NR_pselect6
418 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
419 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
420 #endif
421
422 #if defined(TARGET_NR_prlimit64)
423 #ifndef __NR_prlimit64
424 # define __NR_prlimit64 -1
425 #endif
426 #define __NR_sys_prlimit64 __NR_prlimit64
427 /* The glibc rlimit structure may not be that used by the underlying syscall */
428 struct host_rlimit64 {
429 uint64_t rlim_cur;
430 uint64_t rlim_max;
431 };
432 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
433 const struct host_rlimit64 *, new_limit,
434 struct host_rlimit64 *, old_limit)
435 #endif
436
437 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
438 #ifdef TARGET_ARM
439 static inline int regpairs_aligned(void *cpu_env) {
440 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
441 }
442 #elif defined(TARGET_MIPS)
443 static inline int regpairs_aligned(void *cpu_env) { return 1; }
444 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
445 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
446 * of registers which translates to the same as ARM/MIPS, because we start with
447 * r3 as arg1 */
448 static inline int regpairs_aligned(void *cpu_env) { return 1; }
449 #else
450 static inline int regpairs_aligned(void *cpu_env) { return 0; }
451 #endif
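/* For example, on an aligned-pair ABI a guest call like ftruncate64(fd, len)
 * leaves one argument slot unused so that the 64-bit 'len' starts in an even
 * register; the syscall emulation must skip that padding slot when it
 * reassembles the 64-bit value from its two 32-bit halves. */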
452
453 #define ERRNO_TABLE_SIZE 1200
454
455 /* target_to_host_errno_table[] is initialized from
456 * host_to_target_errno_table[] in syscall_init(). */
457 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
458 };
459
460 /*
461 * This list is the union of errno values overridden in asm-<arch>/errno.h
462 * minus the errnos that are not actually generic to all archs.
463 */
464 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
465 [EIDRM] = TARGET_EIDRM,
466 [ECHRNG] = TARGET_ECHRNG,
467 [EL2NSYNC] = TARGET_EL2NSYNC,
468 [EL3HLT] = TARGET_EL3HLT,
469 [EL3RST] = TARGET_EL3RST,
470 [ELNRNG] = TARGET_ELNRNG,
471 [EUNATCH] = TARGET_EUNATCH,
472 [ENOCSI] = TARGET_ENOCSI,
473 [EL2HLT] = TARGET_EL2HLT,
474 [EDEADLK] = TARGET_EDEADLK,
475 [ENOLCK] = TARGET_ENOLCK,
476 [EBADE] = TARGET_EBADE,
477 [EBADR] = TARGET_EBADR,
478 [EXFULL] = TARGET_EXFULL,
479 [ENOANO] = TARGET_ENOANO,
480 [EBADRQC] = TARGET_EBADRQC,
481 [EBADSLT] = TARGET_EBADSLT,
482 [EBFONT] = TARGET_EBFONT,
483 [ENOSTR] = TARGET_ENOSTR,
484 [ENODATA] = TARGET_ENODATA,
485 [ETIME] = TARGET_ETIME,
486 [ENOSR] = TARGET_ENOSR,
487 [ENONET] = TARGET_ENONET,
488 [ENOPKG] = TARGET_ENOPKG,
489 [EREMOTE] = TARGET_EREMOTE,
490 [ENOLINK] = TARGET_ENOLINK,
491 [EADV] = TARGET_EADV,
492 [ESRMNT] = TARGET_ESRMNT,
493 [ECOMM] = TARGET_ECOMM,
494 [EPROTO] = TARGET_EPROTO,
495 [EDOTDOT] = TARGET_EDOTDOT,
496 [EMULTIHOP] = TARGET_EMULTIHOP,
497 [EBADMSG] = TARGET_EBADMSG,
498 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
499 [EOVERFLOW] = TARGET_EOVERFLOW,
500 [ENOTUNIQ] = TARGET_ENOTUNIQ,
501 [EBADFD] = TARGET_EBADFD,
502 [EREMCHG] = TARGET_EREMCHG,
503 [ELIBACC] = TARGET_ELIBACC,
504 [ELIBBAD] = TARGET_ELIBBAD,
505 [ELIBSCN] = TARGET_ELIBSCN,
506 [ELIBMAX] = TARGET_ELIBMAX,
507 [ELIBEXEC] = TARGET_ELIBEXEC,
508 [EILSEQ] = TARGET_EILSEQ,
509 [ENOSYS] = TARGET_ENOSYS,
510 [ELOOP] = TARGET_ELOOP,
511 [ERESTART] = TARGET_ERESTART,
512 [ESTRPIPE] = TARGET_ESTRPIPE,
513 [ENOTEMPTY] = TARGET_ENOTEMPTY,
514 [EUSERS] = TARGET_EUSERS,
515 [ENOTSOCK] = TARGET_ENOTSOCK,
516 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
517 [EMSGSIZE] = TARGET_EMSGSIZE,
518 [EPROTOTYPE] = TARGET_EPROTOTYPE,
519 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
520 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
521 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
522 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
523 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
524 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
525 [EADDRINUSE] = TARGET_EADDRINUSE,
526 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
527 [ENETDOWN] = TARGET_ENETDOWN,
528 [ENETUNREACH] = TARGET_ENETUNREACH,
529 [ENETRESET] = TARGET_ENETRESET,
530 [ECONNABORTED] = TARGET_ECONNABORTED,
531 [ECONNRESET] = TARGET_ECONNRESET,
532 [ENOBUFS] = TARGET_ENOBUFS,
533 [EISCONN] = TARGET_EISCONN,
534 [ENOTCONN] = TARGET_ENOTCONN,
535 [EUCLEAN] = TARGET_EUCLEAN,
536 [ENOTNAM] = TARGET_ENOTNAM,
537 [ENAVAIL] = TARGET_ENAVAIL,
538 [EISNAM] = TARGET_EISNAM,
539 [EREMOTEIO] = TARGET_EREMOTEIO,
540 [ESHUTDOWN] = TARGET_ESHUTDOWN,
541 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
542 [ETIMEDOUT] = TARGET_ETIMEDOUT,
543 [ECONNREFUSED] = TARGET_ECONNREFUSED,
544 [EHOSTDOWN] = TARGET_EHOSTDOWN,
545 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
546 [EALREADY] = TARGET_EALREADY,
547 [EINPROGRESS] = TARGET_EINPROGRESS,
548 [ESTALE] = TARGET_ESTALE,
549 [ECANCELED] = TARGET_ECANCELED,
550 [ENOMEDIUM] = TARGET_ENOMEDIUM,
551 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
552 #ifdef ENOKEY
553 [ENOKEY] = TARGET_ENOKEY,
554 #endif
555 #ifdef EKEYEXPIRED
556 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
557 #endif
558 #ifdef EKEYREVOKED
559 [EKEYREVOKED] = TARGET_EKEYREVOKED,
560 #endif
561 #ifdef EKEYREJECTED
562 [EKEYREJECTED] = TARGET_EKEYREJECTED,
563 #endif
564 #ifdef EOWNERDEAD
565 [EOWNERDEAD] = TARGET_EOWNERDEAD,
566 #endif
567 #ifdef ENOTRECOVERABLE
568 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
569 #endif
570 };
571
572 static inline int host_to_target_errno(int err)
573 {
574 if(host_to_target_errno_table[err])
575 return host_to_target_errno_table[err];
576 return err;
577 }
578
579 static inline int target_to_host_errno(int err)
580 {
581 if (target_to_host_errno_table[err])
582 return target_to_host_errno_table[err];
583 return err;
584 }
585
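/* Kernel convention: a syscall return value in [-4095, -1] encodes a negated
 * errno; anything else is a success value.  get_errno() below folds the
 * host's -1/errno result into that form (using target errno numbers) and
 * is_error() tests whether a value falls in the error range. */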
586 static inline abi_long get_errno(abi_long ret)
587 {
588 if (ret == -1)
589 return -host_to_target_errno(errno);
590 else
591 return ret;
592 }
593
594 static inline int is_error(abi_long ret)
595 {
596 return (abi_ulong)ret >= (abi_ulong)(-4096);
597 }
598
599 char *target_strerror(int err)
600 {
601 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
602 return NULL;
603 }
604 return strerror(target_to_host_errno(err));
605 }
606
607 static abi_ulong target_brk;
608 static abi_ulong target_original_brk;
609 static abi_ulong brk_page;
610
611 void target_set_brk(abi_ulong new_brk)
612 {
613 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
614 brk_page = HOST_PAGE_ALIGN(target_brk);
615 }
616
617 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
618 #define DEBUGF_BRK(message, args...)
619
620 /* do_brk() must return target values and target errnos. */
621 abi_long do_brk(abi_ulong new_brk)
622 {
623 abi_long mapped_addr;
624 int new_alloc_size;
625
626 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
627
628 if (!new_brk) {
629 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
630 return target_brk;
631 }
632 if (new_brk < target_original_brk) {
633 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
634 target_brk);
635 return target_brk;
636 }
637
638 /* If the new brk is less than the highest page reserved to the
639 * target heap allocation, set it and we're almost done... */
640 if (new_brk <= brk_page) {
641 /* Heap contents are initialized to zero, as for anonymous
642 * mapped pages. */
643 if (new_brk > target_brk) {
644 memset(g2h(target_brk), 0, new_brk - target_brk);
645 }
646 target_brk = new_brk;
647 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
648 return target_brk;
649 }
650
651 /* We need to allocate more memory after the brk... Note that
652 * we don't use MAP_FIXED because that will map over the top of
653 * any existing mapping (like the one with the host libc or qemu
654 * itself); instead we treat "mapped but at wrong address" as
655 * a failure and unmap again.
656 */
657 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
658 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
659 PROT_READ|PROT_WRITE,
660 MAP_ANON|MAP_PRIVATE, 0, 0));
661
662 if (mapped_addr == brk_page) {
663 /* Heap contents are initialized to zero, as for anonymous
664 * mapped pages. Technically the new pages are already
665 * initialized to zero since they *are* anonymous mapped
666 * pages, however we have to take care with the contents that
667 * come from the remaining part of the previous page: it may
668  * contain garbage data from previous heap usage (grown
669  * then shrunk). */
670 memset(g2h(target_brk), 0, brk_page - target_brk);
671
672 target_brk = new_brk;
673 brk_page = HOST_PAGE_ALIGN(target_brk);
674 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
675 target_brk);
676 return target_brk;
677 } else if (mapped_addr != -1) {
678 /* Mapped but at wrong address, meaning there wasn't actually
679 * enough space for this brk.
680 */
681 target_munmap(mapped_addr, new_alloc_size);
682 mapped_addr = -1;
683 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
684 }
685 else {
686 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
687 }
688
689 #if defined(TARGET_ALPHA)
690 /* We (partially) emulate OSF/1 on Alpha, which requires we
691 return a proper errno, not an unchanged brk value. */
692 return -TARGET_ENOMEM;
693 #endif
694 /* For everything else, return the previous break. */
695 return target_brk;
696 }
697
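/* Guest fd_sets are arrays of abi_ulong words in target byte order, so the
 * helpers below repack them bit by bit into/out of the host fd_set instead
 * of copying the representation verbatim. */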
698 static inline abi_long copy_from_user_fdset(fd_set *fds,
699 abi_ulong target_fds_addr,
700 int n)
701 {
702 int i, nw, j, k;
703 abi_ulong b, *target_fds;
704
705 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
706 if (!(target_fds = lock_user(VERIFY_READ,
707 target_fds_addr,
708 sizeof(abi_ulong) * nw,
709 1)))
710 return -TARGET_EFAULT;
711
712 FD_ZERO(fds);
713 k = 0;
714 for (i = 0; i < nw; i++) {
715 /* grab the abi_ulong */
716 __get_user(b, &target_fds[i]);
717 for (j = 0; j < TARGET_ABI_BITS; j++) {
718 /* check the bit inside the abi_ulong */
719 if ((b >> j) & 1)
720 FD_SET(k, fds);
721 k++;
722 }
723 }
724
725 unlock_user(target_fds, target_fds_addr, 0);
726
727 return 0;
728 }
729
730 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
731 abi_ulong target_fds_addr,
732 int n)
733 {
734 if (target_fds_addr) {
735 if (copy_from_user_fdset(fds, target_fds_addr, n))
736 return -TARGET_EFAULT;
737 *fds_ptr = fds;
738 } else {
739 *fds_ptr = NULL;
740 }
741 return 0;
742 }
743
744 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
745 const fd_set *fds,
746 int n)
747 {
748 int i, nw, j, k;
749 abi_long v;
750 abi_ulong *target_fds;
751
752 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
753 if (!(target_fds = lock_user(VERIFY_WRITE,
754 target_fds_addr,
755 sizeof(abi_ulong) * nw,
756 0)))
757 return -TARGET_EFAULT;
758
759 k = 0;
760 for (i = 0; i < nw; i++) {
761 v = 0;
762 for (j = 0; j < TARGET_ABI_BITS; j++) {
763 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
764 k++;
765 }
766 __put_user(v, &target_fds[i]);
767 }
768
769 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
770
771 return 0;
772 }
773
774 #if defined(__alpha__)
775 #define HOST_HZ 1024
776 #else
777 #define HOST_HZ 100
778 #endif
779
780 static inline abi_long host_to_target_clock_t(long ticks)
781 {
782 #if HOST_HZ == TARGET_HZ
783 return ticks;
784 #else
785 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
786 #endif
787 }
788
789 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
790 const struct rusage *rusage)
791 {
792 struct target_rusage *target_rusage;
793
794 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
795 return -TARGET_EFAULT;
796 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
797 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
798 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
799 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
800 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
801 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
802 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
803 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
804 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
805 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
806 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
807 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
808 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
809 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
810 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
811 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
812 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
813 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
814 unlock_user_struct(target_rusage, target_addr, 1);
815
816 return 0;
817 }
818
819 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
820 {
821 abi_ulong target_rlim_swap;
822 rlim_t result;
823
824 target_rlim_swap = tswapal(target_rlim);
825 if (target_rlim_swap == TARGET_RLIM_INFINITY)
826 return RLIM_INFINITY;
827
828 result = target_rlim_swap;
829 if (target_rlim_swap != (rlim_t)result)
830 return RLIM_INFINITY;
831
832 return result;
833 }
834
835 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
836 {
837 abi_ulong target_rlim_swap;
838 abi_ulong result;
839
840 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
841 target_rlim_swap = TARGET_RLIM_INFINITY;
842 else
843 target_rlim_swap = rlim;
844 result = tswapal(target_rlim_swap);
845
846 return result;
847 }
848
849 static inline int target_to_host_resource(int code)
850 {
851 switch (code) {
852 case TARGET_RLIMIT_AS:
853 return RLIMIT_AS;
854 case TARGET_RLIMIT_CORE:
855 return RLIMIT_CORE;
856 case TARGET_RLIMIT_CPU:
857 return RLIMIT_CPU;
858 case TARGET_RLIMIT_DATA:
859 return RLIMIT_DATA;
860 case TARGET_RLIMIT_FSIZE:
861 return RLIMIT_FSIZE;
862 case TARGET_RLIMIT_LOCKS:
863 return RLIMIT_LOCKS;
864 case TARGET_RLIMIT_MEMLOCK:
865 return RLIMIT_MEMLOCK;
866 case TARGET_RLIMIT_MSGQUEUE:
867 return RLIMIT_MSGQUEUE;
868 case TARGET_RLIMIT_NICE:
869 return RLIMIT_NICE;
870 case TARGET_RLIMIT_NOFILE:
871 return RLIMIT_NOFILE;
872 case TARGET_RLIMIT_NPROC:
873 return RLIMIT_NPROC;
874 case TARGET_RLIMIT_RSS:
875 return RLIMIT_RSS;
876 case TARGET_RLIMIT_RTPRIO:
877 return RLIMIT_RTPRIO;
878 case TARGET_RLIMIT_SIGPENDING:
879 return RLIMIT_SIGPENDING;
880 case TARGET_RLIMIT_STACK:
881 return RLIMIT_STACK;
882 default:
883 return code;
884 }
885 }
886
887 static inline abi_long copy_from_user_timeval(struct timeval *tv,
888 abi_ulong target_tv_addr)
889 {
890 struct target_timeval *target_tv;
891
892 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
893 return -TARGET_EFAULT;
894
895 __get_user(tv->tv_sec, &target_tv->tv_sec);
896 __get_user(tv->tv_usec, &target_tv->tv_usec);
897
898 unlock_user_struct(target_tv, target_tv_addr, 0);
899
900 return 0;
901 }
902
903 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
904 const struct timeval *tv)
905 {
906 struct target_timeval *target_tv;
907
908 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
909 return -TARGET_EFAULT;
910
911 __put_user(tv->tv_sec, &target_tv->tv_sec);
912 __put_user(tv->tv_usec, &target_tv->tv_usec);
913
914 unlock_user_struct(target_tv, target_tv_addr, 1);
915
916 return 0;
917 }
918
919 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
920 #include <mqueue.h>
921
922 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
923 abi_ulong target_mq_attr_addr)
924 {
925 struct target_mq_attr *target_mq_attr;
926
927 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
928 target_mq_attr_addr, 1))
929 return -TARGET_EFAULT;
930
931 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
932 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
933 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
934 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
935
936 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
937
938 return 0;
939 }
940
941 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
942 const struct mq_attr *attr)
943 {
944 struct target_mq_attr *target_mq_attr;
945
946 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
947 target_mq_attr_addr, 0))
948 return -TARGET_EFAULT;
949
950 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
951 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
952 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
953 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
954
955 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
956
957 return 0;
958 }
959 #endif
960
961 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
962 /* do_select() must return target values and target errnos. */
963 static abi_long do_select(int n,
964 abi_ulong rfd_addr, abi_ulong wfd_addr,
965 abi_ulong efd_addr, abi_ulong target_tv_addr)
966 {
967 fd_set rfds, wfds, efds;
968 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
969 struct timeval tv, *tv_ptr;
970 abi_long ret;
971
972 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
973 if (ret) {
974 return ret;
975 }
976 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
977 if (ret) {
978 return ret;
979 }
980 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
981 if (ret) {
982 return ret;
983 }
984
985 if (target_tv_addr) {
986 if (copy_from_user_timeval(&tv, target_tv_addr))
987 return -TARGET_EFAULT;
988 tv_ptr = &tv;
989 } else {
990 tv_ptr = NULL;
991 }
992
993 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
994
995 if (!is_error(ret)) {
996 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
997 return -TARGET_EFAULT;
998 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
999 return -TARGET_EFAULT;
1000 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1001 return -TARGET_EFAULT;
1002
1003 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1004 return -TARGET_EFAULT;
1005 }
1006
1007 return ret;
1008 }
1009 #endif
1010
1011 static abi_long do_pipe2(int host_pipe[], int flags)
1012 {
1013 #ifdef CONFIG_PIPE2
1014 return pipe2(host_pipe, flags);
1015 #else
1016 return -ENOSYS;
1017 #endif
1018 }
1019
1020 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1021 int flags, int is_pipe2)
1022 {
1023 int host_pipe[2];
1024 abi_long ret;
1025 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1026
1027 if (is_error(ret))
1028 return get_errno(ret);
1029
1030 /* Several targets have special calling conventions for the original
1031 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1032 if (!is_pipe2) {
1033 #if defined(TARGET_ALPHA)
1034 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1035 return host_pipe[0];
1036 #elif defined(TARGET_MIPS)
1037 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1038 return host_pipe[0];
1039 #elif defined(TARGET_SH4)
1040 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1041 return host_pipe[0];
1042 #endif
1043 }
1044
1045 if (put_user_s32(host_pipe[0], pipedes)
1046 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1047 return -TARGET_EFAULT;
1048 return get_errno(ret);
1049 }
1050
1051 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1052 abi_ulong target_addr,
1053 socklen_t len)
1054 {
1055 struct target_ip_mreqn *target_smreqn;
1056
1057 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1058 if (!target_smreqn)
1059 return -TARGET_EFAULT;
1060 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1061 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1062 if (len == sizeof(struct target_ip_mreqn))
1063 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1064 unlock_user(target_smreqn, target_addr, 0);
1065
1066 return 0;
1067 }
1068
1069 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1070 abi_ulong target_addr,
1071 socklen_t len)
1072 {
1073 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1074 sa_family_t sa_family;
1075 struct target_sockaddr *target_saddr;
1076
1077 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1078 if (!target_saddr)
1079 return -TARGET_EFAULT;
1080
1081 sa_family = tswap16(target_saddr->sa_family);
1082
1083     /* Oops. The caller might send an incomplete sun_path; sun_path
1084 * must be terminated by \0 (see the manual page), but
1085 * unfortunately it is quite common to specify sockaddr_un
1086 * length as "strlen(x->sun_path)" while it should be
1087 * "strlen(...) + 1". We'll fix that here if needed.
1088 * Linux kernel has a similar feature.
1089 */
1090
1091 if (sa_family == AF_UNIX) {
1092 if (len < unix_maxlen && len > 0) {
1093 char *cp = (char*)target_saddr;
1094
1095 if ( cp[len-1] && !cp[len] )
1096 len++;
1097 }
1098 if (len > unix_maxlen)
1099 len = unix_maxlen;
1100 }
1101
1102 memcpy(addr, target_saddr, len);
1103 addr->sa_family = sa_family;
1104 unlock_user(target_saddr, target_addr, 0);
1105
1106 return 0;
1107 }
1108
1109 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1110 struct sockaddr *addr,
1111 socklen_t len)
1112 {
1113 struct target_sockaddr *target_saddr;
1114
1115 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1116 if (!target_saddr)
1117 return -TARGET_EFAULT;
1118 memcpy(target_saddr, addr, len);
1119 target_saddr->sa_family = tswap16(addr->sa_family);
1120 unlock_user(target_saddr, target_addr, len);
1121
1122 return 0;
1123 }
1124
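/* Ancillary-data (cmsg) translation: the two helpers below walk the control
 * message chain and rewrite each header for the other ABI.  SCM_RIGHTS
 * payloads are arrays of file descriptors and are swapped element by
 * element; unrecognised message types are copied through unchanged with a
 * log message. */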
1125 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1126 struct target_msghdr *target_msgh)
1127 {
1128 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1129 abi_long msg_controllen;
1130 abi_ulong target_cmsg_addr;
1131 struct target_cmsghdr *target_cmsg;
1132 socklen_t space = 0;
1133
1134 msg_controllen = tswapal(target_msgh->msg_controllen);
1135 if (msg_controllen < sizeof (struct target_cmsghdr))
1136 goto the_end;
1137 target_cmsg_addr = tswapal(target_msgh->msg_control);
1138 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1139 if (!target_cmsg)
1140 return -TARGET_EFAULT;
1141
1142 while (cmsg && target_cmsg) {
1143 void *data = CMSG_DATA(cmsg);
1144 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1145
1146 int len = tswapal(target_cmsg->cmsg_len)
1147 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1148
1149 space += CMSG_SPACE(len);
1150 if (space > msgh->msg_controllen) {
1151 space -= CMSG_SPACE(len);
1152 gemu_log("Host cmsg overflow\n");
1153 break;
1154 }
1155
1156 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1157 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1158 cmsg->cmsg_len = CMSG_LEN(len);
1159
1160 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1161 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1162 memcpy(data, target_data, len);
1163 } else {
1164 int *fd = (int *)data;
1165 int *target_fd = (int *)target_data;
1166 int i, numfds = len / sizeof(int);
1167
1168 for (i = 0; i < numfds; i++)
1169 fd[i] = tswap32(target_fd[i]);
1170 }
1171
1172 cmsg = CMSG_NXTHDR(msgh, cmsg);
1173 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1174 }
1175 unlock_user(target_cmsg, target_cmsg_addr, 0);
1176 the_end:
1177 msgh->msg_controllen = space;
1178 return 0;
1179 }
1180
1181 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1182 struct msghdr *msgh)
1183 {
1184 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1185 abi_long msg_controllen;
1186 abi_ulong target_cmsg_addr;
1187 struct target_cmsghdr *target_cmsg;
1188 socklen_t space = 0;
1189
1190 msg_controllen = tswapal(target_msgh->msg_controllen);
1191 if (msg_controllen < sizeof (struct target_cmsghdr))
1192 goto the_end;
1193 target_cmsg_addr = tswapal(target_msgh->msg_control);
1194 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1195 if (!target_cmsg)
1196 return -TARGET_EFAULT;
1197
1198 while (cmsg && target_cmsg) {
1199 void *data = CMSG_DATA(cmsg);
1200 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1201
1202 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1203
1204 space += TARGET_CMSG_SPACE(len);
1205 if (space > msg_controllen) {
1206 space -= TARGET_CMSG_SPACE(len);
1207 gemu_log("Target cmsg overflow\n");
1208 break;
1209 }
1210
1211 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1212 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1213 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1214
1215 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1216 (cmsg->cmsg_type == SCM_RIGHTS)) {
1217 int *fd = (int *)data;
1218 int *target_fd = (int *)target_data;
1219 int i, numfds = len / sizeof(int);
1220
1221 for (i = 0; i < numfds; i++)
1222 target_fd[i] = tswap32(fd[i]);
1223 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1224 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1225 (len == sizeof(struct timeval))) {
1226 /* copy struct timeval to target */
1227 struct timeval *tv = (struct timeval *)data;
1228 struct target_timeval *target_tv =
1229 (struct target_timeval *)target_data;
1230
1231 target_tv->tv_sec = tswapal(tv->tv_sec);
1232 target_tv->tv_usec = tswapal(tv->tv_usec);
1233 } else {
1234 gemu_log("Unsupported ancillary data: %d/%d\n",
1235 cmsg->cmsg_level, cmsg->cmsg_type);
1236 memcpy(target_data, data, len);
1237 }
1238
1239 cmsg = CMSG_NXTHDR(msgh, cmsg);
1240 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1241 }
1242 unlock_user(target_cmsg, target_cmsg_addr, space);
1243 the_end:
1244 target_msgh->msg_controllen = tswapal(space);
1245 return 0;
1246 }
1247
1248 /* do_setsockopt() Must return target values and target errnos. */
1249 static abi_long do_setsockopt(int sockfd, int level, int optname,
1250 abi_ulong optval_addr, socklen_t optlen)
1251 {
1252 abi_long ret;
1253 int val;
1254 struct ip_mreqn *ip_mreq;
1255 struct ip_mreq_source *ip_mreq_source;
1256
1257 switch(level) {
1258 case SOL_TCP:
1259 /* TCP options all take an 'int' value. */
1260 if (optlen < sizeof(uint32_t))
1261 return -TARGET_EINVAL;
1262
1263 if (get_user_u32(val, optval_addr))
1264 return -TARGET_EFAULT;
1265 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1266 break;
1267 case SOL_IP:
1268 switch(optname) {
1269 case IP_TOS:
1270 case IP_TTL:
1271 case IP_HDRINCL:
1272 case IP_ROUTER_ALERT:
1273 case IP_RECVOPTS:
1274 case IP_RETOPTS:
1275 case IP_PKTINFO:
1276 case IP_MTU_DISCOVER:
1277 case IP_RECVERR:
1278 case IP_RECVTOS:
1279 #ifdef IP_FREEBIND
1280 case IP_FREEBIND:
1281 #endif
1282 case IP_MULTICAST_TTL:
1283 case IP_MULTICAST_LOOP:
1284 val = 0;
1285 if (optlen >= sizeof(uint32_t)) {
1286 if (get_user_u32(val, optval_addr))
1287 return -TARGET_EFAULT;
1288 } else if (optlen >= 1) {
1289 if (get_user_u8(val, optval_addr))
1290 return -TARGET_EFAULT;
1291 }
1292 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1293 break;
1294 case IP_ADD_MEMBERSHIP:
1295 case IP_DROP_MEMBERSHIP:
1296 if (optlen < sizeof (struct target_ip_mreq) ||
1297 optlen > sizeof (struct target_ip_mreqn))
1298 return -TARGET_EINVAL;
1299
1300 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1301 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1302 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1303 break;
1304
1305 case IP_BLOCK_SOURCE:
1306 case IP_UNBLOCK_SOURCE:
1307 case IP_ADD_SOURCE_MEMBERSHIP:
1308 case IP_DROP_SOURCE_MEMBERSHIP:
1309 if (optlen != sizeof (struct target_ip_mreq_source))
1310 return -TARGET_EINVAL;
1311
1312 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1313 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1314 unlock_user (ip_mreq_source, optval_addr, 0);
1315 break;
1316
1317 default:
1318 goto unimplemented;
1319 }
1320 break;
1321 case SOL_RAW:
1322 switch (optname) {
1323 case ICMP_FILTER:
1324         /* struct icmp_filter takes a u32 value */
1325 if (optlen < sizeof(uint32_t)) {
1326 return -TARGET_EINVAL;
1327 }
1328
1329 if (get_user_u32(val, optval_addr)) {
1330 return -TARGET_EFAULT;
1331 }
1332 ret = get_errno(setsockopt(sockfd, level, optname,
1333 &val, sizeof(val)));
1334 break;
1335
1336 default:
1337 goto unimplemented;
1338 }
1339 break;
1340 case TARGET_SOL_SOCKET:
1341 switch (optname) {
1342 case TARGET_SO_RCVTIMEO:
1343 {
1344 struct timeval tv;
1345
1346 optname = SO_RCVTIMEO;
1347
1348 set_timeout:
1349 if (optlen != sizeof(struct target_timeval)) {
1350 return -TARGET_EINVAL;
1351 }
1352
1353 if (copy_from_user_timeval(&tv, optval_addr)) {
1354 return -TARGET_EFAULT;
1355 }
1356
1357 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1358 &tv, sizeof(tv)));
1359 return ret;
1360 }
1361 case TARGET_SO_SNDTIMEO:
1362 optname = SO_SNDTIMEO;
1363 goto set_timeout;
1364 /* Options with 'int' argument. */
1365 case TARGET_SO_DEBUG:
1366 optname = SO_DEBUG;
1367 break;
1368 case TARGET_SO_REUSEADDR:
1369 optname = SO_REUSEADDR;
1370 break;
1371 case TARGET_SO_TYPE:
1372 optname = SO_TYPE;
1373 break;
1374 case TARGET_SO_ERROR:
1375 optname = SO_ERROR;
1376 break;
1377 case TARGET_SO_DONTROUTE:
1378 optname = SO_DONTROUTE;
1379 break;
1380 case TARGET_SO_BROADCAST:
1381 optname = SO_BROADCAST;
1382 break;
1383 case TARGET_SO_SNDBUF:
1384 optname = SO_SNDBUF;
1385 break;
1386 case TARGET_SO_RCVBUF:
1387 optname = SO_RCVBUF;
1388 break;
1389 case TARGET_SO_KEEPALIVE:
1390 optname = SO_KEEPALIVE;
1391 break;
1392 case TARGET_SO_OOBINLINE:
1393 optname = SO_OOBINLINE;
1394 break;
1395 case TARGET_SO_NO_CHECK:
1396 optname = SO_NO_CHECK;
1397 break;
1398 case TARGET_SO_PRIORITY:
1399 optname = SO_PRIORITY;
1400 break;
1401 #ifdef SO_BSDCOMPAT
1402 case TARGET_SO_BSDCOMPAT:
1403 optname = SO_BSDCOMPAT;
1404 break;
1405 #endif
1406 case TARGET_SO_PASSCRED:
1407 optname = SO_PASSCRED;
1408 break;
1409 case TARGET_SO_TIMESTAMP:
1410 optname = SO_TIMESTAMP;
1411 break;
1412 case TARGET_SO_RCVLOWAT:
1413 optname = SO_RCVLOWAT;
1414 break;
1416 default:
1417 goto unimplemented;
1418 }
1419 if (optlen < sizeof(uint32_t))
1420 return -TARGET_EINVAL;
1421
1422 if (get_user_u32(val, optval_addr))
1423 return -TARGET_EFAULT;
1424 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1425 break;
1426 default:
1427 unimplemented:
1428 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1429 ret = -TARGET_ENOPROTOOPT;
1430 }
1431 return ret;
1432 }
1433
1434 /* do_getsockopt() Must return target values and target errnos. */
1435 static abi_long do_getsockopt(int sockfd, int level, int optname,
1436 abi_ulong optval_addr, abi_ulong optlen)
1437 {
1438 abi_long ret;
1439 int len, val;
1440 socklen_t lv;
1441
1442 switch(level) {
1443 case TARGET_SOL_SOCKET:
1444 level = SOL_SOCKET;
1445 switch (optname) {
1446 /* These don't just return a single integer */
1447 case TARGET_SO_LINGER:
1448 case TARGET_SO_RCVTIMEO:
1449 case TARGET_SO_SNDTIMEO:
1450 case TARGET_SO_PEERNAME:
1451 goto unimplemented;
1452 case TARGET_SO_PEERCRED: {
1453 struct ucred cr;
1454 socklen_t crlen;
1455 struct target_ucred *tcr;
1456
1457 if (get_user_u32(len, optlen)) {
1458 return -TARGET_EFAULT;
1459 }
1460 if (len < 0) {
1461 return -TARGET_EINVAL;
1462 }
1463
1464 crlen = sizeof(cr);
1465 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1466 &cr, &crlen));
1467 if (ret < 0) {
1468 return ret;
1469 }
1470 if (len > crlen) {
1471 len = crlen;
1472 }
1473 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1474 return -TARGET_EFAULT;
1475 }
1476 __put_user(cr.pid, &tcr->pid);
1477 __put_user(cr.uid, &tcr->uid);
1478 __put_user(cr.gid, &tcr->gid);
1479 unlock_user_struct(tcr, optval_addr, 1);
1480 if (put_user_u32(len, optlen)) {
1481 return -TARGET_EFAULT;
1482 }
1483 break;
1484 }
1485 /* Options with 'int' argument. */
1486 case TARGET_SO_DEBUG:
1487 optname = SO_DEBUG;
1488 goto int_case;
1489 case TARGET_SO_REUSEADDR:
1490 optname = SO_REUSEADDR;
1491 goto int_case;
1492 case TARGET_SO_TYPE:
1493 optname = SO_TYPE;
1494 goto int_case;
1495 case TARGET_SO_ERROR:
1496 optname = SO_ERROR;
1497 goto int_case;
1498 case TARGET_SO_DONTROUTE:
1499 optname = SO_DONTROUTE;
1500 goto int_case;
1501 case TARGET_SO_BROADCAST:
1502 optname = SO_BROADCAST;
1503 goto int_case;
1504 case TARGET_SO_SNDBUF:
1505 optname = SO_SNDBUF;
1506 goto int_case;
1507 case TARGET_SO_RCVBUF:
1508 optname = SO_RCVBUF;
1509 goto int_case;
1510 case TARGET_SO_KEEPALIVE:
1511 optname = SO_KEEPALIVE;
1512 goto int_case;
1513 case TARGET_SO_OOBINLINE:
1514 optname = SO_OOBINLINE;
1515 goto int_case;
1516 case TARGET_SO_NO_CHECK:
1517 optname = SO_NO_CHECK;
1518 goto int_case;
1519 case TARGET_SO_PRIORITY:
1520 optname = SO_PRIORITY;
1521 goto int_case;
1522 #ifdef SO_BSDCOMPAT
1523 case TARGET_SO_BSDCOMPAT:
1524 optname = SO_BSDCOMPAT;
1525 goto int_case;
1526 #endif
1527 case TARGET_SO_PASSCRED:
1528 optname = SO_PASSCRED;
1529 goto int_case;
1530 case TARGET_SO_TIMESTAMP:
1531 optname = SO_TIMESTAMP;
1532 goto int_case;
1533 case TARGET_SO_RCVLOWAT:
1534 optname = SO_RCVLOWAT;
1535 goto int_case;
1536 default:
1537 goto int_case;
1538 }
1539 break;
1540 case SOL_TCP:
1541 /* TCP options all take an 'int' value. */
1542 int_case:
1543 if (get_user_u32(len, optlen))
1544 return -TARGET_EFAULT;
1545 if (len < 0)
1546 return -TARGET_EINVAL;
1547 lv = sizeof(lv);
1548 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1549 if (ret < 0)
1550 return ret;
1551 if (len > lv)
1552 len = lv;
1553 if (len == 4) {
1554 if (put_user_u32(val, optval_addr))
1555 return -TARGET_EFAULT;
1556 } else {
1557 if (put_user_u8(val, optval_addr))
1558 return -TARGET_EFAULT;
1559 }
1560 if (put_user_u32(len, optlen))
1561 return -TARGET_EFAULT;
1562 break;
1563 case SOL_IP:
1564 switch(optname) {
1565 case IP_TOS:
1566 case IP_TTL:
1567 case IP_HDRINCL:
1568 case IP_ROUTER_ALERT:
1569 case IP_RECVOPTS:
1570 case IP_RETOPTS:
1571 case IP_PKTINFO:
1572 case IP_MTU_DISCOVER:
1573 case IP_RECVERR:
1574 case IP_RECVTOS:
1575 #ifdef IP_FREEBIND
1576 case IP_FREEBIND:
1577 #endif
1578 case IP_MULTICAST_TTL:
1579 case IP_MULTICAST_LOOP:
1580 if (get_user_u32(len, optlen))
1581 return -TARGET_EFAULT;
1582 if (len < 0)
1583 return -TARGET_EINVAL;
1584 lv = sizeof(lv);
1585 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1586 if (ret < 0)
1587 return ret;
1588 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1589 len = 1;
1590 if (put_user_u32(len, optlen)
1591 || put_user_u8(val, optval_addr))
1592 return -TARGET_EFAULT;
1593 } else {
1594 if (len > sizeof(int))
1595 len = sizeof(int);
1596 if (put_user_u32(len, optlen)
1597 || put_user_u32(val, optval_addr))
1598 return -TARGET_EFAULT;
1599 }
1600 break;
1601 default:
1602 ret = -TARGET_ENOPROTOOPT;
1603 break;
1604 }
1605 break;
1606 default:
1607 unimplemented:
1608 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1609 level, optname);
1610 ret = -TARGET_EOPNOTSUPP;
1611 break;
1612 }
1613 return ret;
1614 }
1615
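/* lock_iovec() converts a guest iovec array into host struct iovec entries,
 * locking each buffer for the requested access; 'copy' requests that guest
 * data be copied in.  unlock_iovec() releases the buffers, with 'copy'
 * requesting copy-back of the (possibly written) data to the guest. */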
1616 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1617 int count, int copy)
1618 {
1619 struct target_iovec *target_vec;
1620 struct iovec *vec;
1621 abi_ulong total_len, max_len;
1622 int i;
1623
1624 if (count == 0) {
1625 errno = 0;
1626 return NULL;
1627 }
1628 if (count < 0 || count > IOV_MAX) {
1629 errno = EINVAL;
1630 return NULL;
1631 }
1632
1633 vec = calloc(count, sizeof(struct iovec));
1634 if (vec == NULL) {
1635 errno = ENOMEM;
1636 return NULL;
1637 }
1638
1639 target_vec = lock_user(VERIFY_READ, target_addr,
1640 count * sizeof(struct target_iovec), 1);
1641 if (target_vec == NULL) {
1642 errno = EFAULT;
1643 goto fail2;
1644 }
1645
1646 /* ??? If host page size > target page size, this will result in a
1647 value larger than what we can actually support. */
1648 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1649 total_len = 0;
1650
1651 for (i = 0; i < count; i++) {
1652 abi_ulong base = tswapal(target_vec[i].iov_base);
1653 abi_long len = tswapal(target_vec[i].iov_len);
1654
1655 if (len < 0) {
1656 errno = EINVAL;
1657 goto fail;
1658 } else if (len == 0) {
1659 /* Zero length pointer is ignored. */
1660 vec[i].iov_base = 0;
1661 } else {
1662 vec[i].iov_base = lock_user(type, base, len, copy);
1663 if (!vec[i].iov_base) {
1664 errno = EFAULT;
1665 goto fail;
1666 }
1667 if (len > max_len - total_len) {
1668 len = max_len - total_len;
1669 }
1670 }
1671 vec[i].iov_len = len;
1672 total_len += len;
1673 }
1674
1675 unlock_user(target_vec, target_addr, 0);
1676 return vec;
1677
1678 fail:
1679 free(vec);
1680 fail2:
1681 unlock_user(target_vec, target_addr, 0);
1682 return NULL;
1683 }
1684
1685 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1686 int count, int copy)
1687 {
1688 struct target_iovec *target_vec;
1689 int i;
1690
1691 target_vec = lock_user(VERIFY_READ, target_addr,
1692 count * sizeof(struct target_iovec), 1);
1693 if (target_vec) {
1694 for (i = 0; i < count; i++) {
1695 abi_ulong base = tswapal(target_vec[i].iov_base);
1696             abi_long len = tswapal(target_vec[i].iov_len);
1697 if (len < 0) {
1698 break;
1699 }
1700 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1701 }
1702 unlock_user(target_vec, target_addr, 0);
1703 }
1704
1705 free(vec);
1706 }
1707
1708 /* do_socket() Must return target values and target errnos. */
1709 static abi_long do_socket(int domain, int type, int protocol)
1710 {
1711 #if defined(TARGET_MIPS)
1712 switch(type) {
1713 case TARGET_SOCK_DGRAM:
1714 type = SOCK_DGRAM;
1715 break;
1716 case TARGET_SOCK_STREAM:
1717 type = SOCK_STREAM;
1718 break;
1719 case TARGET_SOCK_RAW:
1720 type = SOCK_RAW;
1721 break;
1722 case TARGET_SOCK_RDM:
1723 type = SOCK_RDM;
1724 break;
1725 case TARGET_SOCK_SEQPACKET:
1726 type = SOCK_SEQPACKET;
1727 break;
1728 case TARGET_SOCK_PACKET:
1729 type = SOCK_PACKET;
1730 break;
1731 }
1732 #endif
1733 if (domain == PF_NETLINK)
1734         return -EAFNOSUPPORT; /* no NETLINK socket connections possible */
1735 return get_errno(socket(domain, type, protocol));
1736 }
1737
1738 /* do_bind() Must return target values and target errnos. */
1739 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1740 socklen_t addrlen)
1741 {
1742 void *addr;
1743 abi_long ret;
1744
1745 if ((int)addrlen < 0) {
1746 return -TARGET_EINVAL;
1747 }
1748
1749 addr = alloca(addrlen+1);
1750
1751 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1752 if (ret)
1753 return ret;
1754
1755 return get_errno(bind(sockfd, addr, addrlen));
1756 }
1757
1758 /* do_connect() Must return target values and target errnos. */
1759 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1760 socklen_t addrlen)
1761 {
1762 void *addr;
1763 abi_long ret;
1764
1765 if ((int)addrlen < 0) {
1766 return -TARGET_EINVAL;
1767 }
1768
1769 addr = alloca(addrlen);
1770
1771 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1772 if (ret)
1773 return ret;
1774
1775 return get_errno(connect(sockfd, addr, addrlen));
1776 }
1777
1778 /* do_sendrecvmsg() Must return target values and target errnos. */
1779 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1780 int flags, int send)
1781 {
1782 abi_long ret, len;
1783 struct target_msghdr *msgp;
1784 struct msghdr msg;
1785 int count;
1786 struct iovec *vec;
1787 abi_ulong target_vec;
1788
1789 /* FIXME */
1790 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1791 msgp,
1792 target_msg,
1793 send ? 1 : 0))
1794 return -TARGET_EFAULT;
1795 if (msgp->msg_name) {
1796 msg.msg_namelen = tswap32(msgp->msg_namelen);
1797 msg.msg_name = alloca(msg.msg_namelen);
1798 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1799 msg.msg_namelen);
1800 if (ret) {
1801 goto out2;
1802 }
1803 } else {
1804 msg.msg_name = NULL;
1805 msg.msg_namelen = 0;
1806 }
1807 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1808 msg.msg_control = alloca(msg.msg_controllen);
1809 msg.msg_flags = tswap32(msgp->msg_flags);
1810
1811 count = tswapal(msgp->msg_iovlen);
1812 target_vec = tswapal(msgp->msg_iov);
1813 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1814 target_vec, count, send);
1815 if (vec == NULL) {
1816 ret = -host_to_target_errno(errno);
1817 goto out2;
1818 }
1819 msg.msg_iovlen = count;
1820 msg.msg_iov = vec;
1821
1822 if (send) {
1823 ret = target_to_host_cmsg(&msg, msgp);
1824 if (ret == 0)
1825 ret = get_errno(sendmsg(fd, &msg, flags));
1826 } else {
1827 ret = get_errno(recvmsg(fd, &msg, flags));
1828 if (!is_error(ret)) {
1829 len = ret;
1830 ret = host_to_target_cmsg(msgp, &msg);
1831 if (!is_error(ret)) {
1832 msgp->msg_namelen = tswap32(msg.msg_namelen);
1833 if (msg.msg_name != NULL) {
1834 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1835 msg.msg_name, msg.msg_namelen);
1836 if (ret) {
1837 goto out;
1838 }
1839 }
1840
1841 ret = len;
1842 }
1843 }
1844 }
1845
1846 out:
1847 unlock_iovec(vec, target_vec, count, !send);
1848 out2:
1849 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1850 return ret;
1851 }
1852
1853 /* If we don't have a system accept4() then just call accept.
1854 * The callsites to do_accept4() will ensure that they don't
1855 * pass a non-zero flags argument in this config.
1856 */
1857 #ifndef CONFIG_ACCEPT4
1858 static inline int accept4(int sockfd, struct sockaddr *addr,
1859 socklen_t *addrlen, int flags)
1860 {
1861 assert(flags == 0);
1862 return accept(sockfd, addr, addrlen);
1863 }
1864 #endif
1865
1866 /* do_accept4() Must return target values and target errnos. */
1867 static abi_long do_accept4(int fd, abi_ulong target_addr,
1868 abi_ulong target_addrlen_addr, int flags)
1869 {
1870 socklen_t addrlen;
1871 void *addr;
1872 abi_long ret;
1873
1874 if (target_addr == 0) {
1875 return get_errno(accept4(fd, NULL, NULL, flags));
1876 }
1877
1878 /* linux returns EINVAL if addrlen pointer is invalid */
1879 if (get_user_u32(addrlen, target_addrlen_addr))
1880 return -TARGET_EINVAL;
1881
1882 if ((int)addrlen < 0) {
1883 return -TARGET_EINVAL;
1884 }
1885
1886 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1887 return -TARGET_EINVAL;
1888
1889 addr = alloca(addrlen);
1890
1891 ret = get_errno(accept4(fd, addr, &addrlen, flags));
1892 if (!is_error(ret)) {
1893 host_to_target_sockaddr(target_addr, addr, addrlen);
1894 if (put_user_u32(addrlen, target_addrlen_addr))
1895 ret = -TARGET_EFAULT;
1896 }
1897 return ret;
1898 }
1899
1900 /* do_getpeername() Must return target values and target errnos. */
1901 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1902 abi_ulong target_addrlen_addr)
1903 {
1904 socklen_t addrlen;
1905 void *addr;
1906 abi_long ret;
1907
1908 if (get_user_u32(addrlen, target_addrlen_addr))
1909 return -TARGET_EFAULT;
1910
1911 if ((int)addrlen < 0) {
1912 return -TARGET_EINVAL;
1913 }
1914
1915 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1916 return -TARGET_EFAULT;
1917
1918 addr = alloca(addrlen);
1919
1920 ret = get_errno(getpeername(fd, addr, &addrlen));
1921 if (!is_error(ret)) {
1922 host_to_target_sockaddr(target_addr, addr, addrlen);
1923 if (put_user_u32(addrlen, target_addrlen_addr))
1924 ret = -TARGET_EFAULT;
1925 }
1926 return ret;
1927 }
1928
1929 /* do_getsockname() Must return target values and target errnos. */
1930 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1931 abi_ulong target_addrlen_addr)
1932 {
1933 socklen_t addrlen;
1934 void *addr;
1935 abi_long ret;
1936
1937 if (get_user_u32(addrlen, target_addrlen_addr))
1938 return -TARGET_EFAULT;
1939
1940 if ((int)addrlen < 0) {
1941 return -TARGET_EINVAL;
1942 }
1943
1944 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1945 return -TARGET_EFAULT;
1946
1947 addr = alloca(addrlen);
1948
1949 ret = get_errno(getsockname(fd, addr, &addrlen));
1950 if (!is_error(ret)) {
1951 host_to_target_sockaddr(target_addr, addr, addrlen);
1952 if (put_user_u32(addrlen, target_addrlen_addr))
1953 ret = -TARGET_EFAULT;
1954 }
1955 return ret;
1956 }
1957
1958 /* do_socketpair() Must return target values and target errnos. */
1959 static abi_long do_socketpair(int domain, int type, int protocol,
1960 abi_ulong target_tab_addr)
1961 {
1962 int tab[2];
1963 abi_long ret;
1964
1965 ret = get_errno(socketpair(domain, type, protocol, tab));
1966 if (!is_error(ret)) {
1967 if (put_user_s32(tab[0], target_tab_addr)
1968 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1969 ret = -TARGET_EFAULT;
1970 }
1971 return ret;
1972 }
1973
1974 /* do_sendto() Must return target values and target errnos. */
1975 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1976 abi_ulong target_addr, socklen_t addrlen)
1977 {
1978 void *addr;
1979 void *host_msg;
1980 abi_long ret;
1981
1982 if ((int)addrlen < 0) {
1983 return -TARGET_EINVAL;
1984 }
1985
1986 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1987 if (!host_msg)
1988 return -TARGET_EFAULT;
1989 if (target_addr) {
1990 addr = alloca(addrlen);
1991 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1992 if (ret) {
1993 unlock_user(host_msg, msg, 0);
1994 return ret;
1995 }
1996 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1997 } else {
1998 ret = get_errno(send(fd, host_msg, len, flags));
1999 }
2000 unlock_user(host_msg, msg, 0);
2001 return ret;
2002 }
2003
2004 /* do_recvfrom() Must return target values and target errnos. */
2005 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2006 abi_ulong target_addr,
2007 abi_ulong target_addrlen)
2008 {
2009 socklen_t addrlen;
2010 void *addr;
2011 void *host_msg;
2012 abi_long ret;
2013
2014 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2015 if (!host_msg)
2016 return -TARGET_EFAULT;
2017 if (target_addr) {
2018 if (get_user_u32(addrlen, target_addrlen)) {
2019 ret = -TARGET_EFAULT;
2020 goto fail;
2021 }
2022 if ((int)addrlen < 0) {
2023 ret = -TARGET_EINVAL;
2024 goto fail;
2025 }
2026 addr = alloca(addrlen);
2027 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2028 } else {
2029 addr = NULL; /* To keep compiler quiet. */
2030 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2031 }
2032 if (!is_error(ret)) {
2033 if (target_addr) {
2034 host_to_target_sockaddr(target_addr, addr, addrlen);
2035 if (put_user_u32(addrlen, target_addrlen)) {
2036 ret = -TARGET_EFAULT;
2037 goto fail;
2038 }
2039 }
2040 unlock_user(host_msg, msg, len);
2041 } else {
2042 fail:
2043 unlock_user(host_msg, msg, 0);
2044 }
2045 return ret;
2046 }
2047
2048 #ifdef TARGET_NR_socketcall
2049 /* do_socketcall() Must return target values and target errnos. */
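/* Illustrative sketch (hypothetical, not part of the original file): a
 * guest libc built for a socketcall-based target funnels, say,
 * connect(fd, sa, len) through the multiplexer roughly as
 *
 *     unsigned long args[3] = { fd, (unsigned long)sa, len };
 *     syscall(__NR_socketcall, SOCKOP_connect, args);
 *
 * (SOCKOP_connect being this file's name for the kernel's SYS_CONNECT
 * value), so each case below fetches its arguments as consecutive
 * abi_ulong slots starting at vptr, with n == sizeof(abi_ulong) as the
 * slot stride.
 */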
2050 static abi_long do_socketcall(int num, abi_ulong vptr)
2051 {
2052 abi_long ret;
2053 const int n = sizeof(abi_ulong);
2054
2055 switch(num) {
2056 case SOCKOP_socket:
2057 {
2058 abi_ulong domain, type, protocol;
2059
2060 if (get_user_ual(domain, vptr)
2061 || get_user_ual(type, vptr + n)
2062 || get_user_ual(protocol, vptr + 2 * n))
2063 return -TARGET_EFAULT;
2064
2065 ret = do_socket(domain, type, protocol);
2066 }
2067 break;
2068 case SOCKOP_bind:
2069 {
2070 abi_ulong sockfd;
2071 abi_ulong target_addr;
2072 socklen_t addrlen;
2073
2074 if (get_user_ual(sockfd, vptr)
2075 || get_user_ual(target_addr, vptr + n)
2076 || get_user_ual(addrlen, vptr + 2 * n))
2077 return -TARGET_EFAULT;
2078
2079 ret = do_bind(sockfd, target_addr, addrlen);
2080 }
2081 break;
2082 case SOCKOP_connect:
2083 {
2084 abi_ulong sockfd;
2085 abi_ulong target_addr;
2086 socklen_t addrlen;
2087
2088 if (get_user_ual(sockfd, vptr)
2089 || get_user_ual(target_addr, vptr + n)
2090 || get_user_ual(addrlen, vptr + 2 * n))
2091 return -TARGET_EFAULT;
2092
2093 ret = do_connect(sockfd, target_addr, addrlen);
2094 }
2095 break;
2096 case SOCKOP_listen:
2097 {
2098 abi_ulong sockfd, backlog;
2099
2100 if (get_user_ual(sockfd, vptr)
2101 || get_user_ual(backlog, vptr + n))
2102 return -TARGET_EFAULT;
2103
2104 ret = get_errno(listen(sockfd, backlog));
2105 }
2106 break;
2107 case SOCKOP_accept:
2108 {
2109 abi_ulong sockfd;
2110 abi_ulong target_addr, target_addrlen;
2111
2112 if (get_user_ual(sockfd, vptr)
2113 || get_user_ual(target_addr, vptr + n)
2114 || get_user_ual(target_addrlen, vptr + 2 * n))
2115 return -TARGET_EFAULT;
2116
2117 ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2118 }
2119 break;
2120 case SOCKOP_getsockname:
2121 {
2122 abi_ulong sockfd;
2123 abi_ulong target_addr, target_addrlen;
2124
2125 if (get_user_ual(sockfd, vptr)
2126 || get_user_ual(target_addr, vptr + n)
2127 || get_user_ual(target_addrlen, vptr + 2 * n))
2128 return -TARGET_EFAULT;
2129
2130 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2131 }
2132 break;
2133 case SOCKOP_getpeername:
2134 {
2135 abi_ulong sockfd;
2136 abi_ulong target_addr, target_addrlen;
2137
2138 if (get_user_ual(sockfd, vptr)
2139 || get_user_ual(target_addr, vptr + n)
2140 || get_user_ual(target_addrlen, vptr + 2 * n))
2141 return -TARGET_EFAULT;
2142
2143 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2144 }
2145 break;
2146 case SOCKOP_socketpair:
2147 {
2148 abi_ulong domain, type, protocol;
2149 abi_ulong tab;
2150
2151 if (get_user_ual(domain, vptr)
2152 || get_user_ual(type, vptr + n)
2153 || get_user_ual(protocol, vptr + 2 * n)
2154 || get_user_ual(tab, vptr + 3 * n))
2155 return -TARGET_EFAULT;
2156
2157 ret = do_socketpair(domain, type, protocol, tab);
2158 }
2159 break;
2160 case SOCKOP_send:
2161 {
2162 abi_ulong sockfd;
2163 abi_ulong msg;
2164 size_t len;
2165 abi_ulong flags;
2166
2167 if (get_user_ual(sockfd, vptr)
2168 || get_user_ual(msg, vptr + n)
2169 || get_user_ual(len, vptr + 2 * n)
2170 || get_user_ual(flags, vptr + 3 * n))
2171 return -TARGET_EFAULT;
2172
2173 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2174 }
2175 break;
2176 case SOCKOP_recv:
2177 {
2178 abi_ulong sockfd;
2179 abi_ulong msg;
2180 size_t len;
2181 abi_ulong flags;
2182
2183 if (get_user_ual(sockfd, vptr)
2184 || get_user_ual(msg, vptr + n)
2185 || get_user_ual(len, vptr + 2 * n)
2186 || get_user_ual(flags, vptr + 3 * n))
2187 return -TARGET_EFAULT;
2188
2189 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2190 }
2191 break;
2192 case SOCKOP_sendto:
2193 {
2194 abi_ulong sockfd;
2195 abi_ulong msg;
2196 size_t len;
2197 abi_ulong flags;
2198 abi_ulong addr;
2199 socklen_t addrlen;
2200
2201 if (get_user_ual(sockfd, vptr)
2202 || get_user_ual(msg, vptr + n)
2203 || get_user_ual(len, vptr + 2 * n)
2204 || get_user_ual(flags, vptr + 3 * n)
2205 || get_user_ual(addr, vptr + 4 * n)
2206 || get_user_ual(addrlen, vptr + 5 * n))
2207 return -TARGET_EFAULT;
2208
2209 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2210 }
2211 break;
2212 case SOCKOP_recvfrom:
2213 {
2214 abi_ulong sockfd;
2215 abi_ulong msg;
2216 size_t len;
2217 abi_ulong flags;
2218 abi_ulong addr;
2219 socklen_t addrlen;
2220
2221 if (get_user_ual(sockfd, vptr)
2222 || get_user_ual(msg, vptr + n)
2223 || get_user_ual(len, vptr + 2 * n)
2224 || get_user_ual(flags, vptr + 3 * n)
2225 || get_user_ual(addr, vptr + 4 * n)
2226 || get_user_ual(addrlen, vptr + 5 * n))
2227 return -TARGET_EFAULT;
2228
2229 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2230 }
2231 break;
2232 case SOCKOP_shutdown:
2233 {
2234 abi_ulong sockfd, how;
2235
2236 if (get_user_ual(sockfd, vptr)
2237 || get_user_ual(how, vptr + n))
2238 return -TARGET_EFAULT;
2239
2240 ret = get_errno(shutdown(sockfd, how));
2241 }
2242 break;
2243 case SOCKOP_sendmsg:
2244 case SOCKOP_recvmsg:
2245 {
2246 abi_ulong fd;
2247 abi_ulong target_msg;
2248 abi_ulong flags;
2249
2250 if (get_user_ual(fd, vptr)
2251 || get_user_ual(target_msg, vptr + n)
2252 || get_user_ual(flags, vptr + 2 * n))
2253 return -TARGET_EFAULT;
2254
2255 ret = do_sendrecvmsg(fd, target_msg, flags,
2256 (num == SOCKOP_sendmsg));
2257 }
2258 break;
2259 case SOCKOP_setsockopt:
2260 {
2261 abi_ulong sockfd;
2262 abi_ulong level;
2263 abi_ulong optname;
2264 abi_ulong optval;
2265 socklen_t optlen;
2266
2267 if (get_user_ual(sockfd, vptr)
2268 || get_user_ual(level, vptr + n)
2269 || get_user_ual(optname, vptr + 2 * n)
2270 || get_user_ual(optval, vptr + 3 * n)
2271 || get_user_ual(optlen, vptr + 4 * n))
2272 return -TARGET_EFAULT;
2273
2274 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2275 }
2276 break;
2277 case SOCKOP_getsockopt:
2278 {
2279 abi_ulong sockfd;
2280 abi_ulong level;
2281 abi_ulong optname;
2282 abi_ulong optval;
2283 socklen_t optlen;
2284
2285 if (get_user_ual(sockfd, vptr)
2286 || get_user_ual(level, vptr + n)
2287 || get_user_ual(optname, vptr + 2 * n)
2288 || get_user_ual(optval, vptr + 3 * n)
2289 || get_user_ual(optlen, vptr + 4 * n))
2290 return -TARGET_EFAULT;
2291
2292 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2293 }
2294 break;
2295 default:
2296 gemu_log("Unsupported socketcall: %d\n", num);
2297 ret = -TARGET_ENOSYS;
2298 break;
2299 }
2300 return ret;
2301 }
2302 #endif
2303
2304 #define N_SHM_REGIONS 32
2305
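/* shm_regions[] records the guest address and size of each segment mapped
 * by do_shmat() so that do_shmdt() can later clear the corresponding page
 * flags; a zero start marks a free slot.
 */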
2306 static struct shm_region {
2307 abi_ulong start;
2308 abi_ulong size;
2309 } shm_regions[N_SHM_REGIONS];
2310
2311 struct target_ipc_perm
2312 {
2313 abi_long __key;
2314 abi_ulong uid;
2315 abi_ulong gid;
2316 abi_ulong cuid;
2317 abi_ulong cgid;
2318 unsigned short int mode;
2319 unsigned short int __pad1;
2320 unsigned short int __seq;
2321 unsigned short int __pad2;
2322 abi_ulong __unused1;
2323 abi_ulong __unused2;
2324 };
2325
2326 struct target_semid_ds
2327 {
2328 struct target_ipc_perm sem_perm;
2329 abi_ulong sem_otime;
2330 abi_ulong __unused1;
2331 abi_ulong sem_ctime;
2332 abi_ulong __unused2;
2333 abi_ulong sem_nsems;
2334 abi_ulong __unused3;
2335 abi_ulong __unused4;
2336 };
2337
2338 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2339 abi_ulong target_addr)
2340 {
2341 struct target_ipc_perm *target_ip;
2342 struct target_semid_ds *target_sd;
2343
2344 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2345 return -TARGET_EFAULT;
2346 target_ip = &(target_sd->sem_perm);
2347 host_ip->__key = tswapal(target_ip->__key);
2348 host_ip->uid = tswapal(target_ip->uid);
2349 host_ip->gid = tswapal(target_ip->gid);
2350 host_ip->cuid = tswapal(target_ip->cuid);
2351 host_ip->cgid = tswapal(target_ip->cgid);
2352 host_ip->mode = tswap16(target_ip->mode);
2353 unlock_user_struct(target_sd, target_addr, 0);
2354 return 0;
2355 }
2356
2357 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2358 struct ipc_perm *host_ip)
2359 {
2360 struct target_ipc_perm *target_ip;
2361 struct target_semid_ds *target_sd;
2362
2363 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2364 return -TARGET_EFAULT;
2365 target_ip = &(target_sd->sem_perm);
2366 target_ip->__key = tswapal(host_ip->__key);
2367 target_ip->uid = tswapal(host_ip->uid);
2368 target_ip->gid = tswapal(host_ip->gid);
2369 target_ip->cuid = tswapal(host_ip->cuid);
2370 target_ip->cgid = tswapal(host_ip->cgid);
2371 target_ip->mode = tswap16(host_ip->mode);
2372 unlock_user_struct(target_sd, target_addr, 1);
2373 return 0;
2374 }
2375
2376 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2377 abi_ulong target_addr)
2378 {
2379 struct target_semid_ds *target_sd;
2380
2381 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2382 return -TARGET_EFAULT;
2383 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2384 return -TARGET_EFAULT;
2385 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2386 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2387 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2388 unlock_user_struct(target_sd, target_addr, 0);
2389 return 0;
2390 }
2391
2392 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2393 struct semid_ds *host_sd)
2394 {
2395 struct target_semid_ds *target_sd;
2396
2397 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2398 return -TARGET_EFAULT;
2399 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2400 return -TARGET_EFAULT;
2401 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2402 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2403 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2404 unlock_user_struct(target_sd, target_addr, 1);
2405 return 0;
2406 }
2407
2408 struct target_seminfo {
2409 int semmap;
2410 int semmni;
2411 int semmns;
2412 int semmnu;
2413 int semmsl;
2414 int semopm;
2415 int semume;
2416 int semusz;
2417 int semvmx;
2418 int semaem;
2419 };
2420
2421 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2422 struct seminfo *host_seminfo)
2423 {
2424 struct target_seminfo *target_seminfo;
2425 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2426 return -TARGET_EFAULT;
2427 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2428 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2429 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2430 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2431 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2432 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2433 __put_user(host_seminfo->semume, &target_seminfo->semume);
2434 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2435 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2436 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2437 unlock_user_struct(target_seminfo, target_addr, 1);
2438 return 0;
2439 }
2440
2441 union semun {
2442 int val;
2443 struct semid_ds *buf;
2444 unsigned short *array;
2445 struct seminfo *__buf;
2446 };
2447
2448 union target_semun {
2449 int val;
2450 abi_ulong buf;
2451 abi_ulong array;
2452 abi_ulong __buf;
2453 };
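/* Guest-side sketch of the GETALL/SETALL path handled below (hypothetical
 * example, not part of the original file; union semun is declared by the
 * caller, as Linux requires):
 *
 *     unsigned short vals[4];
 *     union semun su = { .array = vals };
 *     semctl(semid, 0, GETALL, su);
 *
 * target_to_host_semarray() and host_to_target_semarray() copy the value
 * array between the guest buffer and a temporary host array around the
 * host semctl() call.
 */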
2454
2455 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2456 abi_ulong target_addr)
2457 {
2458 int nsems;
2459 unsigned short *array;
2460 union semun semun;
2461 struct semid_ds semid_ds;
2462 int i, ret;
2463
2464 semun.buf = &semid_ds;
2465
2466 ret = semctl(semid, 0, IPC_STAT, semun);
2467 if (ret == -1)
2468 return get_errno(ret);
2469
2470 nsems = semid_ds.sem_nsems;
2471
2472 *host_array = malloc(nsems*sizeof(unsigned short));
2473 array = lock_user(VERIFY_READ, target_addr,
2474 nsems*sizeof(unsigned short), 1);
2475 if (!array)
2476 return -TARGET_EFAULT;
2477
2478 for(i=0; i<nsems; i++) {
2479 __get_user((*host_array)[i], &array[i]);
2480 }
2481 unlock_user(array, target_addr, 0);
2482
2483 return 0;
2484 }
2485
2486 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2487 unsigned short **host_array)
2488 {
2489 int nsems;
2490 unsigned short *array;
2491 union semun semun;
2492 struct semid_ds semid_ds;
2493 int i, ret;
2494
2495 semun.buf = &semid_ds;
2496
2497 ret = semctl(semid, 0, IPC_STAT, semun);
2498 if (ret == -1)
2499 return get_errno(ret);
2500
2501 nsems = semid_ds.sem_nsems;
2502
2503 array = lock_user(VERIFY_WRITE, target_addr,
2504 nsems*sizeof(unsigned short), 0);
2505 if (!array)
2506 return -TARGET_EFAULT;
2507
2508 for(i=0; i<nsems; i++) {
2509 __put_user((*host_array)[i], &array[i]);
2510 }
2511 free(*host_array);
2512 unlock_user(array, target_addr, 1);
2513
2514 return 0;
2515 }
2516
2517 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2518 union target_semun target_su)
2519 {
2520 union semun arg;
2521 struct semid_ds dsarg;
2522 unsigned short *array = NULL;
2523 struct seminfo seminfo;
2524 abi_long ret = -TARGET_EINVAL;
2525 abi_long err;
2526 cmd &= 0xff;
2527
2528 switch( cmd ) {
2529 case GETVAL:
2530 case SETVAL:
2531 arg.val = tswap32(target_su.val);
2532 ret = get_errno(semctl(semid, semnum, cmd, arg));
2533 target_su.val = tswap32(arg.val);
2534 break;
2535 case GETALL:
2536 case SETALL:
2537 err = target_to_host_semarray(semid, &array, target_su.array);
2538 if (err)
2539 return err;
2540 arg.array = array;
2541 ret = get_errno(semctl(semid, semnum, cmd, arg));
2542 err = host_to_target_semarray(semid, target_su.array, &array);
2543 if (err)
2544 return err;
2545 break;
2546 case IPC_STAT:
2547 case IPC_SET:
2548 case SEM_STAT:
2549 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2550 if (err)
2551 return err;
2552 arg.buf = &dsarg;
2553 ret = get_errno(semctl(semid, semnum, cmd, arg));
2554 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2555 if (err)
2556 return err;
2557 break;
2558 case IPC_INFO:
2559 case SEM_INFO:
2560 arg.__buf = &seminfo;
2561 ret = get_errno(semctl(semid, semnum, cmd, arg));
2562 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2563 if (err)
2564 return err;
2565 break;
2566 case IPC_RMID:
2567 case GETPID:
2568 case GETNCNT:
2569 case GETZCNT:
2570 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2571 break;
2572 }
2573
2574 return ret;
2575 }
2576
2577 struct target_sembuf {
2578 unsigned short sem_num;
2579 short sem_op;
2580 short sem_flg;
2581 };
2582
2583 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2584 abi_ulong target_addr,
2585 unsigned nsops)
2586 {
2587 struct target_sembuf *target_sembuf;
2588 int i;
2589
2590 target_sembuf = lock_user(VERIFY_READ, target_addr,
2591 nsops*sizeof(struct target_sembuf), 1);
2592 if (!target_sembuf)
2593 return -TARGET_EFAULT;
2594
2595 for(i=0; i<nsops; i++) {
2596 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2597 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2598 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2599 }
2600
2601 unlock_user(target_sembuf, target_addr, 0);
2602
2603 return 0;
2604 }
2605
2606 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2607 {
2608 struct sembuf sops[nsops];
2609
2610 if (target_to_host_sembuf(sops, ptr, nsops))
2611 return -TARGET_EFAULT;
2612
2613 return get_errno(semop(semid, sops, nsops));
2614 }
2615
2616 struct target_msqid_ds
2617 {
2618 struct target_ipc_perm msg_perm;
2619 abi_ulong msg_stime;
2620 #if TARGET_ABI_BITS == 32
2621 abi_ulong __unused1;
2622 #endif
2623 abi_ulong msg_rtime;
2624 #if TARGET_ABI_BITS == 32
2625 abi_ulong __unused2;
2626 #endif
2627 abi_ulong msg_ctime;
2628 #if TARGET_ABI_BITS == 32
2629 abi_ulong __unused3;
2630 #endif
2631 abi_ulong __msg_cbytes;
2632 abi_ulong msg_qnum;
2633 abi_ulong msg_qbytes;
2634 abi_ulong msg_lspid;
2635 abi_ulong msg_lrpid;
2636 abi_ulong __unused4;
2637 abi_ulong __unused5;
2638 };
2639
2640 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2641 abi_ulong target_addr)
2642 {
2643 struct target_msqid_ds *target_md;
2644
2645 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2646 return -TARGET_EFAULT;
2647 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2648 return -TARGET_EFAULT;
2649 host_md->msg_stime = tswapal(target_md->msg_stime);
2650 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2651 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2652 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2653 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2654 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2655 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2656 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2657 unlock_user_struct(target_md, target_addr, 0);
2658 return 0;
2659 }
2660
2661 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2662 struct msqid_ds *host_md)
2663 {
2664 struct target_msqid_ds *target_md;
2665
2666 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2667 return -TARGET_EFAULT;
2668 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2669 return -TARGET_EFAULT;
2670 target_md->msg_stime = tswapal(host_md->msg_stime);
2671 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2672 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2673 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2674 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2675 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2676 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2677 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2678 unlock_user_struct(target_md, target_addr, 1);
2679 return 0;
2680 }
2681
2682 struct target_msginfo {
2683 int msgpool;
2684 int msgmap;
2685 int msgmax;
2686 int msgmnb;
2687 int msgmni;
2688 int msgssz;
2689 int msgtql;
2690 unsigned short int msgseg;
2691 };
2692
2693 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2694 struct msginfo *host_msginfo)
2695 {
2696 struct target_msginfo *target_msginfo;
2697 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2698 return -TARGET_EFAULT;
2699 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2700 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2701 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2702 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2703 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2704 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2705 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2706 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2707 unlock_user_struct(target_msginfo, target_addr, 1);
2708 return 0;
2709 }
2710
2711 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2712 {
2713 struct msqid_ds dsarg;
2714 struct msginfo msginfo;
2715 abi_long ret = -TARGET_EINVAL;
2716
2717 cmd &= 0xff;
2718
2719 switch (cmd) {
2720 case IPC_STAT:
2721 case IPC_SET:
2722 case MSG_STAT:
2723 if (target_to_host_msqid_ds(&dsarg,ptr))
2724 return -TARGET_EFAULT;
2725 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2726 if (host_to_target_msqid_ds(ptr,&dsarg))
2727 return -TARGET_EFAULT;
2728 break;
2729 case IPC_RMID:
2730 ret = get_errno(msgctl(msgid, cmd, NULL));
2731 break;
2732 case IPC_INFO:
2733 case MSG_INFO:
2734 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2735 if (host_to_target_msginfo(ptr, &msginfo))
2736 return -TARGET_EFAULT;
2737 break;
2738 }
2739
2740 return ret;
2741 }
2742
2743 struct target_msgbuf {
2744 abi_long mtype;
2745 char mtext[1];
2746 };
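/* Guest-side sketch of a message-queue round trip served by do_msgsnd()
 * and do_msgrcv() below (hypothetical example, not part of the original
 * file):
 *
 *     struct { long mtype; char mtext[64]; } m = { 1, "ping" };
 *     msgsnd(msqid, &m, sizeof(m.mtext), 0);
 *     msgrcv(msqid, &m, sizeof(m.mtext), 1, 0);
 *
 * Only mtype needs byte swapping; mtext is copied verbatim between guest
 * and host buffers.
 */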
2747
2748 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2749 unsigned int msgsz, int msgflg)
2750 {
2751 struct target_msgbuf *target_mb;
2752 struct msgbuf *host_mb;
2753 abi_long ret = 0;
2754
2755 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2756 return -TARGET_EFAULT;
2757 host_mb = malloc(msgsz+sizeof(long));
2758 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2759 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2760 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2761 free(host_mb);
2762 unlock_user_struct(target_mb, msgp, 0);
2763
2764 return ret;
2765 }
2766
2767 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2768 unsigned int msgsz, abi_long msgtyp,
2769 int msgflg)
2770 {
2771 struct target_msgbuf *target_mb;
2772 char *target_mtext;
2773 struct msgbuf *host_mb;
2774 abi_long ret = 0;
2775
2776 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2777 return -TARGET_EFAULT;
2778
2779 host_mb = g_malloc(msgsz+sizeof(long));
2780 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2781
2782 if (ret > 0) {
2783 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2784 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2785 if (!target_mtext) {
2786 ret = -TARGET_EFAULT;
2787 goto end;
2788 }
2789 memcpy(target_mb->mtext, host_mb->mtext, ret);
2790 unlock_user(target_mtext, target_mtext_addr, ret);
2791 }
2792
2793 target_mb->mtype = tswapal(host_mb->mtype);
2794
2795 end:
2796 if (target_mb)
2797 unlock_user_struct(target_mb, msgp, 1);
2798 g_free(host_mb);
2799 return ret;
2800 }
2801
2802 struct target_shmid_ds
2803 {
2804 struct target_ipc_perm shm_perm;
2805 abi_ulong shm_segsz;
2806 abi_ulong shm_atime;
2807 #if TARGET_ABI_BITS == 32
2808 abi_ulong __unused1;
2809 #endif
2810 abi_ulong shm_dtime;
2811 #if TARGET_ABI_BITS == 32
2812 abi_ulong __unused2;
2813 #endif
2814 abi_ulong shm_ctime;
2815 #if TARGET_ABI_BITS == 32
2816 abi_ulong __unused3;
2817 #endif
2818 int shm_cpid;
2819 int shm_lpid;
2820 abi_ulong shm_nattch;
2821 unsigned long int __unused4;
2822 unsigned long int __unused5;
2823 };
2824
2825 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2826 abi_ulong target_addr)
2827 {
2828 struct target_shmid_ds *target_sd;
2829
2830 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2831 return -TARGET_EFAULT;
2832 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2833 return -TARGET_EFAULT;
2834 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2835 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2836 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2837 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2838 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2839 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2840 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2841 unlock_user_struct(target_sd, target_addr, 0);
2842 return 0;
2843 }
2844
2845 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2846 struct shmid_ds *host_sd)
2847 {
2848 struct target_shmid_ds *target_sd;
2849
2850 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2851 return -TARGET_EFAULT;
2852 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2853 return -TARGET_EFAULT;
2854 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2855 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2856 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2857 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2858 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2859 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2860 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2861 unlock_user_struct(target_sd, target_addr, 1);
2862 return 0;
2863 }
2864
2865 struct target_shminfo {
2866 abi_ulong shmmax;
2867 abi_ulong shmmin;
2868 abi_ulong shmmni;
2869 abi_ulong shmseg;
2870 abi_ulong shmall;
2871 };
2872
2873 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2874 struct shminfo *host_shminfo)
2875 {
2876 struct target_shminfo *target_shminfo;
2877 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2878 return -TARGET_EFAULT;
2879 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2880 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2881 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2882 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2883 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2884 unlock_user_struct(target_shminfo, target_addr, 1);
2885 return 0;
2886 }
2887
2888 struct target_shm_info {
2889 int used_ids;
2890 abi_ulong shm_tot;
2891 abi_ulong shm_rss;
2892 abi_ulong shm_swp;
2893 abi_ulong swap_attempts;
2894 abi_ulong swap_successes;
2895 };
2896
2897 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2898 struct shm_info *host_shm_info)
2899 {
2900 struct target_shm_info *target_shm_info;
2901 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2902 return -TARGET_EFAULT;
2903 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2904 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2905 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2906 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2907 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2908 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2909 unlock_user_struct(target_shm_info, target_addr, 1);
2910 return 0;
2911 }
2912
2913 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2914 {
2915 struct shmid_ds dsarg;
2916 struct shminfo shminfo;
2917 struct shm_info shm_info;
2918 abi_long ret = -TARGET_EINVAL;
2919
2920 cmd &= 0xff;
2921
2922 switch(cmd) {
2923 case IPC_STAT:
2924 case IPC_SET:
2925 case SHM_STAT:
2926 if (target_to_host_shmid_ds(&dsarg, buf))
2927 return -TARGET_EFAULT;
2928 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2929 if (host_to_target_shmid_ds(buf, &dsarg))
2930 return -TARGET_EFAULT;
2931 break;
2932 case IPC_INFO:
2933 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2934 if (host_to_target_shminfo(buf, &shminfo))
2935 return -TARGET_EFAULT;
2936 break;
2937 case SHM_INFO:
2938 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2939 if (host_to_target_shm_info(buf, &shm_info))
2940 return -TARGET_EFAULT;
2941 break;
2942 case IPC_RMID:
2943 case SHM_LOCK:
2944 case SHM_UNLOCK:
2945 ret = get_errno(shmctl(shmid, cmd, NULL));
2946 break;
2947 }
2948
2949 return ret;
2950 }
2951
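/* do_shmat() strategy: query the segment size with IPC_STAT, attach the
 * segment in the host address space (at the guest-supplied address if one
 * was given, otherwise at a hole found by mmap_find_vma()), then record
 * the mapping in shm_regions[] and mark the pages valid/readable (and
 * writable unless SHM_RDONLY) so the guest sees ordinary memory.
 */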
2952 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2953 {
2954 abi_long raddr;
2955 void *host_raddr;
2956 struct shmid_ds shm_info;
2957 int i,ret;
2958
2959 /* find out the length of the shared memory segment */
2960 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2961 if (is_error(ret)) {
2962 /* can't get length, bail out */
2963 return ret;
2964 }
2965
2966 mmap_lock();
2967
2968 if (shmaddr)
2969 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2970 else {
2971 abi_ulong mmap_start;
2972
2973 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2974
2975 if (mmap_start == -1) {
2976 errno = ENOMEM;
2977 host_raddr = (void *)-1;
2978 } else
2979 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2980 }
2981
2982 if (host_raddr == (void *)-1) {
2983 mmap_unlock();
2984 return get_errno((long)host_raddr);
2985 }
2986 raddr = h2g((unsigned long)host_raddr);
2987
2988 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2989 PAGE_VALID | PAGE_READ |
2990 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2991
2992 for (i = 0; i < N_SHM_REGIONS; i++) {
2993 if (shm_regions[i].start == 0) {
2994 shm_regions[i].start = raddr;
2995 shm_regions[i].size = shm_info.shm_segsz;
2996 break;
2997 }
2998 }
2999
3000 mmap_unlock();
3001 return raddr;
3002
3003 }
3004
3005 static inline abi_long do_shmdt(abi_ulong shmaddr)
3006 {
3007 int i;
3008
3009 for (i = 0; i < N_SHM_REGIONS; ++i) {
3010 if (shm_regions[i].start == shmaddr) {
3011 shm_regions[i].start = 0;
3012 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3013 break;
3014 }
3015 }
3016
3017 return get_errno(shmdt(g2h(shmaddr)));
3018 }
3019
3020 #ifdef TARGET_NR_ipc
3021 /* ??? This only works with linear mappings. */
3022 /* do_ipc() must return target values and target errnos. */
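/* Illustrative sketch (hypothetical, not part of the original file): on
 * targets with a single ipc(2) entry point the guest libc issues e.g.
 *
 *     syscall(__NR_ipc, IPCOP_semop, semid, nsops, 0, sops);
 *
 * and do_ipc() dispatches on the low 16 bits of "call" (IPCOP_semop
 * here), with "version" taken from the top 16 bits.
 */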
3023 static abi_long do_ipc(unsigned int call, int first,
3024 int second, int third,
3025 abi_long ptr, abi_long fifth)
3026 {
3027 int version;
3028 abi_long ret = 0;
3029
3030 version = call >> 16;
3031 call &= 0xffff;
3032
3033 switch (call) {
3034 case IPCOP_semop:
3035 ret = do_semop(first, ptr, second);
3036 break;
3037
3038 case IPCOP_semget:
3039 ret = get_errno(semget(first, second, third));
3040 break;
3041
3042 case IPCOP_semctl:
3043 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3044 break;
3045
3046 case IPCOP_msgget:
3047 ret = get_errno(msgget(first, second));
3048 break;
3049
3050 case IPCOP_msgsnd:
3051 ret = do_msgsnd(first, ptr, second, third);
3052 break;
3053
3054 case IPCOP_msgctl:
3055 ret = do_msgctl(first, second, ptr);
3056 break;
3057
3058 case IPCOP_msgrcv:
3059 switch (version) {
3060 case 0:
3061 {
3062 struct target_ipc_kludge {
3063 abi_long msgp;
3064 abi_long msgtyp;
3065 } *tmp;
3066
3067 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3068 ret = -TARGET_EFAULT;
3069 break;
3070 }
3071
3072 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3073
3074 unlock_user_struct(tmp, ptr, 0);
3075 break;
3076 }
3077 default:
3078 ret = do_msgrcv(first, ptr, second, fifth, third);
3079 }
3080 break;
3081
3082 case IPCOP_shmat:
3083 switch (version) {
3084 default:
3085 {
3086 abi_ulong raddr;
3087 raddr = do_shmat(first, ptr, second);
3088 if (is_error(raddr))
3089 return get_errno(raddr);
3090 if (put_user_ual(raddr, third))
3091 return -TARGET_EFAULT;
3092 break;
3093 }
3094 case 1:
3095 ret = -TARGET_EINVAL;
3096 break;
3097 }
3098 break;
3099 case IPCOP_shmdt:
3100 ret = do_shmdt(ptr);
3101 break;
3102
3103 case IPCOP_shmget:
3104 /* IPC_* flag values are the same on all linux platforms */
3105 ret = get_errno(shmget(first, second, third));
3106 break;
3107
3108 /* IPC_* and SHM_* command values are the same on all linux platforms */
3109 case IPCOP_shmctl:
3110 ret = do_shmctl(first, second, third);
3111 break;
3112 default:
3113 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3114 ret = -TARGET_ENOSYS;
3115 break;
3116 }
3117 return ret;
3118 }
3119 #endif
3120
3121 /* kernel structure type definitions */
3122
3123 #define STRUCT(name, ...) STRUCT_ ## name,
3124 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3125 enum {
3126 #include "syscall_types.h"
3127 };
3128 #undef STRUCT
3129 #undef STRUCT_SPECIAL
3130
3131 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3132 #define STRUCT_SPECIAL(name)
3133 #include "syscall_types.h"
3134 #undef STRUCT
3135 #undef STRUCT_SPECIAL
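/* Illustrative expansion (hypothetical entry, not necessarily verbatim
 * from syscall_types.h): a line such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * yields an enum constant STRUCT_winsize from the first inclusion above
 * and a layout table
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 *
 * from the second; the thunk code uses both to convert structures between
 * target and host layouts.
 */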
3136
3137 typedef struct IOCTLEntry IOCTLEntry;
3138
3139 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3140 int fd, abi_long cmd, abi_long arg);
3141
3142 struct IOCTLEntry {
3143 unsigned int target_cmd;
3144 unsigned int host_cmd;
3145 const char *name;
3146 int access;
3147 do_ioctl_fn *do_ioctl;
3148 const argtype arg_type[5];
3149 };
3150
3151 #define IOC_R 0x0001
3152 #define IOC_W 0x0002
3153 #define IOC_RW (IOC_R | IOC_W)
3154
3155 #define MAX_STRUCT_SIZE 4096
3156
3157 #ifdef CONFIG_FIEMAP
3158 /* So fiemap access checks don't overflow on 32 bit systems.
3159 * This is very slightly smaller than the limit imposed by
3160 * the underlying kernel.
3161 */
3162 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3163 / sizeof(struct fiemap_extent))
3164
3165 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3166 int fd, abi_long cmd, abi_long arg)
3167 {
3168 /* The parameter for this ioctl is a struct fiemap followed
3169 * by an array of struct fiemap_extent whose size is set
3170 * in fiemap->fm_extent_count. The array is filled in by the
3171 * ioctl.
3172 */
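/* Illustrative layout (hypothetical sizes, not part of the original
 * file): with fm_extent_count == 2 the guest passes a buffer shaped like
 *
 *     struct fiemap fm;  followed by  struct fiemap_extent ext[2];
 *
 * and the kernel fills fm.fm_mapped_extents plus that many extent records
 * on return, which is why the output copy below sizes target_size_out
 * from fm_mapped_extents.
 */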
3173 int target_size_in, target_size_out;
3174 struct fiemap *fm;
3175 const argtype *arg_type = ie->arg_type;
3176 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3177 void *argptr, *p;
3178 abi_long ret;
3179 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3180 uint32_t outbufsz;
3181 int free_fm = 0;
3182
3183 assert(arg_type[0] == TYPE_PTR);
3184 assert(ie->access == IOC_RW);
3185 arg_type++;
3186 target_size_in = thunk_type_size(arg_type, 0);
3187 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3188 if (!argptr) {
3189 return -TARGET_EFAULT;
3190 }
3191 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3192 unlock_user(argptr, arg, 0);
3193 fm = (struct fiemap *)buf_temp;
3194 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3195 return -TARGET_EINVAL;
3196 }
3197
3198 outbufsz = sizeof (*fm) +
3199 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3200
3201 if (outbufsz > MAX_STRUCT_SIZE) {
3202 /* We can't fit all the extents into the fixed size buffer.
3203 * Allocate one that is large enough and use it instead.
3204 */
3205 fm = malloc(outbufsz);
3206 if (!fm) {
3207 return -TARGET_ENOMEM;
3208 }
3209 memcpy(fm, buf_temp, sizeof(struct fiemap));
3210 free_fm = 1;
3211 }
3212 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3213 if (!is_error(ret)) {
3214 target_size_out = target_size_in;
3215 /* An extent_count of 0 means we were only counting the extents
3216 * so there are no structs to copy
3217 */
3218 if (fm->fm_extent_count != 0) {
3219 target_size_out += fm->fm_mapped_extents * extent_size;
3220 }
3221 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3222 if (!argptr) {
3223 ret = -TARGET_EFAULT;
3224 } else {
3225 /* Convert the struct fiemap */
3226 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3227 if (fm->fm_extent_count != 0) {
3228 p = argptr + target_size_in;
3229 /* ...and then all the struct fiemap_extents */
3230 for (i = 0; i < fm->fm_mapped_extents; i++) {
3231 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3232 THUNK_TARGET);
3233 p += extent_size;
3234 }
3235 }
3236 unlock_user(argptr, arg, target_size_out);
3237 }
3238 }
3239 if (free_fm) {
3240 free(fm);
3241 }
3242 return ret;
3243 }
3244 #endif
3245
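/* Handler for the SIOCGIFCONF-style struct ifconf argument: ifconf embeds
 * a guest pointer and byte length for an array of struct ifreq, which the
 * generic thunk path cannot follow. The code below rebuilds a host
 * ifconf/ifreq buffer, issues the ioctl, converts the returned ifreq
 * records back into the guest's buffer and rescales ifc_len to target
 * units.
 */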
3246 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3247 int fd, abi_long cmd, abi_long arg)
3248 {
3249 const argtype *arg_type = ie->arg_type;
3250 int target_size;
3251 void *argptr;
3252 int ret;
3253 struct ifconf *host_ifconf;
3254 uint32_t outbufsz;
3255 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3256 int target_ifreq_size;
3257 int nb_ifreq;
3258 int free_buf = 0;
3259 int i;
3260 int target_ifc_len;
3261 abi_long target_ifc_buf;
3262 int host_ifc_len;
3263 char *host_ifc_buf;
3264
3265 assert(arg_type[0] == TYPE_PTR);
3266 assert(ie->access == IOC_RW);
3267
3268 arg_type++;
3269 target_size = thunk_type_size(arg_type, 0);
3270
3271 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3272 if (!argptr)
3273 return -TARGET_EFAULT;
3274 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3275 unlock_user(argptr, arg, 0);
3276
3277 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3278 target_ifc_len = host_ifconf->ifc_len;
3279 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3280
3281 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3282 nb_ifreq = target_ifc_len / target_ifreq_size;
3283 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3284
3285 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3286 if (outbufsz > MAX_STRUCT_SIZE) {
3287 /* We can't fit all the ifreq records into the fixed size buffer.
3288 * Allocate one that is large enough and use it instead.
3289 */
3290 host_ifconf = malloc(outbufsz);
3291 if (!host_ifconf) {
3292 return -TARGET_ENOMEM;
3293 }
3294 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3295 free_buf = 1;
3296 }
3297 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3298
3299 host_ifconf->ifc_len = host_ifc_len;
3300 host_ifconf->ifc_buf = host_ifc_buf;
3301
3302 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3303 if (!is_error(ret)) {
3304 /* convert host ifc_len to target ifc_len */
3305
3306 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3307 target_ifc_len = nb_ifreq * target_ifreq_size;
3308 host_ifconf->ifc_len = target_ifc_len;
3309
3310 /* restore target ifc_buf */
3311
3312 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3313
3314 /* copy struct ifconf to target user */
3315
3316 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3317 if (!argptr)
3318 return -TARGET_EFAULT;
3319 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3320 unlock_user(argptr, arg, target_size);
3321
3322 /* copy ifreq[] to target user */
3323
3324 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3325 for (i = 0; i < nb_ifreq ; i++) {
3326 thunk_convert(argptr + i * target_ifreq_size,
3327 host_ifc_buf + i * sizeof(struct ifreq),
3328 ifreq_arg_type, THUNK_TARGET);
3329 }
3330 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3331 }
3332
3333 if (free_buf) {
3334 free(host_ifconf);
3335 }
3336
3337 return ret;
3338 }
3339
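/* Device-mapper (DM_*) ioctl handler: struct dm_ioctl is a fixed header
 * followed by a variable-sized, command-specific payload at data_start,
 * so each command's payload is converted explicitly in both directions
 * around the host ioctl() call, using a temporary buffer large enough for
 * the declared data_size.
 */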
3340 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3341 abi_long cmd, abi_long arg)
3342 {
3343 void *argptr;
3344 struct dm_ioctl *host_dm;
3345 abi_long guest_data;
3346 uint32_t guest_data_size;
3347 int target_size;
3348 const argtype *arg_type = ie->arg_type;
3349 abi_long ret;
3350 void *big_buf = NULL;
3351 char *host_data;
3352
3353 arg_type++;
3354 target_size = thunk_type_size(arg_type, 0);
3355 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3356 if (!argptr) {
3357 ret = -TARGET_EFAULT;
3358 goto out;
3359 }
3360 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3361 unlock_user(argptr, arg, 0);
3362
3363 /* buf_temp is too small, so fetch things into a bigger buffer */
3364 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3365 memcpy(big_buf, buf_temp, target_size);
3366 buf_temp = big_buf;
3367 host_dm = big_buf;
3368
3369 guest_data = arg + host_dm->data_start;
3370 if ((guest_data - arg) < 0) {
3371 ret = -EINVAL;
3372 goto out;
3373 }
3374 guest_data_size = host_dm->data_size - host_dm->data_start;
3375 host_data = (char*)host_dm + host_dm->data_start;
3376
3377 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3378 switch (ie->host_cmd) {
3379 case DM_REMOVE_ALL:
3380 case DM_LIST_DEVICES:
3381 case DM_DEV_CREATE:
3382 case DM_DEV_REMOVE:
3383 case DM_DEV_SUSPEND:
3384 case DM_DEV_STATUS:
3385 case DM_DEV_WAIT:
3386 case DM_TABLE_STATUS:
3387 case DM_TABLE_CLEAR:
3388 case DM_TABLE_DEPS:
3389 case DM_LIST_VERSIONS:
3390 /* no input data */
3391 break;
3392 case DM_DEV_RENAME:
3393 case DM_DEV_SET_GEOMETRY:
3394 /* data contains only strings */
3395 memcpy(host_data, argptr, guest_data_size);
3396 break;
3397 case DM_TARGET_MSG:
3398 memcpy(host_data, argptr, guest_data_size);
3399 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3400 break;
3401 case DM_TABLE_LOAD:
3402 {
3403 void *gspec = argptr;
3404 void *cur_data = host_data;
3405 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3406 int spec_size = thunk_type_size(arg_type, 0);
3407 int i;
3408
3409 for (i = 0; i < host_dm->target_count; i++) {
3410 struct dm_target_spec *spec = cur_data;
3411 uint32_t next;
3412 int slen;
3413
3414 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3415 slen = strlen((char*)gspec + spec_size) + 1;
3416 next = spec->next;
3417 spec->next = sizeof(*spec) + slen;
3418 strcpy((char*)&spec[1], gspec + spec_size);
3419 gspec += next;
3420 cur_data += spec->next;
3421 }
3422 break;
3423 }
3424 default:
3425 ret = -TARGET_EINVAL;
3426 goto out;
3427 }
3428 unlock_user(argptr, guest_data, 0);
3429
3430 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3431 if (!is_error(ret)) {
3432 guest_data = arg + host_dm->data_start;
3433 guest_data_size = host_dm->data_size - host_dm->data_start;
3434 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3435 switch (ie->host_cmd) {
3436 case DM_REMOVE_ALL:
3437 case DM_DEV_CREATE:
3438 case DM_DEV_REMOVE:
3439 case DM_DEV_RENAME:
3440 case DM_DEV_SUSPEND:
3441 case DM_DEV_STATUS:
3442 case DM_TABLE_LOAD:
3443 case DM_TABLE_CLEAR:
3444 case DM_TARGET_MSG:
3445 case DM_DEV_SET_GEOMETRY:
3446 /* no return data */
3447 break;
3448 case DM_LIST_DEVICES:
3449 {
3450 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3451 uint32_t remaining_data = guest_data_size;
3452 void *cur_data = argptr;
3453 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3454 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
3455
3456 while (1) {
3457 uint32_t next = nl->next;
3458 if (next) {
3459 nl->next = nl_size + (strlen(nl->name) + 1);
3460 }
3461 if (remaining_data < nl->next) {
3462 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3463 break;
3464 }
3465 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3466 strcpy(cur_data + nl_size, nl->name);
3467 cur_data += nl->next;
3468 remaining_data -= nl->next;
3469 if (!next) {
3470 break;
3471 }
3472 nl = (void*)nl + next;
3473 }
3474 break;
3475 }
3476 case DM_DEV_WAIT:
3477 case DM_TABLE_STATUS:
3478 {
3479 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3480 void *cur_data = argptr;
3481 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3482 int spec_size = thunk_type_size(arg_type, 0);
3483 int i;
3484
3485 for (i = 0; i < host_dm->target_count; i++) {
3486 uint32_t next = spec->next;
3487 int slen = strlen((char*)&spec[1]) + 1;
3488 spec->next = (cur_data - argptr) + spec_size + slen;
3489 if (guest_data_size < spec->next) {
3490 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3491 break;
3492 }
3493 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3494 strcpy(cur_data + spec_size, (char*)&spec[1]);
3495 cur_data = argptr + spec->next;
3496 spec = (void*)host_dm + host_dm->data_start + next;
3497 }
3498 break;
3499 }
3500 case DM_TABLE_DEPS:
3501 {
3502 void *hdata = (void*)host_dm + host_dm->data_start;
3503 int count = *(uint32_t*)hdata;
3504 uint64_t *hdev = hdata + 8;
3505 uint64_t *gdev = argptr + 8;
3506 int i;
3507
3508 *(uint32_t*)argptr = tswap32(count);
3509 for (i = 0; i < count; i++) {
3510 *gdev = tswap64(*hdev);
3511 gdev++;
3512 hdev++;
3513 }
3514 break;
3515 }
3516 case DM_LIST_VERSIONS:
3517 {
3518 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3519 uint32_t remaining_data = guest_data_size;
3520 void *cur_data = argptr;
3521 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3522 int vers_size = thunk_type_size(arg_type, 0);
3523
3524 while (1) {
3525 uint32_t next = vers->next;
3526 if (next) {
3527 vers->next = vers_size + (strlen(vers->name) + 1);
3528 }
3529 if (remaining_data < vers->next) {
3530 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3531 break;
3532 }
3533 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3534 strcpy(cur_data + vers_size, vers->name);
3535 cur_data += vers->next;
3536 remaining_data -= vers->next;
3537 if (!next) {
3538 break;
3539 }
3540 vers = (void*)vers + next;
3541 }
3542 break;
3543 }
3544 default:
3545 ret = -TARGET_EINVAL;
3546 goto out;
3547 }
3548 unlock_user(argptr, guest_data, guest_data_size);
3549
3550 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3551 if (!argptr) {
3552 ret = -TARGET_EFAULT;
3553 goto out;
3554 }
3555 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3556 unlock_user(argptr, arg, target_size);
3557 }
3558 out:
3559 g_free(big_buf);
3560 return ret;
3561 }
3562
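/* Route ioctl handler (SIOCADDRT/SIOCDELRT): struct rtentry embeds an
 * rt_dev string pointer, so the structure is converted field by field and
 * the device name is locked in from guest memory as a string rather than
 * being passed through as an opaque pointer.
 */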
3563 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3564 int fd, abi_long cmd, abi_long arg)
3565 {
3566 const argtype *arg_type = ie->arg_type;
3567 const StructEntry *se;
3568 const argtype *field_types;
3569 const int *dst_offsets, *src_offsets;
3570 int target_size;
3571 void *argptr;
3572 abi_ulong *target_rt_dev_ptr;
3573 unsigned long *host_rt_dev_ptr;
3574 abi_long ret;
3575 int i;
3576
3577 assert(ie->access == IOC_W);
3578 assert(*arg_type == TYPE_PTR);
3579 arg_type++;
3580 assert(*arg_type == TYPE_STRUCT);
3581 target_size = thunk_type_size(arg_type, 0);
3582 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3583 if (!argptr) {
3584 return -TARGET_EFAULT;
3585 }
3586 arg_type++;
3587 assert(*arg_type == (int)STRUCT_rtentry);
3588 se = struct_entries + *arg_type++;
3589 assert(se->convert[0] == NULL);
3590 /* convert struct here to be able to catch rt_dev string */
3591 field_types = se->field_types;
3592 dst_offsets = se->field_offsets[THUNK_HOST];
3593 src_offsets = se->field_offsets[THUNK_TARGET];
3594 for (i = 0; i < se->nb_fields; i++) {
3595 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3596 assert(*field_types == TYPE_PTRVOID);
3597 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3598 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3599 if (*target_rt_dev_ptr != 0) {
3600 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3601 tswapal(*target_rt_dev_ptr));
3602 if (!*host_rt_dev_ptr) {
3603 unlock_user(argptr, arg, 0);
3604 return -TARGET_EFAULT;
3605 }
3606 } else {
3607 *host_rt_dev_ptr = 0;
3608 }
3609 field_types++;
3610 continue;
3611 }
3612 field_types = thunk_convert(buf_temp + dst_offsets[i],
3613 argptr + src_offsets[i],
3614 field_types, THUNK_HOST);
3615 }
3616 unlock_user(argptr, arg, 0);
3617
3618 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3619 if (*host_rt_dev_ptr != 0) {
3620 unlock_user((void *)*host_rt_dev_ptr,
3621 *target_rt_dev_ptr, 0);
3622 }
3623 return ret;
3624 }
3625
3626 static IOCTLEntry ioctl_entries[] = {
3627 #define IOCTL(cmd, access, ...) \
3628 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3629 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3630 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3631 #include "ioctls.h"
3632 { 0, 0, },
3633 };
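/* Illustrative expansion (hypothetical entry, not necessarily verbatim
 * from ioctls.h): a line such as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * becomes
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * and, having no do_ioctl callback, is handled by the generic conversion
 * switch in do_ioctl() below.
 */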
3634
3635 /* ??? Implement proper locking for ioctls. */
3636 /* do_ioctl() Must return target values and target errnos. */
3637 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3638 {
3639 const IOCTLEntry *ie;
3640 const argtype *arg_type;
3641 abi_long ret;
3642 uint8_t buf_temp[MAX_STRUCT_SIZE];
3643 int target_size;
3644 void *argptr;
3645
3646 ie = ioctl_entries;
3647 for(;;) {
3648 if (ie->target_cmd == 0) {
3649 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3650 return -TARGET_ENOSYS;
3651 }
3652 if (ie->target_cmd == cmd)
3653 break;
3654 ie++;
3655 }
3656 arg_type = ie->arg_type;
3657 #if defined(DEBUG)
3658 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3659 #endif
3660 if (ie->do_ioctl) {
3661 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3662 }
3663
3664 switch(arg_type[0]) {
3665 case TYPE_NULL:
3666 /* no argument */
3667 ret = get_errno(ioctl(fd, ie->host_cmd));
3668 break;
3669 case TYPE_PTRVOID:
3670 case TYPE_INT:
3671 /* int argument */
3672 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3673 break;
3674 case TYPE_PTR:
3675 arg_type++;
3676 target_size = thunk_type_size(arg_type, 0);
3677 switch(ie->access) {
3678 case IOC_R:
3679 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3680 if (!is_error(ret)) {
3681 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3682 if (!argptr)
3683 return -TARGET_EFAULT;
3684 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3685 unlock_user(argptr, arg, target_size);
3686 }
3687 break;
3688 case IOC_W:
3689 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3690 if (!argptr)
3691 return -TARGET_EFAULT;
3692 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3693 unlock_user(argptr, arg, 0);
3694 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3695 break;
3696 default:
3697 case IOC_RW:
3698 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3699 if (!argptr)
3700 return -TARGET_EFAULT;
3701 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3702 unlock_user(argptr, arg, 0);
3703 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3704 if (!is_error(ret)) {
3705 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3706 if (!argptr)
3707 return -TARGET_EFAULT;
3708 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3709 unlock_user(argptr, arg, target_size);
3710 }
3711 break;
3712 }
3713 break;
3714 default:
3715 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3716 (long)cmd, arg_type[0]);
3717 ret = -TARGET_ENOSYS;
3718 break;
3719 }
3720 return ret;
3721 }
3722
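/* The bitmask_transtbl tables below translate termios flag bits between
 * target and host encodings. Each row is { target_mask, target_bits,
 * host_mask, host_bits }: whenever the masked source field equals the
 * listed value, the corresponding bits are set in the destination.
 * target_to_host_bitmask()/host_to_target_bitmask() walk these tables for
 * the termios converters further down.
 */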
3723 static const bitmask_transtbl iflag_tbl[] = {
3724 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3725 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3726 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3727 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3728 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3729 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3730 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3731 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3732 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3733 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3734 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3735 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3736 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3737 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3738 { 0, 0, 0, 0 }
3739 };
3740
3741 static const bitmask_transtbl oflag_tbl[] = {
3742 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3743 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3744 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3745 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3746 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3747 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3748 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3749 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3750 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3751 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3752 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3753 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3754 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3755 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3756 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3757 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3758 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3759 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3760 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3761 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3762 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3763 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3764 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3765 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3766 { 0, 0, 0, 0 }
3767 };
3768
3769 static const bitmask_transtbl cflag_tbl[] = {
3770 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3771 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3772 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3773 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3774 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3775 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3776 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3777 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3778 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3779 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3780 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3781 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3782 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3783 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3784 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3785 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3786 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3787 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3788 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3789 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3790 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3791 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3792 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3793 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3794 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3795 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3796 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3797 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3798 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3799 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3800 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3801 { 0, 0, 0, 0 }
3802 };
3803
3804 static const bitmask_transtbl lflag_tbl[] = {
3805 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3806 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3807 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3808 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3809 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3810 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3811 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3812 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3813 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3814 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3815 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3816 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3817 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3818 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3819 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3820 { 0, 0, 0, 0 }
3821 };
3822
3823 static void target_to_host_termios (void *dst, const void *src)
3824 {
3825 struct host_termios *host = dst;
3826 const struct target_termios *target = src;
3827
3828 host->c_iflag =
3829 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3830 host->c_oflag =
3831 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3832 host->c_cflag =
3833 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3834 host->c_lflag =
3835 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3836 host->c_line = target->c_line;
3837
3838 memset(host->c_cc, 0, sizeof(host->c_cc));
3839 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3840 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3841 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3842 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3843 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3844 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3845 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3846 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3847 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3848 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3849 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3850 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3851 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3852 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3853 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3854 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3855 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3856 }
3857
3858 static void host_to_target_termios (void *dst, const void *src)
3859 {
3860 struct target_termios *target = dst;
3861 const struct host_termios *host = src;
3862
3863 target->c_iflag =
3864 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3865 target->c_oflag =
3866 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3867 target->c_cflag =
3868 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3869 target->c_lflag =
3870 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3871 target->c_line = host->c_line;
3872
3873 memset(target->c_cc, 0, sizeof(target->c_cc));
3874 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3875 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3876 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3877 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3878 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3879 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3880 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3881 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3882 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3883 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3884 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3885 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3886 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3887 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3888 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3889 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3890 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3891 }
3892
3893 static const StructEntry struct_termios_def = {
3894 .convert = { host_to_target_termios, target_to_host_termios },
3895 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3896 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3897 };
3898
3899 static bitmask_transtbl mmap_flags_tbl[] = {
3900 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3901 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3902 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3903 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3904 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3905 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3906 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3907 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3908 { 0, 0, 0, 0 }
3909 };
3910
3911 #if defined(TARGET_I386)
3912
3913 /* NOTE: there is really only one LDT, shared by all threads */
3914 static uint8_t *ldt_table;
3915
3916 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3917 {
3918 int size;
3919 void *p;
3920
3921 if (!ldt_table)
3922 return 0;
3923 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3924 if (size > bytecount)
3925 size = bytecount;
3926 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3927 if (!p)
3928 return -TARGET_EFAULT;
3929 /* ??? Should this be byteswapped? */
3930 memcpy(p, ldt_table, size);
3931 unlock_user(p, ptr, size);
3932 return size;
3933 }
3934
3935 /* XXX: add locking support */
3936 static abi_long write_ldt(CPUX86State *env,
3937 abi_ulong ptr, unsigned long bytecount, int oldmode)
3938 {
3939 struct target_modify_ldt_ldt_s ldt_info;
3940 struct target_modify_ldt_ldt_s *target_ldt_info;
3941 int seg_32bit, contents, read_exec_only, limit_in_pages;
3942 int seg_not_present, useable, lm;
3943 uint32_t *lp, entry_1, entry_2;
3944
3945 if (bytecount != sizeof(ldt_info))
3946 return -TARGET_EINVAL;
3947 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3948 return -TARGET_EFAULT;
3949 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3950 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3951 ldt_info.limit = tswap32(target_ldt_info->limit);
3952 ldt_info.flags = tswap32(target_ldt_info->flags);
3953 unlock_user_struct(target_ldt_info, ptr, 0);
3954
3955 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3956 return -TARGET_EINVAL;
3957 seg_32bit = ldt_info.flags & 1;
3958 contents = (ldt_info.flags >> 1) & 3;
3959 read_exec_only = (ldt_info.flags >> 3) & 1;
3960 limit_in_pages = (ldt_info.flags >> 4) & 1;
3961 seg_not_present = (ldt_info.flags >> 5) & 1;
3962 useable = (ldt_info.flags >> 6) & 1;
3963 #ifdef TARGET_ABI32
3964 lm = 0;
3965 #else
3966 lm = (ldt_info.flags >> 7) & 1;
3967 #endif
3968 if (contents == 3) {
3969 if (oldmode)
3970 return -TARGET_EINVAL;
3971 if (seg_not_present == 0)
3972 return -TARGET_EINVAL;
3973 }
3974 /* allocate the LDT */
3975 if (!ldt_table) {
3976 env->ldt.base = target_mmap(0,
3977 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3978 PROT_READ|PROT_WRITE,
3979 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3980 if (env->ldt.base == -1)
3981 return -TARGET_ENOMEM;
3982 memset(g2h(env->ldt.base), 0,
3983 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3984 env->ldt.limit = 0xffff;
3985 ldt_table = g2h(env->ldt.base);
3986 }
3987
3988 /* NOTE: same code as Linux kernel */
3989 /* Allow LDTs to be cleared by the user. */
3990 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3991 if (oldmode ||
3992 (contents == 0 &&
3993 read_exec_only == 1 &&
3994 seg_32bit == 0 &&
3995 limit_in_pages == 0 &&
3996 seg_not_present == 1 &&
3997 useable == 0 )) {
3998 entry_1 = 0;
3999 entry_2 = 0;
4000 goto install;
4001 }
4002 }
4003
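    /* Pack the fields into the two 32-bit words of an x86 segment descriptor:
       entry_1 holds base[15:0] and limit[15:0]; entry_2 holds the remaining
       base and limit bits plus the access flags (0x7000 sets the S bit and
       DPL=3, i.e. a user code/data segment). */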
4004 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4005 (ldt_info.limit & 0x0ffff);
4006 entry_2 = (ldt_info.base_addr & 0xff000000) |
4007 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4008 (ldt_info.limit & 0xf0000) |
4009 ((read_exec_only ^ 1) << 9) |
4010 (contents << 10) |
4011 ((seg_not_present ^ 1) << 15) |
4012 (seg_32bit << 22) |
4013 (limit_in_pages << 23) |
4014 (lm << 21) |
4015 0x7000;
4016 if (!oldmode)
4017 entry_2 |= (useable << 20);
4018
4019 /* Install the new entry ... */
4020 install:
4021 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4022 lp[0] = tswap32(entry_1);
4023 lp[1] = tswap32(entry_2);
4024 return 0;
4025 }
4026
4027 /* specific and weird i386 syscalls */
4028 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4029 unsigned long bytecount)
4030 {
4031 abi_long ret;
4032
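    /* func selects the operation as in the kernel's modify_ldt(): 0 reads the
       LDT, 1 writes an entry in the legacy format, 0x11 writes an entry in the
       new format; anything else is unsupported here. */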
4033 switch (func) {
4034 case 0:
4035 ret = read_ldt(ptr, bytecount);
4036 break;
4037 case 1:
4038 ret = write_ldt(env, ptr, bytecount, 1);
4039 break;
4040 case 0x11:
4041 ret = write_ldt(env, ptr, bytecount, 0);
4042 break;
4043 default:
4044 ret = -TARGET_ENOSYS;
4045 break;
4046 }
4047 return ret;
4048 }
4049
4050 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4051 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4052 {
4053 uint64_t *gdt_table = g2h(env->gdt.base);
4054 struct target_modify_ldt_ldt_s ldt_info;
4055 struct target_modify_ldt_ldt_s *target_ldt_info;
4056 int seg_32bit, contents, read_exec_only, limit_in_pages;
4057 int seg_not_present, useable, lm;
4058 uint32_t *lp, entry_1, entry_2;
4059 int i;
4060
4061 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4062 if (!target_ldt_info)
4063 return -TARGET_EFAULT;
4064 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4065 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4066 ldt_info.limit = tswap32(target_ldt_info->limit);
4067 ldt_info.flags = tswap32(target_ldt_info->flags);
4068 if (ldt_info.entry_number == -1) {
4069 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4070 if (gdt_table[i] == 0) {
4071 ldt_info.entry_number = i;
4072 target_ldt_info->entry_number = tswap32(i);
4073 break;
4074 }
4075 }
4076 }
4077 unlock_user_struct(target_ldt_info, ptr, 1);
4078
4079 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4080 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4081 return -TARGET_EINVAL;
4082 seg_32bit = ldt_info.flags & 1;
4083 contents = (ldt_info.flags >> 1) & 3;
4084 read_exec_only = (ldt_info.flags >> 3) & 1;
4085 limit_in_pages = (ldt_info.flags >> 4) & 1;
4086 seg_not_present = (ldt_info.flags >> 5) & 1;
4087 useable = (ldt_info.flags >> 6) & 1;
4088 #ifdef TARGET_ABI32
4089 lm = 0;
4090 #else
4091 lm = (ldt_info.flags >> 7) & 1;
4092 #endif
4093
4094 if (contents == 3) {
4095 if (seg_not_present == 0)
4096 return -TARGET_EINVAL;
4097 }
4098
4099 /* NOTE: same code as Linux kernel */
4100 /* Allow LDTs to be cleared by the user. */
4101 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4102 if ((contents == 0 &&
4103 read_exec_only == 1 &&
4104 seg_32bit == 0 &&
4105 limit_in_pages == 0 &&
4106 seg_not_present == 1 &&
4107 useable == 0 )) {
4108 entry_1 = 0;
4109 entry_2 = 0;
4110 goto install;
4111 }
4112 }
4113
4114 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4115 (ldt_info.limit & 0x0ffff);
4116 entry_2 = (ldt_info.base_addr & 0xff000000) |
4117 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4118 (ldt_info.limit & 0xf0000) |
4119 ((read_exec_only ^ 1) << 9) |
4120 (contents << 10) |
4121 ((seg_not_present ^ 1) << 15) |
4122 (seg_32bit << 22) |
4123 (limit_in_pages << 23) |
4124 (useable << 20) |
4125 (lm << 21) |
4126 0x7000;
4127
4128 /* Install the new entry ... */
4129 install:
4130 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4131 lp[0] = tswap32(entry_1);
4132 lp[1] = tswap32(entry_2);
4133 return 0;
4134 }
4135
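/* Inverse of do_set_thread_area(): read the selected TLS descriptor out of the
   GDT and decode it back into the entry_number/base_addr/limit/flags layout
   expected by the guest. */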
4136 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4137 {
4138 struct target_modify_ldt_ldt_s *target_ldt_info;
4139 uint64_t *gdt_table = g2h(env->gdt.base);
4140 uint32_t base_addr, limit, flags;
4141 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4142 int seg_not_present, useable, lm;
4143 uint32_t *lp, entry_1, entry_2;
4144
4145 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4146 if (!target_ldt_info)
4147 return -TARGET_EFAULT;
4148 idx = tswap32(target_ldt_info->entry_number);
4149 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4150 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4151 unlock_user_struct(target_ldt_info, ptr, 1);
4152 return -TARGET_EINVAL;
4153 }
4154 lp = (uint32_t *)(gdt_table + idx);
4155 entry_1 = tswap32(lp[0]);
4156 entry_2 = tswap32(lp[1]);
4157
4158 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4159 contents = (entry_2 >> 10) & 3;
4160 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4161 seg_32bit = (entry_2 >> 22) & 1;
4162 limit_in_pages = (entry_2 >> 23) & 1;
4163 useable = (entry_2 >> 20) & 1;
4164 #ifdef TARGET_ABI32
4165 lm = 0;
4166 #else
4167 lm = (entry_2 >> 21) & 1;
4168 #endif
4169 flags = (seg_32bit << 0) | (contents << 1) |
4170 (read_exec_only << 3) | (limit_in_pages << 4) |
4171 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4172 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4173 base_addr = (entry_1 >> 16) |
4174 (entry_2 & 0xff000000) |
4175 ((entry_2 & 0xff) << 16);
4176 target_ldt_info->base_addr = tswapal(base_addr);
4177 target_ldt_info->limit = tswap32(limit);
4178 target_ldt_info->flags = tswap32(flags);
4179 unlock_user_struct(target_ldt_info, ptr, 1);
4180 return 0;
4181 }
4182 #endif /* TARGET_I386 && TARGET_ABI32 */
4183
4184 #ifndef TARGET_ABI32
4185 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4186 {
4187 abi_long ret = 0;
4188 abi_ulong val;
4189 int idx;
4190
4191 switch(code) {
4192 case TARGET_ARCH_SET_GS:
4193 case TARGET_ARCH_SET_FS:
4194 if (code == TARGET_ARCH_SET_GS)
4195 idx = R_GS;
4196 else
4197 idx = R_FS;
4198 cpu_x86_load_seg(env, idx, 0);
4199 env->segs[idx].base = addr;
4200 break;
4201 case TARGET_ARCH_GET_GS:
4202 case TARGET_ARCH_GET_FS:
4203 if (code == TARGET_ARCH_GET_GS)
4204 idx = R_GS;
4205 else
4206 idx = R_FS;
4207 val = env->segs[idx].base;
4208 if (put_user(val, addr, abi_ulong))
4209 ret = -TARGET_EFAULT;
4210 break;
4211 default:
4212 ret = -TARGET_EINVAL;
4213 break;
4214 }
4215 return ret;
4216 }
4217 #endif
4218
4219 #endif /* defined(TARGET_I386) */
4220
4221 #define NEW_STACK_SIZE 0x40000
4222
4223 #if defined(CONFIG_USE_NPTL)
4224
4225 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4226 typedef struct {
4227 CPUArchState *env;
4228 pthread_mutex_t mutex;
4229 pthread_cond_t cond;
4230 pthread_t thread;
4231 uint32_t tid;
4232 abi_ulong child_tidptr;
4233 abi_ulong parent_tidptr;
4234 sigset_t sigmask;
4235 } new_thread_info;
4236
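/* Entry point of a newly cloned guest thread: publish the child tid, restore
   the signal mask, signal the parent through info->cond, then wait on
   clone_lock until the parent has finished the TLS setup before entering the
   CPU loop. */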
4237 static void *clone_func(void *arg)
4238 {
4239 new_thread_info *info = arg;
4240 CPUArchState *env;
4241 CPUState *cpu;
4242 TaskState *ts;
4243
4244 env = info->env;
4245 cpu = ENV_GET_CPU(env);
4246 thread_env = env;
4247 ts = (TaskState *)thread_env->opaque;
4248 info->tid = gettid();
4249 cpu->host_tid = info->tid;
4250 task_settid(ts);
4251 if (info->child_tidptr)
4252 put_user_u32(info->tid, info->child_tidptr);
4253 if (info->parent_tidptr)
4254 put_user_u32(info->tid, info->parent_tidptr);
4255 /* Enable signals. */
4256 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4257 /* Signal to the parent that we're ready. */
4258 pthread_mutex_lock(&info->mutex);
4259 pthread_cond_broadcast(&info->cond);
4260 pthread_mutex_unlock(&info->mutex);
4261 /* Wait until the parent has finished initializing the TLS state. */
4262 pthread_mutex_lock(&clone_lock);
4263 pthread_mutex_unlock(&clone_lock);
4264 cpu_loop(env);
4265 /* never exits */
4266 return NULL;
4267 }
4268 #else
4269
4270 static int clone_func(void *arg)
4271 {
4272 CPUArchState *env = arg;
4273 cpu_loop(env);
4274 /* never exits */
4275 return 0;
4276 }
4277 #endif
4278
4279 /* do_fork() must return host values and target errnos (unlike most
4280 do_*() functions). */
4281 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4282 abi_ulong parent_tidptr, target_ulong newtls,
4283 abi_ulong child_tidptr)
4284 {
4285 int ret;
4286 TaskState *ts;
4287 CPUArchState *new_env;
4288 #if defined(CONFIG_USE_NPTL)
4289 unsigned int nptl_flags;
4290 sigset_t sigmask;
4291 #else
4292 uint8_t *new_stack;
4293 #endif
4294
4295 /* Emulate vfork() with fork() */
4296 if (flags & CLONE_VFORK)
4297 flags &= ~(CLONE_VFORK | CLONE_VM);
4298
4299 if (flags & CLONE_VM) {
4300 TaskState *parent_ts = (TaskState *)env->opaque;
4301 #if defined(CONFIG_USE_NPTL)
4302 new_thread_info info;
4303 pthread_attr_t attr;
4304 #endif
4305 ts = g_malloc0(sizeof(TaskState));
4306 init_task_state(ts);
4307 /* we create a new CPU instance. */
4308 new_env = cpu_copy(env);
4309 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4310 cpu_reset(ENV_GET_CPU(new_env));
4311 #endif
4312 /* Init regs that differ from the parent. */
4313 cpu_clone_regs(new_env, newsp);
4314 new_env->opaque = ts;
4315 ts->bprm = parent_ts->bprm;
4316 ts->info = parent_ts->info;
4317 #if defined(CONFIG_USE_NPTL)
4318 nptl_flags = flags;
4319 flags &= ~CLONE_NPTL_FLAGS2;
4320
4321 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4322 ts->child_tidptr = child_tidptr;
4323 }
4324
4325 if (nptl_flags & CLONE_SETTLS)
4326 cpu_set_tls (new_env, newtls);
4327
4328 /* Grab a mutex so that thread setup appears atomic. */
4329 pthread_mutex_lock(&clone_lock);
4330
4331 memset(&info, 0, sizeof(info));
4332 pthread_mutex_init(&info.mutex, NULL);
4333 pthread_mutex_lock(&info.mutex);
4334 pthread_cond_init(&info.cond, NULL);
4335 info.env = new_env;
4336 if (nptl_flags & CLONE_CHILD_SETTID)
4337 info.child_tidptr = child_tidptr;
4338 if (nptl_flags & CLONE_PARENT_SETTID)
4339 info.parent_tidptr = parent_tidptr;
4340
4341 ret = pthread_attr_init(&attr);
4342 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4343 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4344 /* It is not safe to deliver signals until the child has finished
4345 initializing, so temporarily block all signals. */
4346 sigfillset(&sigmask);
4347 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4348
4349 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4350 /* TODO: Free new CPU state if thread creation failed. */
4351
4352 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4353 pthread_attr_destroy(&attr);
4354 if (ret == 0) {
4355 /* Wait for the child to initialize. */
4356 pthread_cond_wait(&info.cond, &info.mutex);
4357 ret = info.tid;
4358 if (flags & CLONE_PARENT_SETTID)
4359 put_user_u32(ret, parent_tidptr);
4360 } else {
4361 ret = -1;
4362 }
4363 pthread_mutex_unlock(&info.mutex);
4364 pthread_cond_destroy(&info.cond);
4365 pthread_mutex_destroy(&info.mutex);
4366 pthread_mutex_unlock(&clone_lock);
4367 #else
4368 if (flags & CLONE_NPTL_FLAGS2)
4369 return -EINVAL;
4370 /* This is probably going to die very quickly, but do it anyway. */
4371 new_stack = g_malloc0 (NEW_STACK_SIZE);
4372 #ifdef __ia64__
4373 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4374 #else
4375 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4376 #endif
4377 #endif
4378 } else {
4379 /* if CLONE_VM is not set, treat this as a plain fork */
4380 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4381 return -EINVAL;
4382 fork_start();
4383 ret = fork();
4384 if (ret == 0) {
4385 /* Child Process. */
4386 cpu_clone_regs(env, newsp);
4387 fork_end(1);
4388 #if defined(CONFIG_USE_NPTL)
4389 /* There is a race condition here. The parent process could
4390 theoretically read the TID in the child process before the child
4391 tid is set. This would require using either ptrace
4392 (not implemented) or having *_tidptr point at a shared memory
4393 mapping. We can't repeat the spinlock hack used above because
4394 the child process gets its own copy of the lock. */
4395 if (flags & CLONE_CHILD_SETTID)
4396 put_user_u32(gettid(), child_tidptr);
4397 if (flags & CLONE_PARENT_SETTID)
4398 put_user_u32(gettid(), parent_tidptr);
4399 ts = (TaskState *)env->opaque;
4400 if (flags & CLONE_SETTLS)
4401 cpu_set_tls (env, newtls);
4402 if (flags & CLONE_CHILD_CLEARTID)
4403 ts->child_tidptr = child_tidptr;
4404 #endif
4405 } else {
4406 fork_end(0);
4407 }
4408 }
4409 return ret;
4410 }
4411
4412 /* warning: doesn't handle Linux-specific flags... */
4413 static int target_to_host_fcntl_cmd(int cmd)
4414 {
4415 switch(cmd) {
4416 case TARGET_F_DUPFD:
4417 case TARGET_F_GETFD:
4418 case TARGET_F_SETFD:
4419 case TARGET_F_GETFL:
4420 case TARGET_F_SETFL:
4421 return cmd;
4422 case TARGET_F_GETLK:
4423 return F_GETLK;
4424 case TARGET_F_SETLK:
4425 return F_SETLK;
4426 case TARGET_F_SETLKW:
4427 return F_SETLKW;
4428 case TARGET_F_GETOWN:
4429 return F_GETOWN;
4430 case TARGET_F_SETOWN:
4431 return F_SETOWN;
4432 case TARGET_F_GETSIG:
4433 return F_GETSIG;
4434 case TARGET_F_SETSIG:
4435 return F_SETSIG;
4436 #if TARGET_ABI_BITS == 32
4437 case TARGET_F_GETLK64:
4438 return F_GETLK64;
4439 case TARGET_F_SETLK64:
4440 return F_SETLK64;
4441 case TARGET_F_SETLKW64:
4442 return F_SETLKW64;
4443 #endif
4444 case TARGET_F_SETLEASE:
4445 return F_SETLEASE;
4446 case TARGET_F_GETLEASE:
4447 return F_GETLEASE;
4448 #ifdef F_DUPFD_CLOEXEC
4449 case TARGET_F_DUPFD_CLOEXEC:
4450 return F_DUPFD_CLOEXEC;
4451 #endif
4452 case TARGET_F_NOTIFY:
4453 return F_NOTIFY;
4454 default:
4455 return -TARGET_EINVAL;
4456 }
4457 return -TARGET_EINVAL;
4458 }
4459
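/* Lock types are translated by exact value: with a mask of -1,
   target_to_host_bitmask() matches only when the whole l_type equals the
   TARGET_F_* constant, and then substitutes the corresponding host F_* value. */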
4460 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4461 static const bitmask_transtbl flock_tbl[] = {
4462 TRANSTBL_CONVERT(F_RDLCK),
4463 TRANSTBL_CONVERT(F_WRLCK),
4464 TRANSTBL_CONVERT(F_UNLCK),
4465 TRANSTBL_CONVERT(F_EXLCK),
4466 TRANSTBL_CONVERT(F_SHLCK),
4467 { 0, 0, 0, 0 }
4468 };
4469
4470 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4471 {
4472 struct flock fl;
4473 struct target_flock *target_fl;
4474 struct flock64 fl64;
4475 struct target_flock64 *target_fl64;
4476 abi_long ret;
4477 int host_cmd = target_to_host_fcntl_cmd(cmd);
4478
4479 if (host_cmd == -TARGET_EINVAL)
4480 return host_cmd;
4481
4482 switch(cmd) {
4483 case TARGET_F_GETLK:
4484 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4485 return -TARGET_EFAULT;
4486 fl.l_type =
4487 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4488 fl.l_whence = tswap16(target_fl->l_whence);
4489 fl.l_start = tswapal(target_fl->l_start);
4490 fl.l_len = tswapal(target_fl->l_len);
4491 fl.l_pid = tswap32(target_fl->l_pid);
4492 unlock_user_struct(target_fl, arg, 0);
4493 ret = get_errno(fcntl(fd, host_cmd, &fl));
4494 if (ret == 0) {
4495 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4496 return -TARGET_EFAULT;
4497 target_fl->l_type =
4498 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4499 target_fl->l_whence = tswap16(fl.l_whence);
4500 target_fl->l_start = tswapal(fl.l_start);
4501 target_fl->l_len = tswapal(fl.l_len);
4502 target_fl->l_pid = tswap32(fl.l_pid);
4503 unlock_user_struct(target_fl, arg, 1);
4504 }
4505 break;
4506
4507 case TARGET_F_SETLK:
4508 case TARGET_F_SETLKW:
4509 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4510 return -TARGET_EFAULT;
4511 fl.l_type =
4512 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4513 fl.l_whence = tswap16(target_fl->l_whence);
4514 fl.l_start = tswapal(target_fl->l_start);
4515 fl.l_len = tswapal(target_fl->l_len);
4516 fl.l_pid = tswap32(target_fl->l_pid);
4517 unlock_user_struct(target_fl, arg, 0);
4518 ret = get_errno(fcntl(fd, host_cmd, &fl));
4519 break;
4520
4521 case TARGET_F_GETLK64:
4522 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4523 return -TARGET_EFAULT;
4524 fl64.l_type =
4525 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4526 fl64.l_whence = tswap16(target_fl64->l_whence);
4527 fl64.l_start = tswap64(target_fl64->l_start);
4528 fl64.l_len = tswap64(target_fl64->l_len);
4529 fl64.l_pid = tswap32(target_fl64->l_pid);
4530 unlock_user_struct(target_fl64, arg, 0);
4531 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4532 if (ret == 0) {
4533 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4534 return -TARGET_EFAULT;
4535 target_fl64->l_type =
4536 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4537 target_fl64->l_whence = tswap16(fl64.l_whence);
4538 target_fl64->l_start = tswap64(fl64.l_start);
4539 target_fl64->l_len = tswap64(fl64.l_len);
4540 target_fl64->l_pid = tswap32(fl64.l_pid);
4541 unlock_user_struct(target_fl64, arg, 1);
4542 }
4543 break;
4544 case TARGET_F_SETLK64:
4545 case TARGET_F_SETLKW64:
4546 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4547 return -TARGET_EFAULT;
4548 fl64.l_type =
4549 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4550 fl64.l_whence = tswap16(target_fl64->l_whence);
4551 fl64.l_start = tswap64(target_fl64->l_start);
4552 fl64.l_len = tswap64(target_fl64->l_len);
4553 fl64.l_pid = tswap32(target_fl64->l_pid);
4554 unlock_user_struct(target_fl64, arg, 0);
4555 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4556 break;
4557
4558 case TARGET_F_GETFL:
4559 ret = get_errno(fcntl(fd, host_cmd, arg));
4560 if (ret >= 0) {
4561 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4562 }
4563 break;
4564
4565 case TARGET_F_SETFL:
4566 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4567 break;
4568
4569 case TARGET_F_SETOWN:
4570 case TARGET_F_GETOWN:
4571 case TARGET_F_SETSIG:
4572 case TARGET_F_GETSIG:
4573 case TARGET_F_SETLEASE:
4574 case TARGET_F_GETLEASE:
4575 ret = get_errno(fcntl(fd, host_cmd, arg));
4576 break;
4577
4578 default:
4579 ret = get_errno(fcntl(fd, cmd, arg));
4580 break;
4581 }
4582 return ret;
4583 }
4584
4585 #ifdef USE_UID16
4586
4587 static inline int high2lowuid(int uid)
4588 {
4589 if (uid > 65535)
4590 return 65534;
4591 else
4592 return uid;
4593 }
4594
4595 static inline int high2lowgid(int gid)
4596 {
4597 if (gid > 65535)
4598 return 65534;
4599 else
4600 return gid;
4601 }
4602
4603 static inline int low2highuid(int uid)
4604 {
4605 if ((int16_t)uid == -1)
4606 return -1;
4607 else
4608 return uid;
4609 }
4610
4611 static inline int low2highgid(int gid)
4612 {
4613 if ((int16_t)gid == -1)
4614 return -1;
4615 else
4616 return gid;
4617 }
4618 static inline int tswapid(int id)
4619 {
4620 return tswap16(id);
4621 }
4622 #else /* !USE_UID16 */
4623 static inline int high2lowuid(int uid)
4624 {
4625 return uid;
4626 }
4627 static inline int high2lowgid(int gid)
4628 {
4629 return gid;
4630 }
4631 static inline int low2highuid(int uid)
4632 {
4633 return uid;
4634 }
4635 static inline int low2highgid(int gid)
4636 {
4637 return gid;
4638 }
4639 static inline int tswapid(int id)
4640 {
4641 return tswap32(id);
4642 }
4643 #endif /* USE_UID16 */
4644
4645 void syscall_init(void)
4646 {
4647 IOCTLEntry *ie;
4648 const argtype *arg_type;
4649 int size;
4650 int i;
4651
4652 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4653 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4654 #include "syscall_types.h"
4655 #undef STRUCT
4656 #undef STRUCT_SPECIAL
4657
4658 /* Build target_to_host_errno_table[] table from
4659 * host_to_target_errno_table[]. */
4660 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4661 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4662 }
4663
4664 /* We patch the ioctl size if necessary. We rely on the fact that
4665 no ioctl has all bits set to '1' in the size field. */
4666 ie = ioctl_entries;
4667 while (ie->target_cmd != 0) {
4668 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4669 TARGET_IOC_SIZEMASK) {
4670 arg_type = ie->arg_type;
4671 if (arg_type[0] != TYPE_PTR) {
4672 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4673 ie->target_cmd);
4674 exit(1);
4675 }
4676 arg_type++;
4677 size = thunk_type_size(arg_type, 0);
4678 ie->target_cmd = (ie->target_cmd &
4679 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4680 (size << TARGET_IOC_SIZESHIFT);
4681 }
4682
4683 /* automatic consistency check if same arch */
4684 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4685 (defined(__x86_64__) && defined(TARGET_X86_64))
4686 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4687 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4688 ie->name, ie->target_cmd, ie->host_cmd);
4689 }
4690 #endif
4691 ie++;
4692 }
4693 }
4694
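/* On 32-bit ABIs a 64-bit file offset is passed as two consecutive 32-bit
   syscall arguments; reassemble it respecting the target's endianness. */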
4695 #if TARGET_ABI_BITS == 32
4696 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4697 {
4698 #ifdef TARGET_WORDS_BIGENDIAN
4699 return ((uint64_t)word0 << 32) | word1;
4700 #else
4701 return ((uint64_t)word1 << 32) | word0;
4702 #endif
4703 }
4704 #else /* TARGET_ABI_BITS == 32 */
4705 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4706 {
4707 return word0;
4708 }
4709 #endif /* TARGET_ABI_BITS != 32 */
4710
4711 #ifdef TARGET_NR_truncate64
4712 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4713 abi_long arg2,
4714 abi_long arg3,
4715 abi_long arg4)
4716 {
4717 if (regpairs_aligned(cpu_env)) {
4718 arg2 = arg3;
4719 arg3 = arg4;
4720 }
4721 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4722 }
4723 #endif
4724
4725 #ifdef TARGET_NR_ftruncate64
4726 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4727 abi_long arg2,
4728 abi_long arg3,
4729 abi_long arg4)
4730 {
4731 if (regpairs_aligned(cpu_env)) {
4732 arg2 = arg3;
4733 arg3 = arg4;
4734 }
4735 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4736 }
4737 #endif
4738
4739 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4740 abi_ulong target_addr)
4741 {
4742 struct target_timespec *target_ts;
4743
4744 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4745 return -TARGET_EFAULT;
4746 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4747 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4748 unlock_user_struct(target_ts, target_addr, 0);
4749 return 0;
4750 }
4751
4752 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4753 struct timespec *host_ts)
4754 {
4755 struct target_timespec *target_ts;
4756
4757 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4758 return -TARGET_EFAULT;
4759 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4760 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4761 unlock_user_struct(target_ts, target_addr, 1);
4762 return 0;
4763 }
4764
4765 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4766 static inline abi_long host_to_target_stat64(void *cpu_env,
4767 abi_ulong target_addr,
4768 struct stat *host_st)
4769 {
4770 #ifdef TARGET_ARM
4771 if (((CPUARMState *)cpu_env)->eabi) {
4772 struct target_eabi_stat64 *target_st;
4773
4774 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4775 return -TARGET_EFAULT;
4776 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4777 __put_user(host_st->st_dev, &target_st->st_dev);
4778 __put_user(host_st->st_ino, &target_st->st_ino);
4779 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4780 __put_user(host_st->st_ino, &target_st->__st_ino);
4781 #endif
4782 __put_user(host_st->st_mode, &target_st->st_mode);
4783 __put_user(host_st->st_nlink, &target_st->st_nlink);
4784 __put_user(host_st->st_uid, &target_st->st_uid);
4785 __put_user(host_st->st_gid, &target_st->st_gid);
4786 __put_user(host_st->st_rdev, &target_st->st_rdev);
4787 __put_user(host_st->st_size, &target_st->st_size);
4788 __put_user(host_st->st_blksize, &target_st->st_blksize);
4789 __put_user(host_st->st_blocks, &target_st->st_blocks);
4790 __put_user(host_st->st_atime, &target_st->target_st_atime);
4791 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4792 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4793 unlock_user_struct(target_st, target_addr, 1);
4794 } else
4795 #endif
4796 {
4797 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4798 struct target_stat *target_st;
4799 #else
4800 struct target_stat64 *target_st;
4801 #endif
4802
4803 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4804 return -TARGET_EFAULT;
4805 memset(target_st, 0, sizeof(*target_st));
4806 __put_user(host_st->st_dev, &target_st->st_dev);
4807 __put_user(host_st->st_ino, &target_st->st_ino);
4808 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4809 __put_user(host_st->st_ino, &target_st->__st_ino);
4810 #endif
4811 __put_user(host_st->st_mode, &target_st->st_mode);
4812 __put_user(host_st->st_nlink, &target_st->st_nlink);
4813 __put_user(host_st->st_uid, &target_st->st_uid);
4814 __put_user(host_st->st_gid, &target_st->st_gid);
4815 __put_user(host_st->st_rdev, &target_st->st_rdev);
4816 /* XXX: better use of kernel struct */
4817 __put_user(host_st->st_size, &target_st->st_size);
4818 __put_user(host_st->st_blksize, &target_st->st_blksize);
4819 __put_user(host_st->st_blocks, &target_st->st_blocks);
4820 __put_user(host_st->st_atime, &target_st->target_st_atime);
4821 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4822 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4823 unlock_user_struct(target_st, target_addr, 1);
4824 }
4825
4826 return 0;
4827 }
4828 #endif
4829
4830 #if defined(CONFIG_USE_NPTL)
4831 /* ??? Using host futex calls even when target atomic operations
4832 are not really atomic probably breaks things. However, implementing
4833 futexes locally would make futexes shared between multiple processes
4834 tricky. Then again, they are probably useless anyway because guest
4835 atomic operations won't work either. */
4836 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4837 target_ulong uaddr2, int val3)
4838 {
4839 struct timespec ts, *pts;
4840 int base_op;
4841
4842 /* ??? We assume FUTEX_* constants are the same on both host
4843 and target. */
4844 #ifdef FUTEX_CMD_MASK
4845 base_op = op & FUTEX_CMD_MASK;
4846 #else
4847 base_op = op;
4848 #endif
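    /* For FUTEX_WAIT the val argument is compared against the guest-order
       word at uaddr, so it must be byte-swapped; for FUTEX_WAKE it is just a
       wakeup count and is passed through unchanged. */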
4849 switch (base_op) {
4850 case FUTEX_WAIT:
4851 case FUTEX_WAIT_BITSET:
4852 if (timeout) {
4853 pts = &ts;
4854 target_to_host_timespec(pts, timeout);
4855 } else {
4856 pts = NULL;
4857 }
4858 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4859 pts, NULL, val3));
4860 case FUTEX_WAKE:
4861 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4862 case FUTEX_FD:
4863 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4864 case FUTEX_REQUEUE:
4865 case FUTEX_CMP_REQUEUE:
4866 case FUTEX_WAKE_OP:
4867 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4868 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4869 But the prototype takes a `struct timespec *'; insert casts
4870 to satisfy the compiler. We do not need to tswap TIMEOUT
4871 since it's not compared to guest memory. */
4872 pts = (struct timespec *)(uintptr_t) timeout;
4873 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4874 g2h(uaddr2),
4875 (base_op == FUTEX_CMP_REQUEUE
4876 ? tswap32(val3)
4877 : val3)));
4878 default:
4879 return -TARGET_ENOSYS;
4880 }
4881 }
4882 #endif
4883
4884 /* Map host to target signal numbers for the wait family of syscalls.
4885 Assume all other status bits are the same. */
4886 int host_to_target_waitstatus(int status)
4887 {
4888 if (WIFSIGNALED(status)) {
4889 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4890 }
4891 if (WIFSTOPPED(status)) {
4892 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4893 | (status & 0xff);
4894 }
4895 return status;
4896 }
4897
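/* Return the kernel version (from the qemu_uname_release override if set,
   otherwise the host's uname) packed one byte per component, as in the
   kernel's KERNEL_VERSION() macro: "2.6.32" yields 0x020620. */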
4898 int get_osversion(void)
4899 {
4900 static int osversion;
4901 struct new_utsname buf;
4902 const char *s;
4903 int i, n, tmp;
4904 if (osversion)
4905 return osversion;
4906 if (qemu_uname_release && *qemu_uname_release) {
4907 s = qemu_uname_release;
4908 } else {
4909 if (sys_uname(&buf))
4910 return 0;
4911 s = buf.release;
4912 }
4913 tmp = 0;
4914 for (i = 0; i < 3; i++) {
4915 n = 0;
4916 while (*s >= '0' && *s <= '9') {
4917 n *= 10;
4918 n += *s - '0';
4919 s++;
4920 }
4921 tmp = (tmp << 8) + n;
4922 if (*s == '.')
4923 s++;
4924 }
4925 osversion = tmp;
4926 return osversion;
4927 }
4928
4929
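/* Synthesize /proc/self/maps for the guest: host mappings are translated with
   h2g() and ranges with no guest equivalent are dropped; on some targets
   (ARM, M68K, UNICORE32 here) a synthetic [stack] line describing the guest
   stack is appended. */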
4930 static int open_self_maps(void *cpu_env, int fd)
4931 {
4932 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4933 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4934 #endif
4935 FILE *fp;
4936 char *line = NULL;
4937 size_t len = 0;
4938 ssize_t read;
4939
4940 fp = fopen("/proc/self/maps", "r");
4941 if (fp == NULL) {
4942 return -EACCES;
4943 }
4944
4945 while ((read = getline(&line, &len, fp)) != -1) {
4946 int fields, dev_maj, dev_min, inode;
4947 uint64_t min, max, offset;
4948 char flag_r, flag_w, flag_x, flag_p;
4949 char path[512] = "";
4950 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4951 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4952 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4953
4954 if ((fields < 10) || (fields > 11)) {
4955 continue;
4956 }
4957 if (!strncmp(path, "[stack]", 7)) {
4958 continue;
4959 }
4960 if (h2g_valid(min) && h2g_valid(max)) {
4961 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4962 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
4963 h2g(min), h2g(max), flag_r, flag_w,
4964 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4965 path[0] ? " " : "", path);
4966 }
4967 }
4968
4969 free(line);
4970 fclose(fp);
4971
4972 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4973 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4974 (unsigned long long)ts->info->stack_limit,
4975 (unsigned long long)(ts->info->start_stack +
4976 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
4977 (unsigned long long)0);
4978 #endif
4979
4980 return 0;
4981 }
4982
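/* Synthesize /proc/self/stat: 44 space-separated fields, of which only the
   pid (field 1), the command name (field 2) and the start-of-stack address
   (field 28) are filled in; everything else reads as 0. */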
4983 static int open_self_stat(void *cpu_env, int fd)
4984 {
4985 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4986 abi_ulong start_stack = ts->info->start_stack;
4987 int i;
4988
4989 for (i = 0; i < 44; i++) {
4990 char buf[128];
4991 int len;
4992 uint64_t val = 0;
4993
4994 if (i == 0) {
4995 /* pid */
4996 val = getpid();
4997 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4998 } else if (i == 1) {
4999 /* app name */
5000 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5001 } else if (i == 27) {
5002 /* stack bottom */
5003 val = start_stack;
5004 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5005 } else {
5006 /* all other fields are simply reported as zero */
5007 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5008 }
5009
5010 len = strlen(buf);
5011 if (write(fd, buf, len) != len) {
5012 return -1;
5013 }
5014 }
5015
5016 return 0;
5017 }
5018
5019 static int open_self_auxv(void *cpu_env, int fd)
5020 {
5021 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5022 abi_ulong auxv = ts->info->saved_auxv;
5023 abi_ulong len = ts->info->auxv_len;
5024 char *ptr;
5025
5026 /*
5027 * The auxiliary vector is stored on the target process stack.
5028 * Read the whole auxv vector and copy it to the file.
5029 */
5030 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5031 if (ptr != NULL) {
5032 while (len > 0) {
5033 ssize_t r;
5034 r = write(fd, ptr, len);
5035 if (r <= 0) {
5036 break;
5037 }
5038 len -= r;
5039 ptr += r;
5040 }
5041 lseek(fd, 0, SEEK_SET);
5042 unlock_user(ptr, auxv, len);
5043 }
5044
5045 return 0;
5046 }
5047
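/* Return 1 if filename refers to the given entry of this process's own /proc
   directory, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>". */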
5048 static int is_proc_myself(const char *filename, const char *entry)
5049 {
5050 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5051 filename += strlen("/proc/");
5052 if (!strncmp(filename, "self/", strlen("self/"))) {
5053 filename += strlen("self/");
5054 } else if (*filename >= '1' && *filename <= '9') {
5055 char myself[80];
5056 snprintf(myself, sizeof(myself), "%d/", getpid());
5057 if (!strncmp(filename, myself, strlen(myself))) {
5058 filename += strlen(myself);
5059 } else {
5060 return 0;
5061 }
5062 } else {
5063 return 0;
5064 }
5065 if (!strcmp(filename, entry)) {
5066 return 1;
5067 }
5068 }
5069 return 0;
5070 }
5071
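/* open(2) wrapper: opens of /proc/self/maps, /proc/self/stat and
   /proc/self/auxv must reflect the guest rather than the QEMU process, so
   their contents are generated into an unlinked temporary file; everything
   else is passed straight to the host open(). */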
5072 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5073 {
5074 struct fake_open {
5075 const char *filename;
5076 int (*fill)(void *cpu_env, int fd);
5077 };
5078 const struct fake_open *fake_open;
5079 static const struct fake_open fakes[] = {
5080 { "maps", open_self_maps },
5081 { "stat", open_self_stat },
5082 { "auxv", open_self_auxv },
5083 { NULL, NULL }
5084 };
5085
5086 for (fake_open = fakes; fake_open->filename; fake_open++) {
5087 if (is_proc_myself(pathname, fake_open->filename)) {
5088 break;
5089 }
5090 }
5091
5092 if (fake_open->filename) {
5093 const char *tmpdir;
5094 char filename[PATH_MAX];
5095 int fd, r;
5096
5097 /* create a temporary file to hold the synthesized /proc contents */
5098 tmpdir = getenv("TMPDIR");
5099 if (!tmpdir)
5100 tmpdir = "/tmp";
5101 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5102 fd = mkstemp(filename);
5103 if (fd < 0) {
5104 return fd;
5105 }
5106 unlink(filename);
5107
5108 if ((r = fake_open->fill(cpu_env, fd))) {
5109 close(fd);
5110 return r;
5111 }
5112 lseek(fd, 0, SEEK_SET);
5113
5114 return fd;
5115 }
5116
5117 return get_errno(open(path(pathname), flags, mode));
5118 }
5119
5120 /* do_syscall() should always have a single exit point at the end so
5121 that actions, such as logging of syscall results, can be performed.
5122 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5123 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5124 abi_long arg2, abi_long arg3, abi_long arg4,
5125 abi_long arg5, abi_long arg6, abi_long arg7,
5126 abi_long arg8)
5127 {
5128 abi_long ret;
5129 struct stat st;
5130 struct statfs stfs;
5131 void *p;
5132
5133 #ifdef DEBUG
5134 gemu_log("syscall %d", num);
5135 #endif
5136 if(do_strace)
5137 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5138
5139 switch(num) {
5140 case TARGET_NR_exit:
5141 #ifdef CONFIG_USE_NPTL
5142 /* In old applications this may be used to implement _exit(2).
5143 However in threaded applications it is used for thread termination,
5144 and _exit_group is used for application termination.
5145 Do thread termination if we have more than one thread. */
5146 /* FIXME: This probably breaks if a signal arrives. We should probably
5147 be disabling signals. */
5148 if (first_cpu->next_cpu) {
5149 TaskState *ts;
5150 CPUArchState **lastp;
5151 CPUArchState *p;
5152
5153 cpu_list_lock();
5154 lastp = &first_cpu;
5155 p = first_cpu;
5156 while (p && p != (CPUArchState *)cpu_env) {
5157 lastp = &p->next_cpu;
5158 p = p->next_cpu;
5159 }
5160 /* If we didn't find the CPU for this thread then something is
5161 horribly wrong. */
5162 if (!p)
5163 abort();
5164 /* Remove the CPU from the list. */
5165 *lastp = p->next_cpu;
5166 cpu_list_unlock();
5167 ts = ((CPUArchState *)cpu_env)->opaque;
5168 if (ts->child_tidptr) {
5169 put_user_u32(0, ts->child_tidptr);
5170 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5171 NULL, NULL, 0);
5172 }
5173 thread_env = NULL;
5174 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5175 g_free(ts);
5176 pthread_exit(NULL);
5177 }
5178 #endif
5179 #ifdef TARGET_GPROF
5180 _mcleanup();
5181 #endif
5182 gdb_exit(cpu_env, arg1);
5183 _exit(arg1);
5184 ret = 0; /* avoid warning */
5185 break;
5186 case TARGET_NR_read:
5187 if (arg3 == 0)
5188 ret = 0;
5189 else {
5190 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5191 goto efault;
5192 ret = get_errno(read(arg1, p, arg3));
5193 unlock_user(p, arg2, ret);
5194 }
5195 break;
5196 case TARGET_NR_write:
5197 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5198 goto efault;
5199 ret = get_errno(write(arg1, p, arg3));
5200 unlock_user(p, arg2, 0);
5201 break;
5202 case TARGET_NR_open:
5203 if (!(p = lock_user_string(arg1)))
5204 goto efault;
5205 ret = get_errno(do_open(cpu_env, p,
5206 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5207 arg3));
5208 unlock_user(p, arg1, 0);
5209 break;
5210 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5211 case TARGET_NR_openat:
5212 if (!(p = lock_user_string(arg2)))
5213 goto efault;
5214 ret = get_errno(sys_openat(arg1,
5215 path(p),
5216 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5217 arg4));
5218 unlock_user(p, arg2, 0);
5219 break;
5220 #endif
5221 case TARGET_NR_close:
5222 ret = get_errno(close(arg1));
5223 break;
5224 case TARGET_NR_brk:
5225 ret = do_brk(arg1);
5226 break;
5227 case TARGET_NR_fork:
5228 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5229 break;
5230 #ifdef TARGET_NR_waitpid
5231 case TARGET_NR_waitpid:
5232 {
5233 int status;
5234 ret = get_errno(waitpid(arg1, &status, arg3));
5235 if (!is_error(ret) && arg2 && ret
5236 && put_user_s32(host_to_target_waitstatus(status), arg2))
5237 goto efault;
5238 }
5239 break;
5240 #endif
5241 #ifdef TARGET_NR_waitid
5242 case TARGET_NR_waitid:
5243 {
5244 siginfo_t info;
5245 info.si_pid = 0;
5246 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5247 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5248 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5249 goto efault;
5250 host_to_target_siginfo(p, &info);
5251 unlock_user(p, arg3, sizeof(target_siginfo_t));
5252 }
5253 }
5254 break;
5255 #endif
5256 #ifdef TARGET_NR_creat /* not on alpha */
5257 case TARGET_NR_creat:
5258 if (!(p = lock_user_string(arg1)))
5259 goto efault;
5260 ret = get_errno(creat(p, arg2));
5261 unlock_user(p, arg1, 0);
5262 break;
5263 #endif
5264 case TARGET_NR_link:
5265 {
5266 void * p2;
5267 p = lock_user_string(arg1);
5268 p2 = lock_user_string(arg2);
5269 if (!p || !p2)
5270 ret = -TARGET_EFAULT;
5271 else
5272 ret = get_errno(link(p, p2));
5273 unlock_user(p2, arg2, 0);
5274 unlock_user(p, arg1, 0);
5275 }
5276 break;
5277 #if defined(TARGET_NR_linkat)
5278 case TARGET_NR_linkat:
5279 {
5280 void * p2 = NULL;
5281 if (!arg2 || !arg4)
5282 goto efault;
5283 p = lock_user_string(arg2);
5284 p2 = lock_user_string(arg4);
5285 if (!p || !p2)
5286 ret = -TARGET_EFAULT;
5287 else
5288 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5289 unlock_user(p, arg2, 0);
5290 unlock_user(p2, arg4, 0);
5291 }
5292 break;
5293 #endif
5294 case TARGET_NR_unlink:
5295 if (!(p = lock_user_string(arg1)))
5296 goto efault;
5297 ret = get_errno(unlink(p));
5298 unlock_user(p, arg1, 0);
5299 break;
5300 #if defined(TARGET_NR_unlinkat)
5301 case TARGET_NR_unlinkat:
5302 if (!(p = lock_user_string(arg2)))
5303 goto efault;
5304 ret = get_errno(unlinkat(arg1, p, arg3));
5305 unlock_user(p, arg2, 0);
5306 break;
5307 #endif
5308 case TARGET_NR_execve:
5309 {
5310 char **argp, **envp;
5311 int argc, envc;
5312 abi_ulong gp;
5313 abi_ulong guest_argp;
5314 abi_ulong guest_envp;
5315 abi_ulong addr;
5316 char **q;
5317 int total_size = 0;
5318
5319 argc = 0;
5320 guest_argp = arg2;
5321 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5322 if (get_user_ual(addr, gp))
5323 goto efault;
5324 if (!addr)
5325 break;
5326 argc++;
5327 }
5328 envc = 0;
5329 guest_envp = arg3;
5330 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5331 if (get_user_ual(addr, gp))
5332 goto efault;
5333 if (!addr)
5334 break;
5335 envc++;
5336 }
5337
5338 argp = alloca((argc + 1) * sizeof(void *));
5339 envp = alloca((envc + 1) * sizeof(void *));
5340
5341 for (gp = guest_argp, q = argp; gp;
5342 gp += sizeof(abi_ulong), q++) {
5343 if (get_user_ual(addr, gp))
5344 goto execve_efault;
5345 if (!addr)
5346 break;
5347 if (!(*q = lock_user_string(addr)))
5348 goto execve_efault;
5349 total_size += strlen(*q) + 1;
5350 }
5351 *q = NULL;
5352
5353 for (gp = guest_envp, q = envp; gp;
5354 gp += sizeof(abi_ulong), q++) {
5355 if (get_user_ual(addr, gp))
5356 goto execve_efault;
5357 if (!addr)
5358 break;
5359 if (!(*q = lock_user_string(addr)))
5360 goto execve_efault;
5361 total_size += strlen(*q) + 1;
5362 }
5363 *q = NULL;
5364
5365 /* This case will not be caught by the host's execve() if its
5366 page size is bigger than the target's. */
5367 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5368 ret = -TARGET_E2BIG;
5369 goto execve_end;
5370 }
5371 if (!(p = lock_user_string(arg1)))
5372 goto execve_efault;
5373 ret = get_errno(execve(p, argp, envp));
5374 unlock_user(p, arg1, 0);
5375
5376 goto execve_end;
5377
5378 execve_efault:
5379 ret = -TARGET_EFAULT;
5380
5381 execve_end:
5382 for (gp = guest_argp, q = argp; *q;
5383 gp += sizeof(abi_ulong), q++) {
5384 if (get_user_ual(addr, gp)
5385 || !addr)
5386 break;
5387 unlock_user(*q, addr, 0);
5388 }
5389 for (gp = guest_envp, q = envp; *q;
5390 gp += sizeof(abi_ulong), q++) {
5391 if (get_user_ual(addr, gp)
5392 || !addr)
5393 break;
5394 unlock_user(*q, addr, 0);
5395 }
5396 }
5397 break;
5398 case TARGET_NR_chdir:
5399 if (!(p = lock_user_string(arg1)))
5400 goto efault;
5401 ret = get_errno(chdir(p));
5402 unlock_user(p, arg1, 0);
5403 break;
5404 #ifdef TARGET_NR_time
5405 case TARGET_NR_time:
5406 {
5407 time_t host_time;
5408 ret = get_errno(time(&host_time));
5409 if (!is_error(ret)
5410 && arg1
5411 && put_user_sal(host_time, arg1))
5412 goto efault;
5413 }
5414 break;
5415 #endif
5416 case TARGET_NR_mknod:
5417 if (!(p = lock_user_string(arg1)))
5418 goto efault;
5419 ret = get_errno(mknod(p, arg2, arg3));
5420 unlock_user(p, arg1, 0);
5421 break;
5422 #if defined(TARGET_NR_mknodat)
5423 case TARGET_NR_mknodat:
5424 if (!(p = lock_user_string(arg2)))
5425 goto efault;
5426 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5427 unlock_user(p, arg2, 0);
5428 break;
5429 #endif
5430 case TARGET_NR_chmod:
5431 if (!(p = lock_user_string(arg1)))
5432 goto efault;
5433 ret = get_errno(chmod(p, arg2));
5434 unlock_user(p, arg1, 0);
5435 break;
5436 #ifdef TARGET_NR_break
5437 case TARGET_NR_break:
5438 goto unimplemented;
5439 #endif
5440 #ifdef TARGET_NR_oldstat
5441 case TARGET_NR_oldstat:
5442 goto unimplemented;
5443 #endif
5444 case TARGET_NR_lseek:
5445 ret = get_errno(lseek(arg1, arg2, arg3));
5446 break;
5447 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5448 /* Alpha specific */
5449 case TARGET_NR_getxpid:
5450 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5451 ret = get_errno(getpid());
5452 break;
5453 #endif
5454 #ifdef TARGET_NR_getpid
5455 case TARGET_NR_getpid:
5456 ret = get_errno(getpid());
5457 break;
5458 #endif
5459 case TARGET_NR_mount:
5460 {
5461 /* need to look at the data field */
5462 void *p2, *p3;
5463 p = lock_user_string(arg1);
5464 p2 = lock_user_string(arg2);
5465 p3 = lock_user_string(arg3);
5466 if (!p || !p2 || !p3)
5467 ret = -TARGET_EFAULT;
5468 else {
5469 /* FIXME - arg5 should be locked, but it isn't clear how to
5470 * do that since it's not guaranteed to be a NULL-terminated
5471 * string.
5472 */
5473 if ( ! arg5 )
5474 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5475 else
5476 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5477 }
5478 unlock_user(p, arg1, 0);
5479 unlock_user(p2, arg2, 0);
5480 unlock_user(p3, arg3, 0);
5481 break;
5482 }
5483 #ifdef TARGET_NR_umount
5484 case TARGET_NR_umount:
5485 if (!(p = lock_user_string(arg1)))
5486 goto efault;
5487 ret = get_errno(umount(p));
5488 unlock_user(p, arg1, 0);
5489 break;
5490 #endif
5491 #ifdef TARGET_NR_stime /* not on alpha */
5492 case TARGET_NR_stime:
5493 {
5494 time_t host_time;
5495 if (get_user_sal(host_time, arg1))
5496 goto efault;
5497 ret = get_errno(stime(&host_time));
5498 }
5499 break;
5500 #endif
5501 case TARGET_NR_ptrace:
5502 goto unimplemented;
5503 #ifdef TARGET_NR_alarm /* not on alpha */
5504 case TARGET_NR_alarm:
5505 ret = alarm(arg1);
5506 break;
5507 #endif
5508 #ifdef TARGET_NR_oldfstat
5509 case TARGET_NR_oldfstat:
5510 goto unimplemented;
5511 #endif
5512 #ifdef TARGET_NR_pause /* not on alpha */
5513 case TARGET_NR_pause:
5514 ret = get_errno(pause());
5515 break;
5516 #endif
5517 #ifdef TARGET_NR_utime
5518 case TARGET_NR_utime:
5519 {
5520 struct utimbuf tbuf, *host_tbuf;
5521 struct target_utimbuf *target_tbuf;
5522 if (arg2) {
5523 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5524 goto efault;
5525 tbuf.actime = tswapal(target_tbuf->actime);
5526 tbuf.modtime = tswapal(target_tbuf->modtime);
5527 unlock_user_struct(target_tbuf, arg2, 0);
5528 host_tbuf = &tbuf;
5529 } else {
5530 host_tbuf = NULL;
5531 }
5532 if (!(p = lock_user_string(arg1)))
5533 goto efault;
5534 ret = get_errno(utime(p, host_tbuf));
5535 unlock_user(p, arg1, 0);
5536 }
5537 break;
5538 #endif
5539 case TARGET_NR_utimes:
5540 {
5541 struct timeval *tvp, tv[2];
5542 if (arg2) {
5543 if (copy_from_user_timeval(&tv[0], arg2)
5544 || copy_from_user_timeval(&tv[1],
5545 arg2 + sizeof(struct target_timeval)))
5546 goto efault;
5547 tvp = tv;
5548 } else {
5549 tvp = NULL;
5550 }
5551 if (!(p = lock_user_string(arg1)))
5552 goto efault;
5553 ret = get_errno(utimes(p, tvp));
5554 unlock_user(p, arg1, 0);
5555 }
5556 break;
5557 #if defined(TARGET_NR_futimesat)
5558 case TARGET_NR_futimesat:
5559 {
5560 struct timeval *tvp, tv[2];
5561 if (arg3) {
5562 if (copy_from_user_timeval(&tv[0], arg3)
5563 || copy_from_user_timeval(&tv[1],
5564 arg3 + sizeof(struct target_timeval)))
5565 goto efault;
5566 tvp = tv;
5567 } else {
5568 tvp = NULL;
5569 }
5570 if (!(p = lock_user_string(arg2)))
5571 goto efault;
5572 ret = get_errno(futimesat(arg1, path(p), tvp));
5573 unlock_user(p, arg2, 0);
5574 }
5575 break;
5576 #endif
5577 #ifdef TARGET_NR_stty
5578 case TARGET_NR_stty:
5579 goto unimplemented;
5580 #endif
5581 #ifdef TARGET_NR_gtty
5582 case TARGET_NR_gtty:
5583 goto unimplemented;
5584 #endif
5585 case TARGET_NR_access:
5586 if (!(p = lock_user_string(arg1)))
5587 goto efault;
5588 ret = get_errno(access(path(p), arg2));
5589 unlock_user(p, arg1, 0);
5590 break;
5591 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5592 case TARGET_NR_faccessat:
5593 if (!(p = lock_user_string(arg2)))
5594 goto efault;
5595 ret = get_errno(faccessat(arg1, p, arg3, 0));
5596 unlock_user(p, arg2, 0);
5597 break;
5598 #endif
5599 #ifdef TARGET_NR_nice /* not on alpha */
5600 case TARGET_NR_nice:
5601 ret = get_errno(nice(arg1));
5602 break;
5603 #endif
5604 #ifdef TARGET_NR_ftime
5605 case TARGET_NR_ftime:
5606 goto unimplemented;
5607 #endif
5608 case TARGET_NR_sync:
5609 sync();
5610 ret = 0;
5611 break;
5612 case TARGET_NR_kill:
5613 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5614 break;
5615 case TARGET_NR_rename:
5616 {
5617 void *p2;
5618 p = lock_user_string(arg1);
5619 p2 = lock_user_string(arg2);
5620 if (!p || !p2)
5621 ret = -TARGET_EFAULT;
5622 else
5623 ret = get_errno(rename(p, p2));
5624 unlock_user(p2, arg2, 0);
5625 unlock_user(p, arg1, 0);
5626 }
5627 break;
5628 #if defined(TARGET_NR_renameat)
5629 case TARGET_NR_renameat:
5630 {
5631 void *p2;
5632 p = lock_user_string(arg2);
5633 p2 = lock_user_string(arg4);
5634 if (!p || !p2)
5635 ret = -TARGET_EFAULT;
5636 else
5637 ret = get_errno(renameat(arg1, p, arg3, p2));
5638 unlock_user(p2, arg4, 0);
5639 unlock_user(p, arg2, 0);
5640 }
5641 break;
5642 #endif
5643 case TARGET_NR_mkdir:
5644 if (!(p = lock_user_string(arg1)))
5645 goto efault;
5646 ret = get_errno(mkdir(p, arg2));
5647 unlock_user(p, arg1, 0);
5648 break;
5649 #if defined(TARGET_NR_mkdirat)
5650 case TARGET_NR_mkdirat:
5651 if (!(p = lock_user_string(arg2)))
5652 goto efault;
5653 ret = get_errno(mkdirat(arg1, p, arg3));
5654 unlock_user(p, arg2, 0);
5655 break;
5656 #endif
5657 case TARGET_NR_rmdir:
5658 if (!(p = lock_user_string(arg1)))
5659 goto efault;
5660 ret = get_errno(rmdir(p));
5661 unlock_user(p, arg1, 0);
5662 break;
5663 case TARGET_NR_dup:
5664 ret = get_errno(dup(arg1));
5665 break;
5666 case TARGET_NR_pipe:
5667 ret = do_pipe(cpu_env, arg1, 0, 0);
5668 break;
5669 #ifdef TARGET_NR_pipe2
5670 case TARGET_NR_pipe2:
5671 ret = do_pipe(cpu_env, arg1,
5672 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5673 break;
5674 #endif
5675 case TARGET_NR_times:
5676 {
5677 struct target_tms *tmsp;
5678 struct tms tms;
5679 ret = get_errno(times(&tms));
5680 if (arg1) {
5681 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5682 if (!tmsp)
5683 goto efault;
5684 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5685 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5686 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5687 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5688 }
5689 if (!is_error(ret))
5690 ret = host_to_target_clock_t(ret);
5691 }
5692 break;
5693 #ifdef TARGET_NR_prof
5694 case TARGET_NR_prof:
5695 goto unimplemented;
5696 #endif
5697 #ifdef TARGET_NR_signal
5698 case TARGET_NR_signal:
5699 goto unimplemented;
5700 #endif
5701 case TARGET_NR_acct:
5702 if (arg1 == 0) {
5703 ret = get_errno(acct(NULL));
5704 } else {
5705 if (!(p = lock_user_string(arg1)))
5706 goto efault;
5707 ret = get_errno(acct(path(p)));
5708 unlock_user(p, arg1, 0);
5709 }
5710 break;
5711 #ifdef TARGET_NR_umount2 /* not on alpha */
5712 case TARGET_NR_umount2:
5713 if (!(p = lock_user_string(arg1)))
5714 goto efault;
5715 ret = get_errno(umount2(p, arg2));
5716 unlock_user(p, arg1, 0);
5717 break;
5718 #endif
5719 #ifdef TARGET_NR_lock
5720 case TARGET_NR_lock:
5721 goto unimplemented;
5722 #endif
5723 case TARGET_NR_ioctl:
5724 ret = do_ioctl(arg1, arg2, arg3);
5725 break;
5726 case TARGET_NR_fcntl:
5727 ret = do_fcntl(arg1, arg2, arg3);
5728 break;
5729 #ifdef TARGET_NR_mpx
5730 case TARGET_NR_mpx:
5731 goto unimplemented;
5732 #endif
5733 case TARGET_NR_setpgid:
5734 ret = get_errno(setpgid(arg1, arg2));
5735 break;
5736 #ifdef TARGET_NR_ulimit
5737 case TARGET_NR_ulimit:
5738 goto unimplemented;
5739 #endif
5740 #ifdef TARGET_NR_oldolduname
5741 case TARGET_NR_oldolduname:
5742 goto unimplemented;
5743 #endif
5744 case TARGET_NR_umask:
5745 ret = get_errno(umask(arg1));
5746 break;
5747 case TARGET_NR_chroot:
5748 if (!(p = lock_user_string(arg1)))
5749 goto efault;
5750 ret = get_errno(chroot(p));
5751 unlock_user(p, arg1, 0);
5752 break;
5753 case TARGET_NR_ustat:
5754 goto unimplemented;
5755 case TARGET_NR_dup2:
5756 ret = get_errno(dup2(arg1, arg2));
5757 break;
5758 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5759 case TARGET_NR_dup3:
5760 ret = get_errno(dup3(arg1, arg2, arg3));
5761 break;
5762 #endif
5763 #ifdef TARGET_NR_getppid /* not on alpha */
5764 case TARGET_NR_getppid:
5765 ret = get_errno(getppid());
5766 break;
5767 #endif
5768 case TARGET_NR_getpgrp:
5769 ret = get_errno(getpgrp());
5770 break;
5771 case TARGET_NR_setsid:
5772 ret = get_errno(setsid());
5773 break;
5774 #ifdef TARGET_NR_sigaction
5775 case TARGET_NR_sigaction:
5776 {
5777 #if defined(TARGET_ALPHA)
5778 struct target_sigaction act, oact, *pact = 0;
5779 struct target_old_sigaction *old_act;
5780 if (arg2) {
5781 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5782 goto efault;
5783 act._sa_handler = old_act->_sa_handler;
5784 target_siginitset(&act.sa_mask, old_act->sa_mask);
5785 act.sa_flags = old_act->sa_flags;
5786 act.sa_restorer = 0;
5787 unlock_user_struct(old_act, arg2, 0);
5788 pact = &act;
5789 }
5790 ret = get_errno(do_sigaction(arg1, pact, &oact));
5791 if (!is_error(ret) && arg3) {
5792 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5793 goto efault;
5794 old_act->_sa_handler = oact._sa_handler;
5795 old_act->sa_mask = oact.sa_mask.sig[0];
5796 old_act->sa_flags = oact.sa_flags;
5797 unlock_user_struct(old_act, arg3, 1);
5798 }
5799 #elif defined(TARGET_MIPS)
5800 struct target_sigaction act, oact, *pact, *old_act;
5801
5802 if (arg2) {
5803 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5804 goto efault;
5805 act._sa_handler = old_act->_sa_handler;
5806 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5807 act.sa_flags = old_act->sa_flags;
5808 unlock_user_struct(old_act, arg2, 0);
5809 pact = &act;
5810 } else {
5811 pact = NULL;
5812 }
5813
5814 ret = get_errno(do_sigaction(arg1, pact, &oact));
5815
5816 if (!is_error(ret) && arg3) {
5817 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5818 goto efault;
5819 old_act->_sa_handler = oact._sa_handler;
5820 old_act->sa_flags = oact.sa_flags;
5821 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5822 old_act->sa_mask.sig[1] = 0;
5823 old_act->sa_mask.sig[2] = 0;
5824 old_act->sa_mask.sig[3] = 0;
5825 unlock_user_struct(old_act, arg3, 1);
5826 }
5827 #else
5828 struct target_old_sigaction *old_act;
5829 struct target_sigaction act, oact, *pact;
5830 if (arg2) {
5831 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5832 goto efault;
5833 act._sa_handler = old_act->_sa_handler;
5834 target_siginitset(&act.sa_mask, old_act->sa_mask);
5835 act.sa_flags = old_act->sa_flags;
5836 act.sa_restorer = old_act->sa_restorer;
5837 unlock_user_struct(old_act, arg2, 0);
5838 pact = &act;
5839 } else {
5840 pact = NULL;
5841 }
5842 ret = get_errno(do_sigaction(arg1, pact, &oact));
5843 if (!is_error(ret) && arg3) {
5844 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5845 goto efault;
5846 old_act->_sa_handler = oact._sa_handler;
5847 old_act->sa_mask = oact.sa_mask.sig[0];
5848 old_act->sa_flags = oact.sa_flags;
5849 old_act->sa_restorer = oact.sa_restorer;
5850 unlock_user_struct(old_act, arg3, 1);
5851 }
5852 #endif
5853 }
5854 break;
5855 #endif
5856 case TARGET_NR_rt_sigaction:
5857 {
5858 #if defined(TARGET_ALPHA)
5859 struct target_sigaction act, oact, *pact = 0;
5860 struct target_rt_sigaction *rt_act;
5861 /* ??? arg4 == sizeof(sigset_t). */
5862 if (arg2) {
5863 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5864 goto efault;
5865 act._sa_handler = rt_act->_sa_handler;
5866 act.sa_mask = rt_act->sa_mask;
5867 act.sa_flags = rt_act->sa_flags;
5868 act.sa_restorer = arg5;
5869 unlock_user_struct(rt_act, arg2, 0);
5870 pact = &act;
5871 }
5872 ret = get_errno(do_sigaction(arg1, pact, &oact));
5873 if (!is_error(ret) && arg3) {
5874 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5875 goto efault;
5876 rt_act->_sa_handler = oact._sa_handler;
5877 rt_act->sa_mask = oact.sa_mask;
5878 rt_act->sa_flags = oact.sa_flags;
5879 unlock_user_struct(rt_act, arg3, 1);
5880 }
5881 #else
5882 struct target_sigaction *act;
5883 struct target_sigaction *oact;
5884
5885 if (arg2) {
5886 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5887 goto efault;
5888 } else
5889 act = NULL;
5890 if (arg3) {
5891 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5892 ret = -TARGET_EFAULT;
5893 goto rt_sigaction_fail;
5894 }
5895 } else
5896 oact = NULL;
5897 ret = get_errno(do_sigaction(arg1, act, oact));
5898 rt_sigaction_fail:
5899 if (act)
5900 unlock_user_struct(act, arg2, 0);
5901 if (oact)
5902 unlock_user_struct(oact, arg3, 1);
5903 #endif
5904 }
5905 break;
5906 #ifdef TARGET_NR_sgetmask /* not on alpha */
5907 case TARGET_NR_sgetmask:
5908 {
5909 sigset_t cur_set;
5910 abi_ulong target_set;
5911 sigprocmask(0, NULL, &cur_set);
5912 host_to_target_old_sigset(&target_set, &cur_set);
5913 ret = target_set;
5914 }
5915 break;
5916 #endif
5917 #ifdef TARGET_NR_ssetmask /* not on alpha */
5918 case TARGET_NR_ssetmask:
5919 {
5920 sigset_t set, oset;
5921 abi_ulong target_set = arg1;
5922 /* ssetmask installs a complete new signal mask, so convert the
5923 guest value and set it directly rather than OR-ing it into
5924 the current mask. */
5925 sigprocmask(SIG_SETMASK, &set, &oset);
5926 host_to_target_old_sigset(&target_set, &oset);
5927 ret = target_set;
5928 }
5929 break;
5930 #endif
5931 #ifdef TARGET_NR_sigprocmask
5932 case TARGET_NR_sigprocmask:
5933 {
5934 #if defined(TARGET_ALPHA)
5935 sigset_t set, oldset;
5936 abi_ulong mask;
5937 int how;
5938
5939 switch (arg1) {
5940 case TARGET_SIG_BLOCK:
5941 how = SIG_BLOCK;
5942 break;
5943 case TARGET_SIG_UNBLOCK:
5944 how = SIG_UNBLOCK;
5945 break;
5946 case TARGET_SIG_SETMASK:
5947 how = SIG_SETMASK;
5948 break;
5949 default:
5950 ret = -TARGET_EINVAL;
5951 goto fail;
5952 }
5953 mask = arg2;
5954 target_to_host_old_sigset(&set, &mask);
5955
5956 ret = get_errno(sigprocmask(how, &set, &oldset));
5957 if (!is_error(ret)) {
5958 host_to_target_old_sigset(&mask, &oldset);
5959 ret = mask;
5960 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5961 }
5962 #else
5963 sigset_t set, oldset, *set_ptr;
5964 int how;
5965
5966 if (arg2) {
5967 switch (arg1) {
5968 case TARGET_SIG_BLOCK:
5969 how = SIG_BLOCK;
5970 break;
5971 case TARGET_SIG_UNBLOCK:
5972 how = SIG_UNBLOCK;
5973 break;
5974 case TARGET_SIG_SETMASK:
5975 how = SIG_SETMASK;
5976 break;
5977 default:
5978 ret = -TARGET_EINVAL;
5979 goto fail;
5980 }
5981 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5982 goto efault;
5983 target_to_host_old_sigset(&set, p);
5984 unlock_user(p, arg2, 0);
5985 set_ptr = &set;
5986 } else {
5987 how = 0;
5988 set_ptr = NULL;
5989 }
5990 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5991 if (!is_error(ret) && arg3) {
5992 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5993 goto efault;
5994 host_to_target_old_sigset(p, &oldset);
5995 unlock_user(p, arg3, sizeof(target_sigset_t));
5996 }
5997 #endif
5998 }
5999 break;
6000 #endif
6001 case TARGET_NR_rt_sigprocmask:
6002 {
6003 int how = arg1;
6004 sigset_t set, oldset, *set_ptr;
6005
6006 if (arg2) {
6007 switch(how) {
6008 case TARGET_SIG_BLOCK:
6009 how = SIG_BLOCK;
6010 break;
6011 case TARGET_SIG_UNBLOCK:
6012 how = SIG_UNBLOCK;
6013 break;
6014 case TARGET_SIG_SETMASK:
6015 how = SIG_SETMASK;
6016 break;
6017 default:
6018 ret = -TARGET_EINVAL;
6019 goto fail;
6020 }
6021 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6022 goto efault;
6023 target_to_host_sigset(&set, p);
6024 unlock_user(p, arg2, 0);
6025 set_ptr = &set;
6026 } else {
6027 how = 0;
6028 set_ptr = NULL;
6029 }
6030 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6031 if (!is_error(ret) && arg3) {
6032 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6033 goto efault;
6034 host_to_target_sigset(p, &oldset);
6035 unlock_user(p, arg3, sizeof(target_sigset_t));
6036 }
6037 }
6038 break;
6039 #ifdef TARGET_NR_sigpending
6040 case TARGET_NR_sigpending:
6041 {
6042 sigset_t set;
6043 ret = get_errno(sigpending(&set));
6044 if (!is_error(ret)) {
6045 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6046 goto efault;
6047 host_to_target_old_sigset(p, &set);
6048 unlock_user(p, arg1, sizeof(target_sigset_t));
6049 }
6050 }
6051 break;
6052 #endif
6053 case TARGET_NR_rt_sigpending:
6054 {
6055 sigset_t set;
6056 ret = get_errno(sigpending(&set));
6057 if (!is_error(ret)) {
6058 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6059 goto efault;
6060 host_to_target_sigset(p, &set);
6061 unlock_user(p, arg1, sizeof(target_sigset_t));
6062 }
6063 }
6064 break;
6065 #ifdef TARGET_NR_sigsuspend
6066 case TARGET_NR_sigsuspend:
6067 {
6068 sigset_t set;
6069 #if defined(TARGET_ALPHA)
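/* On Alpha the old sigsuspend takes the signal mask by value in the
   first argument instead of via a pointer into guest memory. */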
6070 abi_ulong mask = arg1;
6071 target_to_host_old_sigset(&set, &mask);
6072 #else
6073 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6074 goto efault;
6075 target_to_host_old_sigset(&set, p);
6076 unlock_user(p, arg1, 0);
6077 #endif
6078 ret = get_errno(sigsuspend(&set));
6079 }
6080 break;
6081 #endif
6082 case TARGET_NR_rt_sigsuspend:
6083 {
6084 sigset_t set;
6085 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6086 goto efault;
6087 target_to_host_sigset(&set, p);
6088 unlock_user(p, arg1, 0);
6089 ret = get_errno(sigsuspend(&set));
6090 }
6091 break;
6092 case TARGET_NR_rt_sigtimedwait:
6093 {
6094 sigset_t set;
6095 struct timespec uts, *puts;
6096 siginfo_t uinfo;
6097
6098 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6099 goto efault;
6100 target_to_host_sigset(&set, p);
6101 unlock_user(p, arg1, 0);
6102 if (arg3) {
6103 puts = &uts;
6104 target_to_host_timespec(puts, arg3);
6105 } else {
6106 puts = NULL;
6107 }
6108 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6109 if (!is_error(ret) && arg2) {
6110 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6111 goto efault;
6112 host_to_target_siginfo(p, &uinfo);
6113 unlock_user(p, arg2, sizeof(target_siginfo_t));
6114 }
6115 }
6116 break;
6117 case TARGET_NR_rt_sigqueueinfo:
6118 {
6119 siginfo_t uinfo;
6120 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6121 goto efault;
6122 target_to_host_siginfo(&uinfo, p);
6123 unlock_user(p, arg3, 0);
6124 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6125 }
6126 break;
6127 #ifdef TARGET_NR_sigreturn
6128 case TARGET_NR_sigreturn:
6129 /* NOTE: ret is eax, so no transcoding needs to be done */
6130 ret = do_sigreturn(cpu_env);
6131 break;
6132 #endif
6133 case TARGET_NR_rt_sigreturn:
6134 /* NOTE: ret is eax, so no transcoding needs to be done */
6135 ret = do_rt_sigreturn(cpu_env);
6136 break;
6137 case TARGET_NR_sethostname:
6138 if (!(p = lock_user_string(arg1)))
6139 goto efault;
6140 ret = get_errno(sethostname(p, arg2));
6141 unlock_user(p, arg1, 0);
6142 break;
6143 case TARGET_NR_setrlimit:
6144 {
6145 int resource = target_to_host_resource(arg1);
6146 struct target_rlimit *target_rlim;
6147 struct rlimit rlim;
6148 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6149 goto efault;
6150 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6151 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6152 unlock_user_struct(target_rlim, arg2, 0);
6153 ret = get_errno(setrlimit(resource, &rlim));
6154 }
6155 break;
6156 case TARGET_NR_getrlimit:
6157 {
6158 int resource = target_to_host_resource(arg1);
6159 struct target_rlimit *target_rlim;
6160 struct rlimit rlim;
6161
6162 ret = get_errno(getrlimit(resource, &rlim));
6163 if (!is_error(ret)) {
6164 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6165 goto efault;
6166 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6167 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6168 unlock_user_struct(target_rlim, arg2, 1);
6169 }
6170 }
6171 break;
6172 case TARGET_NR_getrusage:
6173 {
6174 struct rusage rusage;
6175 ret = get_errno(getrusage(arg1, &rusage));
6176 if (!is_error(ret)) {
6177 host_to_target_rusage(arg2, &rusage);
6178 }
6179 }
6180 break;
6181 case TARGET_NR_gettimeofday:
6182 {
6183 struct timeval tv;
6184 ret = get_errno(gettimeofday(&tv, NULL));
6185 if (!is_error(ret)) {
6186 if (copy_to_user_timeval(arg1, &tv))
6187 goto efault;
6188 }
6189 }
6190 break;
6191 case TARGET_NR_settimeofday:
6192 {
6193 struct timeval tv;
6194 if (copy_from_user_timeval(&tv, arg1))
6195 goto efault;
6196 ret = get_errno(settimeofday(&tv, NULL));
6197 }
6198 break;
6199 #if defined(TARGET_NR_select)
6200 case TARGET_NR_select:
6201 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6202 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6203 #else
6204 {
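/* The old-style select on these targets passes a single pointer to a
   block containing n, the three fd_set pointers and the timeval
   pointer, so unpack that block before calling do_select(). */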
6205 struct target_sel_arg_struct *sel;
6206 abi_ulong inp, outp, exp, tvp;
6207 long nsel;
6208
6209 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6210 goto efault;
6211 nsel = tswapal(sel->n);
6212 inp = tswapal(sel->inp);
6213 outp = tswapal(sel->outp);
6214 exp = tswapal(sel->exp);
6215 tvp = tswapal(sel->tvp);
6216 unlock_user_struct(sel, arg1, 0);
6217 ret = do_select(nsel, inp, outp, exp, tvp);
6218 }
6219 #endif
6220 break;
6221 #endif
6222 #ifdef TARGET_NR_pselect6
6223 case TARGET_NR_pselect6:
6224 {
6225 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6226 fd_set rfds, wfds, efds;
6227 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6228 struct timespec ts, *ts_ptr;
6229
6230 /*
6231 * The 6th arg is actually two args smashed together,
6232 * so we cannot use the C library.
6233 */
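/* Concretely, the guest's 6th argument points at a pair laid out as
   { const sigset_t *set; size_t size; }, which is why two abi_ulongs
   are read from arg6 and unpacked by hand below. */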
6234 sigset_t set;
6235 struct {
6236 sigset_t *set;
6237 size_t size;
6238 } sig, *sig_ptr;
6239
6240 abi_ulong arg_sigset, arg_sigsize, *arg7;
6241 target_sigset_t *target_sigset;
6242
6243 n = arg1;
6244 rfd_addr = arg2;
6245 wfd_addr = arg3;
6246 efd_addr = arg4;
6247 ts_addr = arg5;
6248
6249 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6250 if (ret) {
6251 goto fail;
6252 }
6253 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6254 if (ret) {
6255 goto fail;
6256 }
6257 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6258 if (ret) {
6259 goto fail;
6260 }
6261
6262 /*
6263 * This takes a timespec, and not a timeval, so we cannot
6264 * use the do_select() helper ...
6265 */
6266 if (ts_addr) {
6267 if (target_to_host_timespec(&ts, ts_addr)) {
6268 goto efault;
6269 }
6270 ts_ptr = &ts;
6271 } else {
6272 ts_ptr = NULL;
6273 }
6274
6275 /* Extract the two packed args for the sigset */
6276 if (arg6) {
6277 sig_ptr = &sig;
6278 sig.size = _NSIG / 8;
6279
6280 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6281 if (!arg7) {
6282 goto efault;
6283 }
6284 arg_sigset = tswapal(arg7[0]);
6285 arg_sigsize = tswapal(arg7[1]);
6286 unlock_user(arg7, arg6, 0);
6287
6288 if (arg_sigset) {
6289 sig.set = &set;
6290 if (arg_sigsize != sizeof(*target_sigset)) {
6291 /* Like the kernel, we enforce correct size sigsets */
6292 ret = -TARGET_EINVAL;
6293 goto fail;
6294 }
6295 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6296 sizeof(*target_sigset), 1);
6297 if (!target_sigset) {
6298 goto efault;
6299 }
6300 target_to_host_sigset(&set, target_sigset);
6301 unlock_user(target_sigset, arg_sigset, 0);
6302 } else {
6303 sig.set = NULL;
6304 }
6305 } else {
6306 sig_ptr = NULL;
6307 }
6308
6309 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6310 ts_ptr, sig_ptr));
6311
6312 if (!is_error(ret)) {
6313 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6314 goto efault;
6315 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6316 goto efault;
6317 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6318 goto efault;
6319
6320 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6321 goto efault;
6322 }
6323 }
6324 break;
6325 #endif
6326 case TARGET_NR_symlink:
6327 {
6328 void *p2;
6329 p = lock_user_string(arg1);
6330 p2 = lock_user_string(arg2);
6331 if (!p || !p2)
6332 ret = -TARGET_EFAULT;
6333 else
6334 ret = get_errno(symlink(p, p2));
6335 unlock_user(p2, arg2, 0);
6336 unlock_user(p, arg1, 0);
6337 }
6338 break;
6339 #if defined(TARGET_NR_symlinkat)
6340 case TARGET_NR_symlinkat:
6341 {
6342 void *p2;
6343 p = lock_user_string(arg1);
6344 p2 = lock_user_string(arg3);
6345 if (!p || !p2)
6346 ret = -TARGET_EFAULT;
6347 else
6348 ret = get_errno(symlinkat(p, arg2, p2));
6349 unlock_user(p2, arg3, 0);
6350 unlock_user(p, arg1, 0);
6351 }
6352 break;
6353 #endif
6354 #ifdef TARGET_NR_oldlstat
6355 case TARGET_NR_oldlstat:
6356 goto unimplemented;
6357 #endif
6358 case TARGET_NR_readlink:
6359 {
6360 void *p2;
6361 p = lock_user_string(arg1);
6362 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6363 if (!p || !p2) {
6364 ret = -TARGET_EFAULT;
6365 } else if (is_proc_myself((const char *)p, "exe")) {
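/* The guest is reading /proc/self/exe (or /proc/<its pid>/exe):
   report the path of the binary being emulated rather than the
   path of the QEMU executable itself. */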
6366 char real[PATH_MAX], *temp;
6367 temp = realpath(exec_path, real);
6368 ret = temp == NULL ? get_errno(-1) : strlen(real);
6369 snprintf((char *)p2, arg3, "%s", real);
6370 } else {
6371 ret = get_errno(readlink(path(p), p2, arg3));
6372 }
6373 unlock_user(p2, arg2, ret);
6374 unlock_user(p, arg1, 0);
6375 }
6376 break;
6377 #if defined(TARGET_NR_readlinkat)
6378 case TARGET_NR_readlinkat:
6379 {
6380 void *p2;
6381 p = lock_user_string(arg2);
6382 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6383 if (!p || !p2) {
6384 ret = -TARGET_EFAULT;
6385 } else if (is_proc_myself((const char *)p, "exe")) {
6386 char real[PATH_MAX], *temp;
6387 temp = realpath(exec_path, real);
6388 ret = temp == NULL ? get_errno(-1) : strlen(real);
6389 snprintf((char *)p2, arg4, "%s", real);
6390 } else {
6391 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6392 }
6393 unlock_user(p2, arg3, ret);
6394 unlock_user(p, arg2, 0);
6395 }
6396 break;
6397 #endif
6398 #ifdef TARGET_NR_uselib
6399 case TARGET_NR_uselib:
6400 goto unimplemented;
6401 #endif
6402 #ifdef TARGET_NR_swapon
6403 case TARGET_NR_swapon:
6404 if (!(p = lock_user_string(arg1)))
6405 goto efault;
6406 ret = get_errno(swapon(p, arg2));
6407 unlock_user(p, arg1, 0);
6408 break;
6409 #endif
6410 case TARGET_NR_reboot:
6411 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6412 /* arg4 is only used (as a string) for LINUX_REBOOT_CMD_RESTART2; it must be ignored in all other cases */
6413 p = lock_user_string(arg4);
6414 if (!p) {
6415 goto efault;
6416 }
6417 ret = get_errno(reboot(arg1, arg2, arg3, p));
6418 unlock_user(p, arg4, 0);
6419 } else {
6420 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6421 }
6422 break;
6423 #ifdef TARGET_NR_readdir
6424 case TARGET_NR_readdir:
6425 goto unimplemented;
6426 #endif
6427 #ifdef TARGET_NR_mmap
6428 case TARGET_NR_mmap:
6429 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6430 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6431 || defined(TARGET_S390X)
6432 {
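/* On these targets the old mmap syscall passes a pointer to a block
   of six arguments rather than passing them individually, so read
   the six values out of guest memory first. */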
6433 abi_ulong *v;
6434 abi_ulong v1, v2, v3, v4, v5, v6;
6435 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6436 goto efault;
6437 v1 = tswapal(v[0]);
6438 v2 = tswapal(v[1]);
6439 v3 = tswapal(v[2]);
6440 v4 = tswapal(v[3]);
6441 v5 = tswapal(v[4]);
6442 v6 = tswapal(v[5]);
6443 unlock_user(v, arg1, 0);
6444 ret = get_errno(target_mmap(v1, v2, v3,
6445 target_to_host_bitmask(v4, mmap_flags_tbl),
6446 v5, v6));
6447 }
6448 #else
6449 ret = get_errno(target_mmap(arg1, arg2, arg3,
6450 target_to_host_bitmask(arg4, mmap_flags_tbl),
6451 arg5,
6452 arg6));
6453 #endif
6454 break;
6455 #endif
6456 #ifdef TARGET_NR_mmap2
6457 case TARGET_NR_mmap2:
6458 #ifndef MMAP_SHIFT
6459 #define MMAP_SHIFT 12
6460 #endif
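/* mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes
   (4096 by default), so shift it back into a byte offset before
   handing it to target_mmap(). */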
6461 ret = get_errno(target_mmap(arg1, arg2, arg3,
6462 target_to_host_bitmask(arg4, mmap_flags_tbl),
6463 arg5,
6464 arg6 << MMAP_SHIFT));
6465 break;
6466 #endif
6467 case TARGET_NR_munmap:
6468 ret = get_errno(target_munmap(arg1, arg2));
6469 break;
6470 case TARGET_NR_mprotect:
6471 {
6472 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6473 /* Special hack to detect libc making the stack executable. */
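/* If so, drop PROT_GROWSDOWN (the host mapping does not grow) and
   widen the range so it starts at the recorded guest stack limit,
   giving the whole reserved guest stack the new protection. */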
6474 if ((arg3 & PROT_GROWSDOWN)
6475 && arg1 >= ts->info->stack_limit
6476 && arg1 <= ts->info->start_stack) {
6477 arg3 &= ~PROT_GROWSDOWN;
6478 arg2 = arg2 + arg1 - ts->info->stack_limit;
6479 arg1 = ts->info->stack_limit;
6480 }
6481 }
6482 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6483 break;
6484 #ifdef TARGET_NR_mremap
6485 case TARGET_NR_mremap:
6486 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6487 break;
6488 #endif
6489 /* ??? msync/mlock/munlock are broken for softmmu. */
6490 #ifdef TARGET_NR_msync
6491 case TARGET_NR_msync:
6492 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6493 break;
6494 #endif
6495 #ifdef TARGET_NR_mlock
6496 case TARGET_NR_mlock:
6497 ret = get_errno(mlock(g2h(arg1), arg2));
6498 break;
6499 #endif
6500 #ifdef TARGET_NR_munlock
6501 case TARGET_NR_munlock:
6502 ret = get_errno(munlock(g2h(arg1), arg2));
6503 break;
6504 #endif
6505 #ifdef TARGET_NR_mlockall
6506 case TARGET_NR_mlockall:
6507 ret = get_errno(mlockall(arg1));
6508 break;
6509 #endif
6510 #ifdef TARGET_NR_munlockall
6511 case TARGET_NR_munlockall:
6512 ret = get_errno(munlockall());
6513 break;
6514 #endif
6515 case TARGET_NR_truncate:
6516 if (!(p = lock_user_string(arg1)))
6517 goto efault;
6518 ret = get_errno(truncate(p, arg2));
6519 unlock_user(p, arg1, 0);
6520 break;
6521 case TARGET_NR_ftruncate:
6522 ret = get_errno(ftruncate(arg1, arg2));
6523 break;
6524 case TARGET_NR_fchmod:
6525 ret = get_errno(fchmod(arg1, arg2));
6526 break;
6527 #if defined(TARGET_NR_fchmodat)
6528 case TARGET_NR_fchmodat:
6529 if (!(p = lock_user_string(arg2)))
6530 goto efault;
6531 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6532 unlock_user(p, arg2, 0);
6533 break;
6534 #endif
6535 case TARGET_NR_getpriority:
6536 /* Note that negative values are valid for getpriority, so we must
6537 differentiate based on errno settings. */
6538 errno = 0;
6539 ret = getpriority(arg1, arg2);
6540 if (ret == -1 && errno != 0) {
6541 ret = -host_to_target_errno(errno);
6542 break;
6543 }
6544 #ifdef TARGET_ALPHA
6545 /* Return value is the unbiased priority. Signal no error. */
6546 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6547 #else
6548 /* Return value is a biased priority to avoid negative numbers. */
6549 ret = 20 - ret;
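/* For example, a host nice value of -5 is reported to the guest as
   25; the nice range [-20, 19] maps onto [1, 40], matching the
   kernel's getpriority() ABI. */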
6550 #endif
6551 break;
6552 case TARGET_NR_setpriority:
6553 ret = get_errno(setpriority(arg1, arg2, arg3));
6554 break;
6555 #ifdef TARGET_NR_profil
6556 case TARGET_NR_profil:
6557 goto unimplemented;
6558 #endif
6559 case TARGET_NR_statfs:
6560 if (!(p = lock_user_string(arg1)))
6561 goto efault;
6562 ret = get_errno(statfs(path(p), &stfs));
6563 unlock_user(p, arg1, 0);
6564 convert_statfs:
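/* fstatfs below shares this conversion code by jumping here after
   filling in stfs. */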
6565 if (!is_error(ret)) {
6566 struct target_statfs *target_stfs;
6567
6568 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6569 goto efault;
6570 __put_user(stfs.f_type, &target_stfs->f_type);
6571 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6572 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6573 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6574 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6575 __put_user(stfs.f_files, &target_stfs->f_files);
6576 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6577 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6578 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6579 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6580 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6581 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6582 unlock_user_struct(target_stfs, arg2, 1);
6583 }
6584 break;
6585 case TARGET_NR_fstatfs:
6586 ret = get_errno(fstatfs(arg1, &stfs));
6587 goto convert_statfs;
6588 #ifdef TARGET_NR_statfs64
6589 case TARGET_NR_statfs64:
6590 if (!(p = lock_user_string(arg1)))
6591 goto efault;
6592 ret = get_errno(statfs(path(p), &stfs));
6593 unlock_user(p, arg1, 0);
6594 convert_statfs64:
6595 if (!is_error(ret)) {
6596 struct target_statfs64 *target_stfs;
6597
6598 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6599 goto efault;
6600 __put_user(stfs.f_type, &target_stfs->f_type);
6601 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6602 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6603 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6604 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6605 __put_user(stfs.f_files, &target_stfs->f_files);
6606 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6607 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6608 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6609 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6610 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6611 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6612 unlock_user_struct(target_stfs, arg3, 1);
6613 }
6614 break;
6615 case TARGET_NR_fstatfs64:
6616 ret = get_errno(fstatfs(arg1, &stfs));
6617 goto convert_statfs64;
6618 #endif
6619 #ifdef TARGET_NR_ioperm
6620 case TARGET_NR_ioperm:
6621 goto unimplemented;
6622 #endif
6623 #ifdef TARGET_NR_socketcall
6624 case TARGET_NR_socketcall:
6625 ret = do_socketcall(arg1, arg2);
6626 break;
6627 #endif
6628 #ifdef TARGET_NR_accept
6629 case TARGET_NR_accept:
6630 ret = do_accept4(arg1, arg2, arg3, 0);
6631 break;
6632 #endif
6633 #ifdef TARGET_NR_accept4
6634 case TARGET_NR_accept4:
6635 #ifdef CONFIG_ACCEPT4
6636 ret = do_accept4(arg1, arg2, arg3, arg4);
6637 #else
6638 goto unimplemented;
6639 #endif
6640 break;
6641 #endif
6642 #ifdef TARGET_NR_bind
6643 case TARGET_NR_bind:
6644 ret = do_bind(arg1, arg2, arg3);
6645 break;
6646 #endif
6647 #ifdef TARGET_NR_connect
6648 case TARGET_NR_connect:
6649 ret = do_connect(arg1, arg2, arg3);
6650 break;
6651 #endif
6652 #ifdef TARGET_NR_getpeername
6653 case TARGET_NR_getpeername:
6654 ret = do_getpeername(arg1, arg2, arg3);
6655 break;
6656 #endif
6657 #ifdef TARGET_NR_getsockname
6658 case TARGET_NR_getsockname:
6659 ret = do_getsockname(arg1, arg2, arg3);
6660 break;
6661 #endif
6662 #ifdef TARGET_NR_getsockopt
6663 case TARGET_NR_getsockopt:
6664 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6665 break;
6666 #endif
6667 #ifdef TARGET_NR_listen
6668 case TARGET_NR_listen:
6669 ret = get_errno(listen(arg1, arg2));
6670 break;
6671 #endif
6672 #ifdef TARGET_NR_recv
6673 case TARGET_NR_recv:
6674 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6675 break;
6676 #endif
6677 #ifdef TARGET_NR_recvfrom
6678 case TARGET_NR_recvfrom:
6679 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6680 break;
6681 #endif
6682 #ifdef TARGET_NR_recvmsg
6683 case TARGET_NR_recvmsg:
6684 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6685 break;
6686 #endif
6687 #ifdef TARGET_NR_send
6688 case TARGET_NR_send:
6689 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6690 break;
6691 #endif
6692 #ifdef TARGET_NR_sendmsg
6693 case TARGET_NR_sendmsg:
6694 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6695 break;
6696 #endif
6697 #ifdef TARGET_NR_sendto
6698 case TARGET_NR_sendto:
6699 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6700 break;
6701 #endif
6702 #ifdef TARGET_NR_shutdown
6703 case TARGET_NR_shutdown:
6704 ret = get_errno(shutdown(arg1, arg2));
6705 break;
6706 #endif
6707 #ifdef TARGET_NR_socket
6708 case TARGET_NR_socket:
6709 ret = do_socket(arg1, arg2, arg3);
6710 break;
6711 #endif
6712 #ifdef TARGET_NR_socketpair
6713 case TARGET_NR_socketpair:
6714 ret = do_socketpair(arg1, arg2, arg3, arg4);
6715 break;
6716 #endif
6717 #ifdef TARGET_NR_setsockopt
6718 case TARGET_NR_setsockopt:
6719 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6720 break;
6721 #endif
6722
6723 case TARGET_NR_syslog:
6724 if (!(p = lock_user_string(arg2)))
6725 goto efault;
6726 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6727 unlock_user(p, arg2, 0);
6728 break;
6729
6730 case TARGET_NR_setitimer:
6731 {
6732 struct itimerval value, ovalue, *pvalue;
6733
6734 if (arg2) {
6735 pvalue = &value;
6736 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6737 || copy_from_user_timeval(&pvalue->it_value,
6738 arg2 + sizeof(struct target_timeval)))
6739 goto efault;
6740 } else {
6741 pvalue = NULL;
6742 }
6743 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6744 if (!is_error(ret) && arg3) {
6745 if (copy_to_user_timeval(arg3,
6746 &ovalue.it_interval)
6747 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6748 &ovalue.it_value))
6749 goto efault;
6750 }
6751 }
6752 break;
6753 case TARGET_NR_getitimer:
6754 {
6755 struct itimerval value;
6756
6757 ret = get_errno(getitimer(arg1, &value));
6758 if (!is_error(ret) && arg2) {
6759 if (copy_to_user_timeval(arg2,
6760 &value.it_interval)
6761 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6762 &value.it_value))
6763 goto efault;
6764 }
6765 }
6766 break;
6767 case TARGET_NR_stat:
6768 if (!(p = lock_user_string(arg1)))
6769 goto efault;
6770 ret = get_errno(stat(path(p), &st));
6771 unlock_user(p, arg1, 0);
6772 goto do_stat;
6773 case TARGET_NR_lstat:
6774 if (!(p = lock_user_string(arg1)))
6775 goto efault;
6776 ret = get_errno(lstat(path(p), &st));
6777 unlock_user(p, arg1, 0);
6778 goto do_stat;
6779 case TARGET_NR_fstat:
6780 {
6781 ret = get_errno(fstat(arg1, &st));
6782 do_stat:
6783 if (!is_error(ret)) {
6784 struct target_stat *target_st;
6785
6786 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6787 goto efault;
6788 memset(target_st, 0, sizeof(*target_st));
6789 __put_user(st.st_dev, &target_st->st_dev);
6790 __put_user(st.st_ino, &target_st->st_ino);
6791 __put_user(st.st_mode, &target_st->st_mode);
6792 __put_user(st.st_uid, &target_st->st_uid);
6793 __put_user(st.st_gid, &target_st->st_gid);
6794 __put_user(st.st_nlink, &target_st->st_nlink);
6795 __put_user(st.st_rdev, &target_st->st_rdev);
6796 __put_user(st.st_size, &target_st->st_size);
6797 __put_user(st.st_blksize, &target_st->st_blksize);
6798 __put_user(st.st_blocks, &target_st->st_blocks);
6799 __put_user(st.st_atime, &target_st->target_st_atime);
6800 __put_user(st.st_mtime, &target_st->target_st_mtime);
6801 __put_user(st.st_ctime, &target_st->target_st_ctime);
6802 unlock_user_struct(target_st, arg2, 1);
6803 }
6804 }
6805 break;
6806 #ifdef TARGET_NR_olduname
6807 case TARGET_NR_olduname:
6808 goto unimplemented;
6809 #endif
6810 #ifdef TARGET_NR_iopl
6811 case TARGET_NR_iopl:
6812 goto unimplemented;
6813 #endif
6814 case TARGET_NR_vhangup:
6815 ret = get_errno(vhangup());
6816 break;
6817 #ifdef TARGET_NR_idle
6818 case TARGET_NR_idle:
6819 goto unimplemented;
6820 #endif
6821 #ifdef TARGET_NR_syscall
6822 case TARGET_NR_syscall:
6823 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6824 arg6, arg7, arg8, 0);
6825 break;
6826 #endif
6827 case TARGET_NR_wait4:
6828 {
6829 int status;
6830 abi_long status_ptr = arg2;
6831 struct rusage rusage, *rusage_ptr;
6832 abi_ulong target_rusage = arg4;
6833 if (target_rusage)
6834 rusage_ptr = &rusage;
6835 else
6836 rusage_ptr = NULL;
6837 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6838 if (!is_error(ret)) {
6839 if (status_ptr && ret) {
6840 status = host_to_target_waitstatus(status);
6841 if (put_user_s32(status, status_ptr))
6842 goto efault;
6843 }
6844 if (target_rusage)
6845 host_to_target_rusage(target_rusage, &rusage);
6846 }
6847 }
6848 break;
6849 #ifdef TARGET_NR_swapoff
6850 case TARGET_NR_swapoff:
6851 if (!(p = lock_user_string(arg1)))
6852 goto efault;
6853 ret = get_errno(swapoff(p));
6854 unlock_user(p, arg1, 0);
6855 break;
6856 #endif
6857 case TARGET_NR_sysinfo:
6858 {
6859 struct target_sysinfo *target_value;
6860 struct sysinfo value;
6861 ret = get_errno(sysinfo(&value));
6862 if (!is_error(ret) && arg1)
6863 {
6864 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6865 goto efault;
6866 __put_user(value.uptime, &target_value->uptime);
6867 __put_user(value.loads[0], &target_value->loads[0]);
6868 __put_user(value.loads[1], &target_value->loads[1]);
6869 __put_user(value.loads[2], &target_value->loads[2]);
6870 __put_user(value.totalram, &target_value->totalram);
6871 __put_user(value.freeram, &target_value->freeram);
6872 __put_user(value.sharedram, &target_value->sharedram);
6873 __put_user(value.bufferram, &target_value->bufferram);
6874 __put_user(value.totalswap, &target_value->totalswap);
6875 __put_user(value.freeswap, &target_value->freeswap);
6876 __put_user(value.procs, &target_value->procs);
6877 __put_user(value.totalhigh, &target_value->totalhigh);
6878 __put_user(value.freehigh, &target_value->freehigh);
6879 __put_user(value.mem_unit, &target_value->mem_unit);
6880 unlock_user_struct(target_value, arg1, 1);
6881 }
6882 }
6883 break;
6884 #ifdef TARGET_NR_ipc
6885 case TARGET_NR_ipc:
6886 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6887 break;
6888 #endif
6889 #ifdef TARGET_NR_semget
6890 case TARGET_NR_semget:
6891 ret = get_errno(semget(arg1, arg2, arg3));
6892 break;
6893 #endif
6894 #ifdef TARGET_NR_semop
6895 case TARGET_NR_semop:
6896 ret = do_semop(arg1, arg2, arg3);
6897 break;
6898 #endif
6899 #ifdef TARGET_NR_semctl
6900 case TARGET_NR_semctl:
6901 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6902 break;
6903 #endif
6904 #ifdef TARGET_NR_msgctl
6905 case TARGET_NR_msgctl:
6906 ret = do_msgctl(arg1, arg2, arg3);
6907 break;
6908 #endif
6909 #ifdef TARGET_NR_msgget
6910 case TARGET_NR_msgget:
6911 ret = get_errno(msgget(arg1, arg2));
6912 break;
6913 #endif
6914 #ifdef TARGET_NR_msgrcv
6915 case TARGET_NR_msgrcv:
6916 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6917 break;
6918 #endif
6919 #ifdef TARGET_NR_msgsnd
6920 case TARGET_NR_msgsnd:
6921 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6922 break;
6923 #endif
6924 #ifdef TARGET_NR_shmget
6925 case TARGET_NR_shmget:
6926 ret = get_errno(shmget(arg1, arg2, arg3));
6927 break;
6928 #endif
6929 #ifdef TARGET_NR_shmctl
6930 case TARGET_NR_shmctl:
6931 ret = do_shmctl(arg1, arg2, arg3);
6932 break;
6933 #endif
6934 #ifdef TARGET_NR_shmat
6935 case TARGET_NR_shmat:
6936 ret = do_shmat(arg1, arg2, arg3);
6937 break;
6938 #endif
6939 #ifdef TARGET_NR_shmdt
6940 case TARGET_NR_shmdt:
6941 ret = do_shmdt(arg1);
6942 break;
6943 #endif
6944 case TARGET_NR_fsync:
6945 ret = get_errno(fsync(arg1));
6946 break;
6947 case TARGET_NR_clone:
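/* The clone arguments (flags, child stack, parent/child tid pointers,
   TLS) arrive in different orders on different targets, hence the
   per-target shuffling of arg1..arg6 below. */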
6948 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6949 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6950 #elif defined(TARGET_CRIS)
6951 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6952 #elif defined(TARGET_MICROBLAZE)
6953 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6954 #elif defined(TARGET_S390X)
6955 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6956 #else
6957 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6958 #endif
6959 break;
6960 #ifdef __NR_exit_group
6961 /* new thread calls */
6962 case TARGET_NR_exit_group:
6963 #ifdef TARGET_GPROF
6964 _mcleanup();
6965 #endif
6966 gdb_exit(cpu_env, arg1);
6967 ret = get_errno(exit_group(arg1));
6968 break;
6969 #endif
6970 case TARGET_NR_setdomainname:
6971 if (!(p = lock_user_string(arg1)))
6972 goto efault;
6973 ret = get_errno(setdomainname(p, arg2));
6974 unlock_user(p, arg1, 0);
6975 break;
6976 case TARGET_NR_uname:
6977 /* no need to transcode because we use the linux syscall */
6978 {
6979 struct new_utsname * buf;
6980
6981 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6982 goto efault;
6983 ret = get_errno(sys_uname(buf));
6984 if (!is_error(ret)) {
6985 /* Overwrite the native machine name with whatever is being
6986 emulated. */
6987 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6988 /* Allow the user to override the reported release. */
6989 if (qemu_uname_release && *qemu_uname_release)
6990 strcpy (buf->release, qemu_uname_release);
6991 }
6992 unlock_user_struct(buf, arg1, 1);
6993 }
6994 break;
6995 #ifdef TARGET_I386
6996 case TARGET_NR_modify_ldt:
6997 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6998 break;
6999 #if !defined(TARGET_X86_64)
7000 case TARGET_NR_vm86old:
7001 goto unimplemented;
7002 case TARGET_NR_vm86:
7003 ret = do_vm86(cpu_env, arg1, arg2);
7004 break;
7005 #endif
7006 #endif
7007 case TARGET_NR_adjtimex:
7008 goto unimplemented;
7009 #ifdef TARGET_NR_create_module
7010 case TARGET_NR_create_module:
7011 #endif
7012 case TARGET_NR_init_module:
7013 case TARGET_NR_delete_module:
7014 #ifdef TARGET_NR_get_kernel_syms
7015 case TARGET_NR_get_kernel_syms:
7016 #endif
7017 goto unimplemented;
7018 case TARGET_NR_quotactl:
7019 goto unimplemented;
7020 case TARGET_NR_getpgid:
7021 ret = get_errno(getpgid(arg1));
7022 break;
7023 case TARGET_NR_fchdir:
7024 ret = get_errno(fchdir(arg1));
7025 break;
7026 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7027 case TARGET_NR_bdflush:
7028 goto unimplemented;
7029 #endif
7030 #ifdef TARGET_NR_sysfs
7031 case TARGET_NR_sysfs:
7032 goto unimplemented;
7033 #endif
7034 case TARGET_NR_personality:
7035 ret = get_errno(personality(arg1));
7036 break;
7037 #ifdef TARGET_NR_afs_syscall
7038 case TARGET_NR_afs_syscall:
7039 goto unimplemented;
7040 #endif
7041 #ifdef TARGET_NR__llseek /* Not on alpha */
7042 case TARGET_NR__llseek:
7043 {
7044 int64_t res;
7045 #if !defined(__NR_llseek)
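/* No _llseek on the host: rebuild the 64-bit offset from its high
   (arg2) and low (arg3) 32-bit halves and fall back to plain
   lseek(). */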
7046 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7047 if (res == -1) {
7048 ret = get_errno(res);
7049 } else {
7050 ret = 0;
7051 }
7052 #else
7053 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7054 #endif
7055 if ((ret == 0) && put_user_s64(res, arg4)) {
7056 goto efault;
7057 }
7058 }
7059 break;
7060 #endif
7061 case TARGET_NR_getdents:
7062 #ifdef __NR_getdents
7063 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7064 {
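/* With a 32-bit target ABI on a 64-bit host the host linux_dirent
   layout (64-bit d_ino/d_off) does not match the target's, so each
   record returned by the host has to be repacked into target
   format. */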
7065 struct target_dirent *target_dirp;
7066 struct linux_dirent *dirp;
7067 abi_long count = arg3;
7068
7069 dirp = malloc(count);
7070 if (!dirp) {
7071 ret = -TARGET_ENOMEM;
7072 goto fail;
7073 }
7074
7075 ret = get_errno(sys_getdents(arg1, dirp, count));
7076 if (!is_error(ret)) {
7077 struct linux_dirent *de;
7078 struct target_dirent *tde;
7079 int len = ret;
7080 int reclen, treclen;
7081 int count1, tnamelen;
7082
7083 count1 = 0;
7084 de = dirp;
7085 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7086 goto efault;
7087 tde = target_dirp;
7088 while (len > 0) {
7089 reclen = de->d_reclen;
7090 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7091 assert(tnamelen >= 0);
7092 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7093 assert(count1 + treclen <= count);
7094 tde->d_reclen = tswap16(treclen);
7095 tde->d_ino = tswapal(de->d_ino);
7096 tde->d_off = tswapal(de->d_off);
7097 memcpy(tde->d_name, de->d_name, tnamelen);
7098 de = (struct linux_dirent *)((char *)de + reclen);
7099 len -= reclen;
7100 tde = (struct target_dirent *)((char *)tde + treclen);
7101 count1 += treclen;
7102 }
7103 ret = count1;
7104 unlock_user(target_dirp, arg2, ret);
7105 }
7106 free(dirp);
7107 }
7108 #else
7109 {
7110 struct linux_dirent *dirp;
7111 abi_long count = arg3;
7112
7113 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7114 goto efault;
7115 ret = get_errno(sys_getdents(arg1, dirp, count));
7116 if (!is_error(ret)) {
7117 struct linux_dirent *de;
7118 int len = ret;
7119 int reclen;
7120 de = dirp;
7121 while (len > 0) {
7122 reclen = de->d_reclen;
7123 if (reclen > len)
7124 break;
7125 de->d_reclen = tswap16(reclen);
7126 tswapls(&de->d_ino);
7127 tswapls(&de->d_off);
7128 de = (struct linux_dirent *)((char *)de + reclen);
7129 len -= reclen;
7130 }
7131 }
7132 unlock_user(dirp, arg2, ret);
7133 }
7134 #endif
7135 #else
7136 /* Implement getdents in terms of getdents64 */
7137 {
7138 struct linux_dirent64 *dirp;
7139 abi_long count = arg3;
7140
7141 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7142 if (!dirp) {
7143 goto efault;
7144 }
7145 ret = get_errno(sys_getdents64(arg1, dirp, count));
7146 if (!is_error(ret)) {
7147 /* Convert the dirent64 structs to target dirent. We do this
7148 * in-place, since we can guarantee that a target_dirent is no
7149 * larger than a dirent64; however this means we have to be
7150 * careful to read everything before writing in the new format.
7151 */
7152 struct linux_dirent64 *de;
7153 struct target_dirent *tde;
7154 int len = ret;
7155 int tlen = 0;
7156
7157 de = dirp;
7158 tde = (struct target_dirent *)dirp;
7159 while (len > 0) {
7160 int namelen, treclen;
7161 int reclen = de->d_reclen;
7162 uint64_t ino = de->d_ino;
7163 int64_t off = de->d_off;
7164 uint8_t type = de->d_type;
7165
7166 namelen = strlen(de->d_name);
7167 treclen = offsetof(struct target_dirent, d_name)
7168 + namelen + 2;
7169 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
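/* The "+ 2" above leaves room for the trailing NUL of d_name plus
   the d_type byte stored in the final byte of the record (see the
   comment further down). */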
7170
7171 memmove(tde->d_name, de->d_name, namelen + 1);
7172 tde->d_ino = tswapal(ino);
7173 tde->d_off = tswapal(off);
7174 tde->d_reclen = tswap16(treclen);
7175 /* The target_dirent type is in what was formerly a padding
7176 * byte at the end of the structure:
7177 */
7178 *(((char *)tde) + treclen - 1) = type;
7179
7180 de = (struct linux_dirent64 *)((char *)de + reclen);
7181 tde = (struct target_dirent *)((char *)tde + treclen);
7182 len -= reclen;
7183 tlen += treclen;
7184 }
7185 ret = tlen;
7186 }
7187 unlock_user(dirp, arg2, ret);
7188 }
7189 #endif
7190 break;
7191 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7192 case TARGET_NR_getdents64:
7193 {
7194 struct linux_dirent64 *dirp;
7195 abi_long count = arg3;
7196 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7197 goto efault;
7198 ret = get_errno(sys_getdents64(arg1, dirp, count));
7199 if (!is_error(ret)) {
7200 struct linux_dirent64 *de;
7201 int len = ret;
7202 int reclen;
7203 de = dirp;
7204 while (len > 0) {
7205 reclen = de->d_reclen;
7206 if (reclen > len)
7207 break;
7208 de->d_reclen = tswap16(reclen);
7209 tswap64s((uint64_t *)&de->d_ino);
7210 tswap64s((uint64_t *)&de->d_off);
7211 de = (struct linux_dirent64 *)((char *)de + reclen);
7212 len -= reclen;
7213 }
7214 }
7215 unlock_user(dirp, arg2, ret);
7216 }
7217 break;
7218 #endif /* TARGET_NR_getdents64 */
7219 #if defined(TARGET_NR__newselect)
7220 case TARGET_NR__newselect:
7221 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7222 break;
7223 #endif
7224 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7225 # ifdef TARGET_NR_poll
7226 case TARGET_NR_poll:
7227 # endif
7228 # ifdef TARGET_NR_ppoll
7229 case TARGET_NR_ppoll:
7230 # endif
7231 {
7232 struct target_pollfd *target_pfd;
7233 unsigned int nfds = arg2;
7234 int timeout = arg3;
7235 struct pollfd *pfd;
7236 unsigned int i;
7237
7238 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7239 if (!target_pfd)
7240 goto efault;
7241
7242 pfd = alloca(sizeof(struct pollfd) * nfds);
7243 for(i = 0; i < nfds; i++) {
7244 pfd[i].fd = tswap32(target_pfd[i].fd);
7245 pfd[i].events = tswap16(target_pfd[i].events);
7246 }
7247
7248 # ifdef TARGET_NR_ppoll
7249 if (num == TARGET_NR_ppoll) {
7250 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7251 target_sigset_t *target_set;
7252 sigset_t _set, *set = &_set;
7253
7254 if (arg3) {
7255 if (target_to_host_timespec(timeout_ts, arg3)) {
7256 unlock_user(target_pfd, arg1, 0);
7257 goto efault;
7258 }
7259 } else {
7260 timeout_ts = NULL;
7261 }
7262
7263 if (arg4) {
7264 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7265 if (!target_set) {
7266 unlock_user(target_pfd, arg1, 0);
7267 goto efault;
7268 }
7269 target_to_host_sigset(set, target_set);
7270 } else {
7271 set = NULL;
7272 }
7273
7274 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7275
7276 if (!is_error(ret) && arg3) {
7277 host_to_target_timespec(arg3, timeout_ts);
7278 }
7279 if (arg4) {
7280 unlock_user(target_set, arg4, 0);
7281 }
7282 } else
7283 # endif
7284 ret = get_errno(poll(pfd, nfds, timeout));
7285
7286 if (!is_error(ret)) {
7287 for(i = 0; i < nfds; i++) {
7288 target_pfd[i].revents = tswap16(pfd[i].revents);
7289 }
7290 }
7291 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7292 }
7293 break;
7294 #endif
7295 case TARGET_NR_flock:
7296 /* NOTE: the flock constant seems to be the same for every
7297 Linux platform */
7298 ret = get_errno(flock(arg1, arg2));
7299 break;
7300 case TARGET_NR_readv:
7301 {
7302 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7303 if (vec != NULL) {
7304 ret = get_errno(readv(arg1, vec, arg3));
7305 unlock_iovec(vec, arg2, arg3, 1);
7306 } else {
7307 ret = -host_to_target_errno(errno);
7308 }
7309 }
7310 break;
7311 case TARGET_NR_writev:
7312 {
7313 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7314 if (vec != NULL) {
7315 ret = get_errno(writev(arg1, vec, arg3));
7316 unlock_iovec(vec, arg2, arg3, 0);
7317 } else {
7318 ret = -host_to_target_errno(errno);
7319 }
7320 }
7321 break;
7322 case TARGET_NR_getsid:
7323 ret = get_errno(getsid(arg1));
7324 break;
7325 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7326 case TARGET_NR_fdatasync:
7327 ret = get_errno(fdatasync(arg1));
7328 break;
7329 #endif
7330 case TARGET_NR__sysctl:
7331 /* We don't implement this, but ENOTDIR is always a safe
7332 return value. */
7333 ret = -TARGET_ENOTDIR;
7334 break;
7335 case TARGET_NR_sched_getaffinity:
7336 {
7337 unsigned int mask_size;
7338 unsigned long *mask;
7339
7340 /*
7341 * sched_getaffinity needs multiples of ulong, so need to take
7342 * care of mismatches between target ulong and host ulong sizes.
7343 */
7344 if (arg2 & (sizeof(abi_ulong) - 1)) {
7345 ret = -TARGET_EINVAL;
7346 break;
7347 }
7348 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
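/* e.g. a request for a 4-byte mask on a 64-bit host is rounded up to
   mask_size == 8 here, the smallest multiple of the host's unsigned
   long that covers it. */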
7349
7350 mask = alloca(mask_size);
7351 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7352
7353 if (!is_error(ret)) {
7354 if (copy_to_user(arg3, mask, ret)) {
7355 goto efault;
7356 }
7357 }
7358 }
7359 break;
7360 case TARGET_NR_sched_setaffinity:
7361 {
7362 unsigned int mask_size;
7363 unsigned long *mask;
7364
7365 /*
7366 * sched_setaffinity needs multiples of ulong, so need to take
7367 * care of mismatches between target ulong and host ulong sizes.
7368 */
7369 if (arg2 & (sizeof(abi_ulong) - 1)) {
7370 ret = -TARGET_EINVAL;
7371 break;
7372 }
7373 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7374
7375 mask = alloca(mask_size);
7376 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7377 goto efault;
7378 }
7379 memcpy(mask, p, arg2);
7380 unlock_user_struct(p, arg3, 0);
7381
7382 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7383 }
7384 break;
7385 case TARGET_NR_sched_setparam:
7386 {
7387 struct sched_param *target_schp;
7388 struct sched_param schp;
7389
7390 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7391 goto efault;
7392 schp.sched_priority = tswap32(target_schp->sched_priority);
7393 unlock_user_struct(target_schp, arg2, 0);
7394 ret = get_errno(sched_setparam(arg1, &schp));
7395 }
7396 break;
7397 case TARGET_NR_sched_getparam:
7398 {
7399 struct sched_param *target_schp;
7400 struct sched_param schp;
7401 ret = get_errno(sched_getparam(arg1, &schp));
7402 if (!is_error(ret)) {
7403 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7404 goto efault;
7405 target_schp->sched_priority = tswap32(schp.sched_priority);
7406 unlock_user_struct(target_schp, arg2, 1);
7407 }
7408 }
7409 break;
7410 case TARGET_NR_sched_setscheduler:
7411 {
7412 struct sched_param *target_schp;
7413 struct sched_param schp;
7414 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7415 goto efault;
7416 schp.sched_priority = tswap32(target_schp->sched_priority);
7417 unlock_user_struct(target_schp, arg3, 0);
7418 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7419 }
7420 break;
7421 case TARGET_NR_sched_getscheduler:
7422 ret = get_errno(sched_getscheduler(arg1));
7423 break;
7424 case TARGET_NR_sched_yield:
7425 ret = get_errno(sched_yield());
7426 break;
7427 case TARGET_NR_sched_get_priority_max:
7428 ret = get_errno(sched_get_priority_max(arg1));
7429 break;
7430 case TARGET_NR_sched_get_priority_min:
7431 ret = get_errno(sched_get_priority_min(arg1));
7432 break;
7433 case TARGET_NR_sched_rr_get_interval:
7434 {
7435 struct timespec ts;
7436 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7437 if (!is_error(ret)) {
7438 host_to_target_timespec(arg2, &ts);
7439 }
7440 }
7441 break;
7442 case TARGET_NR_nanosleep:
7443 {
7444 struct timespec req, rem;
7445 target_to_host_timespec(&req, arg1);
7446 ret = get_errno(nanosleep(&req, &rem));
7447 if (is_error(ret) && arg2) {
7448 host_to_target_timespec(arg2, &rem);
7449 }
7450 }
7451 break;
7452 #ifdef TARGET_NR_query_module
7453 case TARGET_NR_query_module:
7454 goto unimplemented;
7455 #endif
7456 #ifdef TARGET_NR_nfsservctl
7457 case TARGET_NR_nfsservctl:
7458 goto unimplemented;
7459 #endif
7460 case TARGET_NR_prctl:
7461 switch (arg1) {
7462 case PR_GET_PDEATHSIG:
7463 {
7464 int deathsig;
7465 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7466 if (!is_error(ret) && arg2
7467 && put_user_ual(deathsig, arg2)) {
7468 goto efault;
7469 }
7470 break;
7471 }
7472 #ifdef PR_GET_NAME
7473 case PR_GET_NAME:
7474 {
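/* Task names handled by PR_GET_NAME/PR_SET_NAME are at most 16 bytes
   (the kernel's TASK_COMM_LEN), hence the fixed 16-byte buffer. */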
7475 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7476 if (!name) {
7477 goto efault;
7478 }
7479 ret = get_errno(prctl(arg1, (unsigned long)name,
7480 arg3, arg4, arg5));
7481 unlock_user(name, arg2, 16);
7482 break;
7483 }
7484 case PR_SET_NAME:
7485 {
7486 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7487 if (!name) {
7488 goto efault;
7489 }
7490 ret = get_errno(prctl(arg1, (unsigned long)name,
7491 arg3, arg4, arg5));
7492 unlock_user(name, arg2, 0);
7493 break;
7494 }
7495 #endif
7496 default:
7497 /* Most prctl options have no pointer arguments */
7498 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7499 break;
7500 }
7501 break;
7502 #ifdef TARGET_NR_arch_prctl
7503 case TARGET_NR_arch_prctl:
7504 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7505 ret = do_arch_prctl(cpu_env, arg1, arg2);
7506 break;
7507 #else
7508 goto unimplemented;
7509 #endif
7510 #endif
7511 #ifdef TARGET_NR_pread64
7512 case TARGET_NR_pread64:
7513 if (regpairs_aligned(cpu_env)) {
7514 arg4 = arg5;
7515 arg5 = arg6;
7516 }
7517 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7518 goto efault;
7519 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7520 unlock_user(p, arg2, ret);
7521 break;
7522 case TARGET_NR_pwrite64:
7523 if (regpairs_aligned(cpu_env)) {
7524 arg4 = arg5;
7525 arg5 = arg6;
7526 }
7527 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7528 goto efault;
7529 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7530 unlock_user(p, arg2, 0);
7531 break;
7532 #endif
7533 case TARGET_NR_getcwd:
7534 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7535 goto efault;
7536 ret = get_errno(sys_getcwd1(p, arg2));
7537 unlock_user(p, arg1, ret);
7538 break;
7539 case TARGET_NR_capget:
7540 goto unimplemented;
7541 case TARGET_NR_capset:
7542 goto unimplemented;
7543 case TARGET_NR_sigaltstack:
7544 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7545 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7546 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7547 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7548 break;
7549 #else
7550 goto unimplemented;
7551 #endif
7552
7553 #ifdef CONFIG_SENDFILE
7554 case TARGET_NR_sendfile:
7555 {
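/* sendfile() updates the offset through its pointer argument, so copy
   the guest's offset in, let the host call advance it, and write the
   updated value back afterwards. */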
7556 off_t *offp = NULL;
7557 off_t off;
7558 if (arg3) {
7559 ret = get_user_sal(off, arg3);
7560 if (is_error(ret)) {
7561 break;
7562 }
7563 offp = &off;
7564 }
7565 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7566 if (!is_error(ret) && arg3) {
7567 abi_long ret2 = put_user_sal(off, arg3);
7568 if (is_error(ret2)) {
7569 ret = ret2;
7570 }
7571 }
7572 break;
7573 }
7574 #ifdef TARGET_NR_sendfile64
7575 case TARGET_NR_sendfile64:
7576 {
7577 off_t *offp = NULL;
7578 off_t off;
7579 if (arg3) {
7580 ret = get_user_s64(off, arg3);
7581 if (is_error(ret)) {
7582 break;
7583 }
7584 offp = &off;
7585 }
7586 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7587 if (!is_error(ret) && arg3) {
7588 abi_long ret2 = put_user_s64(off, arg3);
7589 if (is_error(ret2)) {
7590 ret = ret2;
7591 }
7592 }
7593 break;
7594 }
7595 #endif
7596 #else
7597 case TARGET_NR_sendfile:
7598 #ifdef TARGET_NR_sendfile64
7599 case TARGET_NR_sendfile64:
7600 #endif
7601 goto unimplemented;
7602 #endif
7603
7604 #ifdef TARGET_NR_getpmsg
7605 case TARGET_NR_getpmsg:
7606 goto unimplemented;
7607 #endif
7608 #ifdef TARGET_NR_putpmsg
7609 case TARGET_NR_putpmsg:
7610 goto unimplemented;
7611 #endif
7612 #ifdef TARGET_NR_vfork
7613 case TARGET_NR_vfork:
7614 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7615 0, 0, 0, 0));
7616 break;
7617 #endif
7618 #ifdef TARGET_NR_ugetrlimit
7619 case TARGET_NR_ugetrlimit:
7620 {
7621 struct rlimit rlim;
7622 int resource = target_to_host_resource(arg1);
7623 ret = get_errno(getrlimit(resource, &rlim));
7624 if (!is_error(ret)) {
7625 struct target_rlimit *target_rlim;
7626 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7627 goto efault;
7628 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7629 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7630 unlock_user_struct(target_rlim, arg2, 1);
7631 }
7632 break;
7633 }
7634 #endif
7635 #ifdef TARGET_NR_truncate64
7636 case TARGET_NR_truncate64:
7637 if (!(p = lock_user_string(arg1)))
7638 goto efault;
7639 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7640 unlock_user(p, arg1, 0);
7641 break;
7642 #endif
7643 #ifdef TARGET_NR_ftruncate64
7644 case TARGET_NR_ftruncate64:
7645 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7646 break;
7647 #endif
7648 #ifdef TARGET_NR_stat64
7649 case TARGET_NR_stat64:
7650 if (!(p = lock_user_string(arg1)))
7651 goto efault;
7652 ret = get_errno(stat(path(p), &st));
7653 unlock_user(p, arg1, 0);
7654 if (!is_error(ret))
7655 ret = host_to_target_stat64(cpu_env, arg2, &st);
7656 break;
7657 #endif
7658 #ifdef TARGET_NR_lstat64
7659 case TARGET_NR_lstat64:
7660 if (!(p = lock_user_string(arg1)))
7661 goto efault;
7662 ret = get_errno(lstat(path(p), &st));
7663 unlock_user(p, arg1, 0);
7664 if (!is_error(ret))
7665 ret = host_to_target_stat64(cpu_env, arg2, &st);
7666 break;
7667 #endif
7668 #ifdef TARGET_NR_fstat64
7669 case TARGET_NR_fstat64:
7670 ret = get_errno(fstat(arg1, &st));
7671 if (!is_error(ret))
7672 ret = host_to_target_stat64(cpu_env, arg2, &st);
7673 break;
7674 #endif
7675 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7676 #ifdef TARGET_NR_fstatat64
7677 case TARGET_NR_fstatat64:
7678 #endif
7679 #ifdef TARGET_NR_newfstatat
7680 case TARGET_NR_newfstatat:
7681 #endif
7682 if (!(p = lock_user_string(arg2)))
7683 goto efault;
7684 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7685 if (!is_error(ret))
7686 ret = host_to_target_stat64(cpu_env, arg3, &st);
7687 break;
7688 #endif
7689 case TARGET_NR_lchown:
7690 if (!(p = lock_user_string(arg1)))
7691 goto efault;
7692 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7693 unlock_user(p, arg1, 0);
7694 break;
7695 #ifdef TARGET_NR_getuid
7696 case TARGET_NR_getuid:
7697 ret = get_errno(high2lowuid(getuid()));
7698 break;
7699 #endif
7700 #ifdef TARGET_NR_getgid
7701 case TARGET_NR_getgid:
7702 ret = get_errno(high2lowgid(getgid()));
7703 break;
7704 #endif
7705 #ifdef TARGET_NR_geteuid
7706 case TARGET_NR_geteuid:
7707 ret = get_errno(high2lowuid(geteuid()));
7708 break;
7709 #endif
7710 #ifdef TARGET_NR_getegid
7711 case TARGET_NR_getegid:
7712 ret = get_errno(high2lowgid(getegid()));
7713 break;
7714 #endif
7715 case TARGET_NR_setreuid:
7716 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7717 break;
7718 case TARGET_NR_setregid:
7719 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7720 break;
7721 case TARGET_NR_getgroups:
7722 {
7723 int gidsetsize = arg1;
7724 target_id *target_grouplist;
7725 gid_t *grouplist;
7726 int i;
7727
7728 grouplist = alloca(gidsetsize * sizeof(gid_t));
7729 ret = get_errno(getgroups(gidsetsize, grouplist));
7730 if (gidsetsize == 0)
7731 break;
7732 if (!is_error(ret)) {
7733 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7734 if (!target_grouplist)
7735 goto efault;
7736 for(i = 0;i < ret; i++)
7737 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7738 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7739 }
7740 }
7741 break;
7742 case TARGET_NR_setgroups:
7743 {
7744 int gidsetsize = arg1;
7745 target_id *target_grouplist;
7746 gid_t *grouplist = NULL;
7747 int i;
7748 if (gidsetsize) {
7749 grouplist = alloca(gidsetsize * sizeof(gid_t));
7750 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7751 if (!target_grouplist) {
7752 ret = -TARGET_EFAULT;
7753 goto fail;
7754 }
7755 for (i = 0; i < gidsetsize; i++) {
7756 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7757 }
7758 unlock_user(target_grouplist, arg2, 0);
7759 }
7760 ret = get_errno(setgroups(gidsetsize, grouplist));
7761 }
7762 break;
7763 case TARGET_NR_fchown:
7764 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7765 break;
7766 #if defined(TARGET_NR_fchownat)
7767 case TARGET_NR_fchownat:
7768 if (!(p = lock_user_string(arg2)))
7769 goto efault;
7770 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7771 low2highgid(arg4), arg5));
7772 unlock_user(p, arg2, 0);
7773 break;
7774 #endif
7775 #ifdef TARGET_NR_setresuid
7776 case TARGET_NR_setresuid:
7777 ret = get_errno(setresuid(low2highuid(arg1),
7778 low2highuid(arg2),
7779 low2highuid(arg3)));
7780 break;
7781 #endif
7782 #ifdef TARGET_NR_getresuid
7783 case TARGET_NR_getresuid:
7784 {
7785 uid_t ruid, euid, suid;
7786 ret = get_errno(getresuid(&ruid, &euid, &suid));
7787 if (!is_error(ret)) {
7788 if (put_user_u16(high2lowuid(ruid), arg1)
7789 || put_user_u16(high2lowuid(euid), arg2)
7790 || put_user_u16(high2lowuid(suid), arg3))
7791 goto efault;
7792 }
7793 }
7794 break;
7795 #endif
7796 #ifdef TARGET_NR_setresgid
7797 case TARGET_NR_setresgid:
7798 ret = get_errno(setresgid(low2highgid(arg1),
7799 low2highgid(arg2),
7800 low2highgid(arg3)));
7801 break;
7802 #endif
7803 #ifdef TARGET_NR_getresgid
7804 case TARGET_NR_getresgid:
7805 {
7806 gid_t rgid, egid, sgid;
7807 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7808 if (!is_error(ret)) {
7809 if (put_user_u16(high2lowgid(rgid), arg1)
7810 || put_user_u16(high2lowgid(egid), arg2)
7811 || put_user_u16(high2lowgid(sgid), arg3))
7812 goto efault;
7813 }
7814 }
7815 break;
7816 #endif
7817 case TARGET_NR_chown:
7818 if (!(p = lock_user_string(arg1)))
7819 goto efault;
7820 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7821 unlock_user(p, arg1, 0);
7822 break;
7823 case TARGET_NR_setuid:
7824 ret = get_errno(setuid(low2highuid(arg1)));
7825 break;
7826 case TARGET_NR_setgid:
7827 ret = get_errno(setgid(low2highgid(arg1)));
7828 break;
7829 case TARGET_NR_setfsuid:
7830 ret = get_errno(setfsuid(arg1));
7831 break;
7832 case TARGET_NR_setfsgid:
7833 ret = get_errno(setfsgid(arg1));
7834 break;
7835
7836 #ifdef TARGET_NR_lchown32
7837 case TARGET_NR_lchown32:
7838 if (!(p = lock_user_string(arg1)))
7839 goto efault;
7840 ret = get_errno(lchown(p, arg2, arg3));
7841 unlock_user(p, arg1, 0);
7842 break;
7843 #endif
7844 #ifdef TARGET_NR_getuid32
7845 case TARGET_NR_getuid32:
7846 ret = get_errno(getuid());
7847 break;
7848 #endif
7849
7850 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7851 /* Alpha specific */
7852 case TARGET_NR_getxuid:
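/* getxuid returns the real uid as the syscall result (v0) and the
 * effective uid in the a4 register, hence the store to ir[IR_A4]. */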
7853 {
7854 uid_t euid;
7855 euid=geteuid();
7856 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7857 }
7858 ret = get_errno(getuid());
7859 break;
7860 #endif
7861 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7862 /* Alpha specific */
7863 case TARGET_NR_getxgid:
7864 {
7865 gid_t egid;
7866 egid=getegid();
7867 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7868 }
7869 ret = get_errno(getgid());
7870 break;
7871 #endif
7872 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7873 /* Alpha specific */
7874 case TARGET_NR_osf_getsysinfo:
7875 ret = -TARGET_EOPNOTSUPP;
7876 switch (arg1) {
7877 case TARGET_GSI_IEEE_FP_CONTROL:
7878 {
7879 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7880
7881 /* Copied from linux ieee_fpcr_to_swcr. */
7882 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7883 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7884 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7885 | SWCR_TRAP_ENABLE_DZE
7886 | SWCR_TRAP_ENABLE_OVF);
7887 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7888 | SWCR_TRAP_ENABLE_INE);
7889 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7890 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7891
7892 if (put_user_u64 (swcr, arg2))
7893 goto efault;
7894 ret = 0;
7895 }
7896 break;
7897
7898 /* case GSI_IEEE_STATE_AT_SIGNAL:
7899 -- Not implemented in linux kernel.
7900 case GSI_UACPROC:
7901 -- Retrieves current unaligned access state; not much used.
7902 case GSI_PROC_TYPE:
7903 -- Retrieves implver information; surely not used.
7904 case GSI_GET_HWRPB:
7905 -- Grabs a copy of the HWRPB; surely not used.
7906 */
7907 }
7908 break;
7909 #endif
7910 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7911 /* Alpha specific */
7912 case TARGET_NR_osf_setsysinfo:
7913 ret = -TARGET_EOPNOTSUPP;
7914 switch (arg1) {
7915 case TARGET_SSI_IEEE_FP_CONTROL:
7916 {
7917 uint64_t swcr, fpcr, orig_fpcr;
7918
7919 if (get_user_u64 (swcr, arg2)) {
7920 goto efault;
7921 }
7922 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7923 fpcr = orig_fpcr & FPCR_DYN_MASK;
7924
7925 /* Copied from linux ieee_swcr_to_fpcr. */
7926 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7927 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7928 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7929 | SWCR_TRAP_ENABLE_DZE
7930 | SWCR_TRAP_ENABLE_OVF)) << 48;
7931 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7932 | SWCR_TRAP_ENABLE_INE)) << 57;
7933 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7934 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7935
7936 cpu_alpha_store_fpcr(cpu_env, fpcr);
7937 ret = 0;
7938 }
7939 break;
7940
7941 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7942 {
7943 uint64_t exc, fpcr, orig_fpcr;
7944 int si_code;
7945
7946 if (get_user_u64(exc, arg2)) {
7947 goto efault;
7948 }
7949
7950 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7951
7952 /* We only add to the exception status here. */
7953 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7954
7955 cpu_alpha_store_fpcr(cpu_env, fpcr);
7956 ret = 0;
7957
7958 /* Old exceptions are not signaled. */
7959 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7960
7961 /* If any exceptions were set by this call
7962 and are unmasked, send a signal. */
7963 si_code = 0;
7964 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7965 si_code = TARGET_FPE_FLTRES;
7966 }
7967 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7968 si_code = TARGET_FPE_FLTUND;
7969 }
7970 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7971 si_code = TARGET_FPE_FLTOVF;
7972 }
7973 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7974 si_code = TARGET_FPE_FLTDIV;
7975 }
7976 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7977 si_code = TARGET_FPE_FLTINV;
7978 }
7979 if (si_code != 0) {
7980 target_siginfo_t info;
7981 info.si_signo = SIGFPE;
7982 info.si_errno = 0;
7983 info.si_code = si_code;
7984 info._sifields._sigfault._addr
7985 = ((CPUArchState *)cpu_env)->pc;
7986 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7987 }
7988 }
7989 break;
7990
7991 /* case SSI_NVPAIRS:
7992 -- Used with SSIN_UACPROC to enable unaligned accesses.
7993 case SSI_IEEE_STATE_AT_SIGNAL:
7994 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7995 -- Not implemented in linux kernel
7996 */
7997 }
7998 break;
7999 #endif
8000 #ifdef TARGET_NR_osf_sigprocmask
8001 /* Alpha specific. */
8002 case TARGET_NR_osf_sigprocmask:
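/* osf_sigprocmask takes the new mask by value and returns the old
 * mask as the syscall result, so the converted old set is handed
 * back through ret rather than through a pointer. */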
8003 {
8004 abi_ulong mask;
8005 int how;
8006 sigset_t set, oldset;
8007
8008 switch(arg1) {
8009 case TARGET_SIG_BLOCK:
8010 how = SIG_BLOCK;
8011 break;
8012 case TARGET_SIG_UNBLOCK:
8013 how = SIG_UNBLOCK;
8014 break;
8015 case TARGET_SIG_SETMASK:
8016 how = SIG_SETMASK;
8017 break;
8018 default:
8019 ret = -TARGET_EINVAL;
8020 goto fail;
8021 }
8022 mask = arg2;
8023 target_to_host_old_sigset(&set, &mask);
8024 sigprocmask(how, &set, &oldset);
8025 host_to_target_old_sigset(&mask, &oldset);
8026 ret = mask;
8027 }
8028 break;
8029 #endif
8030
8031 #ifdef TARGET_NR_getgid32
8032 case TARGET_NR_getgid32:
8033 ret = get_errno(getgid());
8034 break;
8035 #endif
8036 #ifdef TARGET_NR_geteuid32
8037 case TARGET_NR_geteuid32:
8038 ret = get_errno(geteuid());
8039 break;
8040 #endif
8041 #ifdef TARGET_NR_getegid32
8042 case TARGET_NR_getegid32:
8043 ret = get_errno(getegid());
8044 break;
8045 #endif
8046 #ifdef TARGET_NR_setreuid32
8047 case TARGET_NR_setreuid32:
8048 ret = get_errno(setreuid(arg1, arg2));
8049 break;
8050 #endif
8051 #ifdef TARGET_NR_setregid32
8052 case TARGET_NR_setregid32:
8053 ret = get_errno(setregid(arg1, arg2));
8054 break;
8055 #endif
8056 #ifdef TARGET_NR_getgroups32
8057 case TARGET_NR_getgroups32:
8058 {
8059 int gidsetsize = arg1;
8060 uint32_t *target_grouplist;
8061 gid_t *grouplist;
8062 int i;
8063
8064 grouplist = alloca(gidsetsize * sizeof(gid_t));
8065 ret = get_errno(getgroups(gidsetsize, grouplist));
8066 if (gidsetsize == 0)
8067 break;
8068 if (!is_error(ret)) {
8069 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8070 if (!target_grouplist) {
8071 ret = -TARGET_EFAULT;
8072 goto fail;
8073 }
8074 for(i = 0;i < ret; i++)
8075 target_grouplist[i] = tswap32(grouplist[i]);
8076 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8077 }
8078 }
8079 break;
8080 #endif
8081 #ifdef TARGET_NR_setgroups32
8082 case TARGET_NR_setgroups32:
8083 {
8084 int gidsetsize = arg1;
8085 uint32_t *target_grouplist;
8086 gid_t *grouplist;
8087 int i;
8088
8089 grouplist = alloca(gidsetsize * sizeof(gid_t));
8090 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8091 if (!target_grouplist) {
8092 ret = -TARGET_EFAULT;
8093 goto fail;
8094 }
8095 for(i = 0;i < gidsetsize; i++)
8096 grouplist[i] = tswap32(target_grouplist[i]);
8097 unlock_user(target_grouplist, arg2, 0);
8098 ret = get_errno(setgroups(gidsetsize, grouplist));
8099 }
8100 break;
8101 #endif
8102 #ifdef TARGET_NR_fchown32
8103 case TARGET_NR_fchown32:
8104 ret = get_errno(fchown(arg1, arg2, arg3));
8105 break;
8106 #endif
8107 #ifdef TARGET_NR_setresuid32
8108 case TARGET_NR_setresuid32:
8109 ret = get_errno(setresuid(arg1, arg2, arg3));
8110 break;
8111 #endif
8112 #ifdef TARGET_NR_getresuid32
8113 case TARGET_NR_getresuid32:
8114 {
8115 uid_t ruid, euid, suid;
8116 ret = get_errno(getresuid(&ruid, &euid, &suid));
8117 if (!is_error(ret)) {
8118 if (put_user_u32(ruid, arg1)
8119 || put_user_u32(euid, arg2)
8120 || put_user_u32(suid, arg3))
8121 goto efault;
8122 }
8123 }
8124 break;
8125 #endif
8126 #ifdef TARGET_NR_setresgid32
8127 case TARGET_NR_setresgid32:
8128 ret = get_errno(setresgid(arg1, arg2, arg3));
8129 break;
8130 #endif
8131 #ifdef TARGET_NR_getresgid32
8132 case TARGET_NR_getresgid32:
8133 {
8134 gid_t rgid, egid, sgid;
8135 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8136 if (!is_error(ret)) {
8137 if (put_user_u32(rgid, arg1)
8138 || put_user_u32(egid, arg2)
8139 || put_user_u32(sgid, arg3))
8140 goto efault;
8141 }
8142 }
8143 break;
8144 #endif
8145 #ifdef TARGET_NR_chown32
8146 case TARGET_NR_chown32:
8147 if (!(p = lock_user_string(arg1)))
8148 goto efault;
8149 ret = get_errno(chown(p, arg2, arg3));
8150 unlock_user(p, arg1, 0);
8151 break;
8152 #endif
8153 #ifdef TARGET_NR_setuid32
8154 case TARGET_NR_setuid32:
8155 ret = get_errno(setuid(arg1));
8156 break;
8157 #endif
8158 #ifdef TARGET_NR_setgid32
8159 case TARGET_NR_setgid32:
8160 ret = get_errno(setgid(arg1));
8161 break;
8162 #endif
8163 #ifdef TARGET_NR_setfsuid32
8164 case TARGET_NR_setfsuid32:
8165 ret = get_errno(setfsuid(arg1));
8166 break;
8167 #endif
8168 #ifdef TARGET_NR_setfsgid32
8169 case TARGET_NR_setfsgid32:
8170 ret = get_errno(setfsgid(arg1));
8171 break;
8172 #endif
8173
8174 case TARGET_NR_pivot_root:
8175 goto unimplemented;
8176 #ifdef TARGET_NR_mincore
8177 case TARGET_NR_mincore:
8178 {
8179 void *a;
8180 ret = -TARGET_EFAULT;
8181 if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
8182 goto efault;
8183 if (!(p = lock_user_string(arg3)))
8184 goto mincore_fail;
8185 ret = get_errno(mincore(a, arg2, p));
8186 unlock_user(p, arg3, ret);
8187 mincore_fail:
8188 unlock_user(a, arg1, 0);
8189 }
8190 break;
8191 #endif
8192 #ifdef TARGET_NR_arm_fadvise64_64
8193 case TARGET_NR_arm_fadvise64_64:
8194 {
8195 /*
8196 * arm_fadvise64_64 looks like fadvise64_64 but
8197 * with different argument order
8198 */
8199 abi_long temp;
8200 temp = arg3;
8201 arg3 = arg4;
8202 arg4 = temp;
8203 }
8204 #endif
8205 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8206 #ifdef TARGET_NR_fadvise64_64
8207 case TARGET_NR_fadvise64_64:
8208 #endif
8209 #ifdef TARGET_NR_fadvise64
8210 case TARGET_NR_fadvise64:
8211 #endif
8212 #ifdef TARGET_S390X
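/* s390x defines POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather than the
 * generic 4/5, so remap the guest advice to the host constants and
 * turn 4/5 (unused on s390x) into deliberately invalid values. */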
8213 switch (arg4) {
8214 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8215 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8216 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8217 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8218 default: break;
8219 }
8220 #endif
8221 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8222 break;
8223 #endif
8224 #ifdef TARGET_NR_madvise
8225 case TARGET_NR_madvise:
8226 /* A straight passthrough may not be safe because qemu sometimes
8227 turns private file-backed mappings into anonymous mappings.
8228 This will break MADV_DONTNEED.
8229 This is a hint, so ignoring and returning success is ok. */
8230 ret = get_errno(0);
8231 break;
8232 #endif
8233 #if TARGET_ABI_BITS == 32
8234 case TARGET_NR_fcntl64:
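/* On 32-bit ABIs fcntl64 carries 64-bit file locks: the guest
 * struct flock64 (or the differently padded ARM EABI variant) is
 * converted to the host layout, and for F_GETLK64 converted back
 * after the call. */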
8235 {
8236 int cmd;
8237 struct flock64 fl;
8238 struct target_flock64 *target_fl;
8239 #ifdef TARGET_ARM
8240 struct target_eabi_flock64 *target_efl;
8241 #endif
8242
8243 cmd = target_to_host_fcntl_cmd(arg2);
8244 if (cmd == -TARGET_EINVAL) {
8245 ret = cmd;
8246 break;
8247 }
8248
8249 switch(arg2) {
8250 case TARGET_F_GETLK64:
8251 #ifdef TARGET_ARM
8252 if (((CPUARMState *)cpu_env)->eabi) {
8253 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8254 goto efault;
8255 fl.l_type = tswap16(target_efl->l_type);
8256 fl.l_whence = tswap16(target_efl->l_whence);
8257 fl.l_start = tswap64(target_efl->l_start);
8258 fl.l_len = tswap64(target_efl->l_len);
8259 fl.l_pid = tswap32(target_efl->l_pid);
8260 unlock_user_struct(target_efl, arg3, 0);
8261 } else
8262 #endif
8263 {
8264 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8265 goto efault;
8266 fl.l_type = tswap16(target_fl->l_type);
8267 fl.l_whence = tswap16(target_fl->l_whence);
8268 fl.l_start = tswap64(target_fl->l_start);
8269 fl.l_len = tswap64(target_fl->l_len);
8270 fl.l_pid = tswap32(target_fl->l_pid);
8271 unlock_user_struct(target_fl, arg3, 0);
8272 }
8273 ret = get_errno(fcntl(arg1, cmd, &fl));
8274 if (ret == 0) {
8275 #ifdef TARGET_ARM
8276 if (((CPUARMState *)cpu_env)->eabi) {
8277 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8278 goto efault;
8279 target_efl->l_type = tswap16(fl.l_type);
8280 target_efl->l_whence = tswap16(fl.l_whence);
8281 target_efl->l_start = tswap64(fl.l_start);
8282 target_efl->l_len = tswap64(fl.l_len);
8283 target_efl->l_pid = tswap32(fl.l_pid);
8284 unlock_user_struct(target_efl, arg3, 1);
8285 } else
8286 #endif
8287 {
8288 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8289 goto efault;
8290 target_fl->l_type = tswap16(fl.l_type);
8291 target_fl->l_whence = tswap16(fl.l_whence);
8292 target_fl->l_start = tswap64(fl.l_start);
8293 target_fl->l_len = tswap64(fl.l_len);
8294 target_fl->l_pid = tswap32(fl.l_pid);
8295 unlock_user_struct(target_fl, arg3, 1);
8296 }
8297 }
8298 break;
8299
8300 case TARGET_F_SETLK64:
8301 case TARGET_F_SETLKW64:
8302 #ifdef TARGET_ARM
8303 if (((CPUARMState *)cpu_env)->eabi) {
8304 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8305 goto efault;
8306 fl.l_type = tswap16(target_efl->l_type);
8307 fl.l_whence = tswap16(target_efl->l_whence);
8308 fl.l_start = tswap64(target_efl->l_start);
8309 fl.l_len = tswap64(target_efl->l_len);
8310 fl.l_pid = tswap32(target_efl->l_pid);
8311 unlock_user_struct(target_efl, arg3, 0);
8312 } else
8313 #endif
8314 {
8315 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8316 goto efault;
8317 fl.l_type = tswap16(target_fl->l_type);
8318 fl.l_whence = tswap16(target_fl->l_whence);
8319 fl.l_start = tswap64(target_fl->l_start);
8320 fl.l_len = tswap64(target_fl->l_len);
8321 fl.l_pid = tswap32(target_fl->l_pid);
8322 unlock_user_struct(target_fl, arg3, 0);
8323 }
8324 ret = get_errno(fcntl(arg1, cmd, &fl));
8325 break;
8326 default:
8327 ret = do_fcntl(arg1, arg2, arg3);
8328 break;
8329 }
8330 break;
8331 }
8332 #endif
8333 #ifdef TARGET_NR_cacheflush
8334 case TARGET_NR_cacheflush:
8335 /* self-modifying code is handled automatically, so nothing needed */
8336 ret = 0;
8337 break;
8338 #endif
8339 #ifdef TARGET_NR_security
8340 case TARGET_NR_security:
8341 goto unimplemented;
8342 #endif
8343 #ifdef TARGET_NR_getpagesize
8344 case TARGET_NR_getpagesize:
8345 ret = TARGET_PAGE_SIZE;
8346 break;
8347 #endif
8348 case TARGET_NR_gettid:
8349 ret = get_errno(gettid());
8350 break;
8351 #ifdef TARGET_NR_readahead
8352 case TARGET_NR_readahead:
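/* readahead takes a 64-bit offset; on 32-bit ABIs it is split across
 * two registers (shifted by one when the ABI wants aligned register
 * pairs) and reassembled before calling the host. */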
8353 #if TARGET_ABI_BITS == 32
8354 if (regpairs_aligned(cpu_env)) {
8355 arg2 = arg3;
8356 arg3 = arg4;
8357 arg4 = arg5;
8358 }
8359 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8360 #else
8361 ret = get_errno(readahead(arg1, arg2, arg3));
8362 #endif
8363 break;
8364 #endif
8365 #ifdef CONFIG_ATTR
8366 #ifdef TARGET_NR_setxattr
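/* Extended attribute syscalls: the value/list buffers are optional
 * (a zero pointer with size 0 lets the guest query the required
 * size), so guest memory is locked only when an address was given. */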
8367 case TARGET_NR_listxattr:
8368 case TARGET_NR_llistxattr:
8369 {
8370 void *p, *b = 0;
8371 if (arg2) {
8372 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8373 if (!b) {
8374 ret = -TARGET_EFAULT;
8375 break;
8376 }
8377 }
8378 p = lock_user_string(arg1);
8379 if (p) {
8380 if (num == TARGET_NR_listxattr) {
8381 ret = get_errno(listxattr(p, b, arg3));
8382 } else {
8383 ret = get_errno(llistxattr(p, b, arg3));
8384 }
8385 } else {
8386 ret = -TARGET_EFAULT;
8387 }
8388 unlock_user(p, arg1, 0);
8389 unlock_user(b, arg2, arg3);
8390 break;
8391 }
8392 case TARGET_NR_flistxattr:
8393 {
8394 void *b = 0;
8395 if (arg2) {
8396 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8397 if (!b) {
8398 ret = -TARGET_EFAULT;
8399 break;
8400 }
8401 }
8402 ret = get_errno(flistxattr(arg1, b, arg3));
8403 unlock_user(b, arg2, arg3);
8404 break;
8405 }
8406 case TARGET_NR_setxattr:
8407 case TARGET_NR_lsetxattr:
8408 {
8409 void *p, *n, *v = 0;
8410 if (arg3) {
8411 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8412 if (!v) {
8413 ret = -TARGET_EFAULT;
8414 break;
8415 }
8416 }
8417 p = lock_user_string(arg1);
8418 n = lock_user_string(arg2);
8419 if (p && n) {
8420 if (num == TARGET_NR_setxattr) {
8421 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8422 } else {
8423 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8424 }
8425 } else {
8426 ret = -TARGET_EFAULT;
8427 }
8428 unlock_user(p, arg1, 0);
8429 unlock_user(n, arg2, 0);
8430 unlock_user(v, arg3, 0);
8431 }
8432 break;
8433 case TARGET_NR_fsetxattr:
8434 {
8435 void *n, *v = 0;
8436 if (arg3) {
8437 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8438 if (!v) {
8439 ret = -TARGET_EFAULT;
8440 break;
8441 }
8442 }
8443 n = lock_user_string(arg2);
8444 if (n) {
8445 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8446 } else {
8447 ret = -TARGET_EFAULT;
8448 }
8449 unlock_user(n, arg2, 0);
8450 unlock_user(v, arg3, 0);
8451 }
8452 break;
8453 case TARGET_NR_getxattr:
8454 case TARGET_NR_lgetxattr:
8455 {
8456 void *p, *n, *v = 0;
8457 if (arg3) {
8458 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8459 if (!v) {
8460 ret = -TARGET_EFAULT;
8461 break;
8462 }
8463 }
8464 p = lock_user_string(arg1);
8465 n = lock_user_string(arg2);
8466 if (p && n) {
8467 if (num == TARGET_NR_getxattr) {
8468 ret = get_errno(getxattr(p, n, v, arg4));
8469 } else {
8470 ret = get_errno(lgetxattr(p, n, v, arg4));
8471 }
8472 } else {
8473 ret = -TARGET_EFAULT;
8474 }
8475 unlock_user(p, arg1, 0);
8476 unlock_user(n, arg2, 0);
8477 unlock_user(v, arg3, arg4);
8478 }
8479 break;
8480 case TARGET_NR_fgetxattr:
8481 {
8482 void *n, *v = 0;
8483 if (arg3) {
8484 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8485 if (!v) {
8486 ret = -TARGET_EFAULT;
8487 break;
8488 }
8489 }
8490 n = lock_user_string(arg2);
8491 if (n) {
8492 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8493 } else {
8494 ret = -TARGET_EFAULT;
8495 }
8496 unlock_user(n, arg2, 0);
8497 unlock_user(v, arg3, arg4);
8498 }
8499 break;
8500 case TARGET_NR_removexattr:
8501 case TARGET_NR_lremovexattr:
8502 {
8503 void *p, *n;
8504 p = lock_user_string(arg1);
8505 n = lock_user_string(arg2);
8506 if (p && n) {
8507 if (num == TARGET_NR_removexattr) {
8508 ret = get_errno(removexattr(p, n));
8509 } else {
8510 ret = get_errno(lremovexattr(p, n));
8511 }
8512 } else {
8513 ret = -TARGET_EFAULT;
8514 }
8515 unlock_user(p, arg1, 0);
8516 unlock_user(n, arg2, 0);
8517 }
8518 break;
8519 case TARGET_NR_fremovexattr:
8520 {
8521 void *n;
8522 n = lock_user_string(arg2);
8523 if (n) {
8524 ret = get_errno(fremovexattr(arg1, n));
8525 } else {
8526 ret = -TARGET_EFAULT;
8527 }
8528 unlock_user(n, arg2, 0);
8529 }
8530 break;
8531 #endif
8532 #endif /* CONFIG_ATTR */
8533 #ifdef TARGET_NR_set_thread_area
8534 case TARGET_NR_set_thread_area:
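/* set_thread_area is emulated per target: MIPS stores the TLS
 * pointer in the CPU state, CRIS writes it to the PR_PID special
 * register, and 32-bit x86 updates a GDT entry via
 * do_set_thread_area(). */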
8535 #if defined(TARGET_MIPS)
8536 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8537 ret = 0;
8538 break;
8539 #elif defined(TARGET_CRIS)
8540 if (arg1 & 0xff)
8541 ret = -TARGET_EINVAL;
8542 else {
8543 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8544 ret = 0;
8545 }
8546 break;
8547 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8548 ret = do_set_thread_area(cpu_env, arg1);
8549 break;
8550 #else
8551 goto unimplemented_nowarn;
8552 #endif
8553 #endif
8554 #ifdef TARGET_NR_get_thread_area
8555 case TARGET_NR_get_thread_area:
8556 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8557 ret = do_get_thread_area(cpu_env, arg1);
break;
8558 #else
8559 goto unimplemented_nowarn;
8560 #endif
8561 #endif
8562 #ifdef TARGET_NR_getdomainname
8563 case TARGET_NR_getdomainname:
8564 goto unimplemented_nowarn;
8565 #endif
8566
8567 #ifdef TARGET_NR_clock_gettime
8568 case TARGET_NR_clock_gettime:
8569 {
8570 struct timespec ts;
8571 ret = get_errno(clock_gettime(arg1, &ts));
8572 if (!is_error(ret)) {
8573 host_to_target_timespec(arg2, &ts);
8574 }
8575 break;
8576 }
8577 #endif
8578 #ifdef TARGET_NR_clock_getres
8579 case TARGET_NR_clock_getres:
8580 {
8581 struct timespec ts;
8582 ret = get_errno(clock_getres(arg1, &ts));
8583 if (!is_error(ret)) {
8584 host_to_target_timespec(arg2, &ts);
8585 }
8586 break;
8587 }
8588 #endif
8589 #ifdef TARGET_NR_clock_nanosleep
8590 case TARGET_NR_clock_nanosleep:
8591 {
8592 struct timespec ts;
8593 target_to_host_timespec(&ts, arg3);
8594 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8595 if (arg4)
8596 host_to_target_timespec(arg4, &ts);
8597 break;
8598 }
8599 #endif
8600
8601 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8602 case TARGET_NR_set_tid_address:
8603 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8604 break;
8605 #endif
8606
8607 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8608 case TARGET_NR_tkill:
8609 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8610 break;
8611 #endif
8612
8613 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8614 case TARGET_NR_tgkill:
8615 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8616 target_to_host_signal(arg3)));
8617 break;
8618 #endif
8619
8620 #ifdef TARGET_NR_set_robust_list
8621 case TARGET_NR_set_robust_list:
8622 case TARGET_NR_get_robust_list:
8623 /* The ABI for supporting robust futexes has userspace pass
8624 * the kernel a pointer to a linked list which is updated by
8625 * userspace after the syscall; the list is walked by the kernel
8626 * when the thread exits. Since the linked list in QEMU guest
8627 * memory isn't a valid linked list for the host and we have
8628 * no way to reliably intercept the thread-death event, we can't
8629 * support these. Silently return ENOSYS so that guest userspace
8630 * falls back to a non-robust futex implementation (which should
8631 * be OK except in the corner case of the guest crashing while
8632 * holding a mutex that is shared with another process via
8633 * shared memory).
8634 */
8635 goto unimplemented_nowarn;
8636 #endif
8637
8638 #if defined(TARGET_NR_utimensat)
8639 case TARGET_NR_utimensat:
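/* utimensat: arg3 points to an array of two timespecs (atime,
 * mtime). A NULL times pointer means "set both to now"; a NULL
 * pathname makes the kernel operate on dirfd itself, which is how
 * futimens() is implemented. */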
8640 {
8641 struct timespec *tsp, ts[2];
8642 if (!arg3) {
8643 tsp = NULL;
8644 } else {
8645 target_to_host_timespec(ts, arg3);
8646 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8647 tsp = ts;
8648 }
8649 if (!arg2)
8650 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8651 else {
8652 if (!(p = lock_user_string(arg2))) {
8653 ret = -TARGET_EFAULT;
8654 goto fail;
8655 }
8656 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8657 unlock_user(p, arg2, 0);
8658 }
8659 }
8660 break;
8661 #endif
8662 #if defined(CONFIG_USE_NPTL)
8663 case TARGET_NR_futex:
8664 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8665 break;
8666 #endif
8667 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8668 case TARGET_NR_inotify_init:
8669 ret = get_errno(sys_inotify_init());
8670 break;
8671 #endif
8672 #ifdef CONFIG_INOTIFY1
8673 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8674 case TARGET_NR_inotify_init1:
8675 ret = get_errno(sys_inotify_init1(arg1));
8676 break;
8677 #endif
8678 #endif
8679 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8680 case TARGET_NR_inotify_add_watch:
8681 p = lock_user_string(arg2);
8682 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8683 unlock_user(p, arg2, 0);
8684 break;
8685 #endif
8686 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8687 case TARGET_NR_inotify_rm_watch:
8688 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8689 break;
8690 #endif
8691
8692 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8693 case TARGET_NR_mq_open:
8694 {
8695 struct mq_attr posix_mq_attr;
8696
8697 p = lock_user_string(arg1 - 1);
8698 if (arg4 != 0)
8699 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8700 ret = get_errno(mq_open(p, arg2, arg3, arg4 ? &posix_mq_attr : NULL));
8701 unlock_user (p, arg1, 0);
8702 }
8703 break;
8704
8705 case TARGET_NR_mq_unlink:
8706 p = lock_user_string(arg1 - 1);
8707 ret = get_errno(mq_unlink(p));
8708 unlock_user (p, arg1, 0);
8709 break;
8710
8711 case TARGET_NR_mq_timedsend:
8712 {
8713 struct timespec ts;
8714
8715 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8716 if (arg5 != 0) {
8717 target_to_host_timespec(&ts, arg5);
8718 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8719 host_to_target_timespec(arg5, &ts);
8720 }
8721 else
8722 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8723 unlock_user (p, arg2, arg3);
8724 }
8725 break;
8726
8727 case TARGET_NR_mq_timedreceive:
8728 {
8729 struct timespec ts;
8730 unsigned int prio;
8731
8732 p = lock_user (VERIFY_WRITE, arg2, arg3, 0);
8733 if (arg5 != 0) {
8734 target_to_host_timespec(&ts, arg5);
8735 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8736 host_to_target_timespec(arg5, &ts);
8737 }
8738 else
8739 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8740 unlock_user (p, arg2, arg3);
8741 if (arg4 != 0)
8742 put_user_u32(prio, arg4);
8743 }
8744 break;
8745
8746 /* Not implemented for now... */
8747 /* case TARGET_NR_mq_notify: */
8748 /* break; */
8749
8750 case TARGET_NR_mq_getsetattr:
8751 {
8752 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8753 ret = 0;
8754 if (arg3 != 0) {
8755 ret = mq_getattr(arg1, &posix_mq_attr_out);
8756 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8757 }
8758 if (arg2 != 0) {
8759 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8760 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8761 }
8762
8763 }
8764 break;
8765 #endif
8766
8767 #ifdef CONFIG_SPLICE
8768 #ifdef TARGET_NR_tee
8769 case TARGET_NR_tee:
8770 {
8771 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8772 }
8773 break;
8774 #endif
8775 #ifdef TARGET_NR_splice
8776 case TARGET_NR_splice:
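/* splice: the optional off_in/off_out arguments are 64-bit offsets
 * fetched from guest memory when non-NULL. */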
8777 {
8778 loff_t loff_in, loff_out;
8779 loff_t *ploff_in = NULL, *ploff_out = NULL;
8780 if(arg2) {
8781 get_user_u64(loff_in, arg2);
8782 ploff_in = &loff_in;
8783 }
8784 if(arg4) {
8785 get_user_u64(loff_out, arg4);
8786 ploff_out = &loff_out;
8787 }
8788 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8789 }
8790 break;
8791 #endif
8792 #ifdef TARGET_NR_vmsplice
8793 case TARGET_NR_vmsplice:
8794 {
8795 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8796 if (vec != NULL) {
8797 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8798 unlock_iovec(vec, arg2, arg3, 0);
8799 } else {
8800 ret = -host_to_target_errno(errno);
8801 }
8802 }
8803 break;
8804 #endif
8805 #endif /* CONFIG_SPLICE */
8806 #ifdef CONFIG_EVENTFD
8807 #if defined(TARGET_NR_eventfd)
8808 case TARGET_NR_eventfd:
8809 ret = get_errno(eventfd(arg1, 0));
8810 break;
8811 #endif
8812 #if defined(TARGET_NR_eventfd2)
8813 case TARGET_NR_eventfd2:
8814 {
8815 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8816 if (arg2 & TARGET_O_NONBLOCK) {
8817 host_flags |= O_NONBLOCK;
8818 }
8819 if (arg2 & TARGET_O_CLOEXEC) {
8820 host_flags |= O_CLOEXEC;
8821 }
8822 ret = get_errno(eventfd(arg1, host_flags));
8823 break;
8824 }
8825 #endif
8826 #endif /* CONFIG_EVENTFD */
8827 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8828 case TARGET_NR_fallocate:
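/* fallocate: on 32-bit ABIs the 64-bit offset and length each arrive
 * split across two registers and are reassembled with
 * target_offset64(). */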
8829 #if TARGET_ABI_BITS == 32
8830 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8831 target_offset64(arg5, arg6)));
8832 #else
8833 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8834 #endif
8835 break;
8836 #endif
8837 #if defined(CONFIG_SYNC_FILE_RANGE)
8838 #if defined(TARGET_NR_sync_file_range)
8839 case TARGET_NR_sync_file_range:
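/* sync_file_range: on 32-bit ABIs the 64-bit offset and nbytes are
 * passed in register pairs; MIPS keeps 64-bit arguments in aligned
 * pairs, so its arguments are shifted by one register (arg2 is
 * padding). */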
8840 #if TARGET_ABI_BITS == 32
8841 #if defined(TARGET_MIPS)
8842 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8843 target_offset64(arg5, arg6), arg7));
8844 #else
8845 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8846 target_offset64(arg4, arg5), arg6));
8847 #endif /* !TARGET_MIPS */
8848 #else
8849 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8850 #endif
8851 break;
8852 #endif
8853 #if defined(TARGET_NR_sync_file_range2)
8854 case TARGET_NR_sync_file_range2:
8855 /* This is like sync_file_range but the arguments are reordered */
8856 #if TARGET_ABI_BITS == 32
8857 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8858 target_offset64(arg5, arg6), arg2));
8859 #else
8860 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8861 #endif
8862 break;
8863 #endif
8864 #endif
8865 #if defined(CONFIG_EPOLL)
8866 #if defined(TARGET_NR_epoll_create)
8867 case TARGET_NR_epoll_create:
8868 ret = get_errno(epoll_create(arg1));
8869 break;
8870 #endif
8871 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8872 case TARGET_NR_epoll_create1:
8873 ret = get_errno(epoll_create1(arg1));
8874 break;
8875 #endif
8876 #if defined(TARGET_NR_epoll_ctl)
8877 case TARGET_NR_epoll_ctl:
8878 {
8879 struct epoll_event ep;
8880 struct epoll_event *epp = 0;
8881 if (arg4) {
8882 struct target_epoll_event *target_ep;
8883 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8884 goto efault;
8885 }
8886 ep.events = tswap32(target_ep->events);
8887 /* The epoll_data_t union is just opaque data to the kernel,
8888 * so we transfer all 64 bits across and need not worry what
8889 * actual data type it is.
8890 */
8891 ep.data.u64 = tswap64(target_ep->data.u64);
8892 unlock_user_struct(target_ep, arg4, 0);
8893 epp = &ep;
8894 }
8895 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8896 break;
8897 }
8898 #endif
8899
8900 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8901 #define IMPLEMENT_EPOLL_PWAIT
8902 #endif
8903 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8904 #if defined(TARGET_NR_epoll_wait)
8905 case TARGET_NR_epoll_wait:
8906 #endif
8907 #if defined(IMPLEMENT_EPOLL_PWAIT)
8908 case TARGET_NR_epoll_pwait:
8909 #endif
8910 {
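/* Wait on the host, then convert the returned events back into the
 * target epoll_event layout; epoll_pwait additionally converts the
 * guest signal mask first. */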
8911 struct target_epoll_event *target_ep;
8912 struct epoll_event *ep;
8913 int epfd = arg1;
8914 int maxevents = arg3;
8915 int timeout = arg4;
8916
8917 target_ep = lock_user(VERIFY_WRITE, arg2,
8918 maxevents * sizeof(struct target_epoll_event), 1);
8919 if (!target_ep) {
8920 goto efault;
8921 }
8922
8923 ep = alloca(maxevents * sizeof(struct epoll_event));
8924
8925 switch (num) {
8926 #if defined(IMPLEMENT_EPOLL_PWAIT)
8927 case TARGET_NR_epoll_pwait:
8928 {
8929 target_sigset_t *target_set;
8930 sigset_t _set, *set = &_set;
8931
8932 if (arg5) {
8933 target_set = lock_user(VERIFY_READ, arg5,
8934 sizeof(target_sigset_t), 1);
8935 if (!target_set) {
8936 unlock_user(target_ep, arg2, 0);
8937 goto efault;
8938 }
8939 target_to_host_sigset(set, target_set);
8940 unlock_user(target_set, arg5, 0);
8941 } else {
8942 set = NULL;
8943 }
8944
8945 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8946 break;
8947 }
8948 #endif
8949 #if defined(TARGET_NR_epoll_wait)
8950 case TARGET_NR_epoll_wait:
8951 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8952 break;
8953 #endif
8954 default:
8955 ret = -TARGET_ENOSYS;
8956 }
8957 if (!is_error(ret)) {
8958 int i;
8959 for (i = 0; i < ret; i++) {
8960 target_ep[i].events = tswap32(ep[i].events);
8961 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8962 }
8963 }
8964 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8965 break;
8966 }
8967 #endif
8968 #endif
8969 #ifdef TARGET_NR_prlimit64
8970 case TARGET_NR_prlimit64:
8971 {
8972 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8973 struct target_rlimit64 *target_rnew, *target_rold;
8974 struct host_rlimit64 rnew, rold, *rnewp = 0;
8975 if (arg3) {
8976 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8977 goto efault;
8978 }
8979 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8980 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8981 unlock_user_struct(target_rnew, arg3, 0);
8982 rnewp = &rnew;
8983 }
8984
8985 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8986 if (!is_error(ret) && arg4) {
8987 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8988 goto efault;
8989 }
8990 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8991 target_rold->rlim_max = tswap64(rold.rlim_max);
8992 unlock_user_struct(target_rold, arg4, 1);
8993 }
8994 break;
8995 }
8996 #endif
8997 #ifdef TARGET_NR_gethostname
8998 case TARGET_NR_gethostname:
8999 {
9000 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9001 if (name) {
9002 ret = get_errno(gethostname(name, arg2));
9003 unlock_user(name, arg1, arg2);
9004 } else {
9005 ret = -TARGET_EFAULT;
9006 }
9007 break;
9008 }
9009 #endif
9010 default:
9011 unimplemented:
9012 gemu_log("qemu: Unsupported syscall: %d\n", num);
9013 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9014 unimplemented_nowarn:
9015 #endif
9016 ret = -TARGET_ENOSYS;
9017 break;
9018 }
9019 fail:
9020 #ifdef DEBUG
9021 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9022 #endif
9023 if(do_strace)
9024 print_syscall_ret(num, ret);
9025 return ret;
9026 efault:
9027 ret = -TARGET_EFAULT;
9028 goto fail;
9029 }