linux-user: Add setsockopt(SO_ATTACH_FILTER)
[qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include <linux/filter.h>
110 #include "linux_loop.h"
111 #include "cpu-uname.h"
112
113 #include "qemu.h"
114
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117
118 //#define DEBUG
119
120 //#include <linux/msdos_fs.h>
121 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
122 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
123
124
125 #undef _syscall0
126 #undef _syscall1
127 #undef _syscall2
128 #undef _syscall3
129 #undef _syscall4
130 #undef _syscall5
131 #undef _syscall6
132
133 #define _syscall0(type,name) \
134 static type name (void) \
135 { \
136 return syscall(__NR_##name); \
137 }
138
139 #define _syscall1(type,name,type1,arg1) \
140 static type name (type1 arg1) \
141 { \
142 return syscall(__NR_##name, arg1); \
143 }
144
145 #define _syscall2(type,name,type1,arg1,type2,arg2) \
146 static type name (type1 arg1,type2 arg2) \
147 { \
148 return syscall(__NR_##name, arg1, arg2); \
149 }
150
151 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
152 static type name (type1 arg1,type2 arg2,type3 arg3) \
153 { \
154 return syscall(__NR_##name, arg1, arg2, arg3); \
155 }
156
157 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
159 { \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
161 }
162
163 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
164 type5,arg5) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
166 { \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
168 }
169
170
171 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 type5,arg5,type6,arg6) \
173 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
174 type6 arg6) \
175 { \
176 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
177 }
178
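/*
 * As a rough illustration of what these wrapper macros produce: the
 * _syscall2() instantiation used further below for sys_tkill expands to
 * approximately
 *
 *     static int sys_tkill(int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 *
 * i.e. a thin static wrapper around the host's variadic syscall(2)
 * entry point that returns the raw host result, with errno set by the
 * host kernel on failure.
 */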
179
180 #define __NR_sys_uname __NR_uname
181 #define __NR_sys_getcwd1 __NR_getcwd
182 #define __NR_sys_getdents __NR_getdents
183 #define __NR_sys_getdents64 __NR_getdents64
184 #define __NR_sys_getpriority __NR_getpriority
185 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
186 #define __NR_sys_syslog __NR_syslog
187 #define __NR_sys_tgkill __NR_tgkill
188 #define __NR_sys_tkill __NR_tkill
189 #define __NR_sys_futex __NR_futex
190 #define __NR_sys_inotify_init __NR_inotify_init
191 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
192 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
193
194 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
195 defined(__s390x__)
196 #define __NR__llseek __NR_lseek
197 #endif
198
199 #ifdef __NR_gettid
200 _syscall0(int, gettid)
201 #else
202 /* This is a replacement for the host gettid() and must return a host
203 errno. */
204 static int gettid(void) {
205 return -ENOSYS;
206 }
207 #endif
208 #ifdef __NR_getdents
209 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
210 #endif
211 #if !defined(__NR_getdents) || \
212 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
213 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
214 #endif
215 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
216 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
217 loff_t *, res, uint, wh);
218 #endif
219 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
220 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
221 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
222 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
223 #endif
224 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
225 _syscall2(int,sys_tkill,int,tid,int,sig)
226 #endif
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group,int,error_code)
229 #endif
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address,int *,tidptr)
232 #endif
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
235 const struct timespec *,timeout,int *,uaddr2,int,val3)
236 #endif
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
239 unsigned long *, user_mask_ptr);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
244 void *, arg);
245
246 static bitmask_transtbl fcntl_flags_tbl[] = {
247 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
248 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
249 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
250 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
251 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
252 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
253 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
254 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
255 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
256 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
257 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
258 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
259 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
260 #if defined(O_DIRECT)
261 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
262 #endif
263 #if defined(O_NOATIME)
264 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
265 #endif
266 #if defined(O_CLOEXEC)
267 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
268 #endif
269 #if defined(O_PATH)
270 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
271 #endif
272 /* Don't terminate the list prematurely on 64-bit host+guest. */
273 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
274 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
275 #endif
276 { 0, 0, 0, 0 }
277 };
278
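/*
 * Sketch of how a bitmask_transtbl such as fcntl_flags_tbl is consumed.
 * The field names are not visible in this excerpt; historically they
 * are x86_mask/x86_bits for the target side and alpha_mask/alpha_bits
 * for the host side, so treat the names below as an assumption:
 *
 *     unsigned int target_to_host_bitmask(unsigned int target_flags,
 *                                         const bitmask_transtbl *tbl)
 *     {
 *         unsigned int host_flags = 0;
 *
 *         for (; tbl->x86_mask != 0; tbl++) {
 *             if ((target_flags & tbl->x86_mask) == tbl->x86_bits) {
 *                 host_flags |= tbl->alpha_bits;
 *             }
 *         }
 *         return host_flags;
 *     }
 *
 * Each entry says "if these target bits are set under this target mask,
 * set those host bits", which is how TARGET_O_* open flags become host
 * O_* flags for open() and fcntl().
 */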
279 #define COPY_UTSNAME_FIELD(dest, src) \
280 do { \
281 /* __NEW_UTS_LEN doesn't include terminating null */ \
282 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
283 (dest)[__NEW_UTS_LEN] = '\0'; \
284 } while (0)
285
286 static int sys_uname(struct new_utsname *buf)
287 {
288 struct utsname uts_buf;
289
290 if (uname(&uts_buf) < 0)
291 return (-1);
292
293 /*
294 * Just in case these have some differences, we
295 * translate utsname to new_utsname (which is the
296 * struct the Linux kernel uses).
297 */
298
299 memset(buf, 0, sizeof(*buf));
300 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
301 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
302 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
303 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
304 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
305 #ifdef _GNU_SOURCE
306 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
307 #endif
308 return (0);
309
310 #undef COPY_UTSNAME_FIELD
311 }
312
313 static int sys_getcwd1(char *buf, size_t size)
314 {
315 if (getcwd(buf, size) == NULL) {
316 /* getcwd() sets errno */
317 return (-1);
318 }
319 return strlen(buf)+1;
320 }
321
322 #ifdef TARGET_NR_openat
323 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
324 {
325 /*
326 * open(2) has extra parameter 'mode' when called with
327 * flag O_CREAT.
328 */
329 if ((flags & O_CREAT) != 0) {
330 return (openat(dirfd, pathname, flags, mode));
331 }
332 return (openat(dirfd, pathname, flags));
333 }
334 #endif
335
336 #ifdef TARGET_NR_utimensat
337 #ifdef CONFIG_UTIMENSAT
338 static int sys_utimensat(int dirfd, const char *pathname,
339 const struct timespec times[2], int flags)
340 {
341 if (pathname == NULL)
342 return futimens(dirfd, times);
343 else
344 return utimensat(dirfd, pathname, times, flags);
345 }
346 #elif defined(__NR_utimensat)
347 #define __NR_sys_utimensat __NR_utimensat
348 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
349 const struct timespec *,tsp,int,flags)
350 #else
351 static int sys_utimensat(int dirfd, const char *pathname,
352 const struct timespec times[2], int flags)
353 {
354 errno = ENOSYS;
355 return -1;
356 }
357 #endif
358 #endif /* TARGET_NR_utimensat */
359
360 #ifdef CONFIG_INOTIFY
361 #include <sys/inotify.h>
362
363 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
364 static int sys_inotify_init(void)
365 {
366 return (inotify_init());
367 }
368 #endif
369 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
370 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
371 {
372 return (inotify_add_watch(fd, pathname, mask));
373 }
374 #endif
375 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
376 static int sys_inotify_rm_watch(int fd, int32_t wd)
377 {
378 return (inotify_rm_watch(fd, wd));
379 }
380 #endif
381 #ifdef CONFIG_INOTIFY1
382 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
383 static int sys_inotify_init1(int flags)
384 {
385 return (inotify_init1(flags));
386 }
387 #endif
388 #endif
389 #else
390 /* Userspace can usually survive runtime without inotify */
391 #undef TARGET_NR_inotify_init
392 #undef TARGET_NR_inotify_init1
393 #undef TARGET_NR_inotify_add_watch
394 #undef TARGET_NR_inotify_rm_watch
395 #endif /* CONFIG_INOTIFY */
396
397 #if defined(TARGET_NR_ppoll)
398 #ifndef __NR_ppoll
399 # define __NR_ppoll -1
400 #endif
401 #define __NR_sys_ppoll __NR_ppoll
402 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
403 struct timespec *, timeout, const __sigset_t *, sigmask,
404 size_t, sigsetsize)
405 #endif
406
407 #if defined(TARGET_NR_pselect6)
408 #ifndef __NR_pselect6
409 # define __NR_pselect6 -1
410 #endif
411 #define __NR_sys_pselect6 __NR_pselect6
412 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
413 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
414 #endif
415
416 #if defined(TARGET_NR_prlimit64)
417 #ifndef __NR_prlimit64
418 # define __NR_prlimit64 -1
419 #endif
420 #define __NR_sys_prlimit64 __NR_prlimit64
421 /* The glibc rlimit structure may not match the one used by the underlying syscall */
422 struct host_rlimit64 {
423 uint64_t rlim_cur;
424 uint64_t rlim_max;
425 };
426 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
427 const struct host_rlimit64 *, new_limit,
428 struct host_rlimit64 *, old_limit)
429 #endif
430
431 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
432 #ifdef TARGET_ARM
433 static inline int regpairs_aligned(void *cpu_env) {
434 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
435 }
436 #elif defined(TARGET_MIPS)
437 static inline int regpairs_aligned(void *cpu_env) { return 1; }
438 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
439 /* The SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
440 * of registers which translates to the same as ARM/MIPS, because we start with
441 * r3 as arg1 */
442 static inline int regpairs_aligned(void *cpu_env) { return 1; }
443 #else
444 static inline int regpairs_aligned(void *cpu_env) { return 0; }
445 #endif
446
447 #define ERRNO_TABLE_SIZE 1200
448
449 /* target_to_host_errno_table[] is initialized from
450 * host_to_target_errno_table[] in syscall_init(). */
451 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
452 };
453
454 /*
455 * This list is the union of errno values overridden in asm-<arch>/errno.h
456 * minus the errnos that are not actually generic to all archs.
457 */
458 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
459 [EIDRM] = TARGET_EIDRM,
460 [ECHRNG] = TARGET_ECHRNG,
461 [EL2NSYNC] = TARGET_EL2NSYNC,
462 [EL3HLT] = TARGET_EL3HLT,
463 [EL3RST] = TARGET_EL3RST,
464 [ELNRNG] = TARGET_ELNRNG,
465 [EUNATCH] = TARGET_EUNATCH,
466 [ENOCSI] = TARGET_ENOCSI,
467 [EL2HLT] = TARGET_EL2HLT,
468 [EDEADLK] = TARGET_EDEADLK,
469 [ENOLCK] = TARGET_ENOLCK,
470 [EBADE] = TARGET_EBADE,
471 [EBADR] = TARGET_EBADR,
472 [EXFULL] = TARGET_EXFULL,
473 [ENOANO] = TARGET_ENOANO,
474 [EBADRQC] = TARGET_EBADRQC,
475 [EBADSLT] = TARGET_EBADSLT,
476 [EBFONT] = TARGET_EBFONT,
477 [ENOSTR] = TARGET_ENOSTR,
478 [ENODATA] = TARGET_ENODATA,
479 [ETIME] = TARGET_ETIME,
480 [ENOSR] = TARGET_ENOSR,
481 [ENONET] = TARGET_ENONET,
482 [ENOPKG] = TARGET_ENOPKG,
483 [EREMOTE] = TARGET_EREMOTE,
484 [ENOLINK] = TARGET_ENOLINK,
485 [EADV] = TARGET_EADV,
486 [ESRMNT] = TARGET_ESRMNT,
487 [ECOMM] = TARGET_ECOMM,
488 [EPROTO] = TARGET_EPROTO,
489 [EDOTDOT] = TARGET_EDOTDOT,
490 [EMULTIHOP] = TARGET_EMULTIHOP,
491 [EBADMSG] = TARGET_EBADMSG,
492 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
493 [EOVERFLOW] = TARGET_EOVERFLOW,
494 [ENOTUNIQ] = TARGET_ENOTUNIQ,
495 [EBADFD] = TARGET_EBADFD,
496 [EREMCHG] = TARGET_EREMCHG,
497 [ELIBACC] = TARGET_ELIBACC,
498 [ELIBBAD] = TARGET_ELIBBAD,
499 [ELIBSCN] = TARGET_ELIBSCN,
500 [ELIBMAX] = TARGET_ELIBMAX,
501 [ELIBEXEC] = TARGET_ELIBEXEC,
502 [EILSEQ] = TARGET_EILSEQ,
503 [ENOSYS] = TARGET_ENOSYS,
504 [ELOOP] = TARGET_ELOOP,
505 [ERESTART] = TARGET_ERESTART,
506 [ESTRPIPE] = TARGET_ESTRPIPE,
507 [ENOTEMPTY] = TARGET_ENOTEMPTY,
508 [EUSERS] = TARGET_EUSERS,
509 [ENOTSOCK] = TARGET_ENOTSOCK,
510 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
511 [EMSGSIZE] = TARGET_EMSGSIZE,
512 [EPROTOTYPE] = TARGET_EPROTOTYPE,
513 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
514 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
515 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
516 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
517 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
518 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
519 [EADDRINUSE] = TARGET_EADDRINUSE,
520 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
521 [ENETDOWN] = TARGET_ENETDOWN,
522 [ENETUNREACH] = TARGET_ENETUNREACH,
523 [ENETRESET] = TARGET_ENETRESET,
524 [ECONNABORTED] = TARGET_ECONNABORTED,
525 [ECONNRESET] = TARGET_ECONNRESET,
526 [ENOBUFS] = TARGET_ENOBUFS,
527 [EISCONN] = TARGET_EISCONN,
528 [ENOTCONN] = TARGET_ENOTCONN,
529 [EUCLEAN] = TARGET_EUCLEAN,
530 [ENOTNAM] = TARGET_ENOTNAM,
531 [ENAVAIL] = TARGET_ENAVAIL,
532 [EISNAM] = TARGET_EISNAM,
533 [EREMOTEIO] = TARGET_EREMOTEIO,
534 [ESHUTDOWN] = TARGET_ESHUTDOWN,
535 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
536 [ETIMEDOUT] = TARGET_ETIMEDOUT,
537 [ECONNREFUSED] = TARGET_ECONNREFUSED,
538 [EHOSTDOWN] = TARGET_EHOSTDOWN,
539 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
540 [EALREADY] = TARGET_EALREADY,
541 [EINPROGRESS] = TARGET_EINPROGRESS,
542 [ESTALE] = TARGET_ESTALE,
543 [ECANCELED] = TARGET_ECANCELED,
544 [ENOMEDIUM] = TARGET_ENOMEDIUM,
545 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
546 #ifdef ENOKEY
547 [ENOKEY] = TARGET_ENOKEY,
548 #endif
549 #ifdef EKEYEXPIRED
550 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
551 #endif
552 #ifdef EKEYREVOKED
553 [EKEYREVOKED] = TARGET_EKEYREVOKED,
554 #endif
555 #ifdef EKEYREJECTED
556 [EKEYREJECTED] = TARGET_EKEYREJECTED,
557 #endif
558 #ifdef EOWNERDEAD
559 [EOWNERDEAD] = TARGET_EOWNERDEAD,
560 #endif
561 #ifdef ENOTRECOVERABLE
562 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
563 #endif
564 };
565
566 static inline int host_to_target_errno(int err)
567 {
568 if(host_to_target_errno_table[err])
569 return host_to_target_errno_table[err];
570 return err;
571 }
572
573 static inline int target_to_host_errno(int err)
574 {
575 if (target_to_host_errno_table[err])
576 return target_to_host_errno_table[err];
577 return err;
578 }
579
580 static inline abi_long get_errno(abi_long ret)
581 {
582 if (ret == -1)
583 return -host_to_target_errno(errno);
584 else
585 return ret;
586 }
587
588 static inline int is_error(abi_long ret)
589 {
590 return (abi_ulong)ret >= (abi_ulong)(-4096);
591 }
592
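/*
 * Illustrative usage of the two helpers above: host wrappers return -1
 * and set errno, get_errno() folds that into a negative *target* errno,
 * and is_error() distinguishes genuine errors from large "negative
 * looking" results (e.g. mmap() return values):
 *
 *     abi_long ret = get_errno(open("/etc/hostname", O_RDONLY));
 *     if (is_error(ret)) {
 *         // ret is -TARGET_Exxx, e.g. -TARGET_ENOENT
 *     }
 */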
593 char *target_strerror(int err)
594 {
595 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
596 return NULL;
597 }
598 return strerror(target_to_host_errno(err));
599 }
600
601 static abi_ulong target_brk;
602 static abi_ulong target_original_brk;
603 static abi_ulong brk_page;
604
605 void target_set_brk(abi_ulong new_brk)
606 {
607 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
608 brk_page = HOST_PAGE_ALIGN(target_brk);
609 }
610
611 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
612 #define DEBUGF_BRK(message, args...)
613
614 /* do_brk() must return target values and target errnos. */
615 abi_long do_brk(abi_ulong new_brk)
616 {
617 abi_long mapped_addr;
618 int new_alloc_size;
619
620 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
621
622 if (!new_brk) {
623 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
624 return target_brk;
625 }
626 if (new_brk < target_original_brk) {
627 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
628 target_brk);
629 return target_brk;
630 }
631
632 /* If the new brk is less than the highest page reserved to the
633 * target heap allocation, set it and we're almost done... */
634 if (new_brk <= brk_page) {
635 /* Heap contents are initialized to zero, as for anonymous
636 * mapped pages. */
637 if (new_brk > target_brk) {
638 memset(g2h(target_brk), 0, new_brk - target_brk);
639 }
640 target_brk = new_brk;
641 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
642 return target_brk;
643 }
644
645 /* We need to allocate more memory after the brk... Note that
646 * we don't use MAP_FIXED because that will map over the top of
647 * any existing mapping (like the one with the host libc or qemu
648 * itself); instead we treat "mapped but at wrong address" as
649 * a failure and unmap again.
650 */
651 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
652 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
653 PROT_READ|PROT_WRITE,
654 MAP_ANON|MAP_PRIVATE, 0, 0));
655
656 if (mapped_addr == brk_page) {
657 /* Heap contents are initialized to zero, as for anonymous
658 * mapped pages. Technically the new pages are already
659 * initialized to zero since they *are* anonymous mapped
660 * pages, however we have to take care with the contents that
661 * come from the remaining part of the previous page: it may
662 * contain garbage data due to a previous heap usage (grown
663 * then shrunk). */
664 memset(g2h(target_brk), 0, brk_page - target_brk);
665
666 target_brk = new_brk;
667 brk_page = HOST_PAGE_ALIGN(target_brk);
668 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
669 target_brk);
670 return target_brk;
671 } else if (mapped_addr != -1) {
672 /* Mapped but at wrong address, meaning there wasn't actually
673 * enough space for this brk.
674 */
675 target_munmap(mapped_addr, new_alloc_size);
676 mapped_addr = -1;
677 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
678 }
679 else {
680 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
681 }
682
683 #if defined(TARGET_ALPHA)
684 /* We (partially) emulate OSF/1 on Alpha, which requires we
685 return a proper errno, not an unchanged brk value. */
686 return -TARGET_ENOMEM;
687 #endif
688 /* For everything else, return the previous break. */
689 return target_brk;
690 }
691
692 static inline abi_long copy_from_user_fdset(fd_set *fds,
693 abi_ulong target_fds_addr,
694 int n)
695 {
696 int i, nw, j, k;
697 abi_ulong b, *target_fds;
698
699 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
700 if (!(target_fds = lock_user(VERIFY_READ,
701 target_fds_addr,
702 sizeof(abi_ulong) * nw,
703 1)))
704 return -TARGET_EFAULT;
705
706 FD_ZERO(fds);
707 k = 0;
708 for (i = 0; i < nw; i++) {
709 /* grab the abi_ulong */
710 __get_user(b, &target_fds[i]);
711 for (j = 0; j < TARGET_ABI_BITS; j++) {
712 /* check the bit inside the abi_ulong */
713 if ((b >> j) & 1)
714 FD_SET(k, fds);
715 k++;
716 }
717 }
718
719 unlock_user(target_fds, target_fds_addr, 0);
720
721 return 0;
722 }
723
724 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
725 abi_ulong target_fds_addr,
726 int n)
727 {
728 if (target_fds_addr) {
729 if (copy_from_user_fdset(fds, target_fds_addr, n))
730 return -TARGET_EFAULT;
731 *fds_ptr = fds;
732 } else {
733 *fds_ptr = NULL;
734 }
735 return 0;
736 }
737
738 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
739 const fd_set *fds,
740 int n)
741 {
742 int i, nw, j, k;
743 abi_long v;
744 abi_ulong *target_fds;
745
746 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
747 if (!(target_fds = lock_user(VERIFY_WRITE,
748 target_fds_addr,
749 sizeof(abi_ulong) * nw,
750 0)))
751 return -TARGET_EFAULT;
752
753 k = 0;
754 for (i = 0; i < nw; i++) {
755 v = 0;
756 for (j = 0; j < TARGET_ABI_BITS; j++) {
757 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
758 k++;
759 }
760 __put_user(v, &target_fds[i]);
761 }
762
763 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
764
765 return 0;
766 }
767
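/*
 * Worked example for the two fdset helpers above: with a 32-bit target
 * (TARGET_ABI_BITS == 32) and n == 70 descriptors, nw == 3, so three
 * abi_ulong words are transferred and bit j of word i corresponds to
 * descriptor i * 32 + j; descriptor 69 therefore lives in word 2, bit 5.
 */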
768 #if defined(__alpha__)
769 #define HOST_HZ 1024
770 #else
771 #define HOST_HZ 100
772 #endif
773
774 static inline abi_long host_to_target_clock_t(long ticks)
775 {
776 #if HOST_HZ == TARGET_HZ
777 return ticks;
778 #else
779 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
780 #endif
781 }
782
783 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
784 const struct rusage *rusage)
785 {
786 struct target_rusage *target_rusage;
787
788 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
789 return -TARGET_EFAULT;
790 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
791 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
792 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
793 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
794 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
795 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
796 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
797 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
798 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
799 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
800 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
801 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
802 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
803 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
804 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
805 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
806 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
807 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
808 unlock_user_struct(target_rusage, target_addr, 1);
809
810 return 0;
811 }
812
813 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
814 {
815 abi_ulong target_rlim_swap;
816 rlim_t result;
817
818 target_rlim_swap = tswapal(target_rlim);
819 if (target_rlim_swap == TARGET_RLIM_INFINITY)
820 return RLIM_INFINITY;
821
822 result = target_rlim_swap;
823 if (target_rlim_swap != (rlim_t)result)
824 return RLIM_INFINITY;
825
826 return result;
827 }
828
829 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
830 {
831 abi_ulong target_rlim_swap;
832 abi_ulong result;
833
834 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
835 target_rlim_swap = TARGET_RLIM_INFINITY;
836 else
837 target_rlim_swap = rlim;
838 result = tswapal(target_rlim_swap);
839
840 return result;
841 }
842
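/*
 * Example of the conversions above: a target that passes
 * TARGET_RLIM_INFINITY (typically all bits set in an abi_ulong) gets
 * the host's RLIM_INFINITY, and any host limit that does not fit in the
 * target's abi_long is reported back as TARGET_RLIM_INFINITY rather
 * than being silently truncated.
 */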
843 static inline int target_to_host_resource(int code)
844 {
845 switch (code) {
846 case TARGET_RLIMIT_AS:
847 return RLIMIT_AS;
848 case TARGET_RLIMIT_CORE:
849 return RLIMIT_CORE;
850 case TARGET_RLIMIT_CPU:
851 return RLIMIT_CPU;
852 case TARGET_RLIMIT_DATA:
853 return RLIMIT_DATA;
854 case TARGET_RLIMIT_FSIZE:
855 return RLIMIT_FSIZE;
856 case TARGET_RLIMIT_LOCKS:
857 return RLIMIT_LOCKS;
858 case TARGET_RLIMIT_MEMLOCK:
859 return RLIMIT_MEMLOCK;
860 case TARGET_RLIMIT_MSGQUEUE:
861 return RLIMIT_MSGQUEUE;
862 case TARGET_RLIMIT_NICE:
863 return RLIMIT_NICE;
864 case TARGET_RLIMIT_NOFILE:
865 return RLIMIT_NOFILE;
866 case TARGET_RLIMIT_NPROC:
867 return RLIMIT_NPROC;
868 case TARGET_RLIMIT_RSS:
869 return RLIMIT_RSS;
870 case TARGET_RLIMIT_RTPRIO:
871 return RLIMIT_RTPRIO;
872 case TARGET_RLIMIT_SIGPENDING:
873 return RLIMIT_SIGPENDING;
874 case TARGET_RLIMIT_STACK:
875 return RLIMIT_STACK;
876 default:
877 return code;
878 }
879 }
880
881 static inline abi_long copy_from_user_timeval(struct timeval *tv,
882 abi_ulong target_tv_addr)
883 {
884 struct target_timeval *target_tv;
885
886 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
887 return -TARGET_EFAULT;
888
889 __get_user(tv->tv_sec, &target_tv->tv_sec);
890 __get_user(tv->tv_usec, &target_tv->tv_usec);
891
892 unlock_user_struct(target_tv, target_tv_addr, 0);
893
894 return 0;
895 }
896
897 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
898 const struct timeval *tv)
899 {
900 struct target_timeval *target_tv;
901
902 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
903 return -TARGET_EFAULT;
904
905 __put_user(tv->tv_sec, &target_tv->tv_sec);
906 __put_user(tv->tv_usec, &target_tv->tv_usec);
907
908 unlock_user_struct(target_tv, target_tv_addr, 1);
909
910 return 0;
911 }
912
913 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
914 #include <mqueue.h>
915
916 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
917 abi_ulong target_mq_attr_addr)
918 {
919 struct target_mq_attr *target_mq_attr;
920
921 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
922 target_mq_attr_addr, 1))
923 return -TARGET_EFAULT;
924
925 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
926 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
927 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
928 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
929
930 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
931
932 return 0;
933 }
934
935 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
936 const struct mq_attr *attr)
937 {
938 struct target_mq_attr *target_mq_attr;
939
940 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
941 target_mq_attr_addr, 0))
942 return -TARGET_EFAULT;
943
944 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
945 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
946 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
947 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
948
949 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
950
951 return 0;
952 }
953 #endif
954
955 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
956 /* do_select() must return target values and target errnos. */
957 static abi_long do_select(int n,
958 abi_ulong rfd_addr, abi_ulong wfd_addr,
959 abi_ulong efd_addr, abi_ulong target_tv_addr)
960 {
961 fd_set rfds, wfds, efds;
962 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
963 struct timeval tv, *tv_ptr;
964 abi_long ret;
965
966 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
967 if (ret) {
968 return ret;
969 }
970 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
971 if (ret) {
972 return ret;
973 }
974 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
975 if (ret) {
976 return ret;
977 }
978
979 if (target_tv_addr) {
980 if (copy_from_user_timeval(&tv, target_tv_addr))
981 return -TARGET_EFAULT;
982 tv_ptr = &tv;
983 } else {
984 tv_ptr = NULL;
985 }
986
987 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
988
989 if (!is_error(ret)) {
990 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
991 return -TARGET_EFAULT;
992 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
993 return -TARGET_EFAULT;
994 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
995 return -TARGET_EFAULT;
996
997 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
998 return -TARGET_EFAULT;
999 }
1000
1001 return ret;
1002 }
1003 #endif
1004
1005 static abi_long do_pipe2(int host_pipe[], int flags)
1006 {
1007 #ifdef CONFIG_PIPE2
1008 return pipe2(host_pipe, flags);
1009 #else
1010 return -ENOSYS;
1011 #endif
1012 }
1013
1014 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1015 int flags, int is_pipe2)
1016 {
1017 int host_pipe[2];
1018 abi_long ret;
1019 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1020
1021 if (is_error(ret))
1022 return get_errno(ret);
1023
1024 /* Several targets have special calling conventions for the original
1025 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1026 if (!is_pipe2) {
1027 #if defined(TARGET_ALPHA)
1028 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1029 return host_pipe[0];
1030 #elif defined(TARGET_MIPS)
1031 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1032 return host_pipe[0];
1033 #elif defined(TARGET_SH4)
1034 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1035 return host_pipe[0];
1036 #elif defined(TARGET_SPARC)
1037 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1038 return host_pipe[0];
1039 #endif
1040 }
1041
1042 if (put_user_s32(host_pipe[0], pipedes)
1043 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1044 return -TARGET_EFAULT;
1045 return get_errno(ret);
1046 }
1047
1048 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1049 abi_ulong target_addr,
1050 socklen_t len)
1051 {
1052 struct target_ip_mreqn *target_smreqn;
1053
1054 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1055 if (!target_smreqn)
1056 return -TARGET_EFAULT;
1057 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1058 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1059 if (len == sizeof(struct target_ip_mreqn))
1060 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1061 unlock_user(target_smreqn, target_addr, 0);
1062
1063 return 0;
1064 }
1065
1066 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1067 abi_ulong target_addr,
1068 socklen_t len)
1069 {
1070 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1071 sa_family_t sa_family;
1072 struct target_sockaddr *target_saddr;
1073
1074 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1075 if (!target_saddr)
1076 return -TARGET_EFAULT;
1077
1078 sa_family = tswap16(target_saddr->sa_family);
1079
1080 /* Oops. The caller might send an incomplete sun_path; sun_path
1081 * must be terminated by \0 (see the manual page), but
1082 * unfortunately it is quite common to specify sockaddr_un
1083 * length as "strlen(x->sun_path)" while it should be
1084 * "strlen(...) + 1". We'll fix that here if needed.
1085 * Linux kernel has a similar feature.
1086 */
1087
1088 if (sa_family == AF_UNIX) {
1089 if (len < unix_maxlen && len > 0) {
1090 char *cp = (char*)target_saddr;
1091
1092 if ( cp[len-1] && !cp[len] )
1093 len++;
1094 }
1095 if (len > unix_maxlen)
1096 len = unix_maxlen;
1097 }
1098
1099 memcpy(addr, target_saddr, len);
1100 addr->sa_family = sa_family;
1101 unlock_user(target_saddr, target_addr, 0);
1102
1103 return 0;
1104 }
1105
1106 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1107 struct sockaddr *addr,
1108 socklen_t len)
1109 {
1110 struct target_sockaddr *target_saddr;
1111
1112 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1113 if (!target_saddr)
1114 return -TARGET_EFAULT;
1115 memcpy(target_saddr, addr, len);
1116 target_saddr->sa_family = tswap16(addr->sa_family);
1117 unlock_user(target_saddr, target_addr, len);
1118
1119 return 0;
1120 }
1121
1122 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1123 struct target_msghdr *target_msgh)
1124 {
1125 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1126 abi_long msg_controllen;
1127 abi_ulong target_cmsg_addr;
1128 struct target_cmsghdr *target_cmsg;
1129 socklen_t space = 0;
1130
1131 msg_controllen = tswapal(target_msgh->msg_controllen);
1132 if (msg_controllen < sizeof (struct target_cmsghdr))
1133 goto the_end;
1134 target_cmsg_addr = tswapal(target_msgh->msg_control);
1135 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1136 if (!target_cmsg)
1137 return -TARGET_EFAULT;
1138
1139 while (cmsg && target_cmsg) {
1140 void *data = CMSG_DATA(cmsg);
1141 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1142
1143 int len = tswapal(target_cmsg->cmsg_len)
1144 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1145
1146 space += CMSG_SPACE(len);
1147 if (space > msgh->msg_controllen) {
1148 space -= CMSG_SPACE(len);
1149 gemu_log("Host cmsg overflow\n");
1150 break;
1151 }
1152
1153 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1154 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1155 cmsg->cmsg_len = CMSG_LEN(len);
1156
1157 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1158 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1159 memcpy(data, target_data, len);
1160 } else {
1161 int *fd = (int *)data;
1162 int *target_fd = (int *)target_data;
1163 int i, numfds = len / sizeof(int);
1164
1165 for (i = 0; i < numfds; i++)
1166 fd[i] = tswap32(target_fd[i]);
1167 }
1168
1169 cmsg = CMSG_NXTHDR(msgh, cmsg);
1170 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1171 }
1172 unlock_user(target_cmsg, target_cmsg_addr, 0);
1173 the_end:
1174 msgh->msg_controllen = space;
1175 return 0;
1176 }
1177
1178 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1179 struct msghdr *msgh)
1180 {
1181 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1182 abi_long msg_controllen;
1183 abi_ulong target_cmsg_addr;
1184 struct target_cmsghdr *target_cmsg;
1185 socklen_t space = 0;
1186
1187 msg_controllen = tswapal(target_msgh->msg_controllen);
1188 if (msg_controllen < sizeof (struct target_cmsghdr))
1189 goto the_end;
1190 target_cmsg_addr = tswapal(target_msgh->msg_control);
1191 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1192 if (!target_cmsg)
1193 return -TARGET_EFAULT;
1194
1195 while (cmsg && target_cmsg) {
1196 void *data = CMSG_DATA(cmsg);
1197 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1198
1199 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1200
1201 space += TARGET_CMSG_SPACE(len);
1202 if (space > msg_controllen) {
1203 space -= TARGET_CMSG_SPACE(len);
1204 gemu_log("Target cmsg overflow\n");
1205 break;
1206 }
1207
1208 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1209 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1210 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1211
1212 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1213 (cmsg->cmsg_type == SCM_RIGHTS)) {
1214 int *fd = (int *)data;
1215 int *target_fd = (int *)target_data;
1216 int i, numfds = len / sizeof(int);
1217
1218 for (i = 0; i < numfds; i++)
1219 target_fd[i] = tswap32(fd[i]);
1220 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1221 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1222 (len == sizeof(struct timeval))) {
1223 /* copy struct timeval to target */
1224 struct timeval *tv = (struct timeval *)data;
1225 struct target_timeval *target_tv =
1226 (struct target_timeval *)target_data;
1227
1228 target_tv->tv_sec = tswapal(tv->tv_sec);
1229 target_tv->tv_usec = tswapal(tv->tv_usec);
1230 } else {
1231 gemu_log("Unsupported ancillary data: %d/%d\n",
1232 cmsg->cmsg_level, cmsg->cmsg_type);
1233 memcpy(target_data, data, len);
1234 }
1235
1236 cmsg = CMSG_NXTHDR(msgh, cmsg);
1237 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1238 }
1239 unlock_user(target_cmsg, target_cmsg_addr, space);
1240 the_end:
1241 target_msgh->msg_controllen = tswapal(space);
1242 return 0;
1243 }
1244
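/*
 * Guest-side view (for illustration only; sock and fd_to_pass are
 * placeholders) of the control-message data these two converters
 * handle: a target program passing a file descriptor over an AF_UNIX
 * socket builds an SCM_RIGHTS message like the one below, and the code
 * above rewrites cmsg_level/cmsg_type/cmsg_len between target and host
 * layouts and byte-swaps each fd.
 *
 *     struct msghdr msg = { 0 };
 *     char buf[CMSG_SPACE(sizeof(int))];
 *     struct cmsghdr *cm;
 *
 *     msg.msg_control = buf;
 *     msg.msg_controllen = sizeof(buf);
 *     cm = CMSG_FIRSTHDR(&msg);
 *     cm->cmsg_level = SOL_SOCKET;
 *     cm->cmsg_type = SCM_RIGHTS;
 *     cm->cmsg_len = CMSG_LEN(sizeof(int));
 *     memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *     sendmsg(sock, &msg, 0);
 */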
1245 /* do_setsockopt() Must return target values and target errnos. */
1246 static abi_long do_setsockopt(int sockfd, int level, int optname,
1247 abi_ulong optval_addr, socklen_t optlen)
1248 {
1249 abi_long ret;
1250 int val;
1251 struct ip_mreqn *ip_mreq;
1252 struct ip_mreq_source *ip_mreq_source;
1253
1254 switch(level) {
1255 case SOL_TCP:
1256 /* TCP options all take an 'int' value. */
1257 if (optlen < sizeof(uint32_t))
1258 return -TARGET_EINVAL;
1259
1260 if (get_user_u32(val, optval_addr))
1261 return -TARGET_EFAULT;
1262 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1263 break;
1264 case SOL_IP:
1265 switch(optname) {
1266 case IP_TOS:
1267 case IP_TTL:
1268 case IP_HDRINCL:
1269 case IP_ROUTER_ALERT:
1270 case IP_RECVOPTS:
1271 case IP_RETOPTS:
1272 case IP_PKTINFO:
1273 case IP_MTU_DISCOVER:
1274 case IP_RECVERR:
1275 case IP_RECVTOS:
1276 #ifdef IP_FREEBIND
1277 case IP_FREEBIND:
1278 #endif
1279 case IP_MULTICAST_TTL:
1280 case IP_MULTICAST_LOOP:
1281 val = 0;
1282 if (optlen >= sizeof(uint32_t)) {
1283 if (get_user_u32(val, optval_addr))
1284 return -TARGET_EFAULT;
1285 } else if (optlen >= 1) {
1286 if (get_user_u8(val, optval_addr))
1287 return -TARGET_EFAULT;
1288 }
1289 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1290 break;
1291 case IP_ADD_MEMBERSHIP:
1292 case IP_DROP_MEMBERSHIP:
1293 if (optlen < sizeof (struct target_ip_mreq) ||
1294 optlen > sizeof (struct target_ip_mreqn))
1295 return -TARGET_EINVAL;
1296
1297 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1298 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1299 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1300 break;
1301
1302 case IP_BLOCK_SOURCE:
1303 case IP_UNBLOCK_SOURCE:
1304 case IP_ADD_SOURCE_MEMBERSHIP:
1305 case IP_DROP_SOURCE_MEMBERSHIP:
1306 if (optlen != sizeof (struct target_ip_mreq_source))
1307 return -TARGET_EINVAL;
1308
1309 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1310 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1311 unlock_user (ip_mreq_source, optval_addr, 0);
1312 break;
1313
1314 default:
1315 goto unimplemented;
1316 }
1317 break;
1318 case SOL_RAW:
1319 switch (optname) {
1320 case ICMP_FILTER:
1321 /* struct icmp_filter takes a u32 value */
1322 if (optlen < sizeof(uint32_t)) {
1323 return -TARGET_EINVAL;
1324 }
1325
1326 if (get_user_u32(val, optval_addr)) {
1327 return -TARGET_EFAULT;
1328 }
1329 ret = get_errno(setsockopt(sockfd, level, optname,
1330 &val, sizeof(val)));
1331 break;
1332
1333 default:
1334 goto unimplemented;
1335 }
1336 break;
1337 case TARGET_SOL_SOCKET:
1338 switch (optname) {
1339 case TARGET_SO_RCVTIMEO:
1340 {
1341 struct timeval tv;
1342
1343 optname = SO_RCVTIMEO;
1344
1345 set_timeout:
1346 if (optlen != sizeof(struct target_timeval)) {
1347 return -TARGET_EINVAL;
1348 }
1349
1350 if (copy_from_user_timeval(&tv, optval_addr)) {
1351 return -TARGET_EFAULT;
1352 }
1353
1354 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1355 &tv, sizeof(tv)));
1356 return ret;
1357 }
1358 case TARGET_SO_SNDTIMEO:
1359 optname = SO_SNDTIMEO;
1360 goto set_timeout;
1361 case TARGET_SO_ATTACH_FILTER:
1362 {
1363 struct target_sock_fprog *tfprog;
1364 struct target_sock_filter *tfilter;
1365 struct sock_fprog fprog;
1366 struct sock_filter *filter;
1367 int i;
1368
1369 if (optlen != sizeof(*tfprog)) {
1370 return -TARGET_EINVAL;
1371 }
1372 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1373 return -TARGET_EFAULT;
1374 }
1375 if (!lock_user_struct(VERIFY_READ, tfilter,
1376 tswapal(tfprog->filter), 0)) {
1377 unlock_user_struct(tfprog, optval_addr, 1);
1378 return -TARGET_EFAULT;
1379 }
1380
1381 fprog.len = tswap16(tfprog->len);
1382 filter = malloc(fprog.len * sizeof(*filter));
1383 if (filter == NULL) {
1384 unlock_user_struct(tfilter, tfprog->filter, 1);
1385 unlock_user_struct(tfprog, optval_addr, 1);
1386 return -TARGET_ENOMEM;
1387 }
1388 for (i = 0; i < fprog.len; i++) {
1389 filter[i].code = tswap16(tfilter[i].code);
1390 filter[i].jt = tfilter[i].jt;
1391 filter[i].jf = tfilter[i].jf;
1392 filter[i].k = tswap32(tfilter[i].k);
1393 }
1394 fprog.filter = filter;
1395
1396 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1397 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1398 free(filter);
1399
1400 unlock_user_struct(tfilter, tfprog->filter, 1);
1401 unlock_user_struct(tfprog, optval_addr, 1);
1402 return ret;
1403 }
1404 /* Options with 'int' argument. */
1405 case TARGET_SO_DEBUG:
1406 optname = SO_DEBUG;
1407 break;
1408 case TARGET_SO_REUSEADDR:
1409 optname = SO_REUSEADDR;
1410 break;
1411 case TARGET_SO_TYPE:
1412 optname = SO_TYPE;
1413 break;
1414 case TARGET_SO_ERROR:
1415 optname = SO_ERROR;
1416 break;
1417 case TARGET_SO_DONTROUTE:
1418 optname = SO_DONTROUTE;
1419 break;
1420 case TARGET_SO_BROADCAST:
1421 optname = SO_BROADCAST;
1422 break;
1423 case TARGET_SO_SNDBUF:
1424 optname = SO_SNDBUF;
1425 break;
1426 case TARGET_SO_RCVBUF:
1427 optname = SO_RCVBUF;
1428 break;
1429 case TARGET_SO_KEEPALIVE:
1430 optname = SO_KEEPALIVE;
1431 break;
1432 case TARGET_SO_OOBINLINE:
1433 optname = SO_OOBINLINE;
1434 break;
1435 case TARGET_SO_NO_CHECK:
1436 optname = SO_NO_CHECK;
1437 break;
1438 case TARGET_SO_PRIORITY:
1439 optname = SO_PRIORITY;
1440 break;
1441 #ifdef SO_BSDCOMPAT
1442 case TARGET_SO_BSDCOMPAT:
1443 optname = SO_BSDCOMPAT;
1444 break;
1445 #endif
1446 case TARGET_SO_PASSCRED:
1447 optname = SO_PASSCRED;
1448 break;
1449 case TARGET_SO_TIMESTAMP:
1450 optname = SO_TIMESTAMP;
1451 break;
1452 case TARGET_SO_RCVLOWAT:
1453 optname = SO_RCVLOWAT;
1454 break;
1456 default:
1457 goto unimplemented;
1458 }
1459 if (optlen < sizeof(uint32_t))
1460 return -TARGET_EINVAL;
1461
1462 if (get_user_u32(val, optval_addr))
1463 return -TARGET_EFAULT;
1464 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1465 break;
1466 default:
1467 unimplemented:
1468 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1469 ret = -TARGET_ENOPROTOOPT;
1470 }
1471 return ret;
1472 }
1473
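/*
 * Guest-side view (illustration only; sock is a placeholder) of the
 * TARGET_SO_ATTACH_FILTER path handled in do_setsockopt() above: the
 * guest supplies a struct sock_fprog whose classic-BPF instructions the
 * code converts element by element (tswap16 on code, tswap32 on k,
 * jt/jf copied as-is) before calling the host setsockopt():
 *
 *     struct sock_filter insns[] = {
 *         BPF_STMT(BPF_RET | BPF_K, 0xffffffff),   (accept every packet)
 *     };
 *     struct sock_fprog prog = {
 *         .len = sizeof(insns) / sizeof(insns[0]),
 *         .filter = insns,
 *     };
 *     setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */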
1474 /* do_getsockopt() Must return target values and target errnos. */
1475 static abi_long do_getsockopt(int sockfd, int level, int optname,
1476 abi_ulong optval_addr, abi_ulong optlen)
1477 {
1478 abi_long ret;
1479 int len, val;
1480 socklen_t lv;
1481
1482 switch(level) {
1483 case TARGET_SOL_SOCKET:
1484 level = SOL_SOCKET;
1485 switch (optname) {
1486 /* These don't just return a single integer */
1487 case TARGET_SO_LINGER:
1488 case TARGET_SO_RCVTIMEO:
1489 case TARGET_SO_SNDTIMEO:
1490 case TARGET_SO_PEERNAME:
1491 goto unimplemented;
1492 case TARGET_SO_PEERCRED: {
1493 struct ucred cr;
1494 socklen_t crlen;
1495 struct target_ucred *tcr;
1496
1497 if (get_user_u32(len, optlen)) {
1498 return -TARGET_EFAULT;
1499 }
1500 if (len < 0) {
1501 return -TARGET_EINVAL;
1502 }
1503
1504 crlen = sizeof(cr);
1505 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1506 &cr, &crlen));
1507 if (ret < 0) {
1508 return ret;
1509 }
1510 if (len > crlen) {
1511 len = crlen;
1512 }
1513 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1514 return -TARGET_EFAULT;
1515 }
1516 __put_user(cr.pid, &tcr->pid);
1517 __put_user(cr.uid, &tcr->uid);
1518 __put_user(cr.gid, &tcr->gid);
1519 unlock_user_struct(tcr, optval_addr, 1);
1520 if (put_user_u32(len, optlen)) {
1521 return -TARGET_EFAULT;
1522 }
1523 break;
1524 }
1525 /* Options with 'int' argument. */
1526 case TARGET_SO_DEBUG:
1527 optname = SO_DEBUG;
1528 goto int_case;
1529 case TARGET_SO_REUSEADDR:
1530 optname = SO_REUSEADDR;
1531 goto int_case;
1532 case TARGET_SO_TYPE:
1533 optname = SO_TYPE;
1534 goto int_case;
1535 case TARGET_SO_ERROR:
1536 optname = SO_ERROR;
1537 goto int_case;
1538 case TARGET_SO_DONTROUTE:
1539 optname = SO_DONTROUTE;
1540 goto int_case;
1541 case TARGET_SO_BROADCAST:
1542 optname = SO_BROADCAST;
1543 goto int_case;
1544 case TARGET_SO_SNDBUF:
1545 optname = SO_SNDBUF;
1546 goto int_case;
1547 case TARGET_SO_RCVBUF:
1548 optname = SO_RCVBUF;
1549 goto int_case;
1550 case TARGET_SO_KEEPALIVE:
1551 optname = SO_KEEPALIVE;
1552 goto int_case;
1553 case TARGET_SO_OOBINLINE:
1554 optname = SO_OOBINLINE;
1555 goto int_case;
1556 case TARGET_SO_NO_CHECK:
1557 optname = SO_NO_CHECK;
1558 goto int_case;
1559 case TARGET_SO_PRIORITY:
1560 optname = SO_PRIORITY;
1561 goto int_case;
1562 #ifdef SO_BSDCOMPAT
1563 case TARGET_SO_BSDCOMPAT:
1564 optname = SO_BSDCOMPAT;
1565 goto int_case;
1566 #endif
1567 case TARGET_SO_PASSCRED:
1568 optname = SO_PASSCRED;
1569 goto int_case;
1570 case TARGET_SO_TIMESTAMP:
1571 optname = SO_TIMESTAMP;
1572 goto int_case;
1573 case TARGET_SO_RCVLOWAT:
1574 optname = SO_RCVLOWAT;
1575 goto int_case;
1576 default:
1577 goto int_case;
1578 }
1579 break;
1580 case SOL_TCP:
1581 /* TCP options all take an 'int' value. */
1582 int_case:
1583 if (get_user_u32(len, optlen))
1584 return -TARGET_EFAULT;
1585 if (len < 0)
1586 return -TARGET_EINVAL;
1587 lv = sizeof(lv);
1588 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1589 if (ret < 0)
1590 return ret;
1591 if (len > lv)
1592 len = lv;
1593 if (len == 4) {
1594 if (put_user_u32(val, optval_addr))
1595 return -TARGET_EFAULT;
1596 } else {
1597 if (put_user_u8(val, optval_addr))
1598 return -TARGET_EFAULT;
1599 }
1600 if (put_user_u32(len, optlen))
1601 return -TARGET_EFAULT;
1602 break;
1603 case SOL_IP:
1604 switch(optname) {
1605 case IP_TOS:
1606 case IP_TTL:
1607 case IP_HDRINCL:
1608 case IP_ROUTER_ALERT:
1609 case IP_RECVOPTS:
1610 case IP_RETOPTS:
1611 case IP_PKTINFO:
1612 case IP_MTU_DISCOVER:
1613 case IP_RECVERR:
1614 case IP_RECVTOS:
1615 #ifdef IP_FREEBIND
1616 case IP_FREEBIND:
1617 #endif
1618 case IP_MULTICAST_TTL:
1619 case IP_MULTICAST_LOOP:
1620 if (get_user_u32(len, optlen))
1621 return -TARGET_EFAULT;
1622 if (len < 0)
1623 return -TARGET_EINVAL;
1624 lv = sizeof(lv);
1625 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1626 if (ret < 0)
1627 return ret;
1628 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1629 len = 1;
1630 if (put_user_u32(len, optlen)
1631 || put_user_u8(val, optval_addr))
1632 return -TARGET_EFAULT;
1633 } else {
1634 if (len > sizeof(int))
1635 len = sizeof(int);
1636 if (put_user_u32(len, optlen)
1637 || put_user_u32(val, optval_addr))
1638 return -TARGET_EFAULT;
1639 }
1640 break;
1641 default:
1642 ret = -TARGET_ENOPROTOOPT;
1643 break;
1644 }
1645 break;
1646 default:
1647 unimplemented:
1648 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1649 level, optname);
1650 ret = -TARGET_EOPNOTSUPP;
1651 break;
1652 }
1653 return ret;
1654 }
1655
1656 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1657 int count, int copy)
1658 {
1659 struct target_iovec *target_vec;
1660 struct iovec *vec;
1661 abi_ulong total_len, max_len;
1662 int i;
1663
1664 if (count == 0) {
1665 errno = 0;
1666 return NULL;
1667 }
1668 if (count < 0 || count > IOV_MAX) {
1669 errno = EINVAL;
1670 return NULL;
1671 }
1672
1673 vec = calloc(count, sizeof(struct iovec));
1674 if (vec == NULL) {
1675 errno = ENOMEM;
1676 return NULL;
1677 }
1678
1679 target_vec = lock_user(VERIFY_READ, target_addr,
1680 count * sizeof(struct target_iovec), 1);
1681 if (target_vec == NULL) {
1682 errno = EFAULT;
1683 goto fail2;
1684 }
1685
1686 /* ??? If host page size > target page size, this will result in a
1687 value larger than what we can actually support. */
1688 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1689 total_len = 0;
1690
1691 for (i = 0; i < count; i++) {
1692 abi_ulong base = tswapal(target_vec[i].iov_base);
1693 abi_long len = tswapal(target_vec[i].iov_len);
1694
1695 if (len < 0) {
1696 errno = EINVAL;
1697 goto fail;
1698 } else if (len == 0) {
1699 /* Zero length pointer is ignored. */
1700 vec[i].iov_base = 0;
1701 } else {
1702 vec[i].iov_base = lock_user(type, base, len, copy);
1703 if (!vec[i].iov_base) {
1704 errno = EFAULT;
1705 goto fail;
1706 }
1707 if (len > max_len - total_len) {
1708 len = max_len - total_len;
1709 }
1710 }
1711 vec[i].iov_len = len;
1712 total_len += len;
1713 }
1714
1715 unlock_user(target_vec, target_addr, 0);
1716 return vec;
1717
1718 fail:
1719 free(vec);
1720 fail2:
1721 unlock_user(target_vec, target_addr, 0);
1722 return NULL;
1723 }
1724
1725 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1726 int count, int copy)
1727 {
1728 struct target_iovec *target_vec;
1729 int i;
1730
1731 target_vec = lock_user(VERIFY_READ, target_addr,
1732 count * sizeof(struct target_iovec), 1);
1733 if (target_vec) {
1734 for (i = 0; i < count; i++) {
1735 abi_ulong base = tswapal(target_vec[i].iov_base);
1736 abi_long len = tswapal(target_vec[i].iov_len);
1737 if (len < 0) {
1738 break;
1739 }
1740 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1741 }
1742 unlock_user(target_vec, target_addr, 0);
1743 }
1744
1745 free(vec);
1746 }
1747
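/*
 * Sketch of how lock_iovec()/unlock_iovec() are typically paired by the
 * scatter/gather syscall paths (compare do_sendrecvmsg() below):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_addr, count, 1);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ret = get_errno(writev(fd, vec, count));
 *     unlock_iovec(vec, target_addr, count, 0);
 *
 * VERIFY_READ with copy=1 is used when the host call consumes the guest
 * buffers (write side); VERIFY_WRITE with copy=0 locks them for the
 * host to fill, and unlock_iovec(..., copy=1) writes the data back.
 */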
1748 static inline void target_to_host_sock_type(int *type)
1749 {
1750 int host_type = 0;
1751 int target_type = *type;
1752
1753 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1754 case TARGET_SOCK_DGRAM:
1755 host_type = SOCK_DGRAM;
1756 break;
1757 case TARGET_SOCK_STREAM:
1758 host_type = SOCK_STREAM;
1759 break;
1760 default:
1761 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1762 break;
1763 }
1764 if (target_type & TARGET_SOCK_CLOEXEC) {
1765 host_type |= SOCK_CLOEXEC;
1766 }
1767 if (target_type & TARGET_SOCK_NONBLOCK) {
1768 host_type |= SOCK_NONBLOCK;
1769 }
1770 *type = host_type;
1771 }
1772
1773 /* do_socket() Must return target values and target errnos. */
1774 static abi_long do_socket(int domain, int type, int protocol)
1775 {
1776 target_to_host_sock_type(&type);
1777
1778 if (domain == PF_NETLINK)
1779 return -TARGET_EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1780 return get_errno(socket(domain, type, protocol));
1781 }
1782
1783 /* do_bind() Must return target values and target errnos. */
1784 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1785 socklen_t addrlen)
1786 {
1787 void *addr;
1788 abi_long ret;
1789
1790 if ((int)addrlen < 0) {
1791 return -TARGET_EINVAL;
1792 }
1793
1794 addr = alloca(addrlen+1);
1795
1796 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1797 if (ret)
1798 return ret;
1799
1800 return get_errno(bind(sockfd, addr, addrlen));
1801 }
1802
1803 /* do_connect() Must return target values and target errnos. */
1804 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1805 socklen_t addrlen)
1806 {
1807 void *addr;
1808 abi_long ret;
1809
1810 if ((int)addrlen < 0) {
1811 return -TARGET_EINVAL;
1812 }
1813
1814 addr = alloca(addrlen);
1815
1816 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1817 if (ret)
1818 return ret;
1819
1820 return get_errno(connect(sockfd, addr, addrlen));
1821 }
1822
1823 /* do_sendrecvmsg() Must return target values and target errnos. */
1824 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1825 int flags, int send)
1826 {
1827 abi_long ret, len;
1828 struct target_msghdr *msgp;
1829 struct msghdr msg;
1830 int count;
1831 struct iovec *vec;
1832 abi_ulong target_vec;
1833
1834 /* FIXME */
1835 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1836 msgp,
1837 target_msg,
1838 send ? 1 : 0))
1839 return -TARGET_EFAULT;
1840 if (msgp->msg_name) {
1841 msg.msg_namelen = tswap32(msgp->msg_namelen);
1842 msg.msg_name = alloca(msg.msg_namelen);
1843 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1844 msg.msg_namelen);
1845 if (ret) {
1846 goto out2;
1847 }
1848 } else {
1849 msg.msg_name = NULL;
1850 msg.msg_namelen = 0;
1851 }
1852 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1853 msg.msg_control = alloca(msg.msg_controllen);
1854 msg.msg_flags = tswap32(msgp->msg_flags);
1855
1856 count = tswapal(msgp->msg_iovlen);
1857 target_vec = tswapal(msgp->msg_iov);
1858 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1859 target_vec, count, send);
1860 if (vec == NULL) {
1861 ret = -host_to_target_errno(errno);
1862 goto out2;
1863 }
1864 msg.msg_iovlen = count;
1865 msg.msg_iov = vec;
1866
1867 if (send) {
1868 ret = target_to_host_cmsg(&msg, msgp);
1869 if (ret == 0)
1870 ret = get_errno(sendmsg(fd, &msg, flags));
1871 } else {
1872 ret = get_errno(recvmsg(fd, &msg, flags));
1873 if (!is_error(ret)) {
1874 len = ret;
1875 ret = host_to_target_cmsg(msgp, &msg);
1876 if (!is_error(ret)) {
1877 msgp->msg_namelen = tswap32(msg.msg_namelen);
1878 if (msg.msg_name != NULL) {
1879 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1880 msg.msg_name, msg.msg_namelen);
1881 if (ret) {
1882 goto out;
1883 }
1884 }
1885
1886 ret = len;
1887 }
1888 }
1889 }
1890
1891 out:
1892 unlock_iovec(vec, target_vec, count, !send);
1893 out2:
1894 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1895 return ret;
1896 }
1897
1898 /* If we don't have a system accept4() then just call accept.
1899 * The callsites to do_accept4() will ensure that they don't
1900 * pass a non-zero flags argument in this config.
1901 */
1902 #ifndef CONFIG_ACCEPT4
1903 static inline int accept4(int sockfd, struct sockaddr *addr,
1904 socklen_t *addrlen, int flags)
1905 {
1906 assert(flags == 0);
1907 return accept(sockfd, addr, addrlen);
1908 }
1909 #endif
1910
1911 /* do_accept4() Must return target values and target errnos. */
1912 static abi_long do_accept4(int fd, abi_ulong target_addr,
1913 abi_ulong target_addrlen_addr, int flags)
1914 {
1915 socklen_t addrlen;
1916 void *addr;
1917 abi_long ret;
1918
1919 if (target_addr == 0) {
1920 return get_errno(accept4(fd, NULL, NULL, flags));
1921 }
1922
1923 /* Linux returns EINVAL if the addrlen pointer is invalid */
1924 if (get_user_u32(addrlen, target_addrlen_addr))
1925 return -TARGET_EINVAL;
1926
1927 if ((int)addrlen < 0) {
1928 return -TARGET_EINVAL;
1929 }
1930
1931 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1932 return -TARGET_EINVAL;
1933
1934 addr = alloca(addrlen);
1935
1936 ret = get_errno(accept4(fd, addr, &addrlen, flags));
1937 if (!is_error(ret)) {
1938 host_to_target_sockaddr(target_addr, addr, addrlen);
1939 if (put_user_u32(addrlen, target_addrlen_addr))
1940 ret = -TARGET_EFAULT;
1941 }
1942 return ret;
1943 }
1944
1945 /* do_getpeername() Must return target values and target errnos. */
1946 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1947 abi_ulong target_addrlen_addr)
1948 {
1949 socklen_t addrlen;
1950 void *addr;
1951 abi_long ret;
1952
1953 if (get_user_u32(addrlen, target_addrlen_addr))
1954 return -TARGET_EFAULT;
1955
1956 if ((int)addrlen < 0) {
1957 return -TARGET_EINVAL;
1958 }
1959
1960 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1961 return -TARGET_EFAULT;
1962
1963 addr = alloca(addrlen);
1964
1965 ret = get_errno(getpeername(fd, addr, &addrlen));
1966 if (!is_error(ret)) {
1967 host_to_target_sockaddr(target_addr, addr, addrlen);
1968 if (put_user_u32(addrlen, target_addrlen_addr))
1969 ret = -TARGET_EFAULT;
1970 }
1971 return ret;
1972 }
1973
1974 /* do_getsockname() Must return target values and target errnos. */
1975 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1976 abi_ulong target_addrlen_addr)
1977 {
1978 socklen_t addrlen;
1979 void *addr;
1980 abi_long ret;
1981
1982 if (get_user_u32(addrlen, target_addrlen_addr))
1983 return -TARGET_EFAULT;
1984
1985 if ((int)addrlen < 0) {
1986 return -TARGET_EINVAL;
1987 }
1988
1989 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1990 return -TARGET_EFAULT;
1991
1992 addr = alloca(addrlen);
1993
1994 ret = get_errno(getsockname(fd, addr, &addrlen));
1995 if (!is_error(ret)) {
1996 host_to_target_sockaddr(target_addr, addr, addrlen);
1997 if (put_user_u32(addrlen, target_addrlen_addr))
1998 ret = -TARGET_EFAULT;
1999 }
2000 return ret;
2001 }
2002
2003 /* do_socketpair() Must return target values and target errnos. */
2004 static abi_long do_socketpair(int domain, int type, int protocol,
2005 abi_ulong target_tab_addr)
2006 {
2007 int tab[2];
2008 abi_long ret;
2009
2010 target_to_host_sock_type(&type);
2011
2012 ret = get_errno(socketpair(domain, type, protocol, tab));
2013 if (!is_error(ret)) {
2014 if (put_user_s32(tab[0], target_tab_addr)
2015 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2016 ret = -TARGET_EFAULT;
2017 }
2018 return ret;
2019 }
2020
2021 /* do_sendto() Must return target values and target errnos. */
2022 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2023 abi_ulong target_addr, socklen_t addrlen)
2024 {
2025 void *addr;
2026 void *host_msg;
2027 abi_long ret;
2028
2029 if ((int)addrlen < 0) {
2030 return -TARGET_EINVAL;
2031 }
2032
2033 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2034 if (!host_msg)
2035 return -TARGET_EFAULT;
2036 if (target_addr) {
2037 addr = alloca(addrlen);
2038 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2039 if (ret) {
2040 unlock_user(host_msg, msg, 0);
2041 return ret;
2042 }
2043 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2044 } else {
2045 ret = get_errno(send(fd, host_msg, len, flags));
2046 }
2047 unlock_user(host_msg, msg, 0);
2048 return ret;
2049 }
2050
2051 /* do_recvfrom() Must return target values and target errnos. */
2052 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2053 abi_ulong target_addr,
2054 abi_ulong target_addrlen)
2055 {
2056 socklen_t addrlen;
2057 void *addr;
2058 void *host_msg;
2059 abi_long ret;
2060
2061 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2062 if (!host_msg)
2063 return -TARGET_EFAULT;
2064 if (target_addr) {
2065 if (get_user_u32(addrlen, target_addrlen)) {
2066 ret = -TARGET_EFAULT;
2067 goto fail;
2068 }
2069 if ((int)addrlen < 0) {
2070 ret = -TARGET_EINVAL;
2071 goto fail;
2072 }
2073 addr = alloca(addrlen);
2074 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2075 } else {
2076 addr = NULL; /* To keep compiler quiet. */
2077 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2078 }
2079 if (!is_error(ret)) {
2080 if (target_addr) {
2081 host_to_target_sockaddr(target_addr, addr, addrlen);
2082 if (put_user_u32(addrlen, target_addrlen)) {
2083 ret = -TARGET_EFAULT;
2084 goto fail;
2085 }
2086 }
2087 unlock_user(host_msg, msg, len);
2088 } else {
2089 fail:
2090 unlock_user(host_msg, msg, 0);
2091 }
2092 return ret;
2093 }
2094
2095 #ifdef TARGET_NR_socketcall
2096 /* do_socketcall() Must return target values and target errnos. */
2097 static abi_long do_socketcall(int num, abi_ulong vptr)
2098 {
2099 abi_long ret;
2100 const int n = sizeof(abi_ulong);
2101
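/* socketcall(2) passes a single guest pointer to an array of abi_ulong
 * arguments; slot k of that array lives at vptr + k * n.
 */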
2102 switch(num) {
2103 case SOCKOP_socket:
2104 {
2105 abi_ulong domain, type, protocol;
2106
2107 if (get_user_ual(domain, vptr)
2108 || get_user_ual(type, vptr + n)
2109 || get_user_ual(protocol, vptr + 2 * n))
2110 return -TARGET_EFAULT;
2111
2112 ret = do_socket(domain, type, protocol);
2113 }
2114 break;
2115 case SOCKOP_bind:
2116 {
2117 abi_ulong sockfd;
2118 abi_ulong target_addr;
2119 socklen_t addrlen;
2120
2121 if (get_user_ual(sockfd, vptr)
2122 || get_user_ual(target_addr, vptr + n)
2123 || get_user_ual(addrlen, vptr + 2 * n))
2124 return -TARGET_EFAULT;
2125
2126 ret = do_bind(sockfd, target_addr, addrlen);
2127 }
2128 break;
2129 case SOCKOP_connect:
2130 {
2131 abi_ulong sockfd;
2132 abi_ulong target_addr;
2133 socklen_t addrlen;
2134
2135 if (get_user_ual(sockfd, vptr)
2136 || get_user_ual(target_addr, vptr + n)
2137 || get_user_ual(addrlen, vptr + 2 * n))
2138 return -TARGET_EFAULT;
2139
2140 ret = do_connect(sockfd, target_addr, addrlen);
2141 }
2142 break;
2143 case SOCKOP_listen:
2144 {
2145 abi_ulong sockfd, backlog;
2146
2147 if (get_user_ual(sockfd, vptr)
2148 || get_user_ual(backlog, vptr + n))
2149 return -TARGET_EFAULT;
2150
2151 ret = get_errno(listen(sockfd, backlog));
2152 }
2153 break;
2154 case SOCKOP_accept:
2155 {
2156 abi_ulong sockfd;
2157 abi_ulong target_addr, target_addrlen;
2158
2159 if (get_user_ual(sockfd, vptr)
2160 || get_user_ual(target_addr, vptr + n)
2161 || get_user_ual(target_addrlen, vptr + 2 * n))
2162 return -TARGET_EFAULT;
2163
2164 ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2165 }
2166 break;
2167 case SOCKOP_getsockname:
2168 {
2169 abi_ulong sockfd;
2170 abi_ulong target_addr, target_addrlen;
2171
2172 if (get_user_ual(sockfd, vptr)
2173 || get_user_ual(target_addr, vptr + n)
2174 || get_user_ual(target_addrlen, vptr + 2 * n))
2175 return -TARGET_EFAULT;
2176
2177 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2178 }
2179 break;
2180 case SOCKOP_getpeername:
2181 {
2182 abi_ulong sockfd;
2183 abi_ulong target_addr, target_addrlen;
2184
2185 if (get_user_ual(sockfd, vptr)
2186 || get_user_ual(target_addr, vptr + n)
2187 || get_user_ual(target_addrlen, vptr + 2 * n))
2188 return -TARGET_EFAULT;
2189
2190 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2191 }
2192 break;
2193 case SOCKOP_socketpair:
2194 {
2195 abi_ulong domain, type, protocol;
2196 abi_ulong tab;
2197
2198 if (get_user_ual(domain, vptr)
2199 || get_user_ual(type, vptr + n)
2200 || get_user_ual(protocol, vptr + 2 * n)
2201 || get_user_ual(tab, vptr + 3 * n))
2202 return -TARGET_EFAULT;
2203
2204 ret = do_socketpair(domain, type, protocol, tab);
2205 }
2206 break;
2207 case SOCKOP_send:
2208 {
2209 abi_ulong sockfd;
2210 abi_ulong msg;
2211 size_t len;
2212 abi_ulong flags;
2213
2214 if (get_user_ual(sockfd, vptr)
2215 || get_user_ual(msg, vptr + n)
2216 || get_user_ual(len, vptr + 2 * n)
2217 || get_user_ual(flags, vptr + 3 * n))
2218 return -TARGET_EFAULT;
2219
2220 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2221 }
2222 break;
2223 case SOCKOP_recv:
2224 {
2225 abi_ulong sockfd;
2226 abi_ulong msg;
2227 size_t len;
2228 abi_ulong flags;
2229
2230 if (get_user_ual(sockfd, vptr)
2231 || get_user_ual(msg, vptr + n)
2232 || get_user_ual(len, vptr + 2 * n)
2233 || get_user_ual(flags, vptr + 3 * n))
2234 return -TARGET_EFAULT;
2235
2236 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2237 }
2238 break;
2239 case SOCKOP_sendto:
2240 {
2241 abi_ulong sockfd;
2242 abi_ulong msg;
2243 size_t len;
2244 abi_ulong flags;
2245 abi_ulong addr;
2246 socklen_t addrlen;
2247
2248 if (get_user_ual(sockfd, vptr)
2249 || get_user_ual(msg, vptr + n)
2250 || get_user_ual(len, vptr + 2 * n)
2251 || get_user_ual(flags, vptr + 3 * n)
2252 || get_user_ual(addr, vptr + 4 * n)
2253 || get_user_ual(addrlen, vptr + 5 * n))
2254 return -TARGET_EFAULT;
2255
2256 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2257 }
2258 break;
2259 case SOCKOP_recvfrom:
2260 {
2261 abi_ulong sockfd;
2262 abi_ulong msg;
2263 size_t len;
2264 abi_ulong flags;
2265 abi_ulong addr;
2266 socklen_t addrlen;
2267
2268 if (get_user_ual(sockfd, vptr)
2269 || get_user_ual(msg, vptr + n)
2270 || get_user_ual(len, vptr + 2 * n)
2271 || get_user_ual(flags, vptr + 3 * n)
2272 || get_user_ual(addr, vptr + 4 * n)
2273 || get_user_ual(addrlen, vptr + 5 * n))
2274 return -TARGET_EFAULT;
2275
2276 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2277 }
2278 break;
2279 case SOCKOP_shutdown:
2280 {
2281 abi_ulong sockfd, how;
2282
2283 if (get_user_ual(sockfd, vptr)
2284 || get_user_ual(how, vptr + n))
2285 return -TARGET_EFAULT;
2286
2287 ret = get_errno(shutdown(sockfd, how));
2288 }
2289 break;
2290 case SOCKOP_sendmsg:
2291 case SOCKOP_recvmsg:
2292 {
2293 abi_ulong fd;
2294 abi_ulong target_msg;
2295 abi_ulong flags;
2296
2297 if (get_user_ual(fd, vptr)
2298 || get_user_ual(target_msg, vptr + n)
2299 || get_user_ual(flags, vptr + 2 * n))
2300 return -TARGET_EFAULT;
2301
2302 ret = do_sendrecvmsg(fd, target_msg, flags,
2303 (num == SOCKOP_sendmsg));
2304 }
2305 break;
2306 case SOCKOP_setsockopt:
2307 {
2308 abi_ulong sockfd;
2309 abi_ulong level;
2310 abi_ulong optname;
2311 abi_ulong optval;
2312 socklen_t optlen;
2313
2314 if (get_user_ual(sockfd, vptr)
2315 || get_user_ual(level, vptr + n)
2316 || get_user_ual(optname, vptr + 2 * n)
2317 || get_user_ual(optval, vptr + 3 * n)
2318 || get_user_ual(optlen, vptr + 4 * n))
2319 return -TARGET_EFAULT;
2320
2321 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2322 }
2323 break;
2324 case SOCKOP_getsockopt:
2325 {
2326 abi_ulong sockfd;
2327 abi_ulong level;
2328 abi_ulong optname;
2329 abi_ulong optval;
2330 socklen_t optlen;
2331
2332 if (get_user_ual(sockfd, vptr)
2333 || get_user_ual(level, vptr + n)
2334 || get_user_ual(optname, vptr + 2 * n)
2335 || get_user_ual(optval, vptr + 3 * n)
2336 || get_user_ual(optlen, vptr + 4 * n))
2337 return -TARGET_EFAULT;
2338
2339 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2340 }
2341 break;
2342 default:
2343 gemu_log("Unsupported socketcall: %d\n", num);
2344 ret = -TARGET_ENOSYS;
2345 break;
2346 }
2347 return ret;
2348 }
2349 #endif
2350
2351 #define N_SHM_REGIONS 32
2352
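/* Track which guest addresses have SysV shared memory attached and how
 * large each segment is, so that do_shmdt() can clear the page flags again.
 */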
2353 static struct shm_region {
2354 abi_ulong start;
2355 abi_ulong size;
2356 } shm_regions[N_SHM_REGIONS];
2357
2358 struct target_ipc_perm
2359 {
2360 abi_long __key;
2361 abi_ulong uid;
2362 abi_ulong gid;
2363 abi_ulong cuid;
2364 abi_ulong cgid;
2365 unsigned short int mode;
2366 unsigned short int __pad1;
2367 unsigned short int __seq;
2368 unsigned short int __pad2;
2369 abi_ulong __unused1;
2370 abi_ulong __unused2;
2371 };
2372
2373 struct target_semid_ds
2374 {
2375 struct target_ipc_perm sem_perm;
2376 abi_ulong sem_otime;
2377 abi_ulong __unused1;
2378 abi_ulong sem_ctime;
2379 abi_ulong __unused2;
2380 abi_ulong sem_nsems;
2381 abi_ulong __unused3;
2382 abi_ulong __unused4;
2383 };
2384
2385 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2386 abi_ulong target_addr)
2387 {
2388 struct target_ipc_perm *target_ip;
2389 struct target_semid_ds *target_sd;
2390
2391 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2392 return -TARGET_EFAULT;
2393 target_ip = &(target_sd->sem_perm);
2394 host_ip->__key = tswapal(target_ip->__key);
2395 host_ip->uid = tswapal(target_ip->uid);
2396 host_ip->gid = tswapal(target_ip->gid);
2397 host_ip->cuid = tswapal(target_ip->cuid);
2398 host_ip->cgid = tswapal(target_ip->cgid);
2399 host_ip->mode = tswap16(target_ip->mode);
2400 unlock_user_struct(target_sd, target_addr, 0);
2401 return 0;
2402 }
2403
2404 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2405 struct ipc_perm *host_ip)
2406 {
2407 struct target_ipc_perm *target_ip;
2408 struct target_semid_ds *target_sd;
2409
2410 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2411 return -TARGET_EFAULT;
2412 target_ip = &(target_sd->sem_perm);
2413 target_ip->__key = tswapal(host_ip->__key);
2414 target_ip->uid = tswapal(host_ip->uid);
2415 target_ip->gid = tswapal(host_ip->gid);
2416 target_ip->cuid = tswapal(host_ip->cuid);
2417 target_ip->cgid = tswapal(host_ip->cgid);
2418 target_ip->mode = tswap16(host_ip->mode);
2419 unlock_user_struct(target_sd, target_addr, 1);
2420 return 0;
2421 }
2422
2423 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2424 abi_ulong target_addr)
2425 {
2426 struct target_semid_ds *target_sd;
2427
2428 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2429 return -TARGET_EFAULT;
2430 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2431 return -TARGET_EFAULT;
2432 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2433 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2434 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2435 unlock_user_struct(target_sd, target_addr, 0);
2436 return 0;
2437 }
2438
2439 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2440 struct semid_ds *host_sd)
2441 {
2442 struct target_semid_ds *target_sd;
2443
2444 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2445 return -TARGET_EFAULT;
2446 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2447 return -TARGET_EFAULT;
2448 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2449 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2450 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2451 unlock_user_struct(target_sd, target_addr, 1);
2452 return 0;
2453 }
2454
2455 struct target_seminfo {
2456 int semmap;
2457 int semmni;
2458 int semmns;
2459 int semmnu;
2460 int semmsl;
2461 int semopm;
2462 int semume;
2463 int semusz;
2464 int semvmx;
2465 int semaem;
2466 };
2467
2468 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2469 struct seminfo *host_seminfo)
2470 {
2471 struct target_seminfo *target_seminfo;
2472 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2473 return -TARGET_EFAULT;
2474 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2475 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2476 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2477 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2478 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2479 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2480 __put_user(host_seminfo->semume, &target_seminfo->semume);
2481 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2482 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2483 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2484 unlock_user_struct(target_seminfo, target_addr, 1);
2485 return 0;
2486 }
2487
2488 union semun {
2489 int val;
2490 struct semid_ds *buf;
2491 unsigned short *array;
2492 struct seminfo *__buf;
2493 };
2494
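/* Guest view of union semun: buf, array and __buf hold guest addresses
 * (abi_ulong) rather than host pointers; do_semctl() translates them into
 * host structures before calling the host semctl().
 */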
2495 union target_semun {
2496 int val;
2497 abi_ulong buf;
2498 abi_ulong array;
2499 abi_ulong __buf;
2500 };
2501
2502 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2503 abi_ulong target_addr)
2504 {
2505 int nsems;
2506 unsigned short *array;
2507 union semun semun;
2508 struct semid_ds semid_ds;
2509 int i, ret;
2510
2511 semun.buf = &semid_ds;
2512
2513 ret = semctl(semid, 0, IPC_STAT, semun);
2514 if (ret == -1)
2515 return get_errno(ret);
2516
2517 nsems = semid_ds.sem_nsems;
2518
2519 *host_array = malloc(nsems*sizeof(unsigned short));
if (!*host_array) {
return -TARGET_ENOMEM;
}
2520 array = lock_user(VERIFY_READ, target_addr,
2521 nsems*sizeof(unsigned short), 1);
2522 if (!array) {
free(*host_array);
2523 return -TARGET_EFAULT;
}
2524
2525 for(i=0; i<nsems; i++) {
2526 __get_user((*host_array)[i], &array[i]);
2527 }
2528 unlock_user(array, target_addr, 0);
2529
2530 return 0;
2531 }
2532
2533 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2534 unsigned short **host_array)
2535 {
2536 int nsems;
2537 unsigned short *array;
2538 union semun semun;
2539 struct semid_ds semid_ds;
2540 int i, ret;
2541
2542 semun.buf = &semid_ds;
2543
2544 ret = semctl(semid, 0, IPC_STAT, semun);
2545 if (ret == -1)
2546 return get_errno(ret);
2547
2548 nsems = semid_ds.sem_nsems;
2549
2550 array = lock_user(VERIFY_WRITE, target_addr,
2551 nsems*sizeof(unsigned short), 0);
2552 if (!array) {
free(*host_array);
2553 return -TARGET_EFAULT;
}
2554
2555 for(i=0; i<nsems; i++) {
2556 __put_user((*host_array)[i], &array[i]);
2557 }
2558 free(*host_array);
2559 unlock_user(array, target_addr, 1);
2560
2561 return 0;
2562 }
2563
2564 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2565 union target_semun target_su)
2566 {
2567 union semun arg;
2568 struct semid_ds dsarg;
2569 unsigned short *array = NULL;
2570 struct seminfo seminfo;
2571 abi_long ret = -TARGET_EINVAL;
2572 abi_long err;
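/* Strip the flag bits (such as IPC_64) that the guest's libc may OR into
 * the command, so the host call sees the plain command number;
 * do_msgctl() and do_shmctl() below do the same.
 */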
2573 cmd &= 0xff;
2574
2575 switch( cmd ) {
2576 case GETVAL:
2577 case SETVAL:
2578 arg.val = tswap32(target_su.val);
2579 ret = get_errno(semctl(semid, semnum, cmd, arg));
2580 target_su.val = tswap32(arg.val);
2581 break;
2582 case GETALL:
2583 case SETALL:
2584 err = target_to_host_semarray(semid, &array, target_su.array);
2585 if (err)
2586 return err;
2587 arg.array = array;
2588 ret = get_errno(semctl(semid, semnum, cmd, arg));
2589 err = host_to_target_semarray(semid, target_su.array, &array);
2590 if (err)
2591 return err;
2592 break;
2593 case IPC_STAT:
2594 case IPC_SET:
2595 case SEM_STAT:
2596 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2597 if (err)
2598 return err;
2599 arg.buf = &dsarg;
2600 ret = get_errno(semctl(semid, semnum, cmd, arg));
2601 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2602 if (err)
2603 return err;
2604 break;
2605 case IPC_INFO:
2606 case SEM_INFO:
2607 arg.__buf = &seminfo;
2608 ret = get_errno(semctl(semid, semnum, cmd, arg));
2609 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2610 if (err)
2611 return err;
2612 break;
2613 case IPC_RMID:
2614 case GETPID:
2615 case GETNCNT:
2616 case GETZCNT:
2617 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2618 break;
2619 }
2620
2621 return ret;
2622 }
2623
2624 struct target_sembuf {
2625 unsigned short sem_num;
2626 short sem_op;
2627 short sem_flg;
2628 };
2629
2630 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2631 abi_ulong target_addr,
2632 unsigned nsops)
2633 {
2634 struct target_sembuf *target_sembuf;
2635 int i;
2636
2637 target_sembuf = lock_user(VERIFY_READ, target_addr,
2638 nsops*sizeof(struct target_sembuf), 1);
2639 if (!target_sembuf)
2640 return -TARGET_EFAULT;
2641
2642 for(i=0; i<nsops; i++) {
2643 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2644 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2645 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2646 }
2647
2648 unlock_user(target_sembuf, target_addr, 0);
2649
2650 return 0;
2651 }
2652
2653 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2654 {
2655 struct sembuf sops[nsops];
2656
2657 if (target_to_host_sembuf(sops, ptr, nsops))
2658 return -TARGET_EFAULT;
2659
2660 return get_errno(semop(semid, sops, nsops));
2661 }
2662
2663 struct target_msqid_ds
2664 {
2665 struct target_ipc_perm msg_perm;
2666 abi_ulong msg_stime;
2667 #if TARGET_ABI_BITS == 32
2668 abi_ulong __unused1;
2669 #endif
2670 abi_ulong msg_rtime;
2671 #if TARGET_ABI_BITS == 32
2672 abi_ulong __unused2;
2673 #endif
2674 abi_ulong msg_ctime;
2675 #if TARGET_ABI_BITS == 32
2676 abi_ulong __unused3;
2677 #endif
2678 abi_ulong __msg_cbytes;
2679 abi_ulong msg_qnum;
2680 abi_ulong msg_qbytes;
2681 abi_ulong msg_lspid;
2682 abi_ulong msg_lrpid;
2683 abi_ulong __unused4;
2684 abi_ulong __unused5;
2685 };
2686
2687 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2688 abi_ulong target_addr)
2689 {
2690 struct target_msqid_ds *target_md;
2691
2692 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2693 return -TARGET_EFAULT;
2694 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2695 return -TARGET_EFAULT;
2696 host_md->msg_stime = tswapal(target_md->msg_stime);
2697 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2698 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2699 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2700 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2701 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2702 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2703 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2704 unlock_user_struct(target_md, target_addr, 0);
2705 return 0;
2706 }
2707
2708 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2709 struct msqid_ds *host_md)
2710 {
2711 struct target_msqid_ds *target_md;
2712
2713 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2714 return -TARGET_EFAULT;
2715 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2716 return -TARGET_EFAULT;
2717 target_md->msg_stime = tswapal(host_md->msg_stime);
2718 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2719 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2720 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2721 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2722 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2723 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2724 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2725 unlock_user_struct(target_md, target_addr, 1);
2726 return 0;
2727 }
2728
2729 struct target_msginfo {
2730 int msgpool;
2731 int msgmap;
2732 int msgmax;
2733 int msgmnb;
2734 int msgmni;
2735 int msgssz;
2736 int msgtql;
2737 unsigned short int msgseg;
2738 };
2739
2740 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2741 struct msginfo *host_msginfo)
2742 {
2743 struct target_msginfo *target_msginfo;
2744 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2745 return -TARGET_EFAULT;
2746 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2747 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2748 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2749 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2750 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2751 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2752 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2753 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2754 unlock_user_struct(target_msginfo, target_addr, 1);
2755 return 0;
2756 }
2757
2758 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2759 {
2760 struct msqid_ds dsarg;
2761 struct msginfo msginfo;
2762 abi_long ret = -TARGET_EINVAL;
2763
2764 cmd &= 0xff;
2765
2766 switch (cmd) {
2767 case IPC_STAT:
2768 case IPC_SET:
2769 case MSG_STAT:
2770 if (target_to_host_msqid_ds(&dsarg,ptr))
2771 return -TARGET_EFAULT;
2772 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2773 if (host_to_target_msqid_ds(ptr,&dsarg))
2774 return -TARGET_EFAULT;
2775 break;
2776 case IPC_RMID:
2777 ret = get_errno(msgctl(msgid, cmd, NULL));
2778 break;
2779 case IPC_INFO:
2780 case MSG_INFO:
2781 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2782 if (host_to_target_msginfo(ptr, &msginfo))
2783 return -TARGET_EFAULT;
2784 break;
2785 }
2786
2787 return ret;
2788 }
2789
2790 struct target_msgbuf {
2791 abi_long mtype;
2792 char mtext[1];
2793 };
2794
2795 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2796 unsigned int msgsz, int msgflg)
2797 {
2798 struct target_msgbuf *target_mb;
2799 struct msgbuf *host_mb;
2800 abi_long ret = 0;
2801
2802 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2803 return -TARGET_EFAULT;
2804 host_mb = malloc(msgsz+sizeof(long));
if (!host_mb) {
unlock_user_struct(target_mb, msgp, 0);
return -TARGET_ENOMEM;
}
2805 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2806 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2807 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2808 free(host_mb);
2809 unlock_user_struct(target_mb, msgp, 0);
2810
2811 return ret;
2812 }
2813
2814 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2815 unsigned int msgsz, abi_long msgtyp,
2816 int msgflg)
2817 {
2818 struct target_msgbuf *target_mb;
2819 char *target_mtext;
2820 struct msgbuf *host_mb;
2821 abi_long ret = 0;
2822
2823 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2824 return -TARGET_EFAULT;
2825
2826 host_mb = g_malloc(msgsz+sizeof(long));
2827 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2828
2829 if (ret > 0) {
2830 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2831 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2832 if (!target_mtext) {
2833 ret = -TARGET_EFAULT;
2834 goto end;
2835 }
2836 memcpy(target_mb->mtext, host_mb->mtext, ret);
2837 unlock_user(target_mtext, target_mtext_addr, ret);
2838 }
2839
2840 target_mb->mtype = tswapal(host_mb->mtype);
2841
2842 end:
2843 if (target_mb)
2844 unlock_user_struct(target_mb, msgp, 1);
2845 g_free(host_mb);
2846 return ret;
2847 }
2848
2849 struct target_shmid_ds
2850 {
2851 struct target_ipc_perm shm_perm;
2852 abi_ulong shm_segsz;
2853 abi_ulong shm_atime;
2854 #if TARGET_ABI_BITS == 32
2855 abi_ulong __unused1;
2856 #endif
2857 abi_ulong shm_dtime;
2858 #if TARGET_ABI_BITS == 32
2859 abi_ulong __unused2;
2860 #endif
2861 abi_ulong shm_ctime;
2862 #if TARGET_ABI_BITS == 32
2863 abi_ulong __unused3;
2864 #endif
2865 int shm_cpid;
2866 int shm_lpid;
2867 abi_ulong shm_nattch;
2868 unsigned long int __unused4;
2869 unsigned long int __unused5;
2870 };
2871
2872 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2873 abi_ulong target_addr)
2874 {
2875 struct target_shmid_ds *target_sd;
2876
2877 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2878 return -TARGET_EFAULT;
2879 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2880 return -TARGET_EFAULT;
2881 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2882 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2883 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2884 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2885 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2886 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2887 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2888 unlock_user_struct(target_sd, target_addr, 0);
2889 return 0;
2890 }
2891
2892 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2893 struct shmid_ds *host_sd)
2894 {
2895 struct target_shmid_ds *target_sd;
2896
2897 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2898 return -TARGET_EFAULT;
2899 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2900 return -TARGET_EFAULT;
2901 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2902 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2903 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2904 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2905 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2906 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2907 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2908 unlock_user_struct(target_sd, target_addr, 1);
2909 return 0;
2910 }
2911
2912 struct target_shminfo {
2913 abi_ulong shmmax;
2914 abi_ulong shmmin;
2915 abi_ulong shmmni;
2916 abi_ulong shmseg;
2917 abi_ulong shmall;
2918 };
2919
2920 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2921 struct shminfo *host_shminfo)
2922 {
2923 struct target_shminfo *target_shminfo;
2924 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2925 return -TARGET_EFAULT;
2926 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2927 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2928 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2929 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2930 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2931 unlock_user_struct(target_shminfo, target_addr, 1);
2932 return 0;
2933 }
2934
2935 struct target_shm_info {
2936 int used_ids;
2937 abi_ulong shm_tot;
2938 abi_ulong shm_rss;
2939 abi_ulong shm_swp;
2940 abi_ulong swap_attempts;
2941 abi_ulong swap_successes;
2942 };
2943
2944 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2945 struct shm_info *host_shm_info)
2946 {
2947 struct target_shm_info *target_shm_info;
2948 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2949 return -TARGET_EFAULT;
2950 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2951 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2952 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2953 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2954 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2955 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2956 unlock_user_struct(target_shm_info, target_addr, 1);
2957 return 0;
2958 }
2959
2960 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2961 {
2962 struct shmid_ds dsarg;
2963 struct shminfo shminfo;
2964 struct shm_info shm_info;
2965 abi_long ret = -TARGET_EINVAL;
2966
2967 cmd &= 0xff;
2968
2969 switch(cmd) {
2970 case IPC_STAT:
2971 case IPC_SET:
2972 case SHM_STAT:
2973 if (target_to_host_shmid_ds(&dsarg, buf))
2974 return -TARGET_EFAULT;
2975 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2976 if (host_to_target_shmid_ds(buf, &dsarg))
2977 return -TARGET_EFAULT;
2978 break;
2979 case IPC_INFO:
2980 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2981 if (host_to_target_shminfo(buf, &shminfo))
2982 return -TARGET_EFAULT;
2983 break;
2984 case SHM_INFO:
2985 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2986 if (host_to_target_shm_info(buf, &shm_info))
2987 return -TARGET_EFAULT;
2988 break;
2989 case IPC_RMID:
2990 case SHM_LOCK:
2991 case SHM_UNLOCK:
2992 ret = get_errno(shmctl(shmid, cmd, NULL));
2993 break;
2994 }
2995
2996 return ret;
2997 }
2998
2999 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3000 {
3001 abi_long raddr;
3002 void *host_raddr;
3003 struct shmid_ds shm_info;
3004 int i,ret;
3005
3006 /* find out the length of the shared memory segment */
3007 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3008 if (is_error(ret)) {
3009 /* can't get length, bail out */
3010 return ret;
3011 }
3012
3013 mmap_lock();
3014
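/* If the guest supplied no address, pick a free range in the guest address
 * space ourselves; SHM_REMAP is needed because the corresponding host pages
 * may already be mapped (e.g. when QEMU reserves the guest address space).
 */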
3015 if (shmaddr)
3016 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3017 else {
3018 abi_ulong mmap_start;
3019
3020 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3021
3022 if (mmap_start == -1) {
3023 errno = ENOMEM;
3024 host_raddr = (void *)-1;
3025 } else
3026 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3027 }
3028
3029 if (host_raddr == (void *)-1) {
3030 mmap_unlock();
3031 return get_errno((long)host_raddr);
3032 }
3033 raddr = h2g((unsigned long)host_raddr);
3034
3035 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3036 PAGE_VALID | PAGE_READ |
3037 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3038
3039 for (i = 0; i < N_SHM_REGIONS; i++) {
3040 if (shm_regions[i].start == 0) {
3041 shm_regions[i].start = raddr;
3042 shm_regions[i].size = shm_info.shm_segsz;
3043 break;
3044 }
3045 }
3046
3047 mmap_unlock();
3048 return raddr;
3049
3050 }
3051
3052 static inline abi_long do_shmdt(abi_ulong shmaddr)
3053 {
3054 int i;
3055
3056 for (i = 0; i < N_SHM_REGIONS; ++i) {
3057 if (shm_regions[i].start == shmaddr) {
3058 shm_regions[i].start = 0;
3059 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3060 break;
3061 }
3062 }
3063
3064 return get_errno(shmdt(g2h(shmaddr)));
3065 }
3066
3067 #ifdef TARGET_NR_ipc
3068 /* ??? This only works with linear mappings. */
3069 /* do_ipc() must return target values and target errnos. */
3070 static abi_long do_ipc(unsigned int call, int first,
3071 int second, int third,
3072 abi_long ptr, abi_long fifth)
3073 {
3074 int version;
3075 abi_long ret = 0;
3076
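/* The ipc(2) multiplexer encodes a structure-layout version in the upper
 * 16 bits of the call number; the low 16 bits select the operation.
 */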
3077 version = call >> 16;
3078 call &= 0xffff;
3079
3080 switch (call) {
3081 case IPCOP_semop:
3082 ret = do_semop(first, ptr, second);
3083 break;
3084
3085 case IPCOP_semget:
3086 ret = get_errno(semget(first, second, third));
3087 break;
3088
3089 case IPCOP_semctl:
3090 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3091 break;
3092
3093 case IPCOP_msgget:
3094 ret = get_errno(msgget(first, second));
3095 break;
3096
3097 case IPCOP_msgsnd:
3098 ret = do_msgsnd(first, ptr, second, third);
3099 break;
3100
3101 case IPCOP_msgctl:
3102 ret = do_msgctl(first, second, ptr);
3103 break;
3104
3105 case IPCOP_msgrcv:
3106 switch (version) {
3107 case 0:
3108 {
3109 struct target_ipc_kludge {
3110 abi_long msgp;
3111 abi_long msgtyp;
3112 } *tmp;
3113
3114 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3115 ret = -TARGET_EFAULT;
3116 break;
3117 }
3118
3119 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3120
3121 unlock_user_struct(tmp, ptr, 0);
3122 break;
3123 }
3124 default:
3125 ret = do_msgrcv(first, ptr, second, fifth, third);
3126 }
3127 break;
3128
3129 case IPCOP_shmat:
3130 switch (version) {
3131 default:
3132 {
3133 abi_ulong raddr;
3134 raddr = do_shmat(first, ptr, second);
3135 if (is_error(raddr))
3136 return get_errno(raddr);
3137 if (put_user_ual(raddr, third))
3138 return -TARGET_EFAULT;
3139 break;
3140 }
3141 case 1:
3142 ret = -TARGET_EINVAL;
3143 break;
3144 }
3145 break;
3146 case IPCOP_shmdt:
3147 ret = do_shmdt(ptr);
3148 break;
3149
3150 case IPCOP_shmget:
3151 /* IPC_* flag values are the same on all linux platforms */
3152 ret = get_errno(shmget(first, second, third));
3153 break;
3154
3155 /* IPC_* and SHM_* command values are the same on all linux platforms */
3156 case IPCOP_shmctl:
3157 ret = do_shmctl(first, second, third);
3158 break;
3159 default:
3160 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3161 ret = -TARGET_ENOSYS;
3162 break;
3163 }
3164 return ret;
3165 }
3166 #endif
3167
3168 /* kernel structure types definitions */
3169
3170 #define STRUCT(name, ...) STRUCT_ ## name,
3171 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3172 enum {
3173 #include "syscall_types.h"
3174 };
3175 #undef STRUCT
3176 #undef STRUCT_SPECIAL
3177
3178 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3179 #define STRUCT_SPECIAL(name)
3180 #include "syscall_types.h"
3181 #undef STRUCT
3182 #undef STRUCT_SPECIAL
3183
3184 typedef struct IOCTLEntry IOCTLEntry;
3185
3186 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3187 int fd, abi_long cmd, abi_long arg);
3188
3189 struct IOCTLEntry {
3190 unsigned int target_cmd;
3191 unsigned int host_cmd;
3192 const char *name;
3193 int access;
3194 do_ioctl_fn *do_ioctl;
3195 const argtype arg_type[5];
3196 };
3197
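/* Data-transfer direction of an ioctl from the guest's point of view:
 * IOC_R means the host fills in data that must be copied back to the
 * target, IOC_W means data is copied from the target to the host first,
 * IOC_RW means both (see the TYPE_PTR handling in do_ioctl()).
 */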
3198 #define IOC_R 0x0001
3199 #define IOC_W 0x0002
3200 #define IOC_RW (IOC_R | IOC_W)
3201
3202 #define MAX_STRUCT_SIZE 4096
3203
3204 #ifdef CONFIG_FIEMAP
3205 /* So fiemap access checks don't overflow on 32 bit systems.
3206 * This is very slightly smaller than the limit imposed by
3207 * the underlying kernel.
3208 */
3209 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3210 / sizeof(struct fiemap_extent))
3211
3212 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3213 int fd, abi_long cmd, abi_long arg)
3214 {
3215 /* The parameter for this ioctl is a struct fiemap followed
3216 * by an array of struct fiemap_extent whose size is set
3217 * in fiemap->fm_extent_count. The array is filled in by the
3218 * ioctl.
3219 */
3220 int target_size_in, target_size_out;
3221 struct fiemap *fm;
3222 const argtype *arg_type = ie->arg_type;
3223 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3224 void *argptr, *p;
3225 abi_long ret;
3226 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3227 uint32_t outbufsz;
3228 int free_fm = 0;
3229
3230 assert(arg_type[0] == TYPE_PTR);
3231 assert(ie->access == IOC_RW);
3232 arg_type++;
3233 target_size_in = thunk_type_size(arg_type, 0);
3234 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3235 if (!argptr) {
3236 return -TARGET_EFAULT;
3237 }
3238 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3239 unlock_user(argptr, arg, 0);
3240 fm = (struct fiemap *)buf_temp;
3241 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3242 return -TARGET_EINVAL;
3243 }
3244
3245 outbufsz = sizeof (*fm) +
3246 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3247
3248 if (outbufsz > MAX_STRUCT_SIZE) {
3249 /* We can't fit all the extents into the fixed size buffer.
3250 * Allocate one that is large enough and use it instead.
3251 */
3252 fm = malloc(outbufsz);
3253 if (!fm) {
3254 return -TARGET_ENOMEM;
3255 }
3256 memcpy(fm, buf_temp, sizeof(struct fiemap));
3257 free_fm = 1;
3258 }
3259 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3260 if (!is_error(ret)) {
3261 target_size_out = target_size_in;
3262 /* An extent_count of 0 means we were only counting the extents
3263 * so there are no structs to copy
3264 */
3265 if (fm->fm_extent_count != 0) {
3266 target_size_out += fm->fm_mapped_extents * extent_size;
3267 }
3268 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3269 if (!argptr) {
3270 ret = -TARGET_EFAULT;
3271 } else {
3272 /* Convert the struct fiemap */
3273 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3274 if (fm->fm_extent_count != 0) {
3275 p = argptr + target_size_in;
3276 /* ...and then all the struct fiemap_extents */
3277 for (i = 0; i < fm->fm_mapped_extents; i++) {
3278 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3279 THUNK_TARGET);
3280 p += extent_size;
3281 }
3282 }
3283 unlock_user(argptr, arg, target_size_out);
3284 }
3285 }
3286 if (free_fm) {
3287 free(fm);
3288 }
3289 return ret;
3290 }
3291 #endif
3292
3293 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3294 int fd, abi_long cmd, abi_long arg)
3295 {
3296 const argtype *arg_type = ie->arg_type;
3297 int target_size;
3298 void *argptr;
3299 int ret;
3300 struct ifconf *host_ifconf;
3301 uint32_t outbufsz;
3302 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3303 int target_ifreq_size;
3304 int nb_ifreq;
3305 int free_buf = 0;
3306 int i;
3307 int target_ifc_len;
3308 abi_long target_ifc_buf;
3309 int host_ifc_len;
3310 char *host_ifc_buf;
3311
3312 assert(arg_type[0] == TYPE_PTR);
3313 assert(ie->access == IOC_RW);
3314
3315 arg_type++;
3316 target_size = thunk_type_size(arg_type, 0);
3317
3318 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3319 if (!argptr)
3320 return -TARGET_EFAULT;
3321 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3322 unlock_user(argptr, arg, 0);
3323
3324 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3325 target_ifc_len = host_ifconf->ifc_len;
3326 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3327
3328 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3329 nb_ifreq = target_ifc_len / target_ifreq_size;
3330 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3331
3332 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3333 if (outbufsz > MAX_STRUCT_SIZE) {
3334 /* We can't fit all the ifreq entries into the fixed size buffer.
3335 * Allocate one that is large enough and use it instead.
3336 */
3337 host_ifconf = malloc(outbufsz);
3338 if (!host_ifconf) {
3339 return -TARGET_ENOMEM;
3340 }
3341 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3342 free_buf = 1;
3343 }
3344 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3345
3346 host_ifconf->ifc_len = host_ifc_len;
3347 host_ifconf->ifc_buf = host_ifc_buf;
3348
3349 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3350 if (!is_error(ret)) {
3351 /* convert host ifc_len to target ifc_len */
3352
3353 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3354 target_ifc_len = nb_ifreq * target_ifreq_size;
3355 host_ifconf->ifc_len = target_ifc_len;
3356
3357 /* restore target ifc_buf */
3358
3359 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3360
3361 /* copy struct ifconf to target user */
3362
3363 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3364 if (!argptr)
3365 return -TARGET_EFAULT;
3366 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3367 unlock_user(argptr, arg, target_size);
3368
3369 /* copy ifreq[] to target user */
3370
3371 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3372 for (i = 0; i < nb_ifreq ; i++) {
3373 thunk_convert(argptr + i * target_ifreq_size,
3374 host_ifc_buf + i * sizeof(struct ifreq),
3375 ifreq_arg_type, THUNK_TARGET);
3376 }
3377 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3378 }
3379
3380 if (free_buf) {
3381 free(host_ifconf);
3382 }
3383
3384 return ret;
3385 }
3386
3387 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3388 abi_long cmd, abi_long arg)
3389 {
3390 void *argptr;
3391 struct dm_ioctl *host_dm;
3392 abi_long guest_data;
3393 uint32_t guest_data_size;
3394 int target_size;
3395 const argtype *arg_type = ie->arg_type;
3396 abi_long ret;
3397 void *big_buf = NULL;
3398 char *host_data;
3399
3400 arg_type++;
3401 target_size = thunk_type_size(arg_type, 0);
3402 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3403 if (!argptr) {
3404 ret = -TARGET_EFAULT;
3405 goto out;
3406 }
3407 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3408 unlock_user(argptr, arg, 0);
3409
3410 /* buf_temp is too small, so fetch things into a bigger buffer */
3411 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3412 memcpy(big_buf, buf_temp, target_size);
3413 buf_temp = big_buf;
3414 host_dm = big_buf;
3415
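/* A dm_ioctl carries a variable-sized payload after the fixed header:
 * data_start is the offset of the payload within the buffer and data_size
 * the overall size, so header and payload are converted separately below.
 */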
3416 guest_data = arg + host_dm->data_start;
3417 if ((guest_data - arg) < 0) {
3418 ret = -TARGET_EINVAL;
3419 goto out;
3420 }
3421 guest_data_size = host_dm->data_size - host_dm->data_start;
3422 host_data = (char*)host_dm + host_dm->data_start;
3423
3424 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3425 switch (ie->host_cmd) {
3426 case DM_REMOVE_ALL:
3427 case DM_LIST_DEVICES:
3428 case DM_DEV_CREATE:
3429 case DM_DEV_REMOVE:
3430 case DM_DEV_SUSPEND:
3431 case DM_DEV_STATUS:
3432 case DM_DEV_WAIT:
3433 case DM_TABLE_STATUS:
3434 case DM_TABLE_CLEAR:
3435 case DM_TABLE_DEPS:
3436 case DM_LIST_VERSIONS:
3437 /* no input data */
3438 break;
3439 case DM_DEV_RENAME:
3440 case DM_DEV_SET_GEOMETRY:
3441 /* data contains only strings */
3442 memcpy(host_data, argptr, guest_data_size);
3443 break;
3444 case DM_TARGET_MSG:
3445 memcpy(host_data, argptr, guest_data_size);
3446 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3447 break;
3448 case DM_TABLE_LOAD:
3449 {
3450 void *gspec = argptr;
3451 void *cur_data = host_data;
3452 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3453 int spec_size = thunk_type_size(arg_type, 0);
3454 int i;
3455
3456 for (i = 0; i < host_dm->target_count; i++) {
3457 struct dm_target_spec *spec = cur_data;
3458 uint32_t next;
3459 int slen;
3460
3461 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3462 slen = strlen((char*)gspec + spec_size) + 1;
3463 next = spec->next;
3464 spec->next = sizeof(*spec) + slen;
3465 strcpy((char*)&spec[1], gspec + spec_size);
3466 gspec += next;
3467 cur_data += spec->next;
3468 }
3469 break;
3470 }
3471 default:
3472 ret = -TARGET_EINVAL;
3473 goto out;
3474 }
3475 unlock_user(argptr, guest_data, 0);
3476
3477 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3478 if (!is_error(ret)) {
3479 guest_data = arg + host_dm->data_start;
3480 guest_data_size = host_dm->data_size - host_dm->data_start;
3481 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3482 switch (ie->host_cmd) {
3483 case DM_REMOVE_ALL:
3484 case DM_DEV_CREATE:
3485 case DM_DEV_REMOVE:
3486 case DM_DEV_RENAME:
3487 case DM_DEV_SUSPEND:
3488 case DM_DEV_STATUS:
3489 case DM_TABLE_LOAD:
3490 case DM_TABLE_CLEAR:
3491 case DM_TARGET_MSG:
3492 case DM_DEV_SET_GEOMETRY:
3493 /* no return data */
3494 break;
3495 case DM_LIST_DEVICES:
3496 {
3497 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3498 uint32_t remaining_data = guest_data_size;
3499 void *cur_data = argptr;
3500 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3501 int nl_size = 12; /* can't use thunk_size due to alignment */
3502
3503 while (1) {
3504 uint32_t next = nl->next;
3505 if (next) {
3506 nl->next = nl_size + (strlen(nl->name) + 1);
3507 }
3508 if (remaining_data < nl->next) {
3509 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3510 break;
3511 }
3512 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3513 strcpy(cur_data + nl_size, nl->name);
3514 cur_data += nl->next;
3515 remaining_data -= nl->next;
3516 if (!next) {
3517 break;
3518 }
3519 nl = (void*)nl + next;
3520 }
3521 break;
3522 }
3523 case DM_DEV_WAIT:
3524 case DM_TABLE_STATUS:
3525 {
3526 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3527 void *cur_data = argptr;
3528 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3529 int spec_size = thunk_type_size(arg_type, 0);
3530 int i;
3531
3532 for (i = 0; i < host_dm->target_count; i++) {
3533 uint32_t next = spec->next;
3534 int slen = strlen((char*)&spec[1]) + 1;
3535 spec->next = (cur_data - argptr) + spec_size + slen;
3536 if (guest_data_size < spec->next) {
3537 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3538 break;
3539 }
3540 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3541 strcpy(cur_data + spec_size, (char*)&spec[1]);
3542 cur_data = argptr + spec->next;
3543 spec = (void*)host_dm + host_dm->data_start + next;
3544 }
3545 break;
3546 }
3547 case DM_TABLE_DEPS:
3548 {
3549 void *hdata = (void*)host_dm + host_dm->data_start;
3550 int count = *(uint32_t*)hdata;
3551 uint64_t *hdev = hdata + 8;
3552 uint64_t *gdev = argptr + 8;
3553 int i;
3554
3555 *(uint32_t*)argptr = tswap32(count);
3556 for (i = 0; i < count; i++) {
3557 *gdev = tswap64(*hdev);
3558 gdev++;
3559 hdev++;
3560 }
3561 break;
3562 }
3563 case DM_LIST_VERSIONS:
3564 {
3565 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3566 uint32_t remaining_data = guest_data_size;
3567 void *cur_data = argptr;
3568 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3569 int vers_size = thunk_type_size(arg_type, 0);
3570
3571 while (1) {
3572 uint32_t next = vers->next;
3573 if (next) {
3574 vers->next = vers_size + (strlen(vers->name) + 1);
3575 }
3576 if (remaining_data < vers->next) {
3577 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3578 break;
3579 }
3580 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3581 strcpy(cur_data + vers_size, vers->name);
3582 cur_data += vers->next;
3583 remaining_data -= vers->next;
3584 if (!next) {
3585 break;
3586 }
3587 vers = (void*)vers + next;
3588 }
3589 break;
3590 }
3591 default:
3592 ret = -TARGET_EINVAL;
3593 goto out;
3594 }
3595 unlock_user(argptr, guest_data, guest_data_size);
3596
3597 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3598 if (!argptr) {
3599 ret = -TARGET_EFAULT;
3600 goto out;
3601 }
3602 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3603 unlock_user(argptr, arg, target_size);
3604 }
3605 out:
3606 g_free(big_buf);
3607 return ret;
3608 }
3609
3610 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3611 int fd, abi_long cmd, abi_long arg)
3612 {
3613 const argtype *arg_type = ie->arg_type;
3614 const StructEntry *se;
3615 const argtype *field_types;
3616 const int *dst_offsets, *src_offsets;
3617 int target_size;
3618 void *argptr;
3619 abi_ulong *target_rt_dev_ptr;
3620 unsigned long *host_rt_dev_ptr;
3621 abi_long ret;
3622 int i;
3623
3624 assert(ie->access == IOC_W);
3625 assert(*arg_type == TYPE_PTR);
3626 arg_type++;
3627 assert(*arg_type == TYPE_STRUCT);
3628 target_size = thunk_type_size(arg_type, 0);
3629 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3630 if (!argptr) {
3631 return -TARGET_EFAULT;
3632 }
3633 arg_type++;
3634 assert(*arg_type == (int)STRUCT_rtentry);
3635 se = struct_entries + *arg_type++;
3636 assert(se->convert[0] == NULL);
3637 /* Convert the struct field by field here so that the rt_dev string pointer can be intercepted. */
3638 field_types = se->field_types;
3639 dst_offsets = se->field_offsets[THUNK_HOST];
3640 src_offsets = se->field_offsets[THUNK_TARGET];
3641 for (i = 0; i < se->nb_fields; i++) {
3642 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3643 assert(*field_types == TYPE_PTRVOID);
3644 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3645 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3646 if (*target_rt_dev_ptr != 0) {
3647 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3648 tswapal(*target_rt_dev_ptr));
3649 if (!*host_rt_dev_ptr) {
3650 unlock_user(argptr, arg, 0);
3651 return -TARGET_EFAULT;
3652 }
3653 } else {
3654 *host_rt_dev_ptr = 0;
3655 }
3656 field_types++;
3657 continue;
3658 }
3659 field_types = thunk_convert(buf_temp + dst_offsets[i],
3660 argptr + src_offsets[i],
3661 field_types, THUNK_HOST);
3662 }
3663 unlock_user(argptr, arg, 0);
3664
3665 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3666 if (*host_rt_dev_ptr != 0) {
3667 unlock_user((void *)*host_rt_dev_ptr,
3668 *target_rt_dev_ptr, 0);
3669 }
3670 return ret;
3671 }
3672
3673 static IOCTLEntry ioctl_entries[] = {
3674 #define IOCTL(cmd, access, ...) \
3675 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3676 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3677 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3678 #include "ioctls.h"
3679 { 0, 0, },
3680 };
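/* For reference, each IOCTL() line in ioctls.h expands to one table entry.
 * A hypothetical IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG)) would become:
 *   { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *     { MK_PTR(TYPE_ULONG) } },
 * i.e. the target and host command numbers, a name for logging, the access
 * direction, no special handler, and the thunk description of the argument.
 */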
3681
3682 /* ??? Implement proper locking for ioctls. */
3683 /* do_ioctl() Must return target values and target errnos. */
3684 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3685 {
3686 const IOCTLEntry *ie;
3687 const argtype *arg_type;
3688 abi_long ret;
3689 uint8_t buf_temp[MAX_STRUCT_SIZE];
3690 int target_size;
3691 void *argptr;
3692
3693 ie = ioctl_entries;
3694 for(;;) {
3695 if (ie->target_cmd == 0) {
3696 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3697 return -TARGET_ENOSYS;
3698 }
3699 if (ie->target_cmd == cmd)
3700 break;
3701 ie++;
3702 }
3703 arg_type = ie->arg_type;
3704 #if defined(DEBUG)
3705 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3706 #endif
3707 if (ie->do_ioctl) {
3708 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3709 }
3710
3711 switch(arg_type[0]) {
3712 case TYPE_NULL:
3713 /* no argument */
3714 ret = get_errno(ioctl(fd, ie->host_cmd));
3715 break;
3716 case TYPE_PTRVOID:
3717 case TYPE_INT:
3718 /* int argument */
3719 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3720 break;
3721 case TYPE_PTR:
3722 arg_type++;
3723 target_size = thunk_type_size(arg_type, 0);
3724 switch(ie->access) {
3725 case IOC_R:
3726 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3727 if (!is_error(ret)) {
3728 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3729 if (!argptr)
3730 return -TARGET_EFAULT;
3731 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3732 unlock_user(argptr, arg, target_size);
3733 }
3734 break;
3735 case IOC_W:
3736 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3737 if (!argptr)
3738 return -TARGET_EFAULT;
3739 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3740 unlock_user(argptr, arg, 0);
3741 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3742 break;
3743 default:
3744 case IOC_RW:
3745 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3746 if (!argptr)
3747 return -TARGET_EFAULT;
3748 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3749 unlock_user(argptr, arg, 0);
3750 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3751 if (!is_error(ret)) {
3752 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3753 if (!argptr)
3754 return -TARGET_EFAULT;
3755 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3756 unlock_user(argptr, arg, target_size);
3757 }
3758 break;
3759 }
3760 break;
3761 default:
3762 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3763 (long)cmd, arg_type[0]);
3764 ret = -TARGET_ENOSYS;
3765 break;
3766 }
3767 return ret;
3768 }
3769
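/* The tables below translate termios flag bits between target and host
 * encodings.  Each row is read as { target mask, target value, host mask,
 * host value } by target_to_host_bitmask() and host_to_target_bitmask().
 */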
3770 static const bitmask_transtbl iflag_tbl[] = {
3771 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3772 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3773 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3774 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3775 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3776 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3777 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3778 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3779 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3780 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3781 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3782 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3783 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3784 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3785 { 0, 0, 0, 0 }
3786 };
3787
3788 static const bitmask_transtbl oflag_tbl[] = {
3789 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3790 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3791 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3792 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3793 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3794 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3795 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3796 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3797 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3798 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3799 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3800 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3801 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3802 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3803 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3804 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3805 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3806 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3807 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3808 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3809 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3810 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3811 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3812 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3813 { 0, 0, 0, 0 }
3814 };
3815
3816 static const bitmask_transtbl cflag_tbl[] = {
3817 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3818 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3819 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3820 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3821 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3822 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3823 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3824 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3825 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3826 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3827 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3828 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3829 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3830 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3831 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3832 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3833 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3834 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3835 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3836 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3837 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3838 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3839 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3840 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3841 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3842 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3843 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3844 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3845 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3846 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3847 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3848 { 0, 0, 0, 0 }
3849 };
3850
3851 static const bitmask_transtbl lflag_tbl[] = {
3852 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3853 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3854 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3855 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3856 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3857 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3858 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3859 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3860 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3861 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3862 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3863 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3864 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3865 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3866 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3867 { 0, 0, 0, 0 }
3868 };
3869
3870 static void target_to_host_termios (void *dst, const void *src)
3871 {
3872 struct host_termios *host = dst;
3873 const struct target_termios *target = src;
3874
3875 host->c_iflag =
3876 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3877 host->c_oflag =
3878 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3879 host->c_cflag =
3880 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3881 host->c_lflag =
3882 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3883 host->c_line = target->c_line;
3884
3885 memset(host->c_cc, 0, sizeof(host->c_cc));
3886 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3887 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3888 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3889 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3890 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3891 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3892 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3893 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3894 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3895 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3896 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3897 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3898 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3899 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3900 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3901 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3902 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3903 }
3904
3905 static void host_to_target_termios (void *dst, const void *src)
3906 {
3907 struct target_termios *target = dst;
3908 const struct host_termios *host = src;
3909
3910 target->c_iflag =
3911 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3912 target->c_oflag =
3913 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3914 target->c_cflag =
3915 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3916 target->c_lflag =
3917 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3918 target->c_line = host->c_line;
3919
3920 memset(target->c_cc, 0, sizeof(target->c_cc));
3921 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3922 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3923 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3924 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3925 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3926 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3927 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3928 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3929 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3930 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3931 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3932 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3933 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3934 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3935 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3936 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3937 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3938 }
3939
3940 static const StructEntry struct_termios_def = {
3941 .convert = { host_to_target_termios, target_to_host_termios },
3942 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3943 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3944 };
3945
3946 static bitmask_transtbl mmap_flags_tbl[] = {
3947 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3948 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3949 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3950 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3951 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3952 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3953 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3954 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3955 { 0, 0, 0, 0 }
3956 };
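/* Editor's note (illustrative, not part of the original source): each
 * bitmask_transtbl row above is assumed to mean { target_mask, target_bits,
 * host_mask, host_bits }: when (value & target_mask) == target_bits, the
 * corresponding host_bits are OR'ed into the result.  So translating a
 * guest flag word such as TARGET_MAP_PRIVATE | TARGET_MAP_ANONYMOUS via
 * target_to_host_bitmask(flags, mmap_flags_tbl) would yield
 * MAP_PRIVATE | MAP_ANONYMOUS on the host, and a multi-bit field such as
 * CS7 inside TARGET_CSIZE in cflag_tbl is matched against the whole field
 * mask before the host CSIZE/CS7 value is substituted. */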
3957
3958 #if defined(TARGET_I386)
3959
3960 /* NOTE: there is really only one LDT, shared by all threads */
3961 static uint8_t *ldt_table;
3962
3963 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3964 {
3965 int size;
3966 void *p;
3967
3968 if (!ldt_table)
3969 return 0;
3970 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3971 if (size > bytecount)
3972 size = bytecount;
3973 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3974 if (!p)
3975 return -TARGET_EFAULT;
3976 /* ??? Should this be byteswapped? */
3977 memcpy(p, ldt_table, size);
3978 unlock_user(p, ptr, size);
3979 return size;
3980 }
3981
3982 /* XXX: add locking support */
3983 static abi_long write_ldt(CPUX86State *env,
3984 abi_ulong ptr, unsigned long bytecount, int oldmode)
3985 {
3986 struct target_modify_ldt_ldt_s ldt_info;
3987 struct target_modify_ldt_ldt_s *target_ldt_info;
3988 int seg_32bit, contents, read_exec_only, limit_in_pages;
3989 int seg_not_present, useable, lm;
3990 uint32_t *lp, entry_1, entry_2;
3991
3992 if (bytecount != sizeof(ldt_info))
3993 return -TARGET_EINVAL;
3994 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3995 return -TARGET_EFAULT;
3996 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3997 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3998 ldt_info.limit = tswap32(target_ldt_info->limit);
3999 ldt_info.flags = tswap32(target_ldt_info->flags);
4000 unlock_user_struct(target_ldt_info, ptr, 0);
4001
4002 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4003 return -TARGET_EINVAL;
4004 seg_32bit = ldt_info.flags & 1;
4005 contents = (ldt_info.flags >> 1) & 3;
4006 read_exec_only = (ldt_info.flags >> 3) & 1;
4007 limit_in_pages = (ldt_info.flags >> 4) & 1;
4008 seg_not_present = (ldt_info.flags >> 5) & 1;
4009 useable = (ldt_info.flags >> 6) & 1;
4010 #ifdef TARGET_ABI32
4011 lm = 0;
4012 #else
4013 lm = (ldt_info.flags >> 7) & 1;
4014 #endif
4015 if (contents == 3) {
4016 if (oldmode)
4017 return -TARGET_EINVAL;
4018 if (seg_not_present == 0)
4019 return -TARGET_EINVAL;
4020 }
4021 /* allocate the LDT */
4022 if (!ldt_table) {
4023 env->ldt.base = target_mmap(0,
4024 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4025 PROT_READ|PROT_WRITE,
4026 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4027 if (env->ldt.base == -1)
4028 return -TARGET_ENOMEM;
4029 memset(g2h(env->ldt.base), 0,
4030 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4031 env->ldt.limit = 0xffff;
4032 ldt_table = g2h(env->ldt.base);
4033 }
4034
4035 /* NOTE: same code as Linux kernel */
4036 /* Allow LDTs to be cleared by the user. */
4037 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4038 if (oldmode ||
4039 (contents == 0 &&
4040 read_exec_only == 1 &&
4041 seg_32bit == 0 &&
4042 limit_in_pages == 0 &&
4043 seg_not_present == 1 &&
4044 useable == 0 )) {
4045 entry_1 = 0;
4046 entry_2 = 0;
4047 goto install;
4048 }
4049 }
4050
4051 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4052 (ldt_info.limit & 0x0ffff);
4053 entry_2 = (ldt_info.base_addr & 0xff000000) |
4054 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4055 (ldt_info.limit & 0xf0000) |
4056 ((read_exec_only ^ 1) << 9) |
4057 (contents << 10) |
4058 ((seg_not_present ^ 1) << 15) |
4059 (seg_32bit << 22) |
4060 (limit_in_pages << 23) |
4061 (lm << 21) |
4062 0x7000;
4063 if (!oldmode)
4064 entry_2 |= (useable << 20);
4065
4066 /* Install the new entry ... */
4067 install:
4068 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4069 lp[0] = tswap32(entry_1);
4070 lp[1] = tswap32(entry_2);
4071 return 0;
4072 }
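/* Worked example (editor's illustration, arbitrary values): with
 * base_addr = 0x12345678, limit = 0xabcde, seg_32bit = 1,
 * limit_in_pages = 1, contents = 0, read_exec_only = 0,
 * seg_not_present = 0, useable = 0, lm = 0 and oldmode == 0, the
 * encoding above produces
 *     entry_1 = 0x5678bcde  (base[15:0] in the high half, limit[15:0] low)
 *     entry_2 = 0x12caf234  (base[31:24] in the top byte, base[23:16] in
 *                            the low byte, limit[19:16], the access bits,
 *                            and the constant 0x7000 = DPL 3, non-system)
 * which matches the usual x86 segment-descriptor layout. */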
4073
4074 /* specific and weird i386 syscalls */
4075 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4076 unsigned long bytecount)
4077 {
4078 abi_long ret;
4079
4080 switch (func) {
4081 case 0:
4082 ret = read_ldt(ptr, bytecount);
4083 break;
4084 case 1:
4085 ret = write_ldt(env, ptr, bytecount, 1);
4086 break;
4087 case 0x11:
4088 ret = write_ldt(env, ptr, bytecount, 0);
4089 break;
4090 default:
4091 ret = -TARGET_ENOSYS;
4092 break;
4093 }
4094 return ret;
4095 }
4096
4097 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4098 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4099 {
4100 uint64_t *gdt_table = g2h(env->gdt.base);
4101 struct target_modify_ldt_ldt_s ldt_info;
4102 struct target_modify_ldt_ldt_s *target_ldt_info;
4103 int seg_32bit, contents, read_exec_only, limit_in_pages;
4104 int seg_not_present, useable, lm;
4105 uint32_t *lp, entry_1, entry_2;
4106 int i;
4107
4108 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4109 if (!target_ldt_info)
4110 return -TARGET_EFAULT;
4111 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4112 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4113 ldt_info.limit = tswap32(target_ldt_info->limit);
4114 ldt_info.flags = tswap32(target_ldt_info->flags);
4115 if (ldt_info.entry_number == -1) {
4116 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4117 if (gdt_table[i] == 0) {
4118 ldt_info.entry_number = i;
4119 target_ldt_info->entry_number = tswap32(i);
4120 break;
4121 }
4122 }
4123 }
4124 unlock_user_struct(target_ldt_info, ptr, 1);
4125
4126 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4127 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4128 return -TARGET_EINVAL;
4129 seg_32bit = ldt_info.flags & 1;
4130 contents = (ldt_info.flags >> 1) & 3;
4131 read_exec_only = (ldt_info.flags >> 3) & 1;
4132 limit_in_pages = (ldt_info.flags >> 4) & 1;
4133 seg_not_present = (ldt_info.flags >> 5) & 1;
4134 useable = (ldt_info.flags >> 6) & 1;
4135 #ifdef TARGET_ABI32
4136 lm = 0;
4137 #else
4138 lm = (ldt_info.flags >> 7) & 1;
4139 #endif
4140
4141 if (contents == 3) {
4142 if (seg_not_present == 0)
4143 return -TARGET_EINVAL;
4144 }
4145
4146 /* NOTE: same code as Linux kernel */
4147 /* Allow LDTs to be cleared by the user. */
4148 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4149 if ((contents == 0 &&
4150 read_exec_only == 1 &&
4151 seg_32bit == 0 &&
4152 limit_in_pages == 0 &&
4153 seg_not_present == 1 &&
4154 useable == 0 )) {
4155 entry_1 = 0;
4156 entry_2 = 0;
4157 goto install;
4158 }
4159 }
4160
4161 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4162 (ldt_info.limit & 0x0ffff);
4163 entry_2 = (ldt_info.base_addr & 0xff000000) |
4164 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4165 (ldt_info.limit & 0xf0000) |
4166 ((read_exec_only ^ 1) << 9) |
4167 (contents << 10) |
4168 ((seg_not_present ^ 1) << 15) |
4169 (seg_32bit << 22) |
4170 (limit_in_pages << 23) |
4171 (useable << 20) |
4172 (lm << 21) |
4173 0x7000;
4174
4175 /* Install the new entry ... */
4176 install:
4177 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4178 lp[0] = tswap32(entry_1);
4179 lp[1] = tswap32(entry_2);
4180 return 0;
4181 }
4182
4183 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4184 {
4185 struct target_modify_ldt_ldt_s *target_ldt_info;
4186 uint64_t *gdt_table = g2h(env->gdt.base);
4187 uint32_t base_addr, limit, flags;
4188 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4189 int seg_not_present, useable, lm;
4190 uint32_t *lp, entry_1, entry_2;
4191
4192 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4193 if (!target_ldt_info)
4194 return -TARGET_EFAULT;
4195 idx = tswap32(target_ldt_info->entry_number);
4196 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4197 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4198 unlock_user_struct(target_ldt_info, ptr, 1);
4199 return -TARGET_EINVAL;
4200 }
4201 lp = (uint32_t *)(gdt_table + idx);
4202 entry_1 = tswap32(lp[0]);
4203 entry_2 = tswap32(lp[1]);
4204
4205 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4206 contents = (entry_2 >> 10) & 3;
4207 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4208 seg_32bit = (entry_2 >> 22) & 1;
4209 limit_in_pages = (entry_2 >> 23) & 1;
4210 useable = (entry_2 >> 20) & 1;
4211 #ifdef TARGET_ABI32
4212 lm = 0;
4213 #else
4214 lm = (entry_2 >> 21) & 1;
4215 #endif
4216 flags = (seg_32bit << 0) | (contents << 1) |
4217 (read_exec_only << 3) | (limit_in_pages << 4) |
4218 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4219 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4220 base_addr = (entry_1 >> 16) |
4221 (entry_2 & 0xff000000) |
4222 ((entry_2 & 0xff) << 16);
4223 target_ldt_info->base_addr = tswapal(base_addr);
4224 target_ldt_info->limit = tswap32(limit);
4225 target_ldt_info->flags = tswap32(flags);
4226 unlock_user_struct(target_ldt_info, ptr, 1);
4227 return 0;
4228 }
4229 #endif /* TARGET_I386 && TARGET_ABI32 */
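/* Editor's note (illustrative only): do_get_thread_area() simply reverses
 * the packing used by do_set_thread_area()/write_ldt().  Feeding it the
 * descriptor from the worked example above (entry_1 = 0x5678bcde,
 * entry_2 = 0x12caf234) recovers base_addr = 0x12345678, limit = 0xabcde
 * and flags = 0x11 (seg_32bit | limit_in_pages), i.e. set and get
 * round-trip cleanly. */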
4230
4231 #ifndef TARGET_ABI32
4232 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4233 {
4234 abi_long ret = 0;
4235 abi_ulong val;
4236 int idx;
4237
4238 switch(code) {
4239 case TARGET_ARCH_SET_GS:
4240 case TARGET_ARCH_SET_FS:
4241 if (code == TARGET_ARCH_SET_GS)
4242 idx = R_GS;
4243 else
4244 idx = R_FS;
4245 cpu_x86_load_seg(env, idx, 0);
4246 env->segs[idx].base = addr;
4247 break;
4248 case TARGET_ARCH_GET_GS:
4249 case TARGET_ARCH_GET_FS:
4250 if (code == TARGET_ARCH_GET_GS)
4251 idx = R_GS;
4252 else
4253 idx = R_FS;
4254 val = env->segs[idx].base;
4255 if (put_user(val, addr, abi_ulong))
4256 ret = -TARGET_EFAULT;
4257 break;
4258 default:
4259 ret = -TARGET_EINVAL;
4260 break;
4261 }
4262 return ret;
4263 }
4264 #endif
4265
4266 #endif /* defined(TARGET_I386) */
4267
4268 #define NEW_STACK_SIZE 0x40000
4269
4270
4271 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4272 typedef struct {
4273 CPUArchState *env;
4274 pthread_mutex_t mutex;
4275 pthread_cond_t cond;
4276 pthread_t thread;
4277 uint32_t tid;
4278 abi_ulong child_tidptr;
4279 abi_ulong parent_tidptr;
4280 sigset_t sigmask;
4281 } new_thread_info;
4282
4283 static void *clone_func(void *arg)
4284 {
4285 new_thread_info *info = arg;
4286 CPUArchState *env;
4287 CPUState *cpu;
4288 TaskState *ts;
4289
4290 env = info->env;
4291 cpu = ENV_GET_CPU(env);
4292 thread_cpu = cpu;
4293 ts = (TaskState *)env->opaque;
4294 info->tid = gettid();
4295 cpu->host_tid = info->tid;
4296 task_settid(ts);
4297 if (info->child_tidptr)
4298 put_user_u32(info->tid, info->child_tidptr);
4299 if (info->parent_tidptr)
4300 put_user_u32(info->tid, info->parent_tidptr);
4301 /* Enable signals. */
4302 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4303 /* Signal to the parent that we're ready. */
4304 pthread_mutex_lock(&info->mutex);
4305 pthread_cond_broadcast(&info->cond);
4306 pthread_mutex_unlock(&info->mutex);
4307 /* Wait until the parent has finished initializing the TLS state. */
4308 pthread_mutex_lock(&clone_lock);
4309 pthread_mutex_unlock(&clone_lock);
4310 cpu_loop(env);
4311 /* never exits */
4312 return NULL;
4313 }
4314
4315 /* do_fork() must return host values and target errnos (unlike most
4316 do_*() functions). */
4317 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4318 abi_ulong parent_tidptr, target_ulong newtls,
4319 abi_ulong child_tidptr)
4320 {
4321 int ret;
4322 TaskState *ts;
4323 CPUArchState *new_env;
4324 unsigned int nptl_flags;
4325 sigset_t sigmask;
4326
4327 /* Emulate vfork() with fork() */
4328 if (flags & CLONE_VFORK)
4329 flags &= ~(CLONE_VFORK | CLONE_VM);
4330
4331 if (flags & CLONE_VM) {
4332 TaskState *parent_ts = (TaskState *)env->opaque;
4333 new_thread_info info;
4334 pthread_attr_t attr;
4335
4336 ts = g_malloc0(sizeof(TaskState));
4337 init_task_state(ts);
4338 /* we create a new CPU instance. */
4339 new_env = cpu_copy(env);
4340 /* Init regs that differ from the parent. */
4341 cpu_clone_regs(new_env, newsp);
4342 new_env->opaque = ts;
4343 ts->bprm = parent_ts->bprm;
4344 ts->info = parent_ts->info;
4345 nptl_flags = flags;
4346 flags &= ~CLONE_NPTL_FLAGS2;
4347
4348 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4349 ts->child_tidptr = child_tidptr;
4350 }
4351
4352 if (nptl_flags & CLONE_SETTLS)
4353 cpu_set_tls (new_env, newtls);
4354
4355 /* Grab a mutex so that thread setup appears atomic. */
4356 pthread_mutex_lock(&clone_lock);
4357
4358 memset(&info, 0, sizeof(info));
4359 pthread_mutex_init(&info.mutex, NULL);
4360 pthread_mutex_lock(&info.mutex);
4361 pthread_cond_init(&info.cond, NULL);
4362 info.env = new_env;
4363 if (nptl_flags & CLONE_CHILD_SETTID)
4364 info.child_tidptr = child_tidptr;
4365 if (nptl_flags & CLONE_PARENT_SETTID)
4366 info.parent_tidptr = parent_tidptr;
4367
4368 ret = pthread_attr_init(&attr);
4369 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4370 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4371 /* It is not safe to deliver signals until the child has finished
4372 initializing, so temporarily block all signals. */
4373 sigfillset(&sigmask);
4374 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4375
4376 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4377 /* TODO: Free new CPU state if thread creation failed. */
4378
4379 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4380 pthread_attr_destroy(&attr);
4381 if (ret == 0) {
4382 /* Wait for the child to initialize. */
4383 pthread_cond_wait(&info.cond, &info.mutex);
4384 ret = info.tid;
4385 if (flags & CLONE_PARENT_SETTID)
4386 put_user_u32(ret, parent_tidptr);
4387 } else {
4388 ret = -1;
4389 }
4390 pthread_mutex_unlock(&info.mutex);
4391 pthread_cond_destroy(&info.cond);
4392 pthread_mutex_destroy(&info.mutex);
4393 pthread_mutex_unlock(&clone_lock);
4394 } else {
4395 /* if no CLONE_VM, we consider it a fork */
4396 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4397 return -EINVAL;
4398 fork_start();
4399 ret = fork();
4400 if (ret == 0) {
4401 /* Child Process. */
4402 cpu_clone_regs(env, newsp);
4403 fork_end(1);
4404 /* There is a race condition here. The parent process could
4405 theoretically read the TID in the child process before the child
4406 tid is set. This would require using either ptrace
4407 (not implemented) or having *_tidptr point at a shared memory
4408 mapping. We can't repeat the spinlock hack used above because
4409 the child process gets its own copy of the lock. */
4410 if (flags & CLONE_CHILD_SETTID)
4411 put_user_u32(gettid(), child_tidptr);
4412 if (flags & CLONE_PARENT_SETTID)
4413 put_user_u32(gettid(), parent_tidptr);
4414 ts = (TaskState *)env->opaque;
4415 if (flags & CLONE_SETTLS)
4416 cpu_set_tls (env, newtls);
4417 if (flags & CLONE_CHILD_CLEARTID)
4418 ts->child_tidptr = child_tidptr;
4419 } else {
4420 fork_end(0);
4421 }
4422 }
4423 return ret;
4424 }
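/* Editor's note (a sketch of expected callers, not from the original
 * source): a guest pthread_create() typically reaches do_fork() with
 * something close to
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * and therefore takes the CLONE_VM branch, becoming a host pthread, while
 * a plain fork()/vfork() (flags == SIGCHLD, possibly plus CLONE_VFORK)
 * takes the host fork() branch instead. */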
4425
4426 /* Warning: doesn't handle Linux-specific flags... */
4427 static int target_to_host_fcntl_cmd(int cmd)
4428 {
4429 switch(cmd) {
4430 case TARGET_F_DUPFD:
4431 case TARGET_F_GETFD:
4432 case TARGET_F_SETFD:
4433 case TARGET_F_GETFL:
4434 case TARGET_F_SETFL:
4435 return cmd;
4436 case TARGET_F_GETLK:
4437 return F_GETLK;
4438 case TARGET_F_SETLK:
4439 return F_SETLK;
4440 case TARGET_F_SETLKW:
4441 return F_SETLKW;
4442 case TARGET_F_GETOWN:
4443 return F_GETOWN;
4444 case TARGET_F_SETOWN:
4445 return F_SETOWN;
4446 case TARGET_F_GETSIG:
4447 return F_GETSIG;
4448 case TARGET_F_SETSIG:
4449 return F_SETSIG;
4450 #if TARGET_ABI_BITS == 32
4451 case TARGET_F_GETLK64:
4452 return F_GETLK64;
4453 case TARGET_F_SETLK64:
4454 return F_SETLK64;
4455 case TARGET_F_SETLKW64:
4456 return F_SETLKW64;
4457 #endif
4458 case TARGET_F_SETLEASE:
4459 return F_SETLEASE;
4460 case TARGET_F_GETLEASE:
4461 return F_GETLEASE;
4462 #ifdef F_DUPFD_CLOEXEC
4463 case TARGET_F_DUPFD_CLOEXEC:
4464 return F_DUPFD_CLOEXEC;
4465 #endif
4466 case TARGET_F_NOTIFY:
4467 return F_NOTIFY;
4468 default:
4469 return -TARGET_EINVAL;
4470 }
4471 return -TARGET_EINVAL;
4472 }
4473
4474 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4475 static const bitmask_transtbl flock_tbl[] = {
4476 TRANSTBL_CONVERT(F_RDLCK),
4477 TRANSTBL_CONVERT(F_WRLCK),
4478 TRANSTBL_CONVERT(F_UNLCK),
4479 TRANSTBL_CONVERT(F_EXLCK),
4480 TRANSTBL_CONVERT(F_SHLCK),
4481 { 0, 0, 0, 0 }
4482 };
4483
4484 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4485 {
4486 struct flock fl;
4487 struct target_flock *target_fl;
4488 struct flock64 fl64;
4489 struct target_flock64 *target_fl64;
4490 abi_long ret;
4491 int host_cmd = target_to_host_fcntl_cmd(cmd);
4492
4493 if (host_cmd == -TARGET_EINVAL)
4494 return host_cmd;
4495
4496 switch(cmd) {
4497 case TARGET_F_GETLK:
4498 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4499 return -TARGET_EFAULT;
4500 fl.l_type =
4501 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4502 fl.l_whence = tswap16(target_fl->l_whence);
4503 fl.l_start = tswapal(target_fl->l_start);
4504 fl.l_len = tswapal(target_fl->l_len);
4505 fl.l_pid = tswap32(target_fl->l_pid);
4506 unlock_user_struct(target_fl, arg, 0);
4507 ret = get_errno(fcntl(fd, host_cmd, &fl));
4508 if (ret == 0) {
4509 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4510 return -TARGET_EFAULT;
4511 target_fl->l_type =
4512 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4513 target_fl->l_whence = tswap16(fl.l_whence);
4514 target_fl->l_start = tswapal(fl.l_start);
4515 target_fl->l_len = tswapal(fl.l_len);
4516 target_fl->l_pid = tswap32(fl.l_pid);
4517 unlock_user_struct(target_fl, arg, 1);
4518 }
4519 break;
4520
4521 case TARGET_F_SETLK:
4522 case TARGET_F_SETLKW:
4523 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4524 return -TARGET_EFAULT;
4525 fl.l_type =
4526 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4527 fl.l_whence = tswap16(target_fl->l_whence);
4528 fl.l_start = tswapal(target_fl->l_start);
4529 fl.l_len = tswapal(target_fl->l_len);
4530 fl.l_pid = tswap32(target_fl->l_pid);
4531 unlock_user_struct(target_fl, arg, 0);
4532 ret = get_errno(fcntl(fd, host_cmd, &fl));
4533 break;
4534
4535 case TARGET_F_GETLK64:
4536 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4537 return -TARGET_EFAULT;
4538 fl64.l_type =
4539 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4540 fl64.l_whence = tswap16(target_fl64->l_whence);
4541 fl64.l_start = tswap64(target_fl64->l_start);
4542 fl64.l_len = tswap64(target_fl64->l_len);
4543 fl64.l_pid = tswap32(target_fl64->l_pid);
4544 unlock_user_struct(target_fl64, arg, 0);
4545 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4546 if (ret == 0) {
4547 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4548 return -TARGET_EFAULT;
4549 target_fl64->l_type =
4550 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4551 target_fl64->l_whence = tswap16(fl64.l_whence);
4552 target_fl64->l_start = tswap64(fl64.l_start);
4553 target_fl64->l_len = tswap64(fl64.l_len);
4554 target_fl64->l_pid = tswap32(fl64.l_pid);
4555 unlock_user_struct(target_fl64, arg, 1);
4556 }
4557 break;
4558 case TARGET_F_SETLK64:
4559 case TARGET_F_SETLKW64:
4560 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4561 return -TARGET_EFAULT;
4562 fl64.l_type =
4563 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4564 fl64.l_whence = tswap16(target_fl64->l_whence);
4565 fl64.l_start = tswap64(target_fl64->l_start);
4566 fl64.l_len = tswap64(target_fl64->l_len);
4567 fl64.l_pid = tswap32(target_fl64->l_pid);
4568 unlock_user_struct(target_fl64, arg, 0);
4569 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4570 break;
4571
4572 case TARGET_F_GETFL:
4573 ret = get_errno(fcntl(fd, host_cmd, arg));
4574 if (ret >= 0) {
4575 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4576 }
4577 break;
4578
4579 case TARGET_F_SETFL:
4580 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4581 break;
4582
4583 case TARGET_F_SETOWN:
4584 case TARGET_F_GETOWN:
4585 case TARGET_F_SETSIG:
4586 case TARGET_F_GETSIG:
4587 case TARGET_F_SETLEASE:
4588 case TARGET_F_GETLEASE:
4589 ret = get_errno(fcntl(fd, host_cmd, arg));
4590 break;
4591
4592 default:
4593 ret = get_errno(fcntl(fd, cmd, arg));
4594 break;
4595 }
4596 return ret;
4597 }
4598
4599 #ifdef USE_UID16
4600
4601 static inline int high2lowuid(int uid)
4602 {
4603 if (uid > 65535)
4604 return 65534;
4605 else
4606 return uid;
4607 }
4608
4609 static inline int high2lowgid(int gid)
4610 {
4611 if (gid > 65535)
4612 return 65534;
4613 else
4614 return gid;
4615 }
4616
4617 static inline int low2highuid(int uid)
4618 {
4619 if ((int16_t)uid == -1)
4620 return -1;
4621 else
4622 return uid;
4623 }
4624
4625 static inline int low2highgid(int gid)
4626 {
4627 if ((int16_t)gid == -1)
4628 return -1;
4629 else
4630 return gid;
4631 }
4632 static inline int tswapid(int id)
4633 {
4634 return tswap16(id);
4635 }
4636 #else /* !USE_UID16 */
4637 static inline int high2lowuid(int uid)
4638 {
4639 return uid;
4640 }
4641 static inline int high2lowgid(int gid)
4642 {
4643 return gid;
4644 }
4645 static inline int low2highuid(int uid)
4646 {
4647 return uid;
4648 }
4649 static inline int low2highgid(int gid)
4650 {
4651 return gid;
4652 }
4653 static inline int tswapid(int id)
4654 {
4655 return tswap32(id);
4656 }
4657 #endif /* USE_UID16 */
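/* Editor's examples for the helpers above: with USE_UID16,
 * high2lowuid(1000) == 1000 but high2lowuid(100000) == 65534 (the
 * traditional "overflow" uid), and low2highuid(0xffff) == -1, preserving
 * the "unchanged" marker of the 16-bit calls.  Without USE_UID16 all four
 * id helpers are identities and only tswapid() differs (32-bit rather
 * than 16-bit byte swap). */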
4658
4659 void syscall_init(void)
4660 {
4661 IOCTLEntry *ie;
4662 const argtype *arg_type;
4663 int size;
4664 int i;
4665
4666 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4667 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4668 #include "syscall_types.h"
4669 #undef STRUCT
4670 #undef STRUCT_SPECIAL
4671
4672 /* Build the target_to_host_errno_table[] from
4673 * host_to_target_errno_table[]. */
4674 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4675 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4676 }
4677
4678 /* We patch the ioctl size if necessary. We rely on the fact that
4679 no ioctl has all bits set to '1' in its size field. */
4680 ie = ioctl_entries;
4681 while (ie->target_cmd != 0) {
4682 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4683 TARGET_IOC_SIZEMASK) {
4684 arg_type = ie->arg_type;
4685 if (arg_type[0] != TYPE_PTR) {
4686 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4687 ie->target_cmd);
4688 exit(1);
4689 }
4690 arg_type++;
4691 size = thunk_type_size(arg_type, 0);
4692 ie->target_cmd = (ie->target_cmd &
4693 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4694 (size << TARGET_IOC_SIZESHIFT);
4695 }
4696
4697 /* automatic consistency check if same arch */
4698 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4699 (defined(__x86_64__) && defined(TARGET_X86_64))
4700 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4701 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4702 ie->name, ie->target_cmd, ie->host_cmd);
4703 }
4704 #endif
4705 ie++;
4706 }
4707 }
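/* Editor's note (illustrative, assuming the usual IOCTLEntry definitions):
 * an entry whose target_cmd was declared with the size bits all set --
 * e.g. a command whose argument is described as a pointer to a thunked
 * struct -- has the real thunk_type_size() of that struct patched into
 * the TARGET_IOC_SIZEMASK bits here, so the command number ends up
 * matching what the guest actually issues. */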
4708
4709 #if TARGET_ABI_BITS == 32
4710 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4711 {
4712 #ifdef TARGET_WORDS_BIGENDIAN
4713 return ((uint64_t)word0 << 32) | word1;
4714 #else
4715 return ((uint64_t)word1 << 32) | word0;
4716 #endif
4717 }
4718 #else /* TARGET_ABI_BITS == 32 */
4719 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4720 {
4721 return word0;
4722 }
4723 #endif /* TARGET_ABI_BITS != 32 */
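/* Editor's example: on a 32-bit little-endian target ABI a 64-bit file
 * offset arrives as two register-sized halves, so
 * target_offset64(0x89abcdef, 0x01234567) == 0x0123456789abcdefULL
 * (word0 is the low half); on a big-endian target the roles of word0 and
 * word1 are swapped, and on 64-bit ABIs word0 already carries the whole
 * offset. */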
4724
4725 #ifdef TARGET_NR_truncate64
4726 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4727 abi_long arg2,
4728 abi_long arg3,
4729 abi_long arg4)
4730 {
4731 if (regpairs_aligned(cpu_env)) {
4732 arg2 = arg3;
4733 arg3 = arg4;
4734 }
4735 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4736 }
4737 #endif
4738
4739 #ifdef TARGET_NR_ftruncate64
4740 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4741 abi_long arg2,
4742 abi_long arg3,
4743 abi_long arg4)
4744 {
4745 if (regpairs_aligned(cpu_env)) {
4746 arg2 = arg3;
4747 arg3 = arg4;
4748 }
4749 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4750 }
4751 #endif
4752
4753 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4754 abi_ulong target_addr)
4755 {
4756 struct target_timespec *target_ts;
4757
4758 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4759 return -TARGET_EFAULT;
4760 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4761 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4762 unlock_user_struct(target_ts, target_addr, 0);
4763 return 0;
4764 }
4765
4766 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4767 struct timespec *host_ts)
4768 {
4769 struct target_timespec *target_ts;
4770
4771 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4772 return -TARGET_EFAULT;
4773 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4774 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4775 unlock_user_struct(target_ts, target_addr, 1);
4776 return 0;
4777 }
4778
4779 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4780 static inline abi_long host_to_target_stat64(void *cpu_env,
4781 abi_ulong target_addr,
4782 struct stat *host_st)
4783 {
4784 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4785 if (((CPUARMState *)cpu_env)->eabi) {
4786 struct target_eabi_stat64 *target_st;
4787
4788 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4789 return -TARGET_EFAULT;
4790 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4791 __put_user(host_st->st_dev, &target_st->st_dev);
4792 __put_user(host_st->st_ino, &target_st->st_ino);
4793 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4794 __put_user(host_st->st_ino, &target_st->__st_ino);
4795 #endif
4796 __put_user(host_st->st_mode, &target_st->st_mode);
4797 __put_user(host_st->st_nlink, &target_st->st_nlink);
4798 __put_user(host_st->st_uid, &target_st->st_uid);
4799 __put_user(host_st->st_gid, &target_st->st_gid);
4800 __put_user(host_st->st_rdev, &target_st->st_rdev);
4801 __put_user(host_st->st_size, &target_st->st_size);
4802 __put_user(host_st->st_blksize, &target_st->st_blksize);
4803 __put_user(host_st->st_blocks, &target_st->st_blocks);
4804 __put_user(host_st->st_atime, &target_st->target_st_atime);
4805 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4806 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4807 unlock_user_struct(target_st, target_addr, 1);
4808 } else
4809 #endif
4810 {
4811 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4812 struct target_stat *target_st;
4813 #else
4814 struct target_stat64 *target_st;
4815 #endif
4816
4817 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4818 return -TARGET_EFAULT;
4819 memset(target_st, 0, sizeof(*target_st));
4820 __put_user(host_st->st_dev, &target_st->st_dev);
4821 __put_user(host_st->st_ino, &target_st->st_ino);
4822 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4823 __put_user(host_st->st_ino, &target_st->__st_ino);
4824 #endif
4825 __put_user(host_st->st_mode, &target_st->st_mode);
4826 __put_user(host_st->st_nlink, &target_st->st_nlink);
4827 __put_user(host_st->st_uid, &target_st->st_uid);
4828 __put_user(host_st->st_gid, &target_st->st_gid);
4829 __put_user(host_st->st_rdev, &target_st->st_rdev);
4830 /* XXX: better use of kernel struct */
4831 __put_user(host_st->st_size, &target_st->st_size);
4832 __put_user(host_st->st_blksize, &target_st->st_blksize);
4833 __put_user(host_st->st_blocks, &target_st->st_blocks);
4834 __put_user(host_st->st_atime, &target_st->target_st_atime);
4835 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4836 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4837 unlock_user_struct(target_st, target_addr, 1);
4838 }
4839
4840 return 0;
4841 }
4842 #endif
4843
4844 /* ??? Using host futex calls even when target atomic operations
4845 are not really atomic probably breaks things. However, implementing
4846 futexes locally would make futexes shared between multiple processes
4847 tricky; in any case they're probably useless because guest atomic
4848 operations won't work either. */
4849 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4850 target_ulong uaddr2, int val3)
4851 {
4852 struct timespec ts, *pts;
4853 int base_op;
4854
4855 /* ??? We assume FUTEX_* constants are the same on both host
4856 and target. */
4857 #ifdef FUTEX_CMD_MASK
4858 base_op = op & FUTEX_CMD_MASK;
4859 #else
4860 base_op = op;
4861 #endif
4862 switch (base_op) {
4863 case FUTEX_WAIT:
4864 case FUTEX_WAIT_BITSET:
4865 if (timeout) {
4866 pts = &ts;
4867 target_to_host_timespec(pts, timeout);
4868 } else {
4869 pts = NULL;
4870 }
4871 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4872 pts, NULL, val3));
4873 case FUTEX_WAKE:
4874 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4875 case FUTEX_FD:
4876 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4877 case FUTEX_REQUEUE:
4878 case FUTEX_CMP_REQUEUE:
4879 case FUTEX_WAKE_OP:
4880 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4881 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4882 But the prototype takes a `struct timespec *'; insert casts
4883 to satisfy the compiler. We do not need to tswap TIMEOUT
4884 since it's not compared to guest memory. */
4885 pts = (struct timespec *)(uintptr_t) timeout;
4886 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4887 g2h(uaddr2),
4888 (base_op == FUTEX_CMP_REQUEUE
4889 ? tswap32(val3)
4890 : val3)));
4891 default:
4892 return -TARGET_ENOSYS;
4893 }
4894 }
4895
4896 /* Map host to target signal numbers for the wait family of syscalls.
4897 Assume all other status bits are the same. */
4898 int host_to_target_waitstatus(int status)
4899 {
4900 if (WIFSIGNALED(status)) {
4901 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4902 }
4903 if (WIFSTOPPED(status)) {
4904 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4905 | (status & 0xff);
4906 }
4907 return status;
4908 }
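/* Editor's example (hypothetical numbers): if a host child was terminated
 * by host signal 7 and the target numbers that signal differently, only
 * the low 7 bits are rewritten to host_to_target_signal(7); the core-dump
 * bit (0x80) and the upper bytes of a stop status pass through unchanged,
 * per the assumption in the comment above. */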
4909
4910 static int relstr_to_int(const char *s)
4911 {
4912 /* Convert a uname release string like "2.6.18" to an integer
4913 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.)
4914 */
4915 int i, n, tmp;
4916
4917 tmp = 0;
4918 for (i = 0; i < 3; i++) {
4919 n = 0;
4920 while (*s >= '0' && *s <= '9') {
4921 n *= 10;
4922 n += *s - '0';
4923 s++;
4924 }
4925 tmp = (tmp << 8) + n;
4926 if (*s == '.') {
4927 s++;
4928 }
4929 }
4930 return tmp;
4931 }
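/* Editor's worked example: relstr_to_int("3.16.0-4-amd64") parses the
 * digit runs 3, 16 and 0, stops at the '-', and returns
 * (3 << 16) | (16 << 8) | 0 == 0x031000 -- note the 0x10 middle byte,
 * which is exactly the "not BCD" pitfall the comment above warns about. */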
4932
4933 int get_osversion(void)
4934 {
4935 static int osversion;
4936 struct new_utsname buf;
4937 const char *s;
4938
4939 if (osversion)
4940 return osversion;
4941 if (qemu_uname_release && *qemu_uname_release) {
4942 s = qemu_uname_release;
4943 } else {
4944 if (sys_uname(&buf))
4945 return 0;
4946 s = buf.release;
4947 }
4948 osversion = relstr_to_int(s);
4949 return osversion;
4950 }
4951
4952 void init_qemu_uname_release(void)
4953 {
4954 /* Initialize qemu_uname_release for later use.
4955 * If the host kernel is too old and the user hasn't asked for
4956 * a specific fake version number, we might want to fake a minimum
4957 * target kernel version.
4958 */
4959 #ifdef UNAME_MINIMUM_RELEASE
4960 struct new_utsname buf;
4961
4962 if (qemu_uname_release && *qemu_uname_release) {
4963 return;
4964 }
4965
4966 if (sys_uname(&buf)) {
4967 return;
4968 }
4969
4970 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) {
4971 qemu_uname_release = UNAME_MINIMUM_RELEASE;
4972 }
4973 #endif
4974 }
4975
4976 static int open_self_maps(void *cpu_env, int fd)
4977 {
4978 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4979 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4980 #endif
4981 FILE *fp;
4982 char *line = NULL;
4983 size_t len = 0;
4984 ssize_t read;
4985
4986 fp = fopen("/proc/self/maps", "r");
4987 if (fp == NULL) {
4988 return -EACCES;
4989 }
4990
4991 while ((read = getline(&line, &len, fp)) != -1) {
4992 int fields, dev_maj, dev_min, inode;
4993 uint64_t min, max, offset;
4994 char flag_r, flag_w, flag_x, flag_p;
4995 char path[512] = "";
4996 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4997 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4998 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4999
5000 if ((fields < 10) || (fields > 11)) {
5001 continue;
5002 }
5003 if (!strncmp(path, "[stack]", 7)) {
5004 continue;
5005 }
5006 if (h2g_valid(min) && h2g_valid(max)) {
5007 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5008 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5009 h2g(min), h2g(max), flag_r, flag_w,
5010 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5011 path[0] ? " " : "", path);
5012 }
5013 }
5014
5015 free(line);
5016 fclose(fp);
5017
5018 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5019 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5020 (unsigned long long)ts->info->stack_limit,
5021 (unsigned long long)(ts->info->start_stack +
5022 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5023 (unsigned long long)0);
5024 #endif
5025
5026 return 0;
5027 }
5028
5029 static int open_self_stat(void *cpu_env, int fd)
5030 {
5031 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5032 abi_ulong start_stack = ts->info->start_stack;
5033 int i;
5034
5035 for (i = 0; i < 44; i++) {
5036 char buf[128];
5037 int len;
5038 uint64_t val = 0;
5039
5040 if (i == 0) {
5041 /* pid */
5042 val = getpid();
5043 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5044 } else if (i == 1) {
5045 /* app name */
5046 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5047 } else if (i == 27) {
5048 /* stack bottom */
5049 val = start_stack;
5050 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5051 } else {
5052 /* for the rest, there is MasterCard: all remaining fields are reported as 0 */
5053 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5054 }
5055
5056 len = strlen(buf);
5057 if (write(fd, buf, len) != len) {
5058 return -1;
5059 }
5060 }
5061
5062 return 0;
5063 }
5064
5065 static int open_self_auxv(void *cpu_env, int fd)
5066 {
5067 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5068 abi_ulong auxv = ts->info->saved_auxv;
5069 abi_ulong len = ts->info->auxv_len;
5070 char *ptr;
5071
5072 /*
5073 * The auxiliary vector is stored on the target process's stack.
5074 * Read in the whole auxv vector and copy it to the file.
5075 */
5076 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5077 if (ptr != NULL) {
5078 while (len > 0) {
5079 ssize_t r;
5080 r = write(fd, ptr, len);
5081 if (r <= 0) {
5082 break;
5083 }
5084 len -= r;
5085 ptr += r;
5086 }
5087 lseek(fd, 0, SEEK_SET);
5088 unlock_user(ptr, auxv, len);
5089 }
5090
5091 return 0;
5092 }
5093
5094 static int is_proc_myself(const char *filename, const char *entry)
5095 {
5096 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5097 filename += strlen("/proc/");
5098 if (!strncmp(filename, "self/", strlen("self/"))) {
5099 filename += strlen("self/");
5100 } else if (*filename >= '1' && *filename <= '9') {
5101 char myself[80];
5102 snprintf(myself, sizeof(myself), "%d/", getpid());
5103 if (!strncmp(filename, myself, strlen(myself))) {
5104 filename += strlen(myself);
5105 } else {
5106 return 0;
5107 }
5108 } else {
5109 return 0;
5110 }
5111 if (!strcmp(filename, entry)) {
5112 return 1;
5113 }
5114 }
5115 return 0;
5116 }
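/* Editor's usage examples: is_proc_myself("/proc/self/stat", "stat") and
 * is_proc_myself("/proc/1234/stat", "stat") (when getpid() == 1234) both
 * return 1, while "/proc/999/stat" for a foreign pid, "/proc/self/status"
 * against the "stat" entry, or any path outside /proc all return 0. */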
5117
5118 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5119 static int is_proc(const char *filename, const char *entry)
5120 {
5121 return strcmp(filename, entry) == 0;
5122 }
5123
5124 static int open_net_route(void *cpu_env, int fd)
5125 {
5126 FILE *fp;
5127 char *line = NULL;
5128 size_t len = 0;
5129 ssize_t read;
5130
5131 fp = fopen("/proc/net/route", "r");
5132 if (fp == NULL) {
5133 return -EACCES;
5134 }
5135
5136 /* read header */
5137
5138 read = getline(&line, &len, fp);
5139 dprintf(fd, "%s", line);
5140
5141 /* read routes */
5142
5143 while ((read = getline(&line, &len, fp)) != -1) {
5144 char iface[16];
5145 uint32_t dest, gw, mask;
5146 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5147 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5148 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5149 &mask, &mtu, &window, &irtt);
5150 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5151 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5152 metric, tswap32(mask), mtu, window, irtt);
5153 }
5154
5155 free(line);
5156 fclose(fp);
5157
5158 return 0;
5159 }
5160 #endif
5161
5162 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5163 {
5164 struct fake_open {
5165 const char *filename;
5166 int (*fill)(void *cpu_env, int fd);
5167 int (*cmp)(const char *s1, const char *s2);
5168 };
5169 const struct fake_open *fake_open;
5170 static const struct fake_open fakes[] = {
5171 { "maps", open_self_maps, is_proc_myself },
5172 { "stat", open_self_stat, is_proc_myself },
5173 { "auxv", open_self_auxv, is_proc_myself },
5174 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5175 { "/proc/net/route", open_net_route, is_proc },
5176 #endif
5177 { NULL, NULL, NULL }
5178 };
5179
5180 for (fake_open = fakes; fake_open->filename; fake_open++) {
5181 if (fake_open->cmp(pathname, fake_open->filename)) {
5182 break;
5183 }
5184 }
5185
5186 if (fake_open->filename) {
5187 const char *tmpdir;
5188 char filename[PATH_MAX];
5189 int fd, r;
5190
5191 /* create a temporary file to hold the synthesized /proc contents */
5192 tmpdir = getenv("TMPDIR");
5193 if (!tmpdir)
5194 tmpdir = "/tmp";
5195 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5196 fd = mkstemp(filename);
5197 if (fd < 0) {
5198 return fd;
5199 }
5200 unlink(filename);
5201
5202 if ((r = fake_open->fill(cpu_env, fd))) {
5203 close(fd);
5204 return r;
5205 }
5206 lseek(fd, 0, SEEK_SET);
5207
5208 return fd;
5209 }
5210
5211 return get_errno(open(path(pathname), flags, mode));
5212 }
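/* Editor's note (behavioural sketch): a guest open("/proc/self/maps", ...)
 * therefore never touches the host file; it receives an already-unlinked
 * mkstemp() descriptor whose contents were generated by open_self_maps()
 * with addresses translated to the guest view, while any other pathname
 * falls through to the ordinary host open() at the end of do_open(). */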
5213
5214 /* do_syscall() should always have a single exit point at the end so
5215 that actions, such as logging of syscall results, can be performed.
5216 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5217 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5218 abi_long arg2, abi_long arg3, abi_long arg4,
5219 abi_long arg5, abi_long arg6, abi_long arg7,
5220 abi_long arg8)
5221 {
5222 CPUState *cpu = ENV_GET_CPU(cpu_env);
5223 abi_long ret;
5224 struct stat st;
5225 struct statfs stfs;
5226 void *p;
5227
5228 #ifdef DEBUG
5229 gemu_log("syscall %d", num);
5230 #endif
5231 if(do_strace)
5232 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5233
5234 switch(num) {
5235 case TARGET_NR_exit:
5236 /* In old applications this may be used to implement _exit(2).
5237 However in threaded applications it is used for thread termination,
5238 and _exit_group is used for application termination.
5239 Do thread termination if we have more than one thread. */
5240 /* FIXME: This probably breaks if a signal arrives. We should probably
5241 be disabling signals. */
5242 if (CPU_NEXT(first_cpu)) {
5243 TaskState *ts;
5244
5245 cpu_list_lock();
5246 /* Remove the CPU from the list. */
5247 QTAILQ_REMOVE(&cpus, cpu, node);
5248 cpu_list_unlock();
5249 ts = ((CPUArchState *)cpu_env)->opaque;
5250 if (ts->child_tidptr) {
5251 put_user_u32(0, ts->child_tidptr);
5252 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5253 NULL, NULL, 0);
5254 }
5255 thread_cpu = NULL;
5256 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5257 g_free(ts);
5258 pthread_exit(NULL);
5259 }
5260 #ifdef TARGET_GPROF
5261 _mcleanup();
5262 #endif
5263 gdb_exit(cpu_env, arg1);
5264 _exit(arg1);
5265 ret = 0; /* avoid warning */
5266 break;
5267 case TARGET_NR_read:
5268 if (arg3 == 0)
5269 ret = 0;
5270 else {
5271 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5272 goto efault;
5273 ret = get_errno(read(arg1, p, arg3));
5274 unlock_user(p, arg2, ret);
5275 }
5276 break;
5277 case TARGET_NR_write:
5278 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5279 goto efault;
5280 ret = get_errno(write(arg1, p, arg3));
5281 unlock_user(p, arg2, 0);
5282 break;
5283 case TARGET_NR_open:
5284 if (!(p = lock_user_string(arg1)))
5285 goto efault;
5286 ret = get_errno(do_open(cpu_env, p,
5287 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5288 arg3));
5289 unlock_user(p, arg1, 0);
5290 break;
5291 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5292 case TARGET_NR_openat:
5293 if (!(p = lock_user_string(arg2)))
5294 goto efault;
5295 ret = get_errno(sys_openat(arg1,
5296 path(p),
5297 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5298 arg4));
5299 unlock_user(p, arg2, 0);
5300 break;
5301 #endif
5302 case TARGET_NR_close:
5303 ret = get_errno(close(arg1));
5304 break;
5305 case TARGET_NR_brk:
5306 ret = do_brk(arg1);
5307 break;
5308 case TARGET_NR_fork:
5309 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5310 break;
5311 #ifdef TARGET_NR_waitpid
5312 case TARGET_NR_waitpid:
5313 {
5314 int status;
5315 ret = get_errno(waitpid(arg1, &status, arg3));
5316 if (!is_error(ret) && arg2 && ret
5317 && put_user_s32(host_to_target_waitstatus(status), arg2))
5318 goto efault;
5319 }
5320 break;
5321 #endif
5322 #ifdef TARGET_NR_waitid
5323 case TARGET_NR_waitid:
5324 {
5325 siginfo_t info;
5326 info.si_pid = 0;
5327 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5328 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5329 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5330 goto efault;
5331 host_to_target_siginfo(p, &info);
5332 unlock_user(p, arg3, sizeof(target_siginfo_t));
5333 }
5334 }
5335 break;
5336 #endif
5337 #ifdef TARGET_NR_creat /* not on alpha */
5338 case TARGET_NR_creat:
5339 if (!(p = lock_user_string(arg1)))
5340 goto efault;
5341 ret = get_errno(creat(p, arg2));
5342 unlock_user(p, arg1, 0);
5343 break;
5344 #endif
5345 case TARGET_NR_link:
5346 {
5347 void * p2;
5348 p = lock_user_string(arg1);
5349 p2 = lock_user_string(arg2);
5350 if (!p || !p2)
5351 ret = -TARGET_EFAULT;
5352 else
5353 ret = get_errno(link(p, p2));
5354 unlock_user(p2, arg2, 0);
5355 unlock_user(p, arg1, 0);
5356 }
5357 break;
5358 #if defined(TARGET_NR_linkat)
5359 case TARGET_NR_linkat:
5360 {
5361 void * p2 = NULL;
5362 if (!arg2 || !arg4)
5363 goto efault;
5364 p = lock_user_string(arg2);
5365 p2 = lock_user_string(arg4);
5366 if (!p || !p2)
5367 ret = -TARGET_EFAULT;
5368 else
5369 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5370 unlock_user(p, arg2, 0);
5371 unlock_user(p2, arg4, 0);
5372 }
5373 break;
5374 #endif
5375 case TARGET_NR_unlink:
5376 if (!(p = lock_user_string(arg1)))
5377 goto efault;
5378 ret = get_errno(unlink(p));
5379 unlock_user(p, arg1, 0);
5380 break;
5381 #if defined(TARGET_NR_unlinkat)
5382 case TARGET_NR_unlinkat:
5383 if (!(p = lock_user_string(arg2)))
5384 goto efault;
5385 ret = get_errno(unlinkat(arg1, p, arg3));
5386 unlock_user(p, arg2, 0);
5387 break;
5388 #endif
5389 case TARGET_NR_execve:
5390 {
5391 char **argp, **envp;
5392 int argc, envc;
5393 abi_ulong gp;
5394 abi_ulong guest_argp;
5395 abi_ulong guest_envp;
5396 abi_ulong addr;
5397 char **q;
5398 int total_size = 0;
5399
5400 argc = 0;
5401 guest_argp = arg2;
5402 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5403 if (get_user_ual(addr, gp))
5404 goto efault;
5405 if (!addr)
5406 break;
5407 argc++;
5408 }
5409 envc = 0;
5410 guest_envp = arg3;
5411 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5412 if (get_user_ual(addr, gp))
5413 goto efault;
5414 if (!addr)
5415 break;
5416 envc++;
5417 }
5418
5419 argp = alloca((argc + 1) * sizeof(void *));
5420 envp = alloca((envc + 1) * sizeof(void *));
5421
5422 for (gp = guest_argp, q = argp; gp;
5423 gp += sizeof(abi_ulong), q++) {
5424 if (get_user_ual(addr, gp))
5425 goto execve_efault;
5426 if (!addr)
5427 break;
5428 if (!(*q = lock_user_string(addr)))
5429 goto execve_efault;
5430 total_size += strlen(*q) + 1;
5431 }
5432 *q = NULL;
5433
5434 for (gp = guest_envp, q = envp; gp;
5435 gp += sizeof(abi_ulong), q++) {
5436 if (get_user_ual(addr, gp))
5437 goto execve_efault;
5438 if (!addr)
5439 break;
5440 if (!(*q = lock_user_string(addr)))
5441 goto execve_efault;
5442 total_size += strlen(*q) + 1;
5443 }
5444 *q = NULL;
5445
5446 /* This case will not be caught by the host's execve() if its
5447 page size is bigger than the target's. */
5448 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5449 ret = -TARGET_E2BIG;
5450 goto execve_end;
5451 }
5452 if (!(p = lock_user_string(arg1)))
5453 goto execve_efault;
5454 ret = get_errno(execve(p, argp, envp));
5455 unlock_user(p, arg1, 0);
5456
5457 goto execve_end;
5458
5459 execve_efault:
5460 ret = -TARGET_EFAULT;
5461
5462 execve_end:
5463 for (gp = guest_argp, q = argp; *q;
5464 gp += sizeof(abi_ulong), q++) {
5465 if (get_user_ual(addr, gp)
5466 || !addr)
5467 break;
5468 unlock_user(*q, addr, 0);
5469 }
5470 for (gp = guest_envp, q = envp; *q;
5471 gp += sizeof(abi_ulong), q++) {
5472 if (get_user_ual(addr, gp)
5473 || !addr)
5474 break;
5475 unlock_user(*q, addr, 0);
5476 }
5477 }
5478 break;
5479 case TARGET_NR_chdir:
5480 if (!(p = lock_user_string(arg1)))
5481 goto efault;
5482 ret = get_errno(chdir(p));
5483 unlock_user(p, arg1, 0);
5484 break;
5485 #ifdef TARGET_NR_time
5486 case TARGET_NR_time:
5487 {
5488 time_t host_time;
5489 ret = get_errno(time(&host_time));
5490 if (!is_error(ret)
5491 && arg1
5492 && put_user_sal(host_time, arg1))
5493 goto efault;
5494 }
5495 break;
5496 #endif
5497 case TARGET_NR_mknod:
5498 if (!(p = lock_user_string(arg1)))
5499 goto efault;
5500 ret = get_errno(mknod(p, arg2, arg3));
5501 unlock_user(p, arg1, 0);
5502 break;
5503 #if defined(TARGET_NR_mknodat)
5504 case TARGET_NR_mknodat:
5505 if (!(p = lock_user_string(arg2)))
5506 goto efault;
5507 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5508 unlock_user(p, arg2, 0);
5509 break;
5510 #endif
5511 case TARGET_NR_chmod:
5512 if (!(p = lock_user_string(arg1)))
5513 goto efault;
5514 ret = get_errno(chmod(p, arg2));
5515 unlock_user(p, arg1, 0);
5516 break;
5517 #ifdef TARGET_NR_break
5518 case TARGET_NR_break:
5519 goto unimplemented;
5520 #endif
5521 #ifdef TARGET_NR_oldstat
5522 case TARGET_NR_oldstat:
5523 goto unimplemented;
5524 #endif
5525 case TARGET_NR_lseek:
5526 ret = get_errno(lseek(arg1, arg2, arg3));
5527 break;
5528 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5529 /* Alpha specific */
5530 case TARGET_NR_getxpid:
5531 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5532 ret = get_errno(getpid());
5533 break;
5534 #endif
5535 #ifdef TARGET_NR_getpid
5536 case TARGET_NR_getpid:
5537 ret = get_errno(getpid());
5538 break;
5539 #endif
5540 case TARGET_NR_mount:
5541 {
5542 /* need to look at the data field */
5543 void *p2, *p3;
5544 p = lock_user_string(arg1);
5545 p2 = lock_user_string(arg2);
5546 p3 = lock_user_string(arg3);
5547 if (!p || !p2 || !p3)
5548 ret = -TARGET_EFAULT;
5549 else {
5550 /* FIXME - arg5 should be locked, but it isn't clear how to
5551 * do that since it's not guaranteed to be a NULL-terminated
5552 * string.
5553 */
5554 if ( ! arg5 )
5555 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5556 else
5557 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5558 }
5559 unlock_user(p, arg1, 0);
5560 unlock_user(p2, arg2, 0);
5561 unlock_user(p3, arg3, 0);
5562 break;
5563 }
5564 #ifdef TARGET_NR_umount
5565 case TARGET_NR_umount:
5566 if (!(p = lock_user_string(arg1)))
5567 goto efault;
5568 ret = get_errno(umount(p));
5569 unlock_user(p, arg1, 0);
5570 break;
5571 #endif
5572 #ifdef TARGET_NR_stime /* not on alpha */
5573 case TARGET_NR_stime:
5574 {
5575 time_t host_time;
5576 if (get_user_sal(host_time, arg1))
5577 goto efault;
5578 ret = get_errno(stime(&host_time));
5579 }
5580 break;
5581 #endif
5582 case TARGET_NR_ptrace:
5583 goto unimplemented;
5584 #ifdef TARGET_NR_alarm /* not on alpha */
5585 case TARGET_NR_alarm:
5586 ret = alarm(arg1);
5587 break;
5588 #endif
5589 #ifdef TARGET_NR_oldfstat
5590 case TARGET_NR_oldfstat:
5591 goto unimplemented;
5592 #endif
5593 #ifdef TARGET_NR_pause /* not on alpha */
5594 case TARGET_NR_pause:
5595 ret = get_errno(pause());
5596 break;
5597 #endif
5598 #ifdef TARGET_NR_utime
5599 case TARGET_NR_utime:
5600 {
5601 struct utimbuf tbuf, *host_tbuf;
5602 struct target_utimbuf *target_tbuf;
5603 if (arg2) {
5604 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5605 goto efault;
5606 tbuf.actime = tswapal(target_tbuf->actime);
5607 tbuf.modtime = tswapal(target_tbuf->modtime);
5608 unlock_user_struct(target_tbuf, arg2, 0);
5609 host_tbuf = &tbuf;
5610 } else {
5611 host_tbuf = NULL;
5612 }
5613 if (!(p = lock_user_string(arg1)))
5614 goto efault;
5615 ret = get_errno(utime(p, host_tbuf));
5616 unlock_user(p, arg1, 0);
5617 }
5618 break;
5619 #endif
5620 case TARGET_NR_utimes:
5621 {
5622 struct timeval *tvp, tv[2];
5623 if (arg2) {
5624 if (copy_from_user_timeval(&tv[0], arg2)
5625 || copy_from_user_timeval(&tv[1],
5626 arg2 + sizeof(struct target_timeval)))
5627 goto efault;
5628 tvp = tv;
5629 } else {
5630 tvp = NULL;
5631 }
5632 if (!(p = lock_user_string(arg1)))
5633 goto efault;
5634 ret = get_errno(utimes(p, tvp));
5635 unlock_user(p, arg1, 0);
5636 }
5637 break;
5638 #if defined(TARGET_NR_futimesat)
5639 case TARGET_NR_futimesat:
5640 {
5641 struct timeval *tvp, tv[2];
5642 if (arg3) {
5643 if (copy_from_user_timeval(&tv[0], arg3)
5644 || copy_from_user_timeval(&tv[1],
5645 arg3 + sizeof(struct target_timeval)))
5646 goto efault;
5647 tvp = tv;
5648 } else {
5649 tvp = NULL;
5650 }
5651 if (!(p = lock_user_string(arg2)))
5652 goto efault;
5653 ret = get_errno(futimesat(arg1, path(p), tvp));
5654 unlock_user(p, arg2, 0);
5655 }
5656 break;
5657 #endif
5658 #ifdef TARGET_NR_stty
5659 case TARGET_NR_stty:
5660 goto unimplemented;
5661 #endif
5662 #ifdef TARGET_NR_gtty
5663 case TARGET_NR_gtty:
5664 goto unimplemented;
5665 #endif
5666 case TARGET_NR_access:
5667 if (!(p = lock_user_string(arg1)))
5668 goto efault;
5669 ret = get_errno(access(path(p), arg2));
5670 unlock_user(p, arg1, 0);
5671 break;
5672 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5673 case TARGET_NR_faccessat:
5674 if (!(p = lock_user_string(arg2)))
5675 goto efault;
5676 ret = get_errno(faccessat(arg1, p, arg3, 0));
5677 unlock_user(p, arg2, 0);
5678 break;
5679 #endif
5680 #ifdef TARGET_NR_nice /* not on alpha */
5681 case TARGET_NR_nice:
5682 ret = get_errno(nice(arg1));
5683 break;
5684 #endif
5685 #ifdef TARGET_NR_ftime
5686 case TARGET_NR_ftime:
5687 goto unimplemented;
5688 #endif
5689 case TARGET_NR_sync:
5690 sync();
5691 ret = 0;
5692 break;
5693 case TARGET_NR_kill:
5694 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5695 break;
5696 case TARGET_NR_rename:
5697 {
5698 void *p2;
5699 p = lock_user_string(arg1);
5700 p2 = lock_user_string(arg2);
5701 if (!p || !p2)
5702 ret = -TARGET_EFAULT;
5703 else
5704 ret = get_errno(rename(p, p2));
5705 unlock_user(p2, arg2, 0);
5706 unlock_user(p, arg1, 0);
5707 }
5708 break;
5709 #if defined(TARGET_NR_renameat)
5710 case TARGET_NR_renameat:
5711 {
5712 void *p2;
5713 p = lock_user_string(arg2);
5714 p2 = lock_user_string(arg4);
5715 if (!p || !p2)
5716 ret = -TARGET_EFAULT;
5717 else
5718 ret = get_errno(renameat(arg1, p, arg3, p2));
5719 unlock_user(p2, arg4, 0);
5720 unlock_user(p, arg2, 0);
5721 }
5722 break;
5723 #endif
5724 case TARGET_NR_mkdir:
5725 if (!(p = lock_user_string(arg1)))
5726 goto efault;
5727 ret = get_errno(mkdir(p, arg2));
5728 unlock_user(p, arg1, 0);
5729 break;
5730 #if defined(TARGET_NR_mkdirat)
5731 case TARGET_NR_mkdirat:
5732 if (!(p = lock_user_string(arg2)))
5733 goto efault;
5734 ret = get_errno(mkdirat(arg1, p, arg3));
5735 unlock_user(p, arg2, 0);
5736 break;
5737 #endif
5738 case TARGET_NR_rmdir:
5739 if (!(p = lock_user_string(arg1)))
5740 goto efault;
5741 ret = get_errno(rmdir(p));
5742 unlock_user(p, arg1, 0);
5743 break;
5744 case TARGET_NR_dup:
5745 ret = get_errno(dup(arg1));
5746 break;
5747 case TARGET_NR_pipe:
5748 ret = do_pipe(cpu_env, arg1, 0, 0);
5749 break;
5750 #ifdef TARGET_NR_pipe2
5751 case TARGET_NR_pipe2:
5752 ret = do_pipe(cpu_env, arg1,
5753 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5754 break;
5755 #endif
5756 case TARGET_NR_times:
5757 {
5758 struct target_tms *tmsp;
5759 struct tms tms;
5760 ret = get_errno(times(&tms));
5761 if (arg1) {
5762 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5763 if (!tmsp)
5764 goto efault;
5765 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5766 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5767 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5768 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5769 }
5770 if (!is_error(ret))
5771 ret = host_to_target_clock_t(ret);
5772 }
5773 break;
5774 #ifdef TARGET_NR_prof
5775 case TARGET_NR_prof:
5776 goto unimplemented;
5777 #endif
5778 #ifdef TARGET_NR_signal
5779 case TARGET_NR_signal:
5780 goto unimplemented;
5781 #endif
5782 case TARGET_NR_acct:
5783 if (arg1 == 0) {
5784 ret = get_errno(acct(NULL));
5785 } else {
5786 if (!(p = lock_user_string(arg1)))
5787 goto efault;
5788 ret = get_errno(acct(path(p)));
5789 unlock_user(p, arg1, 0);
5790 }
5791 break;
5792 #ifdef TARGET_NR_umount2
5793 case TARGET_NR_umount2:
5794 if (!(p = lock_user_string(arg1)))
5795 goto efault;
5796 ret = get_errno(umount2(p, arg2));
5797 unlock_user(p, arg1, 0);
5798 break;
5799 #endif
5800 #ifdef TARGET_NR_lock
5801 case TARGET_NR_lock:
5802 goto unimplemented;
5803 #endif
5804 case TARGET_NR_ioctl:
5805 ret = do_ioctl(arg1, arg2, arg3);
5806 break;
5807 case TARGET_NR_fcntl:
5808 ret = do_fcntl(arg1, arg2, arg3);
5809 break;
5810 #ifdef TARGET_NR_mpx
5811 case TARGET_NR_mpx:
5812 goto unimplemented;
5813 #endif
5814 case TARGET_NR_setpgid:
5815 ret = get_errno(setpgid(arg1, arg2));
5816 break;
5817 #ifdef TARGET_NR_ulimit
5818 case TARGET_NR_ulimit:
5819 goto unimplemented;
5820 #endif
5821 #ifdef TARGET_NR_oldolduname
5822 case TARGET_NR_oldolduname:
5823 goto unimplemented;
5824 #endif
5825 case TARGET_NR_umask:
5826 ret = get_errno(umask(arg1));
5827 break;
5828 case TARGET_NR_chroot:
5829 if (!(p = lock_user_string(arg1)))
5830 goto efault;
5831 ret = get_errno(chroot(p));
5832 unlock_user(p, arg1, 0);
5833 break;
5834 case TARGET_NR_ustat:
5835 goto unimplemented;
5836 case TARGET_NR_dup2:
5837 ret = get_errno(dup2(arg1, arg2));
5838 break;
5839 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5840 case TARGET_NR_dup3:
5841 ret = get_errno(dup3(arg1, arg2, arg3));
5842 break;
5843 #endif
5844 #ifdef TARGET_NR_getppid /* not on alpha */
5845 case TARGET_NR_getppid:
5846 ret = get_errno(getppid());
5847 break;
5848 #endif
5849 case TARGET_NR_getpgrp:
5850 ret = get_errno(getpgrp());
5851 break;
5852 case TARGET_NR_setsid:
5853 ret = get_errno(setsid());
5854 break;
5855 #ifdef TARGET_NR_sigaction
5856 case TARGET_NR_sigaction:
5857 {
5858 #if defined(TARGET_ALPHA)
5859 struct target_sigaction act, oact, *pact = 0;
5860 struct target_old_sigaction *old_act;
5861 if (arg2) {
5862 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5863 goto efault;
5864 act._sa_handler = old_act->_sa_handler;
5865 target_siginitset(&act.sa_mask, old_act->sa_mask);
5866 act.sa_flags = old_act->sa_flags;
5867 act.sa_restorer = 0;
5868 unlock_user_struct(old_act, arg2, 0);
5869 pact = &act;
5870 }
5871 ret = get_errno(do_sigaction(arg1, pact, &oact));
5872 if (!is_error(ret) && arg3) {
5873 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5874 goto efault;
5875 old_act->_sa_handler = oact._sa_handler;
5876 old_act->sa_mask = oact.sa_mask.sig[0];
5877 old_act->sa_flags = oact.sa_flags;
5878 unlock_user_struct(old_act, arg3, 1);
5879 }
5880 #elif defined(TARGET_MIPS)
5881 struct target_sigaction act, oact, *pact, *old_act;
5882
5883 if (arg2) {
5884 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5885 goto efault;
5886 act._sa_handler = old_act->_sa_handler;
5887 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5888 act.sa_flags = old_act->sa_flags;
5889 unlock_user_struct(old_act, arg2, 0);
5890 pact = &act;
5891 } else {
5892 pact = NULL;
5893 }
5894
5895 ret = get_errno(do_sigaction(arg1, pact, &oact));
5896
5897 if (!is_error(ret) && arg3) {
5898 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5899 goto efault;
5900 old_act->_sa_handler = oact._sa_handler;
5901 old_act->sa_flags = oact.sa_flags;
5902 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5903 old_act->sa_mask.sig[1] = 0;
5904 old_act->sa_mask.sig[2] = 0;
5905 old_act->sa_mask.sig[3] = 0;
5906 unlock_user_struct(old_act, arg3, 1);
5907 }
5908 #else
5909 struct target_old_sigaction *old_act;
5910 struct target_sigaction act, oact, *pact;
5911 if (arg2) {
5912 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5913 goto efault;
5914 act._sa_handler = old_act->_sa_handler;
5915 target_siginitset(&act.sa_mask, old_act->sa_mask);
5916 act.sa_flags = old_act->sa_flags;
5917 act.sa_restorer = old_act->sa_restorer;
5918 unlock_user_struct(old_act, arg2, 0);
5919 pact = &act;
5920 } else {
5921 pact = NULL;
5922 }
5923 ret = get_errno(do_sigaction(arg1, pact, &oact));
5924 if (!is_error(ret) && arg3) {
5925 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5926 goto efault;
5927 old_act->_sa_handler = oact._sa_handler;
5928 old_act->sa_mask = oact.sa_mask.sig[0];
5929 old_act->sa_flags = oact.sa_flags;
5930 old_act->sa_restorer = oact.sa_restorer;
5931 unlock_user_struct(old_act, arg3, 1);
5932 }
5933 #endif
5934 }
5935 break;
5936 #endif
5937 case TARGET_NR_rt_sigaction:
5938 {
5939 #if defined(TARGET_ALPHA)
5940 struct target_sigaction act, oact, *pact = 0;
5941 struct target_rt_sigaction *rt_act;
5942 /* ??? arg4 == sizeof(sigset_t). */
5943 if (arg2) {
5944 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5945 goto efault;
5946 act._sa_handler = rt_act->_sa_handler;
5947 act.sa_mask = rt_act->sa_mask;
5948 act.sa_flags = rt_act->sa_flags;
5949 act.sa_restorer = arg5;
5950 unlock_user_struct(rt_act, arg2, 0);
5951 pact = &act;
5952 }
5953 ret = get_errno(do_sigaction(arg1, pact, &oact));
5954 if (!is_error(ret) && arg3) {
5955 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5956 goto efault;
5957 rt_act->_sa_handler = oact._sa_handler;
5958 rt_act->sa_mask = oact.sa_mask;
5959 rt_act->sa_flags = oact.sa_flags;
5960 unlock_user_struct(rt_act, arg3, 1);
5961 }
5962 #else
5963 struct target_sigaction *act;
5964 struct target_sigaction *oact;
5965
5966 if (arg2) {
5967 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5968 goto efault;
5969 } else
5970 act = NULL;
5971 if (arg3) {
5972 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5973 ret = -TARGET_EFAULT;
5974 goto rt_sigaction_fail;
5975 }
5976 } else
5977 oact = NULL;
5978 ret = get_errno(do_sigaction(arg1, act, oact));
5979 rt_sigaction_fail:
5980 if (act)
5981 unlock_user_struct(act, arg2, 0);
5982 if (oact)
5983 unlock_user_struct(oact, arg3, 1);
5984 #endif
5985 }
5986 break;
5987 #ifdef TARGET_NR_sgetmask /* not on alpha */
5988 case TARGET_NR_sgetmask:
5989 {
5990 sigset_t cur_set;
5991 abi_ulong target_set;
5992 sigprocmask(0, NULL, &cur_set);
5993 host_to_target_old_sigset(&target_set, &cur_set);
5994 ret = target_set;
5995 }
5996 break;
5997 #endif
5998 #ifdef TARGET_NR_ssetmask /* not on alpha */
5999 case TARGET_NR_ssetmask:
6000 {
6001 sigset_t set, oset, cur_set;
6002 abi_ulong target_set = arg1;
6003 sigprocmask(0, NULL, &cur_set);
6004 target_to_host_old_sigset(&set, &target_set);
6005 sigorset(&set, &set, &cur_set);
6006 sigprocmask(SIG_SETMASK, &set, &oset);
6007 host_to_target_old_sigset(&target_set, &oset);
6008 ret = target_set;
6009 }
6010 break;
6011 #endif
6012 #ifdef TARGET_NR_sigprocmask
6013 case TARGET_NR_sigprocmask:
6014 {
6015 #if defined(TARGET_ALPHA)
6016 sigset_t set, oldset;
6017 abi_ulong mask;
6018 int how;
6019
6020 switch (arg1) {
6021 case TARGET_SIG_BLOCK:
6022 how = SIG_BLOCK;
6023 break;
6024 case TARGET_SIG_UNBLOCK:
6025 how = SIG_UNBLOCK;
6026 break;
6027 case TARGET_SIG_SETMASK:
6028 how = SIG_SETMASK;
6029 break;
6030 default:
6031 ret = -TARGET_EINVAL;
6032 goto fail;
6033 }
6034 mask = arg2;
6035 target_to_host_old_sigset(&set, &mask);
6036
6037 ret = get_errno(sigprocmask(how, &set, &oldset));
6038 if (!is_error(ret)) {
6039 host_to_target_old_sigset(&mask, &oldset);
6040 ret = mask;
6041 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6042 }
6043 #else
6044 sigset_t set, oldset, *set_ptr;
6045 int how;
6046
6047 if (arg2) {
6048 switch (arg1) {
6049 case TARGET_SIG_BLOCK:
6050 how = SIG_BLOCK;
6051 break;
6052 case TARGET_SIG_UNBLOCK:
6053 how = SIG_UNBLOCK;
6054 break;
6055 case TARGET_SIG_SETMASK:
6056 how = SIG_SETMASK;
6057 break;
6058 default:
6059 ret = -TARGET_EINVAL;
6060 goto fail;
6061 }
6062 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6063 goto efault;
6064 target_to_host_old_sigset(&set, p);
6065 unlock_user(p, arg2, 0);
6066 set_ptr = &set;
6067 } else {
6068 how = 0;
6069 set_ptr = NULL;
6070 }
6071 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6072 if (!is_error(ret) && arg3) {
6073 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6074 goto efault;
6075 host_to_target_old_sigset(p, &oldset);
6076 unlock_user(p, arg3, sizeof(target_sigset_t));
6077 }
6078 #endif
6079 }
6080 break;
6081 #endif
6082 case TARGET_NR_rt_sigprocmask:
6083 {
6084 int how = arg1;
6085 sigset_t set, oldset, *set_ptr;
6086
6087 if (arg2) {
6088 switch(how) {
6089 case TARGET_SIG_BLOCK:
6090 how = SIG_BLOCK;
6091 break;
6092 case TARGET_SIG_UNBLOCK:
6093 how = SIG_UNBLOCK;
6094 break;
6095 case TARGET_SIG_SETMASK:
6096 how = SIG_SETMASK;
6097 break;
6098 default:
6099 ret = -TARGET_EINVAL;
6100 goto fail;
6101 }
6102 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6103 goto efault;
6104 target_to_host_sigset(&set, p);
6105 unlock_user(p, arg2, 0);
6106 set_ptr = &set;
6107 } else {
6108 how = 0;
6109 set_ptr = NULL;
6110 }
6111 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6112 if (!is_error(ret) && arg3) {
6113 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6114 goto efault;
6115 host_to_target_sigset(p, &oldset);
6116 unlock_user(p, arg3, sizeof(target_sigset_t));
6117 }
6118 }
6119 break;
6120 #ifdef TARGET_NR_sigpending
6121 case TARGET_NR_sigpending:
6122 {
6123 sigset_t set;
6124 ret = get_errno(sigpending(&set));
6125 if (!is_error(ret)) {
6126 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6127 goto efault;
6128 host_to_target_old_sigset(p, &set);
6129 unlock_user(p, arg1, sizeof(target_sigset_t));
6130 }
6131 }
6132 break;
6133 #endif
6134 case TARGET_NR_rt_sigpending:
6135 {
6136 sigset_t set;
6137 ret = get_errno(sigpending(&set));
6138 if (!is_error(ret)) {
6139 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6140 goto efault;
6141 host_to_target_sigset(p, &set);
6142 unlock_user(p, arg1, sizeof(target_sigset_t));
6143 }
6144 }
6145 break;
6146 #ifdef TARGET_NR_sigsuspend
6147 case TARGET_NR_sigsuspend:
6148 {
6149 sigset_t set;
6150 #if defined(TARGET_ALPHA)
6151 abi_ulong mask = arg1;
6152 target_to_host_old_sigset(&set, &mask);
6153 #else
6154 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6155 goto efault;
6156 target_to_host_old_sigset(&set, p);
6157 unlock_user(p, arg1, 0);
6158 #endif
6159 ret = get_errno(sigsuspend(&set));
6160 }
6161 break;
6162 #endif
6163 case TARGET_NR_rt_sigsuspend:
6164 {
6165 sigset_t set;
6166 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6167 goto efault;
6168 target_to_host_sigset(&set, p);
6169 unlock_user(p, arg1, 0);
6170 ret = get_errno(sigsuspend(&set));
6171 }
6172 break;
6173 case TARGET_NR_rt_sigtimedwait:
6174 {
6175 sigset_t set;
6176 struct timespec uts, *puts;
6177 siginfo_t uinfo;
6178
6179 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6180 goto efault;
6181 target_to_host_sigset(&set, p);
6182 unlock_user(p, arg1, 0);
6183 if (arg3) {
6184 puts = &uts;
6185 target_to_host_timespec(puts, arg3);
6186 } else {
6187 puts = NULL;
6188 }
6189 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6190 if (!is_error(ret) && arg2) {
6191 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6192 goto efault;
6193 host_to_target_siginfo(p, &uinfo);
6194 unlock_user(p, arg2, sizeof(target_siginfo_t));
6195 }
6196 }
6197 break;
6198 case TARGET_NR_rt_sigqueueinfo:
6199 {
6200 siginfo_t uinfo;
6201 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6202 goto efault;
6203 target_to_host_siginfo(&uinfo, p);
6204 unlock_user(p, arg3, 0);
6205 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6206 }
6207 break;
6208 #ifdef TARGET_NR_sigreturn
6209 case TARGET_NR_sigreturn:
6210 /* NOTE: ret is eax, so no transcoding needs to be done */
6211 ret = do_sigreturn(cpu_env);
6212 break;
6213 #endif
6214 case TARGET_NR_rt_sigreturn:
6215 /* NOTE: ret is eax, so no transcoding needs to be done */
6216 ret = do_rt_sigreturn(cpu_env);
6217 break;
6218 case TARGET_NR_sethostname:
6219 if (!(p = lock_user_string(arg1)))
6220 goto efault;
6221 ret = get_errno(sethostname(p, arg2));
6222 unlock_user(p, arg1, 0);
6223 break;
6224 case TARGET_NR_setrlimit:
6225 {
6226 int resource = target_to_host_resource(arg1);
6227 struct target_rlimit *target_rlim;
6228 struct rlimit rlim;
6229 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6230 goto efault;
6231 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6232 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6233 unlock_user_struct(target_rlim, arg2, 0);
6234 ret = get_errno(setrlimit(resource, &rlim));
6235 }
6236 break;
6237 case TARGET_NR_getrlimit:
6238 {
6239 int resource = target_to_host_resource(arg1);
6240 struct target_rlimit *target_rlim;
6241 struct rlimit rlim;
6242
6243 ret = get_errno(getrlimit(resource, &rlim));
6244 if (!is_error(ret)) {
6245 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6246 goto efault;
6247 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6248 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6249 unlock_user_struct(target_rlim, arg2, 1);
6250 }
6251 }
6252 break;
6253 case TARGET_NR_getrusage:
6254 {
6255 struct rusage rusage;
6256 ret = get_errno(getrusage(arg1, &rusage));
6257 if (!is_error(ret)) {
6258 host_to_target_rusage(arg2, &rusage);
6259 }
6260 }
6261 break;
6262 case TARGET_NR_gettimeofday:
6263 {
6264 struct timeval tv;
6265 ret = get_errno(gettimeofday(&tv, NULL));
6266 if (!is_error(ret)) {
6267 if (copy_to_user_timeval(arg1, &tv))
6268 goto efault;
6269 }
6270 }
6271 break;
6272 case TARGET_NR_settimeofday:
6273 {
6274 struct timeval tv;
6275 if (copy_from_user_timeval(&tv, arg1))
6276 goto efault;
6277 ret = get_errno(settimeofday(&tv, NULL));
6278 }
6279 break;
6280 #if defined(TARGET_NR_select)
6281 case TARGET_NR_select:
6282 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6283 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6284 #else
6285 {
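/* (A sketch of what this path handles: the old select ABI passes a single
 * pointer (arg1) to a five-word block holding n, inp, outp, exp and tvp,
 * which is unpacked here before calling do_select().) */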
6286 struct target_sel_arg_struct *sel;
6287 abi_ulong inp, outp, exp, tvp;
6288 long nsel;
6289
6290 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6291 goto efault;
6292 nsel = tswapal(sel->n);
6293 inp = tswapal(sel->inp);
6294 outp = tswapal(sel->outp);
6295 exp = tswapal(sel->exp);
6296 tvp = tswapal(sel->tvp);
6297 unlock_user_struct(sel, arg1, 0);
6298 ret = do_select(nsel, inp, outp, exp, tvp);
6299 }
6300 #endif
6301 break;
6302 #endif
6303 #ifdef TARGET_NR_pselect6
6304 case TARGET_NR_pselect6:
6305 {
6306 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6307 fd_set rfds, wfds, efds;
6308 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6309 struct timespec ts, *ts_ptr;
6310
6311 /*
6312 * The 6th arg is actually two args smashed together,
6313 * so we cannot use the C library.
6314 */
6315 sigset_t set;
6316 struct {
6317 sigset_t *set;
6318 size_t size;
6319 } sig, *sig_ptr;
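/* (Illustrative note: the raw sys_pselect6 receives a pointer to this
 * two-word { sigset pointer, sigset size } pair as its sixth argument;
 * arg7[0] and arg7[1] below are those two words read from guest memory.) */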
6320
6321 abi_ulong arg_sigset, arg_sigsize, *arg7;
6322 target_sigset_t *target_sigset;
6323
6324 n = arg1;
6325 rfd_addr = arg2;
6326 wfd_addr = arg3;
6327 efd_addr = arg4;
6328 ts_addr = arg5;
6329
6330 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6331 if (ret) {
6332 goto fail;
6333 }
6334 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6335 if (ret) {
6336 goto fail;
6337 }
6338 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6339 if (ret) {
6340 goto fail;
6341 }
6342
6343 /*
6344 * This takes a timespec, and not a timeval, so we cannot
6345 * use the do_select() helper ...
6346 */
6347 if (ts_addr) {
6348 if (target_to_host_timespec(&ts, ts_addr)) {
6349 goto efault;
6350 }
6351 ts_ptr = &ts;
6352 } else {
6353 ts_ptr = NULL;
6354 }
6355
6356 /* Extract the two packed args for the sigset */
6357 if (arg6) {
6358 sig_ptr = &sig;
6359 sig.size = _NSIG / 8;
6360
6361 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6362 if (!arg7) {
6363 goto efault;
6364 }
6365 arg_sigset = tswapal(arg7[0]);
6366 arg_sigsize = tswapal(arg7[1]);
6367 unlock_user(arg7, arg6, 0);
6368
6369 if (arg_sigset) {
6370 sig.set = &set;
6371 if (arg_sigsize != sizeof(*target_sigset)) {
6372 /* Like the kernel, we enforce correct size sigsets */
6373 ret = -TARGET_EINVAL;
6374 goto fail;
6375 }
6376 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6377 sizeof(*target_sigset), 1);
6378 if (!target_sigset) {
6379 goto efault;
6380 }
6381 target_to_host_sigset(&set, target_sigset);
6382 unlock_user(target_sigset, arg_sigset, 0);
6383 } else {
6384 sig.set = NULL;
6385 }
6386 } else {
6387 sig_ptr = NULL;
6388 }
6389
6390 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6391 ts_ptr, sig_ptr));
6392
6393 if (!is_error(ret)) {
6394 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6395 goto efault;
6396 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6397 goto efault;
6398 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6399 goto efault;
6400
6401 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6402 goto efault;
6403 }
6404 }
6405 break;
6406 #endif
6407 case TARGET_NR_symlink:
6408 {
6409 void *p2;
6410 p = lock_user_string(arg1);
6411 p2 = lock_user_string(arg2);
6412 if (!p || !p2)
6413 ret = -TARGET_EFAULT;
6414 else
6415 ret = get_errno(symlink(p, p2));
6416 unlock_user(p2, arg2, 0);
6417 unlock_user(p, arg1, 0);
6418 }
6419 break;
6420 #if defined(TARGET_NR_symlinkat)
6421 case TARGET_NR_symlinkat:
6422 {
6423 void *p2;
6424 p = lock_user_string(arg1);
6425 p2 = lock_user_string(arg3);
6426 if (!p || !p2)
6427 ret = -TARGET_EFAULT;
6428 else
6429 ret = get_errno(symlinkat(p, arg2, p2));
6430 unlock_user(p2, arg3, 0);
6431 unlock_user(p, arg1, 0);
6432 }
6433 break;
6434 #endif
6435 #ifdef TARGET_NR_oldlstat
6436 case TARGET_NR_oldlstat:
6437 goto unimplemented;
6438 #endif
6439 case TARGET_NR_readlink:
6440 {
6441 void *p2;
6442 p = lock_user_string(arg1);
6443 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6444 if (!p || !p2) {
6445 ret = -TARGET_EFAULT;
6446 } else if (is_proc_myself((const char *)p, "exe")) {
6447 char real[PATH_MAX], *temp;
6448 temp = realpath(exec_path, real);
6449 ret = temp == NULL ? get_errno(-1) : strlen(real);
6450 snprintf((char *)p2, arg3, "%s", real);
6451 } else {
6452 ret = get_errno(readlink(path(p), p2, arg3));
6453 }
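/* (A plain readlink on /proc/self/exe would report the QEMU binary itself,
 * so the path of the emulated executable, exec_path, is substituted above.) */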
6454 unlock_user(p2, arg2, ret);
6455 unlock_user(p, arg1, 0);
6456 }
6457 break;
6458 #if defined(TARGET_NR_readlinkat)
6459 case TARGET_NR_readlinkat:
6460 {
6461 void *p2;
6462 p = lock_user_string(arg2);
6463 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6464 if (!p || !p2) {
6465 ret = -TARGET_EFAULT;
6466 } else if (is_proc_myself((const char *)p, "exe")) {
6467 char real[PATH_MAX], *temp;
6468 temp = realpath(exec_path, real);
6469 ret = temp == NULL ? get_errno(-1) : strlen(real);
6470 snprintf((char *)p2, arg4, "%s", real);
6471 } else {
6472 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6473 }
6474 unlock_user(p2, arg3, ret);
6475 unlock_user(p, arg2, 0);
6476 }
6477 break;
6478 #endif
6479 #ifdef TARGET_NR_uselib
6480 case TARGET_NR_uselib:
6481 goto unimplemented;
6482 #endif
6483 #ifdef TARGET_NR_swapon
6484 case TARGET_NR_swapon:
6485 if (!(p = lock_user_string(arg1)))
6486 goto efault;
6487 ret = get_errno(swapon(p, arg2));
6488 unlock_user(p, arg1, 0);
6489 break;
6490 #endif
6491 case TARGET_NR_reboot:
6492 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6493 /* arg4 is only used with RESTART2; it must be ignored in all other cases */
6494 p = lock_user_string(arg4);
6495 if (!p) {
6496 goto efault;
6497 }
6498 ret = get_errno(reboot(arg1, arg2, arg3, p));
6499 unlock_user(p, arg4, 0);
6500 } else {
6501 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6502 }
6503 break;
6504 #ifdef TARGET_NR_readdir
6505 case TARGET_NR_readdir:
6506 goto unimplemented;
6507 #endif
6508 #ifdef TARGET_NR_mmap
6509 case TARGET_NR_mmap:
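/* (On the targets listed below the old mmap ABI passes a single pointer to a
 * block of six abi_ulong arguments, which is unpacked here; other targets
 * pass the six mmap arguments directly in registers.) */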
6510 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6511 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6512 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6513 || defined(TARGET_S390X)
6514 {
6515 abi_ulong *v;
6516 abi_ulong v1, v2, v3, v4, v5, v6;
6517 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6518 goto efault;
6519 v1 = tswapal(v[0]);
6520 v2 = tswapal(v[1]);
6521 v3 = tswapal(v[2]);
6522 v4 = tswapal(v[3]);
6523 v5 = tswapal(v[4]);
6524 v6 = tswapal(v[5]);
6525 unlock_user(v, arg1, 0);
6526 ret = get_errno(target_mmap(v1, v2, v3,
6527 target_to_host_bitmask(v4, mmap_flags_tbl),
6528 v5, v6));
6529 }
6530 #else
6531 ret = get_errno(target_mmap(arg1, arg2, arg3,
6532 target_to_host_bitmask(arg4, mmap_flags_tbl),
6533 arg5,
6534 arg6));
6535 #endif
6536 break;
6537 #endif
6538 #ifdef TARGET_NR_mmap2
6539 case TARGET_NR_mmap2:
6540 #ifndef MMAP_SHIFT
6541 #define MMAP_SHIFT 12
6542 #endif
6543 ret = get_errno(target_mmap(arg1, arg2, arg3,
6544 target_to_host_bitmask(arg4, mmap_flags_tbl),
6545 arg5,
6546 arg6 << MMAP_SHIFT));
6547 break;
6548 #endif
6549 case TARGET_NR_munmap:
6550 ret = get_errno(target_munmap(arg1, arg2));
6551 break;
6552 case TARGET_NR_mprotect:
6553 {
6554 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6555 /* Special hack to detect libc making the stack executable. */
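/* (If PROT_GROWSDOWN is requested for an address inside the guest stack,
 * the flag is dropped and the start of the range is pulled down to
 * stack_limit, so the protection change covers the stack below the
 * requested address, as the flag would ask the kernel to do.) */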
6556 if ((arg3 & PROT_GROWSDOWN)
6557 && arg1 >= ts->info->stack_limit
6558 && arg1 <= ts->info->start_stack) {
6559 arg3 &= ~PROT_GROWSDOWN;
6560 arg2 = arg2 + arg1 - ts->info->stack_limit;
6561 arg1 = ts->info->stack_limit;
6562 }
6563 }
6564 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6565 break;
6566 #ifdef TARGET_NR_mremap
6567 case TARGET_NR_mremap:
6568 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6569 break;
6570 #endif
6571 /* ??? msync/mlock/munlock are broken for softmmu. */
6572 #ifdef TARGET_NR_msync
6573 case TARGET_NR_msync:
6574 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6575 break;
6576 #endif
6577 #ifdef TARGET_NR_mlock
6578 case TARGET_NR_mlock:
6579 ret = get_errno(mlock(g2h(arg1), arg2));
6580 break;
6581 #endif
6582 #ifdef TARGET_NR_munlock
6583 case TARGET_NR_munlock:
6584 ret = get_errno(munlock(g2h(arg1), arg2));
6585 break;
6586 #endif
6587 #ifdef TARGET_NR_mlockall
6588 case TARGET_NR_mlockall:
6589 ret = get_errno(mlockall(arg1));
6590 break;
6591 #endif
6592 #ifdef TARGET_NR_munlockall
6593 case TARGET_NR_munlockall:
6594 ret = get_errno(munlockall());
6595 break;
6596 #endif
6597 case TARGET_NR_truncate:
6598 if (!(p = lock_user_string(arg1)))
6599 goto efault;
6600 ret = get_errno(truncate(p, arg2));
6601 unlock_user(p, arg1, 0);
6602 break;
6603 case TARGET_NR_ftruncate:
6604 ret = get_errno(ftruncate(arg1, arg2));
6605 break;
6606 case TARGET_NR_fchmod:
6607 ret = get_errno(fchmod(arg1, arg2));
6608 break;
6609 #if defined(TARGET_NR_fchmodat)
6610 case TARGET_NR_fchmodat:
6611 if (!(p = lock_user_string(arg2)))
6612 goto efault;
6613 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6614 unlock_user(p, arg2, 0);
6615 break;
6616 #endif
6617 case TARGET_NR_getpriority:
6618 /* Note that negative values are valid for getpriority, so we must
6619 differentiate based on errno settings. */
6620 errno = 0;
6621 ret = getpriority(arg1, arg2);
6622 if (ret == -1 && errno != 0) {
6623 ret = -host_to_target_errno(errno);
6624 break;
6625 }
6626 #ifdef TARGET_ALPHA
6627 /* Return value is the unbiased priority. Signal no error. */
6628 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6629 #else
6630 /* Return value is a biased priority to avoid negative numbers. */
6631 ret = 20 - ret;
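/* (e.g. a host nice value of -20 becomes 40 and 19 becomes 1; the guest's
 * C library is expected to undo this bias.) */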
6632 #endif
6633 break;
6634 case TARGET_NR_setpriority:
6635 ret = get_errno(setpriority(arg1, arg2, arg3));
6636 break;
6637 #ifdef TARGET_NR_profil
6638 case TARGET_NR_profil:
6639 goto unimplemented;
6640 #endif
6641 case TARGET_NR_statfs:
6642 if (!(p = lock_user_string(arg1)))
6643 goto efault;
6644 ret = get_errno(statfs(path(p), &stfs));
6645 unlock_user(p, arg1, 0);
6646 convert_statfs:
6647 if (!is_error(ret)) {
6648 struct target_statfs *target_stfs;
6649
6650 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6651 goto efault;
6652 __put_user(stfs.f_type, &target_stfs->f_type);
6653 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6654 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6655 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6656 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6657 __put_user(stfs.f_files, &target_stfs->f_files);
6658 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6659 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6660 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6661 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6662 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6663 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6664 unlock_user_struct(target_stfs, arg2, 1);
6665 }
6666 break;
6667 case TARGET_NR_fstatfs:
6668 ret = get_errno(fstatfs(arg1, &stfs));
6669 goto convert_statfs;
6670 #ifdef TARGET_NR_statfs64
6671 case TARGET_NR_statfs64:
6672 if (!(p = lock_user_string(arg1)))
6673 goto efault;
6674 ret = get_errno(statfs(path(p), &stfs));
6675 unlock_user(p, arg1, 0);
6676 convert_statfs64:
6677 if (!is_error(ret)) {
6678 struct target_statfs64 *target_stfs;
6679
6680 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6681 goto efault;
6682 __put_user(stfs.f_type, &target_stfs->f_type);
6683 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6684 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6685 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6686 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6687 __put_user(stfs.f_files, &target_stfs->f_files);
6688 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6689 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6690 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6691 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6692 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6693 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6694 unlock_user_struct(target_stfs, arg3, 1);
6695 }
6696 break;
6697 case TARGET_NR_fstatfs64:
6698 ret = get_errno(fstatfs(arg1, &stfs));
6699 goto convert_statfs64;
6700 #endif
6701 #ifdef TARGET_NR_ioperm
6702 case TARGET_NR_ioperm:
6703 goto unimplemented;
6704 #endif
6705 #ifdef TARGET_NR_socketcall
6706 case TARGET_NR_socketcall:
6707 ret = do_socketcall(arg1, arg2);
6708 break;
6709 #endif
6710 #ifdef TARGET_NR_accept
6711 case TARGET_NR_accept:
6712 ret = do_accept4(arg1, arg2, arg3, 0);
6713 break;
6714 #endif
6715 #ifdef TARGET_NR_accept4
6716 case TARGET_NR_accept4:
6717 #ifdef CONFIG_ACCEPT4
6718 ret = do_accept4(arg1, arg2, arg3, arg4);
6719 #else
6720 goto unimplemented;
6721 #endif
6722 break;
6723 #endif
6724 #ifdef TARGET_NR_bind
6725 case TARGET_NR_bind:
6726 ret = do_bind(arg1, arg2, arg3);
6727 break;
6728 #endif
6729 #ifdef TARGET_NR_connect
6730 case TARGET_NR_connect:
6731 ret = do_connect(arg1, arg2, arg3);
6732 break;
6733 #endif
6734 #ifdef TARGET_NR_getpeername
6735 case TARGET_NR_getpeername:
6736 ret = do_getpeername(arg1, arg2, arg3);
6737 break;
6738 #endif
6739 #ifdef TARGET_NR_getsockname
6740 case TARGET_NR_getsockname:
6741 ret = do_getsockname(arg1, arg2, arg3);
6742 break;
6743 #endif
6744 #ifdef TARGET_NR_getsockopt
6745 case TARGET_NR_getsockopt:
6746 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6747 break;
6748 #endif
6749 #ifdef TARGET_NR_listen
6750 case TARGET_NR_listen:
6751 ret = get_errno(listen(arg1, arg2));
6752 break;
6753 #endif
6754 #ifdef TARGET_NR_recv
6755 case TARGET_NR_recv:
6756 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6757 break;
6758 #endif
6759 #ifdef TARGET_NR_recvfrom
6760 case TARGET_NR_recvfrom:
6761 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6762 break;
6763 #endif
6764 #ifdef TARGET_NR_recvmsg
6765 case TARGET_NR_recvmsg:
6766 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6767 break;
6768 #endif
6769 #ifdef TARGET_NR_send
6770 case TARGET_NR_send:
6771 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6772 break;
6773 #endif
6774 #ifdef TARGET_NR_sendmsg
6775 case TARGET_NR_sendmsg:
6776 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6777 break;
6778 #endif
6779 #ifdef TARGET_NR_sendto
6780 case TARGET_NR_sendto:
6781 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6782 break;
6783 #endif
6784 #ifdef TARGET_NR_shutdown
6785 case TARGET_NR_shutdown:
6786 ret = get_errno(shutdown(arg1, arg2));
6787 break;
6788 #endif
6789 #ifdef TARGET_NR_socket
6790 case TARGET_NR_socket:
6791 ret = do_socket(arg1, arg2, arg3);
6792 break;
6793 #endif
6794 #ifdef TARGET_NR_socketpair
6795 case TARGET_NR_socketpair:
6796 ret = do_socketpair(arg1, arg2, arg3, arg4);
6797 break;
6798 #endif
6799 #ifdef TARGET_NR_setsockopt
6800 case TARGET_NR_setsockopt:
6801 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6802 break;
6803 #endif
6804
6805 case TARGET_NR_syslog:
6806 if (!(p = lock_user_string(arg2)))
6807 goto efault;
6808 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6809 unlock_user(p, arg2, 0);
6810 break;
6811
6812 case TARGET_NR_setitimer:
6813 {
6814 struct itimerval value, ovalue, *pvalue;
6815
6816 if (arg2) {
6817 pvalue = &value;
6818 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6819 || copy_from_user_timeval(&pvalue->it_value,
6820 arg2 + sizeof(struct target_timeval)))
6821 goto efault;
6822 } else {
6823 pvalue = NULL;
6824 }
6825 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6826 if (!is_error(ret) && arg3) {
6827 if (copy_to_user_timeval(arg3,
6828 &ovalue.it_interval)
6829 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6830 &ovalue.it_value))
6831 goto efault;
6832 }
6833 }
6834 break;
6835 case TARGET_NR_getitimer:
6836 {
6837 struct itimerval value;
6838
6839 ret = get_errno(getitimer(arg1, &value));
6840 if (!is_error(ret) && arg2) {
6841 if (copy_to_user_timeval(arg2,
6842 &value.it_interval)
6843 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6844 &value.it_value))
6845 goto efault;
6846 }
6847 }
6848 break;
6849 case TARGET_NR_stat:
6850 if (!(p = lock_user_string(arg1)))
6851 goto efault;
6852 ret = get_errno(stat(path(p), &st));
6853 unlock_user(p, arg1, 0);
6854 goto do_stat;
6855 case TARGET_NR_lstat:
6856 if (!(p = lock_user_string(arg1)))
6857 goto efault;
6858 ret = get_errno(lstat(path(p), &st));
6859 unlock_user(p, arg1, 0);
6860 goto do_stat;
6861 case TARGET_NR_fstat:
6862 {
6863 ret = get_errno(fstat(arg1, &st));
6864 do_stat:
6865 if (!is_error(ret)) {
6866 struct target_stat *target_st;
6867
6868 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6869 goto efault;
6870 memset(target_st, 0, sizeof(*target_st));
6871 __put_user(st.st_dev, &target_st->st_dev);
6872 __put_user(st.st_ino, &target_st->st_ino);
6873 __put_user(st.st_mode, &target_st->st_mode);
6874 __put_user(st.st_uid, &target_st->st_uid);
6875 __put_user(st.st_gid, &target_st->st_gid);
6876 __put_user(st.st_nlink, &target_st->st_nlink);
6877 __put_user(st.st_rdev, &target_st->st_rdev);
6878 __put_user(st.st_size, &target_st->st_size);
6879 __put_user(st.st_blksize, &target_st->st_blksize);
6880 __put_user(st.st_blocks, &target_st->st_blocks);
6881 __put_user(st.st_atime, &target_st->target_st_atime);
6882 __put_user(st.st_mtime, &target_st->target_st_mtime);
6883 __put_user(st.st_ctime, &target_st->target_st_ctime);
6884 unlock_user_struct(target_st, arg2, 1);
6885 }
6886 }
6887 break;
6888 #ifdef TARGET_NR_olduname
6889 case TARGET_NR_olduname:
6890 goto unimplemented;
6891 #endif
6892 #ifdef TARGET_NR_iopl
6893 case TARGET_NR_iopl:
6894 goto unimplemented;
6895 #endif
6896 case TARGET_NR_vhangup:
6897 ret = get_errno(vhangup());
6898 break;
6899 #ifdef TARGET_NR_idle
6900 case TARGET_NR_idle:
6901 goto unimplemented;
6902 #endif
6903 #ifdef TARGET_NR_syscall
6904 case TARGET_NR_syscall:
6905 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6906 arg6, arg7, arg8, 0);
6907 break;
6908 #endif
6909 case TARGET_NR_wait4:
6910 {
6911 int status;
6912 abi_long status_ptr = arg2;
6913 struct rusage rusage, *rusage_ptr;
6914 abi_ulong target_rusage = arg4;
6915 if (target_rusage)
6916 rusage_ptr = &rusage;
6917 else
6918 rusage_ptr = NULL;
6919 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6920 if (!is_error(ret)) {
6921 if (status_ptr && ret) {
6922 status = host_to_target_waitstatus(status);
6923 if (put_user_s32(status, status_ptr))
6924 goto efault;
6925 }
6926 if (target_rusage)
6927 host_to_target_rusage(target_rusage, &rusage);
6928 }
6929 }
6930 break;
6931 #ifdef TARGET_NR_swapoff
6932 case TARGET_NR_swapoff:
6933 if (!(p = lock_user_string(arg1)))
6934 goto efault;
6935 ret = get_errno(swapoff(p));
6936 unlock_user(p, arg1, 0);
6937 break;
6938 #endif
6939 case TARGET_NR_sysinfo:
6940 {
6941 struct target_sysinfo *target_value;
6942 struct sysinfo value;
6943 ret = get_errno(sysinfo(&value));
6944 if (!is_error(ret) && arg1)
6945 {
6946 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6947 goto efault;
6948 __put_user(value.uptime, &target_value->uptime);
6949 __put_user(value.loads[0], &target_value->loads[0]);
6950 __put_user(value.loads[1], &target_value->loads[1]);
6951 __put_user(value.loads[2], &target_value->loads[2]);
6952 __put_user(value.totalram, &target_value->totalram);
6953 __put_user(value.freeram, &target_value->freeram);
6954 __put_user(value.sharedram, &target_value->sharedram);
6955 __put_user(value.bufferram, &target_value->bufferram);
6956 __put_user(value.totalswap, &target_value->totalswap);
6957 __put_user(value.freeswap, &target_value->freeswap);
6958 __put_user(value.procs, &target_value->procs);
6959 __put_user(value.totalhigh, &target_value->totalhigh);
6960 __put_user(value.freehigh, &target_value->freehigh);
6961 __put_user(value.mem_unit, &target_value->mem_unit);
6962 unlock_user_struct(target_value, arg1, 1);
6963 }
6964 }
6965 break;
6966 #ifdef TARGET_NR_ipc
6967 case TARGET_NR_ipc:
6968 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6969 break;
6970 #endif
6971 #ifdef TARGET_NR_semget
6972 case TARGET_NR_semget:
6973 ret = get_errno(semget(arg1, arg2, arg3));
6974 break;
6975 #endif
6976 #ifdef TARGET_NR_semop
6977 case TARGET_NR_semop:
6978 ret = do_semop(arg1, arg2, arg3);
6979 break;
6980 #endif
6981 #ifdef TARGET_NR_semctl
6982 case TARGET_NR_semctl:
6983 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6984 break;
6985 #endif
6986 #ifdef TARGET_NR_msgctl
6987 case TARGET_NR_msgctl:
6988 ret = do_msgctl(arg1, arg2, arg3);
6989 break;
6990 #endif
6991 #ifdef TARGET_NR_msgget
6992 case TARGET_NR_msgget:
6993 ret = get_errno(msgget(arg1, arg2));
6994 break;
6995 #endif
6996 #ifdef TARGET_NR_msgrcv
6997 case TARGET_NR_msgrcv:
6998 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6999 break;
7000 #endif
7001 #ifdef TARGET_NR_msgsnd
7002 case TARGET_NR_msgsnd:
7003 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7004 break;
7005 #endif
7006 #ifdef TARGET_NR_shmget
7007 case TARGET_NR_shmget:
7008 ret = get_errno(shmget(arg1, arg2, arg3));
7009 break;
7010 #endif
7011 #ifdef TARGET_NR_shmctl
7012 case TARGET_NR_shmctl:
7013 ret = do_shmctl(arg1, arg2, arg3);
7014 break;
7015 #endif
7016 #ifdef TARGET_NR_shmat
7017 case TARGET_NR_shmat:
7018 ret = do_shmat(arg1, arg2, arg3);
7019 break;
7020 #endif
7021 #ifdef TARGET_NR_shmdt
7022 case TARGET_NR_shmdt:
7023 ret = do_shmdt(arg1);
7024 break;
7025 #endif
7026 case TARGET_NR_fsync:
7027 ret = get_errno(fsync(arg1));
7028 break;
7029 case TARGET_NR_clone:
7030 /* Linux manages to have three different orderings for its
7031 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7032 * match the kernel's CONFIG_CLONE_* settings.
7033 * Microblaze is further special in that it uses a sixth
7034 * implicit argument to clone for the TLS pointer.
7035 */
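/* (Reading the do_fork() calls below: the default order passes
 * arg1,arg2,arg3,arg5,arg4; BACKWARDS passes arg1..arg5 in order;
 * BACKWARDS2 swaps the first two, giving arg2,arg1,arg3,arg5,arg4;
 * Microblaze passes arg1,arg2,arg4,arg6,arg5, skipping arg3.) */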
7036 #if defined(TARGET_MICROBLAZE)
7037 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7038 #elif defined(TARGET_CLONE_BACKWARDS)
7039 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7040 #elif defined(TARGET_CLONE_BACKWARDS2)
7041 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7042 #else
7043 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7044 #endif
7045 break;
7046 #ifdef __NR_exit_group
7047 /* new thread calls */
7048 case TARGET_NR_exit_group:
7049 #ifdef TARGET_GPROF
7050 _mcleanup();
7051 #endif
7052 gdb_exit(cpu_env, arg1);
7053 ret = get_errno(exit_group(arg1));
7054 break;
7055 #endif
7056 case TARGET_NR_setdomainname:
7057 if (!(p = lock_user_string(arg1)))
7058 goto efault;
7059 ret = get_errno(setdomainname(p, arg2));
7060 unlock_user(p, arg1, 0);
7061 break;
7062 case TARGET_NR_uname:
7063 /* no need to transcode because we use the linux syscall */
7064 {
7065 struct new_utsname * buf;
7066
7067 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7068 goto efault;
7069 ret = get_errno(sys_uname(buf));
7070 if (!is_error(ret)) {
7071 /* Overwrite the native machine name with whatever is being
7072 emulated. */
7073 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7074 /* Allow the user to override the reported release. */
7075 if (qemu_uname_release && *qemu_uname_release)
7076 strcpy (buf->release, qemu_uname_release);
7077 }
7078 unlock_user_struct(buf, arg1, 1);
7079 }
7080 break;
7081 #ifdef TARGET_I386
7082 case TARGET_NR_modify_ldt:
7083 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7084 break;
7085 #if !defined(TARGET_X86_64)
7086 case TARGET_NR_vm86old:
7087 goto unimplemented;
7088 case TARGET_NR_vm86:
7089 ret = do_vm86(cpu_env, arg1, arg2);
7090 break;
7091 #endif
7092 #endif
7093 case TARGET_NR_adjtimex:
7094 goto unimplemented;
7095 #ifdef TARGET_NR_create_module
7096 case TARGET_NR_create_module:
7097 #endif
7098 case TARGET_NR_init_module:
7099 case TARGET_NR_delete_module:
7100 #ifdef TARGET_NR_get_kernel_syms
7101 case TARGET_NR_get_kernel_syms:
7102 #endif
7103 goto unimplemented;
7104 case TARGET_NR_quotactl:
7105 goto unimplemented;
7106 case TARGET_NR_getpgid:
7107 ret = get_errno(getpgid(arg1));
7108 break;
7109 case TARGET_NR_fchdir:
7110 ret = get_errno(fchdir(arg1));
7111 break;
7112 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7113 case TARGET_NR_bdflush:
7114 goto unimplemented;
7115 #endif
7116 #ifdef TARGET_NR_sysfs
7117 case TARGET_NR_sysfs:
7118 goto unimplemented;
7119 #endif
7120 case TARGET_NR_personality:
7121 ret = get_errno(personality(arg1));
7122 break;
7123 #ifdef TARGET_NR_afs_syscall
7124 case TARGET_NR_afs_syscall:
7125 goto unimplemented;
7126 #endif
7127 #ifdef TARGET_NR__llseek /* Not on alpha */
7128 case TARGET_NR__llseek:
7129 {
7130 int64_t res;
7131 #if !defined(__NR_llseek)
7132 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7133 if (res == -1) {
7134 ret = get_errno(res);
7135 } else {
7136 ret = 0;
7137 }
7138 #else
7139 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7140 #endif
7141 if ((ret == 0) && put_user_s64(res, arg4)) {
7142 goto efault;
7143 }
7144 }
7145 break;
7146 #endif
7147 case TARGET_NR_getdents:
7148 #ifdef __NR_getdents
7149 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7150 {
7151 struct target_dirent *target_dirp;
7152 struct linux_dirent *dirp;
7153 abi_long count = arg3;
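/* (On a 64-bit host the host struct linux_dirent has wider d_ino/d_off
 * fields than the 32-bit target_dirent, so the host results are read into
 * a separate buffer here and repacked into the guest buffer record by
 * record below.) */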
7154
7155 dirp = malloc(count);
7156 if (!dirp) {
7157 ret = -TARGET_ENOMEM;
7158 goto fail;
7159 }
7160
7161 ret = get_errno(sys_getdents(arg1, dirp, count));
7162 if (!is_error(ret)) {
7163 struct linux_dirent *de;
7164 struct target_dirent *tde;
7165 int len = ret;
7166 int reclen, treclen;
7167 int count1, tnamelen;
7168
7169 count1 = 0;
7170 de = dirp;
7171 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7172 goto efault;
7173 tde = target_dirp;
7174 while (len > 0) {
7175 reclen = de->d_reclen;
7176 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7177 assert(tnamelen >= 0);
7178 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7179 assert(count1 + treclen <= count);
7180 tde->d_reclen = tswap16(treclen);
7181 tde->d_ino = tswapal(de->d_ino);
7182 tde->d_off = tswapal(de->d_off);
7183 memcpy(tde->d_name, de->d_name, tnamelen);
7184 de = (struct linux_dirent *)((char *)de + reclen);
7185 len -= reclen;
7186 tde = (struct target_dirent *)((char *)tde + treclen);
7187 count1 += treclen;
7188 }
7189 ret = count1;
7190 unlock_user(target_dirp, arg2, ret);
7191 }
7192 free(dirp);
7193 }
7194 #else
7195 {
7196 struct linux_dirent *dirp;
7197 abi_long count = arg3;
7198
7199 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7200 goto efault;
7201 ret = get_errno(sys_getdents(arg1, dirp, count));
7202 if (!is_error(ret)) {
7203 struct linux_dirent *de;
7204 int len = ret;
7205 int reclen;
7206 de = dirp;
7207 while (len > 0) {
7208 reclen = de->d_reclen;
7209 if (reclen > len)
7210 break;
7211 de->d_reclen = tswap16(reclen);
7212 tswapls(&de->d_ino);
7213 tswapls(&de->d_off);
7214 de = (struct linux_dirent *)((char *)de + reclen);
7215 len -= reclen;
7216 }
7217 }
7218 unlock_user(dirp, arg2, ret);
7219 }
7220 #endif
7221 #else
7222 /* Implement getdents in terms of getdents64 */
7223 {
7224 struct linux_dirent64 *dirp;
7225 abi_long count = arg3;
7226
7227 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7228 if (!dirp) {
7229 goto efault;
7230 }
7231 ret = get_errno(sys_getdents64(arg1, dirp, count));
7232 if (!is_error(ret)) {
7233 /* Convert the dirent64 structs to target dirent. We do this
7234 * in-place, since we can guarantee that a target_dirent is no
7235 * larger than a dirent64; however this means we have to be
7236 * careful to read everything before writing in the new format.
7237 */
7238 struct linux_dirent64 *de;
7239 struct target_dirent *tde;
7240 int len = ret;
7241 int tlen = 0;
7242
7243 de = dirp;
7244 tde = (struct target_dirent *)dirp;
7245 while (len > 0) {
7246 int namelen, treclen;
7247 int reclen = de->d_reclen;
7248 uint64_t ino = de->d_ino;
7249 int64_t off = de->d_off;
7250 uint8_t type = de->d_type;
7251
7252 namelen = strlen(de->d_name);
7253 treclen = offsetof(struct target_dirent, d_name)
7254 + namelen + 2;
7255 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
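/* (The namelen + 2 above leaves room for the terminating NUL of d_name
 * plus the d_type byte stored in the record's final byte, see below.) */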
7256
7257 memmove(tde->d_name, de->d_name, namelen + 1);
7258 tde->d_ino = tswapal(ino);
7259 tde->d_off = tswapal(off);
7260 tde->d_reclen = tswap16(treclen);
7261 /* The target_dirent type is in what was formerly a padding
7262 * byte at the end of the structure:
7263 */
7264 *(((char *)tde) + treclen - 1) = type;
7265
7266 de = (struct linux_dirent64 *)((char *)de + reclen);
7267 tde = (struct target_dirent *)((char *)tde + treclen);
7268 len -= reclen;
7269 tlen += treclen;
7270 }
7271 ret = tlen;
7272 }
7273 unlock_user(dirp, arg2, ret);
7274 }
7275 #endif
7276 break;
7277 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7278 case TARGET_NR_getdents64:
7279 {
7280 struct linux_dirent64 *dirp;
7281 abi_long count = arg3;
7282 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7283 goto efault;
7284 ret = get_errno(sys_getdents64(arg1, dirp, count));
7285 if (!is_error(ret)) {
7286 struct linux_dirent64 *de;
7287 int len = ret;
7288 int reclen;
7289 de = dirp;
7290 while (len > 0) {
7291 reclen = de->d_reclen;
7292 if (reclen > len)
7293 break;
7294 de->d_reclen = tswap16(reclen);
7295 tswap64s((uint64_t *)&de->d_ino);
7296 tswap64s((uint64_t *)&de->d_off);
7297 de = (struct linux_dirent64 *)((char *)de + reclen);
7298 len -= reclen;
7299 }
7300 }
7301 unlock_user(dirp, arg2, ret);
7302 }
7303 break;
7304 #endif /* TARGET_NR_getdents64 */
7305 #if defined(TARGET_NR__newselect)
7306 case TARGET_NR__newselect:
7307 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7308 break;
7309 #endif
7310 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7311 # ifdef TARGET_NR_poll
7312 case TARGET_NR_poll:
7313 # endif
7314 # ifdef TARGET_NR_ppoll
7315 case TARGET_NR_ppoll:
7316 # endif
7317 {
7318 struct target_pollfd *target_pfd;
7319 unsigned int nfds = arg2;
7320 int timeout = arg3;
7321 struct pollfd *pfd;
7322 unsigned int i;
7323
7324 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7325 if (!target_pfd)
7326 goto efault;
7327
7328 pfd = alloca(sizeof(struct pollfd) * nfds);
7329 for(i = 0; i < nfds; i++) {
7330 pfd[i].fd = tswap32(target_pfd[i].fd);
7331 pfd[i].events = tswap16(target_pfd[i].events);
7332 }
7333
7334 # ifdef TARGET_NR_ppoll
7335 if (num == TARGET_NR_ppoll) {
7336 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7337 target_sigset_t *target_set;
7338 sigset_t _set, *set = &_set;
7339
7340 if (arg3) {
7341 if (target_to_host_timespec(timeout_ts, arg3)) {
7342 unlock_user(target_pfd, arg1, 0);
7343 goto efault;
7344 }
7345 } else {
7346 timeout_ts = NULL;
7347 }
7348
7349 if (arg4) {
7350 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7351 if (!target_set) {
7352 unlock_user(target_pfd, arg1, 0);
7353 goto efault;
7354 }
7355 target_to_host_sigset(set, target_set);
7356 } else {
7357 set = NULL;
7358 }
7359
7360 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
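/* (_NSIG / 8 is the size in bytes of the host kernel sigset expected by
 * the raw ppoll syscall, matching the pselect6 handling above.) */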
7361
7362 if (!is_error(ret) && arg3) {
7363 host_to_target_timespec(arg3, timeout_ts);
7364 }
7365 if (arg4) {
7366 unlock_user(target_set, arg4, 0);
7367 }
7368 } else
7369 # endif
7370 ret = get_errno(poll(pfd, nfds, timeout));
7371
7372 if (!is_error(ret)) {
7373 for(i = 0; i < nfds; i++) {
7374 target_pfd[i].revents = tswap16(pfd[i].revents);
7375 }
7376 }
7377 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7378 }
7379 break;
7380 #endif
7381 case TARGET_NR_flock:
7382 /* NOTE: the flock constants seem to be the same for every
7383 Linux platform */
7384 ret = get_errno(flock(arg1, arg2));
7385 break;
7386 case TARGET_NR_readv:
7387 {
7388 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7389 if (vec != NULL) {
7390 ret = get_errno(readv(arg1, vec, arg3));
7391 unlock_iovec(vec, arg2, arg3, 1);
7392 } else {
7393 ret = -host_to_target_errno(errno);
7394 }
7395 }
7396 break;
7397 case TARGET_NR_writev:
7398 {
7399 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7400 if (vec != NULL) {
7401 ret = get_errno(writev(arg1, vec, arg3));
7402 unlock_iovec(vec, arg2, arg3, 0);
7403 } else {
7404 ret = -host_to_target_errno(errno);
7405 }
7406 }
7407 break;
7408 case TARGET_NR_getsid:
7409 ret = get_errno(getsid(arg1));
7410 break;
7411 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7412 case TARGET_NR_fdatasync:
7413 ret = get_errno(fdatasync(arg1));
7414 break;
7415 #endif
7416 case TARGET_NR__sysctl:
7417 /* We don't implement this, but ENOTDIR is always a safe
7418 return value. */
7419 ret = -TARGET_ENOTDIR;
7420 break;
7421 case TARGET_NR_sched_getaffinity:
7422 {
7423 unsigned int mask_size;
7424 unsigned long *mask;
7425
7426 /*
7427 * sched_getaffinity needs multiples of ulong, so we need to take
7428 * care of mismatches between target ulong and host ulong sizes.
7429 */
7430 if (arg2 & (sizeof(abi_ulong) - 1)) {
7431 ret = -TARGET_EINVAL;
7432 break;
7433 }
7434 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
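/* (This rounds arg2 up to a multiple of sizeof(unsigned long); e.g. a
 * 4-byte guest mask becomes an 8-byte mask_size on a 64-bit host.) */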
7435
7436 mask = alloca(mask_size);
7437 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7438
7439 if (!is_error(ret)) {
7440 if (copy_to_user(arg3, mask, ret)) {
7441 goto efault;
7442 }
7443 }
7444 }
7445 break;
7446 case TARGET_NR_sched_setaffinity:
7447 {
7448 unsigned int mask_size;
7449 unsigned long *mask;
7450
7451 /*
7452 * sched_setaffinity needs multiples of ulong, so we need to take
7453 * care of mismatches between target ulong and host ulong sizes.
7454 */
7455 if (arg2 & (sizeof(abi_ulong) - 1)) {
7456 ret = -TARGET_EINVAL;
7457 break;
7458 }
7459 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7460
7461 mask = alloca(mask_size);
7462 if (!(p = lock_user(VERIFY_READ, arg3, arg2, 1))) {
7463 goto efault;
7464 }
7465 memcpy(mask, p, arg2);
7466 unlock_user(p, arg3, 0);
7467
7468 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7469 }
7470 break;
7471 case TARGET_NR_sched_setparam:
7472 {
7473 struct sched_param *target_schp;
7474 struct sched_param schp;
7475
7476 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7477 goto efault;
7478 schp.sched_priority = tswap32(target_schp->sched_priority);
7479 unlock_user_struct(target_schp, arg2, 0);
7480 ret = get_errno(sched_setparam(arg1, &schp));
7481 }
7482 break;
7483 case TARGET_NR_sched_getparam:
7484 {
7485 struct sched_param *target_schp;
7486 struct sched_param schp;
7487 ret = get_errno(sched_getparam(arg1, &schp));
7488 if (!is_error(ret)) {
7489 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7490 goto efault;
7491 target_schp->sched_priority = tswap32(schp.sched_priority);
7492 unlock_user_struct(target_schp, arg2, 1);
7493 }
7494 }
7495 break;
7496 case TARGET_NR_sched_setscheduler:
7497 {
7498 struct sched_param *target_schp;
7499 struct sched_param schp;
7500 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7501 goto efault;
7502 schp.sched_priority = tswap32(target_schp->sched_priority);
7503 unlock_user_struct(target_schp, arg3, 0);
7504 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7505 }
7506 break;
7507 case TARGET_NR_sched_getscheduler:
7508 ret = get_errno(sched_getscheduler(arg1));
7509 break;
7510 case TARGET_NR_sched_yield:
7511 ret = get_errno(sched_yield());
7512 break;
7513 case TARGET_NR_sched_get_priority_max:
7514 ret = get_errno(sched_get_priority_max(arg1));
7515 break;
7516 case TARGET_NR_sched_get_priority_min:
7517 ret = get_errno(sched_get_priority_min(arg1));
7518 break;
7519 case TARGET_NR_sched_rr_get_interval:
7520 {
7521 struct timespec ts;
7522 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7523 if (!is_error(ret)) {
7524 host_to_target_timespec(arg2, &ts);
7525 }
7526 }
7527 break;
7528 case TARGET_NR_nanosleep:
7529 {
7530 struct timespec req, rem;
7531 target_to_host_timespec(&req, arg1);
7532 ret = get_errno(nanosleep(&req, &rem));
7533 if (is_error(ret) && arg2) {
7534 host_to_target_timespec(arg2, &rem);
7535 }
7536 }
7537 break;
7538 #ifdef TARGET_NR_query_module
7539 case TARGET_NR_query_module:
7540 goto unimplemented;
7541 #endif
7542 #ifdef TARGET_NR_nfsservctl
7543 case TARGET_NR_nfsservctl:
7544 goto unimplemented;
7545 #endif
7546 case TARGET_NR_prctl:
7547 switch (arg1) {
7548 case PR_GET_PDEATHSIG:
7549 {
7550 int deathsig;
7551 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7552 if (!is_error(ret) && arg2
7553 && put_user_ual(deathsig, arg2)) {
7554 goto efault;
7555 }
7556 break;
7557 }
7558 #ifdef PR_GET_NAME
7559 case PR_GET_NAME:
7560 {
7561 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7562 if (!name) {
7563 goto efault;
7564 }
7565 ret = get_errno(prctl(arg1, (unsigned long)name,
7566 arg3, arg4, arg5));
7567 unlock_user(name, arg2, 16);
7568 break;
7569 }
7570 case PR_SET_NAME:
7571 {
7572 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7573 if (!name) {
7574 goto efault;
7575 }
7576 ret = get_errno(prctl(arg1, (unsigned long)name,
7577 arg3, arg4, arg5));
7578 unlock_user(name, arg2, 0);
7579 break;
7580 }
7581 #endif
7582 default:
7583 /* Most prctl options have no pointer arguments */
7584 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7585 break;
7586 }
7587 break;
7588 #ifdef TARGET_NR_arch_prctl
7589 case TARGET_NR_arch_prctl:
7590 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7591 ret = do_arch_prctl(cpu_env, arg1, arg2);
7592 break;
7593 #else
7594 goto unimplemented;
7595 #endif
7596 #endif
7597 #ifdef TARGET_NR_pread64
7598 case TARGET_NR_pread64:
7599 if (regpairs_aligned(cpu_env)) {
7600 arg4 = arg5;
7601 arg5 = arg6;
7602 }
7603 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7604 goto efault;
7605 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7606 unlock_user(p, arg2, ret);
7607 break;
7608 case TARGET_NR_pwrite64:
7609 if (regpairs_aligned(cpu_env)) {
7610 arg4 = arg5;
7611 arg5 = arg6;
7612 }
7613 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7614 goto efault;
7615 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7616 unlock_user(p, arg2, 0);
7617 break;
7618 #endif
7619 case TARGET_NR_getcwd:
7620 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7621 goto efault;
7622 ret = get_errno(sys_getcwd1(p, arg2));
7623 unlock_user(p, arg1, ret);
7624 break;
7625 case TARGET_NR_capget:
7626 goto unimplemented;
7627 case TARGET_NR_capset:
7628 goto unimplemented;
7629 case TARGET_NR_sigaltstack:
7630 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7631 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7632 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7633 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7634 break;
7635 #else
7636 goto unimplemented;
7637 #endif
7638
7639 #ifdef CONFIG_SENDFILE
7640 case TARGET_NR_sendfile:
7641 {
7642 off_t *offp = NULL;
7643 off_t off;
7644 if (arg3) {
7645 ret = get_user_sal(off, arg3);
7646 if (is_error(ret)) {
7647 break;
7648 }
7649 offp = &off;
7650 }
7651 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7652 if (!is_error(ret) && arg3) {
7653 abi_long ret2 = put_user_sal(off, arg3);
7654 if (is_error(ret2)) {
7655 ret = ret2;
7656 }
7657 }
7658 break;
7659 }
7660 #ifdef TARGET_NR_sendfile64
7661 case TARGET_NR_sendfile64:
7662 {
7663 off_t *offp = NULL;
7664 off_t off;
7665 if (arg3) {
7666 ret = get_user_s64(off, arg3);
7667 if (is_error(ret)) {
7668 break;
7669 }
7670 offp = &off;
7671 }
7672 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7673 if (!is_error(ret) && arg3) {
7674 abi_long ret2 = put_user_s64(off, arg3);
7675 if (is_error(ret2)) {
7676 ret = ret2;
7677 }
7678 }
7679 break;
7680 }
7681 #endif
7682 #else
7683 case TARGET_NR_sendfile:
7684 #ifdef TARGET_NR_sendfile64
7685 case TARGET_NR_sendfile64:
7686 #endif
7687 goto unimplemented;
7688 #endif
7689
7690 #ifdef TARGET_NR_getpmsg
7691 case TARGET_NR_getpmsg:
7692 goto unimplemented;
7693 #endif
7694 #ifdef TARGET_NR_putpmsg
7695 case TARGET_NR_putpmsg:
7696 goto unimplemented;
7697 #endif
7698 #ifdef TARGET_NR_vfork
7699 case TARGET_NR_vfork:
7700 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7701 0, 0, 0, 0));
7702 break;
7703 #endif
7704 #ifdef TARGET_NR_ugetrlimit
7705 case TARGET_NR_ugetrlimit:
7706 {
7707 struct rlimit rlim;
7708 int resource = target_to_host_resource(arg1);
7709 ret = get_errno(getrlimit(resource, &rlim));
7710 if (!is_error(ret)) {
7711 struct target_rlimit *target_rlim;
7712 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7713 goto efault;
7714 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7715 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7716 unlock_user_struct(target_rlim, arg2, 1);
7717 }
7718 break;
7719 }
7720 #endif
7721 #ifdef TARGET_NR_truncate64
7722 case TARGET_NR_truncate64:
7723 if (!(p = lock_user_string(arg1)))
7724 goto efault;
7725 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7726 unlock_user(p, arg1, 0);
7727 break;
7728 #endif
7729 #ifdef TARGET_NR_ftruncate64
7730 case TARGET_NR_ftruncate64:
7731 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7732 break;
7733 #endif
7734 #ifdef TARGET_NR_stat64
7735 case TARGET_NR_stat64:
7736 if (!(p = lock_user_string(arg1)))
7737 goto efault;
7738 ret = get_errno(stat(path(p), &st));
7739 unlock_user(p, arg1, 0);
7740 if (!is_error(ret))
7741 ret = host_to_target_stat64(cpu_env, arg2, &st);
7742 break;
7743 #endif
7744 #ifdef TARGET_NR_lstat64
7745 case TARGET_NR_lstat64:
7746 if (!(p = lock_user_string(arg1)))
7747 goto efault;
7748 ret = get_errno(lstat(path(p), &st));
7749 unlock_user(p, arg1, 0);
7750 if (!is_error(ret))
7751 ret = host_to_target_stat64(cpu_env, arg2, &st);
7752 break;
7753 #endif
7754 #ifdef TARGET_NR_fstat64
7755 case TARGET_NR_fstat64:
7756 ret = get_errno(fstat(arg1, &st));
7757 if (!is_error(ret))
7758 ret = host_to_target_stat64(cpu_env, arg2, &st);
7759 break;
7760 #endif
7761 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7762 #ifdef TARGET_NR_fstatat64
7763 case TARGET_NR_fstatat64:
7764 #endif
7765 #ifdef TARGET_NR_newfstatat
7766 case TARGET_NR_newfstatat:
7767 #endif
7768 if (!(p = lock_user_string(arg2)))
7769 goto efault;
7770 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7771 if (!is_error(ret))
7772 ret = host_to_target_stat64(cpu_env, arg3, &st);
7773 break;
7774 #endif
7775 case TARGET_NR_lchown:
7776 if (!(p = lock_user_string(arg1)))
7777 goto efault;
7778 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7779 unlock_user(p, arg1, 0);
7780 break;
7781 #ifdef TARGET_NR_getuid
7782 case TARGET_NR_getuid:
7783 ret = get_errno(high2lowuid(getuid()));
7784 break;
7785 #endif
7786 #ifdef TARGET_NR_getgid
7787 case TARGET_NR_getgid:
7788 ret = get_errno(high2lowgid(getgid()));
7789 break;
7790 #endif
7791 #ifdef TARGET_NR_geteuid
7792 case TARGET_NR_geteuid:
7793 ret = get_errno(high2lowuid(geteuid()));
7794 break;
7795 #endif
7796 #ifdef TARGET_NR_getegid
7797 case TARGET_NR_getegid:
7798 ret = get_errno(high2lowgid(getegid()));
7799 break;
7800 #endif
7801 case TARGET_NR_setreuid:
7802 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7803 break;
7804 case TARGET_NR_setregid:
7805 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7806 break;
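/* getgroups/setgroups use target_id and tswapid (defined earlier in this
   file) so the same code covers both the 16-bit and 32-bit uid/gid ABIs;
   the explicit 32-bit variants are handled further down as
   TARGET_NR_getgroups32/TARGET_NR_setgroups32. */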
7807 case TARGET_NR_getgroups:
7808 {
7809 int gidsetsize = arg1;
7810 target_id *target_grouplist;
7811 gid_t *grouplist;
7812 int i;
7813
7814 grouplist = alloca(gidsetsize * sizeof(gid_t));
7815 ret = get_errno(getgroups(gidsetsize, grouplist));
7816 if (gidsetsize == 0)
7817 break;
7818 if (!is_error(ret)) {
7819 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7820 if (!target_grouplist)
7821 goto efault;
7822 for (i = 0; i < ret; i++)
7823 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7824 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7825 }
7826 }
7827 break;
7828 case TARGET_NR_setgroups:
7829 {
7830 int gidsetsize = arg1;
7831 target_id *target_grouplist;
7832 gid_t *grouplist = NULL;
7833 int i;
7834 if (gidsetsize) {
7835 grouplist = alloca(gidsetsize * sizeof(gid_t));
7836 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7837 if (!target_grouplist) {
7838 ret = -TARGET_EFAULT;
7839 goto fail;
7840 }
7841 for (i = 0; i < gidsetsize; i++) {
7842 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7843 }
7844 unlock_user(target_grouplist, arg2, 0);
7845 }
7846 ret = get_errno(setgroups(gidsetsize, grouplist));
7847 }
7848 break;
7849 case TARGET_NR_fchown:
7850 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7851 break;
7852 #if defined(TARGET_NR_fchownat)
7853 case TARGET_NR_fchownat:
7854 if (!(p = lock_user_string(arg2)))
7855 goto efault;
7856 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7857 low2highgid(arg4), arg5));
7858 unlock_user(p, arg2, 0);
7859 break;
7860 #endif
7861 #ifdef TARGET_NR_setresuid
7862 case TARGET_NR_setresuid:
7863 ret = get_errno(setresuid(low2highuid(arg1),
7864 low2highuid(arg2),
7865 low2highuid(arg3)));
7866 break;
7867 #endif
7868 #ifdef TARGET_NR_getresuid
7869 case TARGET_NR_getresuid:
7870 {
7871 uid_t ruid, euid, suid;
7872 ret = get_errno(getresuid(&ruid, &euid, &suid));
7873 if (!is_error(ret)) {
7874 if (put_user_u16(high2lowuid(ruid), arg1)
7875 || put_user_u16(high2lowuid(euid), arg2)
7876 || put_user_u16(high2lowuid(suid), arg3))
7877 goto efault;
7878 }
7879 }
7880 break;
7881 #endif
7882 #ifdef TARGET_NR_setresgid

7883 case TARGET_NR_setresgid:
7884 ret = get_errno(setresgid(low2highgid(arg1),
7885 low2highgid(arg2),
7886 low2highgid(arg3)));
7887 break;
7888 #endif
7889 #ifdef TARGET_NR_getresgid
7890 case TARGET_NR_getresgid:
7891 {
7892 gid_t rgid, egid, sgid;
7893 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7894 if (!is_error(ret)) {
7895 if (put_user_u16(high2lowgid(rgid), arg1)
7896 || put_user_u16(high2lowgid(egid), arg2)
7897 || put_user_u16(high2lowgid(sgid), arg3))
7898 goto efault;
7899 }
7900 }
7901 break;
7902 #endif
7903 case TARGET_NR_chown:
7904 if (!(p = lock_user_string(arg1)))
7905 goto efault;
7906 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7907 unlock_user(p, arg1, 0);
7908 break;
7909 case TARGET_NR_setuid:
7910 ret = get_errno(setuid(low2highuid(arg1)));
7911 break;
7912 case TARGET_NR_setgid:
7913 ret = get_errno(setgid(low2highgid(arg1)));
7914 break;
7915 case TARGET_NR_setfsuid:
7916 ret = get_errno(setfsuid(arg1));
7917 break;
7918 case TARGET_NR_setfsgid:
7919 ret = get_errno(setfsgid(arg1));
7920 break;
7921
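/* The *32 syscall variants below take full 32-bit uids/gids, so the
   arguments are passed straight through without the low2high/high2low
   conversions used by the 16-bit calls above. */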
7922 #ifdef TARGET_NR_lchown32
7923 case TARGET_NR_lchown32:
7924 if (!(p = lock_user_string(arg1)))
7925 goto efault;
7926 ret = get_errno(lchown(p, arg2, arg3));
7927 unlock_user(p, arg1, 0);
7928 break;
7929 #endif
7930 #ifdef TARGET_NR_getuid32
7931 case TARGET_NR_getuid32:
7932 ret = get_errno(getuid());
7933 break;
7934 #endif
7935
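/* The Alpha getxuid/getxgid calls return two values: the real ID as the
   normal syscall result and the effective ID in register a4, which is why
   the code below stores into ir[IR_A4] before returning the result of
   getuid()/getgid(). */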
7936 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7937 /* Alpha specific */
7938 case TARGET_NR_getxuid:
7939 {
7940 uid_t euid;
7941 euid = geteuid();
7942 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7943 }
7944 ret = get_errno(getuid());
7945 break;
7946 #endif
7947 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7948 /* Alpha specific */
7949 case TARGET_NR_getxgid:
7950 {
7951 gid_t egid;
7952 egid = getegid();
7953 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7954 }
7955 ret = get_errno(getgid());
7956 break;
7957 #endif
7958 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7959 /* Alpha specific */
7960 case TARGET_NR_osf_getsysinfo:
7961 ret = -TARGET_EOPNOTSUPP;
7962 switch (arg1) {
7963 case TARGET_GSI_IEEE_FP_CONTROL:
7964 {
7965 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7966
7967 /* Copied from linux ieee_fpcr_to_swcr. */
7968 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7969 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7970 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7971 | SWCR_TRAP_ENABLE_DZE
7972 | SWCR_TRAP_ENABLE_OVF);
7973 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7974 | SWCR_TRAP_ENABLE_INE);
7975 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7976 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7977
7978 if (put_user_u64 (swcr, arg2))
7979 goto efault;
7980 ret = 0;
7981 }
7982 break;
7983
7984 /* case GSI_IEEE_STATE_AT_SIGNAL:
7985 -- Not implemented in linux kernel.
7986 case GSI_UACPROC:
7987 -- Retrieves current unaligned access state; not much used.
7988 case GSI_PROC_TYPE:
7989 -- Retrieves implver information; surely not used.
7990 case GSI_GET_HWRPB:
7991 -- Grabs a copy of the HWRPB; surely not used.
7992 */
7993 }
7994 break;
7995 #endif
7996 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7997 /* Alpha specific */
7998 case TARGET_NR_osf_setsysinfo:
7999 ret = -TARGET_EOPNOTSUPP;
8000 switch (arg1) {
8001 case TARGET_SSI_IEEE_FP_CONTROL:
8002 {
8003 uint64_t swcr, fpcr, orig_fpcr;
8004
8005 if (get_user_u64 (swcr, arg2)) {
8006 goto efault;
8007 }
8008 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8009 fpcr = orig_fpcr & FPCR_DYN_MASK;
8010
8011 /* Copied from linux ieee_swcr_to_fpcr. */
8012 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8013 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8014 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8015 | SWCR_TRAP_ENABLE_DZE
8016 | SWCR_TRAP_ENABLE_OVF)) << 48;
8017 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8018 | SWCR_TRAP_ENABLE_INE)) << 57;
8019 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8020 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8021
8022 cpu_alpha_store_fpcr(cpu_env, fpcr);
8023 ret = 0;
8024 }
8025 break;
8026
8027 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8028 {
8029 uint64_t exc, fpcr, orig_fpcr;
8030 int si_code;
8031
8032 if (get_user_u64(exc, arg2)) {
8033 goto efault;
8034 }
8035
8036 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8037
8038 /* We only add to the exception status here. */
8039 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8040
8041 cpu_alpha_store_fpcr(cpu_env, fpcr);
8042 ret = 0;
8043
8044 /* Old exceptions are not signaled. */
8045 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8046
8047 /* If any exceptions were set by this call
8048 and are unmasked, send a signal. */
8049 si_code = 0;
8050 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8051 si_code = TARGET_FPE_FLTRES;
8052 }
8053 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8054 si_code = TARGET_FPE_FLTUND;
8055 }
8056 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8057 si_code = TARGET_FPE_FLTOVF;
8058 }
8059 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8060 si_code = TARGET_FPE_FLTDIV;
8061 }
8062 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8063 si_code = TARGET_FPE_FLTINV;
8064 }
8065 if (si_code != 0) {
8066 target_siginfo_t info;
8067 info.si_signo = SIGFPE;
8068 info.si_errno = 0;
8069 info.si_code = si_code;
8070 info._sifields._sigfault._addr
8071 = ((CPUArchState *)cpu_env)->pc;
8072 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8073 }
8074 }
8075 break;
8076
8077 /* case SSI_NVPAIRS:
8078 -- Used with SSIN_UACPROC to enable unaligned accesses.
8079 case SSI_IEEE_STATE_AT_SIGNAL:
8080 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8081 -- Not implemented in linux kernel
8082 */
8083 }
8084 break;
8085 #endif
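/* osf_sigprocmask passes the old-style signal mask by value in arg2 and
   returns the previous mask as the syscall result, so unlike
   rt_sigprocmask no guest memory has to be read or written here. */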
8086 #ifdef TARGET_NR_osf_sigprocmask
8087 /* Alpha specific. */
8088 case TARGET_NR_osf_sigprocmask:
8089 {
8090 abi_ulong mask;
8091 int how;
8092 sigset_t set, oldset;
8093
8094 switch(arg1) {
8095 case TARGET_SIG_BLOCK:
8096 how = SIG_BLOCK;
8097 break;
8098 case TARGET_SIG_UNBLOCK:
8099 how = SIG_UNBLOCK;
8100 break;
8101 case TARGET_SIG_SETMASK:
8102 how = SIG_SETMASK;
8103 break;
8104 default:
8105 ret = -TARGET_EINVAL;
8106 goto fail;
8107 }
8108 mask = arg2;
8109 target_to_host_old_sigset(&set, &mask);
8110 sigprocmask(how, &set, &oldset);
8111 host_to_target_old_sigset(&mask, &oldset);
8112 ret = mask;
8113 }
8114 break;
8115 #endif
8116
8117 #ifdef TARGET_NR_getgid32
8118 case TARGET_NR_getgid32:
8119 ret = get_errno(getgid());
8120 break;
8121 #endif
8122 #ifdef TARGET_NR_geteuid32
8123 case TARGET_NR_geteuid32:
8124 ret = get_errno(geteuid());
8125 break;
8126 #endif
8127 #ifdef TARGET_NR_getegid32
8128 case TARGET_NR_getegid32:
8129 ret = get_errno(getegid());
8130 break;
8131 #endif
8132 #ifdef TARGET_NR_setreuid32
8133 case TARGET_NR_setreuid32:
8134 ret = get_errno(setreuid(arg1, arg2));
8135 break;
8136 #endif
8137 #ifdef TARGET_NR_setregid32
8138 case TARGET_NR_setregid32:
8139 ret = get_errno(setregid(arg1, arg2));
8140 break;
8141 #endif
8142 #ifdef TARGET_NR_getgroups32
8143 case TARGET_NR_getgroups32:
8144 {
8145 int gidsetsize = arg1;
8146 uint32_t *target_grouplist;
8147 gid_t *grouplist;
8148 int i;
8149
8150 grouplist = alloca(gidsetsize * sizeof(gid_t));
8151 ret = get_errno(getgroups(gidsetsize, grouplist));
8152 if (gidsetsize == 0)
8153 break;
8154 if (!is_error(ret)) {
8155 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8156 if (!target_grouplist) {
8157 ret = -TARGET_EFAULT;
8158 goto fail;
8159 }
8160 for (i = 0; i < ret; i++)
8161 target_grouplist[i] = tswap32(grouplist[i]);
8162 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8163 }
8164 }
8165 break;
8166 #endif
8167 #ifdef TARGET_NR_setgroups32
8168 case TARGET_NR_setgroups32:
8169 {
8170 int gidsetsize = arg1;
8171 uint32_t *target_grouplist;
8172 gid_t *grouplist;
8173 int i;
8174
8175 grouplist = alloca(gidsetsize * sizeof(gid_t));
8176 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8177 if (!target_grouplist) {
8178 ret = -TARGET_EFAULT;
8179 goto fail;
8180 }
8181 for (i = 0; i < gidsetsize; i++)
8182 grouplist[i] = tswap32(target_grouplist[i]);
8183 unlock_user(target_grouplist, arg2, 0);
8184 ret = get_errno(setgroups(gidsetsize, grouplist));
8185 }
8186 break;
8187 #endif
8188 #ifdef TARGET_NR_fchown32
8189 case TARGET_NR_fchown32:
8190 ret = get_errno(fchown(arg1, arg2, arg3));
8191 break;
8192 #endif
8193 #ifdef TARGET_NR_setresuid32
8194 case TARGET_NR_setresuid32:
8195 ret = get_errno(setresuid(arg1, arg2, arg3));
8196 break;
8197 #endif
8198 #ifdef TARGET_NR_getresuid32
8199 case TARGET_NR_getresuid32:
8200 {
8201 uid_t ruid, euid, suid;
8202 ret = get_errno(getresuid(&ruid, &euid, &suid));
8203 if (!is_error(ret)) {
8204 if (put_user_u32(ruid, arg1)
8205 || put_user_u32(euid, arg2)
8206 || put_user_u32(suid, arg3))
8207 goto efault;
8208 }
8209 }
8210 break;
8211 #endif
8212 #ifdef TARGET_NR_setresgid32
8213 case TARGET_NR_setresgid32:
8214 ret = get_errno(setresgid(arg1, arg2, arg3));
8215 break;
8216 #endif
8217 #ifdef TARGET_NR_getresgid32
8218 case TARGET_NR_getresgid32:
8219 {
8220 gid_t rgid, egid, sgid;
8221 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8222 if (!is_error(ret)) {
8223 if (put_user_u32(rgid, arg1)
8224 || put_user_u32(egid, arg2)
8225 || put_user_u32(sgid, arg3))
8226 goto efault;
8227 }
8228 }
8229 break;
8230 #endif
8231 #ifdef TARGET_NR_chown32
8232 case TARGET_NR_chown32:
8233 if (!(p = lock_user_string(arg1)))
8234 goto efault;
8235 ret = get_errno(chown(p, arg2, arg3));
8236 unlock_user(p, arg1, 0);
8237 break;
8238 #endif
8239 #ifdef TARGET_NR_setuid32
8240 case TARGET_NR_setuid32:
8241 ret = get_errno(setuid(arg1));
8242 break;
8243 #endif
8244 #ifdef TARGET_NR_setgid32
8245 case TARGET_NR_setgid32:
8246 ret = get_errno(setgid(arg1));
8247 break;
8248 #endif
8249 #ifdef TARGET_NR_setfsuid32
8250 case TARGET_NR_setfsuid32:
8251 ret = get_errno(setfsuid(arg1));
8252 break;
8253 #endif
8254 #ifdef TARGET_NR_setfsgid32
8255 case TARGET_NR_setfsgid32:
8256 ret = get_errno(setfsgid(arg1));
8257 break;
8258 #endif
8259
8260 case TARGET_NR_pivot_root:
8261 goto unimplemented;
8262 #ifdef TARGET_NR_mincore
8263 case TARGET_NR_mincore:
8264 {
8265 void *a;
8266 ret = -TARGET_EFAULT;
8267 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8268 goto efault;
8269 if (!(p = lock_user_string(arg3)))
8270 goto mincore_fail;
8271 ret = get_errno(mincore(a, arg2, p));
8272 unlock_user(p, arg3, ret);
8273 mincore_fail:
8274 unlock_user(a, arg1, 0);
8275 }
8276 break;
8277 #endif
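/* fadvise group: arm_fadvise64_64 reorders its arguments into the
   fadvise64_64 layout and then falls through into the common code.
   Note that posix_fadvise() returns the error number directly instead
   of setting errno, hence "ret = -posix_fadvise(...)" rather than
   get_errno(). */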
8278 #ifdef TARGET_NR_arm_fadvise64_64
8279 case TARGET_NR_arm_fadvise64_64:
8280 {
8281 /*
8282 * arm_fadvise64_64 looks like fadvise64_64 but
8283 * with different argument order
8284 */
8285 abi_long temp;
8286 temp = arg3;
8287 arg3 = arg4;
8288 arg4 = temp;
8289 }
8290 #endif
8291 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8292 #ifdef TARGET_NR_fadvise64_64
8293 case TARGET_NR_fadvise64_64:
8294 #endif
8295 #ifdef TARGET_NR_fadvise64
8296 case TARGET_NR_fadvise64:
8297 #endif
8298 #ifdef TARGET_S390X
8299 switch (arg4) {
8300 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8301 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8302 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8303 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8304 default: break;
8305 }
8306 #endif
8307 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8308 break;
8309 #endif
8310 #ifdef TARGET_NR_madvise
8311 case TARGET_NR_madvise:
8312 /* A straight passthrough may not be safe because qemu sometimes
8313 turns private file-backed mappings into anonymous mappings.
8314 This will break MADV_DONTNEED.
8315 This is a hint, so ignoring and returning success is ok. */
8316 ret = get_errno(0);
8317 break;
8318 #endif
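/* fcntl64 (32-bit ABIs only): struct flock64 has a different layout on
   ARM EABI targets because the EABI aligns 64-bit members to 8 bytes,
   hence the separate target_eabi_flock64 handling below. */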
8319 #if TARGET_ABI_BITS == 32
8320 case TARGET_NR_fcntl64:
8321 {
8322 int cmd;
8323 struct flock64 fl;
8324 struct target_flock64 *target_fl;
8325 #ifdef TARGET_ARM
8326 struct target_eabi_flock64 *target_efl;
8327 #endif
8328
8329 cmd = target_to_host_fcntl_cmd(arg2);
8330 if (cmd == -TARGET_EINVAL) {
8331 ret = cmd;
8332 break;
8333 }
8334
8335 switch(arg2) {
8336 case TARGET_F_GETLK64:
8337 #ifdef TARGET_ARM
8338 if (((CPUARMState *)cpu_env)->eabi) {
8339 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8340 goto efault;
8341 fl.l_type = tswap16(target_efl->l_type);
8342 fl.l_whence = tswap16(target_efl->l_whence);
8343 fl.l_start = tswap64(target_efl->l_start);
8344 fl.l_len = tswap64(target_efl->l_len);
8345 fl.l_pid = tswap32(target_efl->l_pid);
8346 unlock_user_struct(target_efl, arg3, 0);
8347 } else
8348 #endif
8349 {
8350 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8351 goto efault;
8352 fl.l_type = tswap16(target_fl->l_type);
8353 fl.l_whence = tswap16(target_fl->l_whence);
8354 fl.l_start = tswap64(target_fl->l_start);
8355 fl.l_len = tswap64(target_fl->l_len);
8356 fl.l_pid = tswap32(target_fl->l_pid);
8357 unlock_user_struct(target_fl, arg3, 0);
8358 }
8359 ret = get_errno(fcntl(arg1, cmd, &fl));
8360 if (ret == 0) {
8361 #ifdef TARGET_ARM
8362 if (((CPUARMState *)cpu_env)->eabi) {
8363 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8364 goto efault;
8365 target_efl->l_type = tswap16(fl.l_type);
8366 target_efl->l_whence = tswap16(fl.l_whence);
8367 target_efl->l_start = tswap64(fl.l_start);
8368 target_efl->l_len = tswap64(fl.l_len);
8369 target_efl->l_pid = tswap32(fl.l_pid);
8370 unlock_user_struct(target_efl, arg3, 1);
8371 } else
8372 #endif
8373 {
8374 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8375 goto efault;
8376 target_fl->l_type = tswap16(fl.l_type);
8377 target_fl->l_whence = tswap16(fl.l_whence);
8378 target_fl->l_start = tswap64(fl.l_start);
8379 target_fl->l_len = tswap64(fl.l_len);
8380 target_fl->l_pid = tswap32(fl.l_pid);
8381 unlock_user_struct(target_fl, arg3, 1);
8382 }
8383 }
8384 break;
8385
8386 case TARGET_F_SETLK64:
8387 case TARGET_F_SETLKW64:
8388 #ifdef TARGET_ARM
8389 if (((CPUARMState *)cpu_env)->eabi) {
8390 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8391 goto efault;
8392 fl.l_type = tswap16(target_efl->l_type);
8393 fl.l_whence = tswap16(target_efl->l_whence);
8394 fl.l_start = tswap64(target_efl->l_start);
8395 fl.l_len = tswap64(target_efl->l_len);
8396 fl.l_pid = tswap32(target_efl->l_pid);
8397 unlock_user_struct(target_efl, arg3, 0);
8398 } else
8399 #endif
8400 {
8401 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8402 goto efault;
8403 fl.l_type = tswap16(target_fl->l_type);
8404 fl.l_whence = tswap16(target_fl->l_whence);
8405 fl.l_start = tswap64(target_fl->l_start);
8406 fl.l_len = tswap64(target_fl->l_len);
8407 fl.l_pid = tswap32(target_fl->l_pid);
8408 unlock_user_struct(target_fl, arg3, 0);
8409 }
8410 ret = get_errno(fcntl(arg1, cmd, &fl));
8411 break;
8412 default:
8413 ret = do_fcntl(arg1, arg2, arg3);
8414 break;
8415 }
8416 break;
8417 }
8418 #endif
8419 #ifdef TARGET_NR_cacheflush
8420 case TARGET_NR_cacheflush:
8421 /* self-modifying code is handled automatically, so nothing needed */
8422 ret = 0;
8423 break;
8424 #endif
8425 #ifdef TARGET_NR_security
8426 case TARGET_NR_security:
8427 goto unimplemented;
8428 #endif
8429 #ifdef TARGET_NR_getpagesize
8430 case TARGET_NR_getpagesize:
8431 ret = TARGET_PAGE_SIZE;
8432 break;
8433 #endif
8434 case TARGET_NR_gettid:
8435 ret = get_errno(gettid());
8436 break;
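/* readahead: on 32-bit ABIs that pass 64-bit values in aligned register
   pairs (regpairs_aligned()), a padding slot shifts the arguments by one,
   so they are reshuffled before the offset is reassembled from its low
   (arg2) and high (arg3) halves. */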
8437 #ifdef TARGET_NR_readahead
8438 case TARGET_NR_readahead:
8439 #if TARGET_ABI_BITS == 32
8440 if (regpairs_aligned(cpu_env)) {
8441 arg2 = arg3;
8442 arg3 = arg4;
8443 arg4 = arg5;
8444 }
8445 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8446 #else
8447 ret = get_errno(readahead(arg1, arg2, arg3));
8448 #endif
8449 break;
8450 #endif
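/* xattr family: a NULL value/list buffer with size 0 is a legitimate
   "how big a buffer do I need?" query for *getxattr and *listxattr,
   which is why the buffers below are only locked when the guest actually
   supplied one. */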
8451 #ifdef CONFIG_ATTR
8452 #ifdef TARGET_NR_setxattr
8453 case TARGET_NR_listxattr:
8454 case TARGET_NR_llistxattr:
8455 {
8456 void *p, *b = 0;
8457 if (arg2) {
8458 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8459 if (!b) {
8460 ret = -TARGET_EFAULT;
8461 break;
8462 }
8463 }
8464 p = lock_user_string(arg1);
8465 if (p) {
8466 if (num == TARGET_NR_listxattr) {
8467 ret = get_errno(listxattr(p, b, arg3));
8468 } else {
8469 ret = get_errno(llistxattr(p, b, arg3));
8470 }
8471 } else {
8472 ret = -TARGET_EFAULT;
8473 }
8474 unlock_user(p, arg1, 0);
8475 unlock_user(b, arg2, arg3);
8476 break;
8477 }
8478 case TARGET_NR_flistxattr:
8479 {
8480 void *b = 0;
8481 if (arg2) {
8482 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8483 if (!b) {
8484 ret = -TARGET_EFAULT;
8485 break;
8486 }
8487 }
8488 ret = get_errno(flistxattr(arg1, b, arg3));
8489 unlock_user(b, arg2, arg3);
8490 break;
8491 }
8492 case TARGET_NR_setxattr:
8493 case TARGET_NR_lsetxattr:
8494 {
8495 void *p, *n, *v = 0;
8496 if (arg3) {
8497 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8498 if (!v) {
8499 ret = -TARGET_EFAULT;
8500 break;
8501 }
8502 }
8503 p = lock_user_string(arg1);
8504 n = lock_user_string(arg2);
8505 if (p && n) {
8506 if (num == TARGET_NR_setxattr) {
8507 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8508 } else {
8509 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8510 }
8511 } else {
8512 ret = -TARGET_EFAULT;
8513 }
8514 unlock_user(p, arg1, 0);
8515 unlock_user(n, arg2, 0);
8516 unlock_user(v, arg3, 0);
8517 }
8518 break;
8519 case TARGET_NR_fsetxattr:
8520 {
8521 void *n, *v = 0;
8522 if (arg3) {
8523 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8524 if (!v) {
8525 ret = -TARGET_EFAULT;
8526 break;
8527 }
8528 }
8529 n = lock_user_string(arg2);
8530 if (n) {
8531 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8532 } else {
8533 ret = -TARGET_EFAULT;
8534 }
8535 unlock_user(n, arg2, 0);
8536 unlock_user(v, arg3, 0);
8537 }
8538 break;
8539 case TARGET_NR_getxattr:
8540 case TARGET_NR_lgetxattr:
8541 {
8542 void *p, *n, *v = 0;
8543 if (arg3) {
8544 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8545 if (!v) {
8546 ret = -TARGET_EFAULT;
8547 break;
8548 }
8549 }
8550 p = lock_user_string(arg1);
8551 n = lock_user_string(arg2);
8552 if (p && n) {
8553 if (num == TARGET_NR_getxattr) {
8554 ret = get_errno(getxattr(p, n, v, arg4));
8555 } else {
8556 ret = get_errno(lgetxattr(p, n, v, arg4));
8557 }
8558 } else {
8559 ret = -TARGET_EFAULT;
8560 }
8561 unlock_user(p, arg1, 0);
8562 unlock_user(n, arg2, 0);
8563 unlock_user(v, arg3, arg4);
8564 }
8565 break;
8566 case TARGET_NR_fgetxattr:
8567 {
8568 void *n, *v = 0;
8569 if (arg3) {
8570 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8571 if (!v) {
8572 ret = -TARGET_EFAULT;
8573 break;
8574 }
8575 }
8576 n = lock_user_string(arg2);
8577 if (n) {
8578 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8579 } else {
8580 ret = -TARGET_EFAULT;
8581 }
8582 unlock_user(n, arg2, 0);
8583 unlock_user(v, arg3, arg4);
8584 }
8585 break;
8586 case TARGET_NR_removexattr:
8587 case TARGET_NR_lremovexattr:
8588 {
8589 void *p, *n;
8590 p = lock_user_string(arg1);
8591 n = lock_user_string(arg2);
8592 if (p && n) {
8593 if (num == TARGET_NR_removexattr) {
8594 ret = get_errno(removexattr(p, n));
8595 } else {
8596 ret = get_errno(lremovexattr(p, n));
8597 }
8598 } else {
8599 ret = -TARGET_EFAULT;
8600 }
8601 unlock_user(p, arg1, 0);
8602 unlock_user(n, arg2, 0);
8603 }
8604 break;
8605 case TARGET_NR_fremovexattr:
8606 {
8607 void *n;
8608 n = lock_user_string(arg2);
8609 if (n) {
8610 ret = get_errno(fremovexattr(arg1, n));
8611 } else {
8612 ret = -TARGET_EFAULT;
8613 }
8614 unlock_user(n, arg2, 0);
8615 }
8616 break;
8617 #endif
8618 #endif /* CONFIG_ATTR */
8619 #ifdef TARGET_NR_set_thread_area
8620 case TARGET_NR_set_thread_area:
8621 #if defined(TARGET_MIPS)
8622 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8623 ret = 0;
8624 break;
8625 #elif defined(TARGET_CRIS)
8626 if (arg1 & 0xff)
8627 ret = -TARGET_EINVAL;
8628 else {
8629 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8630 ret = 0;
8631 }
8632 break;
8633 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8634 ret = do_set_thread_area(cpu_env, arg1);
8635 break;
8636 #elif defined(TARGET_M68K)
8637 {
8638 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8639 ts->tp_value = arg1;
8640 ret = 0;
8641 break;
8642 }
8643 #else
8644 goto unimplemented_nowarn;
8645 #endif
8646 #endif
8647 #ifdef TARGET_NR_get_thread_area
8648 case TARGET_NR_get_thread_area:
8649 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8650 ret = do_get_thread_area(cpu_env, arg1);
8651 break;
8652 #elif defined(TARGET_M68K)
8653 {
8654 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8655 ret = ts->tp_value;
8656 break;
8657 }
8658 #else
8659 goto unimplemented_nowarn;
8660 #endif
8661 #endif
8662 #ifdef TARGET_NR_getdomainname
8663 case TARGET_NR_getdomainname:
8664 goto unimplemented_nowarn;
8665 #endif
8666
8667 #ifdef TARGET_NR_clock_gettime
8668 case TARGET_NR_clock_gettime:
8669 {
8670 struct timespec ts;
8671 ret = get_errno(clock_gettime(arg1, &ts));
8672 if (!is_error(ret)) {
8673 host_to_target_timespec(arg2, &ts);
8674 }
8675 break;
8676 }
8677 #endif
8678 #ifdef TARGET_NR_clock_getres
8679 case TARGET_NR_clock_getres:
8680 {
8681 struct timespec ts;
8682 ret = get_errno(clock_getres(arg1, &ts));
8683 if (!is_error(ret)) {
8684 host_to_target_timespec(arg2, &ts);
8685 }
8686 break;
8687 }
8688 #endif
8689 #ifdef TARGET_NR_clock_nanosleep
8690 case TARGET_NR_clock_nanosleep:
8691 {
8692 struct timespec ts;
8693 target_to_host_timespec(&ts, arg3);
8694 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8695 if (arg4)
8696 host_to_target_timespec(arg4, &ts);
8697 break;
8698 }
8699 #endif
8700
8701 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8702 case TARGET_NR_set_tid_address:
8703 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8704 break;
8705 #endif
8706
8707 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8708 case TARGET_NR_tkill:
8709 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8710 break;
8711 #endif
8712
8713 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8714 case TARGET_NR_tgkill:
8715 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8716 target_to_host_signal(arg3)));
8717 break;
8718 #endif
8719
8720 #ifdef TARGET_NR_set_robust_list
8721 case TARGET_NR_set_robust_list:
8722 case TARGET_NR_get_robust_list:
8723 /* The ABI for supporting robust futexes has userspace pass
8724 * the kernel a pointer to a linked list which is updated by
8725 * userspace after the syscall; the list is walked by the kernel
8726 * when the thread exits. Since the linked list in QEMU guest
8727 * memory isn't a valid linked list for the host and we have
8728 * no way to reliably intercept the thread-death event, we can't
8729 * support these. Silently return ENOSYS so that guest userspace
8730 * falls back to a non-robust futex implementation (which should
8731 * be OK except in the corner case of the guest crashing while
8732 * holding a mutex that is shared with another process via
8733 * shared memory).
8734 */
8735 goto unimplemented_nowarn;
8736 #endif
8737
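/* utimensat: ts[0] is the access time and ts[1] the modification time; a
   NULL times pointer means "set both to the current time", and a NULL
   pathname makes the syscall operate on the file referred to by the
   dirfd in arg1 (this is how futimens() is implemented). */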
8738 #if defined(TARGET_NR_utimensat)
8739 case TARGET_NR_utimensat:
8740 {
8741 struct timespec *tsp, ts[2];
8742 if (!arg3) {
8743 tsp = NULL;
8744 } else {
8745 target_to_host_timespec(ts, arg3);
8746 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8747 tsp = ts;
8748 }
8749 if (!arg2)
8750 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8751 else {
8752 if (!(p = lock_user_string(arg2))) {
8753 ret = -TARGET_EFAULT;
8754 goto fail;
8755 }
8756 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8757 unlock_user(p, arg2, 0);
8758 }
8759 }
8760 break;
8761 #endif
8762 case TARGET_NR_futex:
8763 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8764 break;
8765 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8766 case TARGET_NR_inotify_init:
8767 ret = get_errno(sys_inotify_init());
8768 break;
8769 #endif
8770 #ifdef CONFIG_INOTIFY1
8771 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8772 case TARGET_NR_inotify_init1:
8773 ret = get_errno(sys_inotify_init1(arg1));
8774 break;
8775 #endif
8776 #endif
8777 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8778 case TARGET_NR_inotify_add_watch:
8779 p = lock_user_string(arg2);
8780 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8781 unlock_user(p, arg2, 0);
8782 break;
8783 #endif
8784 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8785 case TARGET_NR_inotify_rm_watch:
8786 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8787 break;
8788 #endif
8789
8790 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8791 case TARGET_NR_mq_open:
8792 {
8793 struct mq_attr posix_mq_attr;
8794
8795 p = lock_user_string(arg1 - 1);
8796 if (arg4 != 0)
8797 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8798 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8799 unlock_user (p, arg1, 0);
8800 }
8801 break;
8802
8803 case TARGET_NR_mq_unlink:
8804 p = lock_user_string(arg1 - 1);
8805 ret = get_errno(mq_unlink(p));
8806 unlock_user (p, arg1, 0);
8807 break;
8808
8809 case TARGET_NR_mq_timedsend:
8810 {
8811 struct timespec ts;
8812
8813 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8814 if (arg5 != 0) {
8815 target_to_host_timespec(&ts, arg5);
8816 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8817 host_to_target_timespec(arg5, &ts);
8818 }
8819 else
8820 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8821 unlock_user (p, arg2, arg3);
8822 }
8823 break;
8824
8825 case TARGET_NR_mq_timedreceive:
8826 {
8827 struct timespec ts;
8828 unsigned int prio;
8829
8830 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8831 if (arg5 != 0) {
8832 target_to_host_timespec(&ts, arg5);
8833 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8834 host_to_target_timespec(arg5, &ts);
8835 }
8836 else
8837 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8838 unlock_user (p, arg2, arg3);
8839 if (arg4 != 0)
8840 put_user_u32(prio, arg4);
8841 }
8842 break;
8843
8844 /* Not implemented for now... */
8845 /* case TARGET_NR_mq_notify: */
8846 /* break; */
8847
8848 case TARGET_NR_mq_getsetattr:
8849 {
8850 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8851 ret = 0;
8852 if (arg3 != 0) {
8853 ret = mq_getattr(arg1, &posix_mq_attr_out);
8854 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8855 }
8856 if (arg2 != 0) {
8857 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8858 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8859 }
8860
8861 }
8862 break;
8863 #endif
8864
8865 #ifdef CONFIG_SPLICE
8866 #ifdef TARGET_NR_tee
8867 case TARGET_NR_tee:
8868 {
8869 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8870 }
8871 break;
8872 #endif
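/* splice: when the guest supplies off_in/off_out pointers, the offsets
   are read from guest memory before the call, but this implementation
   does not copy the updated offsets back afterwards. */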
8873 #ifdef TARGET_NR_splice
8874 case TARGET_NR_splice:
8875 {
8876 loff_t loff_in, loff_out;
8877 loff_t *ploff_in = NULL, *ploff_out = NULL;
8878 if(arg2) {
8879 get_user_u64(loff_in, arg2);
8880 ploff_in = &loff_in;
8881 }
8882 if(arg4) {
8883 get_user_u64(loff_out, arg4);
8884 ploff_out = &loff_out;
8885 }
8886 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8887 }
8888 break;
8889 #endif
8890 #ifdef TARGET_NR_vmsplice
8891 case TARGET_NR_vmsplice:
8892 {
8893 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8894 if (vec != NULL) {
8895 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8896 unlock_iovec(vec, arg2, arg3, 0);
8897 } else {
8898 ret = -host_to_target_errno(errno);
8899 }
8900 }
8901 break;
8902 #endif
8903 #endif /* CONFIG_SPLICE */
8904 #ifdef CONFIG_EVENTFD
8905 #if defined(TARGET_NR_eventfd)
8906 case TARGET_NR_eventfd:
8907 ret = get_errno(eventfd(arg1, 0));
8908 break;
8909 #endif
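/* eventfd2: the guest's O_NONBLOCK/O_CLOEXEC flag values need not match
   the host's, so each flag is translated individually before calling the
   host eventfd(). */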
8910 #if defined(TARGET_NR_eventfd2)
8911 case TARGET_NR_eventfd2:
8912 {
8913 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8914 if (arg2 & TARGET_O_NONBLOCK) {
8915 host_flags |= O_NONBLOCK;
8916 }
8917 if (arg2 & TARGET_O_CLOEXEC) {
8918 host_flags |= O_CLOEXEC;
8919 }
8920 ret = get_errno(eventfd(arg1, host_flags));
8921 break;
8922 }
8923 #endif
8924 #endif /* CONFIG_EVENTFD */
8925 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8926 case TARGET_NR_fallocate:
8927 #if TARGET_ABI_BITS == 32
8928 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8929 target_offset64(arg5, arg6)));
8930 #else
8931 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8932 #endif
8933 break;
8934 #endif
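/* sync_file_range: on 32-bit ABIs the 64-bit offset and count arrive
   split across register pairs and are reassembled with target_offset64();
   MIPS passes them one slot later (arg3..arg7 instead of arg2..arg6).
   sync_file_range2 takes the flags in arg2, with the offsets following. */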
8935 #if defined(CONFIG_SYNC_FILE_RANGE)
8936 #if defined(TARGET_NR_sync_file_range)
8937 case TARGET_NR_sync_file_range:
8938 #if TARGET_ABI_BITS == 32
8939 #if defined(TARGET_MIPS)
8940 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8941 target_offset64(arg5, arg6), arg7));
8942 #else
8943 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8944 target_offset64(arg4, arg5), arg6));
8945 #endif /* !TARGET_MIPS */
8946 #else
8947 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8948 #endif
8949 break;
8950 #endif
8951 #if defined(TARGET_NR_sync_file_range2)
8952 case TARGET_NR_sync_file_range2:
8953 /* This is like sync_file_range but the arguments are reordered */
8954 #if TARGET_ABI_BITS == 32
8955 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8956 target_offset64(arg5, arg6), arg2));
8957 #else
8958 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8959 #endif
8960 break;
8961 #endif
8962 #endif
8963 #if defined(CONFIG_EPOLL)
8964 #if defined(TARGET_NR_epoll_create)
8965 case TARGET_NR_epoll_create:
8966 ret = get_errno(epoll_create(arg1));
8967 break;
8968 #endif
8969 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8970 case TARGET_NR_epoll_create1:
8971 ret = get_errno(epoll_create1(arg1));
8972 break;
8973 #endif
8974 #if defined(TARGET_NR_epoll_ctl)
8975 case TARGET_NR_epoll_ctl:
8976 {
8977 struct epoll_event ep;
8978 struct epoll_event *epp = 0;
8979 if (arg4) {
8980 struct target_epoll_event *target_ep;
8981 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8982 goto efault;
8983 }
8984 ep.events = tswap32(target_ep->events);
8985 /* The epoll_data_t union is just opaque data to the kernel,
8986 * so we transfer all 64 bits across and need not worry what
8987 * actual data type it is.
8988 */
8989 ep.data.u64 = tswap64(target_ep->data.u64);
8990 unlock_user_struct(target_ep, arg4, 0);
8991 epp = &ep;
8992 }
8993 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8994 break;
8995 }
8996 #endif
8997
8998 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8999 #define IMPLEMENT_EPOLL_PWAIT
9000 #endif
9001 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9002 #if defined(TARGET_NR_epoll_wait)
9003 case TARGET_NR_epoll_wait:
9004 #endif
9005 #if defined(IMPLEMENT_EPOLL_PWAIT)
9006 case TARGET_NR_epoll_pwait:
9007 #endif
9008 {
9009 struct target_epoll_event *target_ep;
9010 struct epoll_event *ep;
9011 int epfd = arg1;
9012 int maxevents = arg3;
9013 int timeout = arg4;
9014
9015 target_ep = lock_user(VERIFY_WRITE, arg2,
9016 maxevents * sizeof(struct target_epoll_event), 1);
9017 if (!target_ep) {
9018 goto efault;
9019 }
9020
9021 ep = alloca(maxevents * sizeof(struct epoll_event));
9022
9023 switch (num) {
9024 #if defined(IMPLEMENT_EPOLL_PWAIT)
9025 case TARGET_NR_epoll_pwait:
9026 {
9027 target_sigset_t *target_set;
9028 sigset_t _set, *set = &_set;
9029
9030 if (arg5) {
9031 target_set = lock_user(VERIFY_READ, arg5,
9032 sizeof(target_sigset_t), 1);
9033 if (!target_set) {
9034 unlock_user(target_ep, arg2, 0);
9035 goto efault;
9036 }
9037 target_to_host_sigset(set, target_set);
9038 unlock_user(target_set, arg5, 0);
9039 } else {
9040 set = NULL;
9041 }
9042
9043 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9044 break;
9045 }
9046 #endif
9047 #if defined(TARGET_NR_epoll_wait)
9048 case TARGET_NR_epoll_wait:
9049 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9050 break;
9051 #endif
9052 default:
9053 ret = -TARGET_ENOSYS;
9054 }
9055 if (!is_error(ret)) {
9056 int i;
9057 for (i = 0; i < ret; i++) {
9058 target_ep[i].events = tswap32(ep[i].events);
9059 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9060 }
9061 }
9062 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9063 break;
9064 }
9065 #endif
9066 #endif
9067 #ifdef TARGET_NR_prlimit64
9068 case TARGET_NR_prlimit64:
9069 {
9070 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9071 struct target_rlimit64 *target_rnew, *target_rold;
9072 struct host_rlimit64 rnew, rold, *rnewp = 0;
9073 if (arg3) {
9074 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9075 goto efault;
9076 }
9077 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9078 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9079 unlock_user_struct(target_rnew, arg3, 0);
9080 rnewp = &rnew;
9081 }
9082
9083 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9084 if (!is_error(ret) && arg4) {
9085 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9086 goto efault;
9087 }
9088 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9089 target_rold->rlim_max = tswap64(rold.rlim_max);
9090 unlock_user_struct(target_rold, arg4, 1);
9091 }
9092 break;
9093 }
9094 #endif
9095 #ifdef TARGET_NR_gethostname
9096 case TARGET_NR_gethostname:
9097 {
9098 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9099 if (name) {
9100 ret = get_errno(gethostname(name, arg2));
9101 unlock_user(name, arg1, arg2);
9102 } else {
9103 ret = -TARGET_EFAULT;
9104 }
9105 break;
9106 }
9107 #endif
9108 default:
9109 unimplemented:
9110 gemu_log("qemu: Unsupported syscall: %d\n", num);
9111 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9112 unimplemented_nowarn:
9113 #endif
9114 ret = -TARGET_ENOSYS;
9115 break;
9116 }
9117 fail:
9118 #ifdef DEBUG
9119 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9120 #endif
9121 if(do_strace)
9122 print_syscall_ret(num, ret);
9123 return ret;
9124 efault:
9125 ret = -TARGET_EFAULT;
9126 goto fail;
9127 }