[qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include "linux_loop.h"
110 #include "cpu-uname.h"
111
112 #include "qemu.h"
113
114 #if defined(CONFIG_USE_NPTL)
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 #else
118 /* XXX: Hardcode the above values. */
119 #define CLONE_NPTL_FLAGS2 0
120 #endif
121
122 //#define DEBUG
123
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127
128
129 #undef _syscall0
130 #undef _syscall1
131 #undef _syscall2
132 #undef _syscall3
133 #undef _syscall4
134 #undef _syscall5
135 #undef _syscall6
136
137 #define _syscall0(type,name) \
138 static type name (void) \
139 { \
140 return syscall(__NR_##name); \
141 }
142
143 #define _syscall1(type,name,type1,arg1) \
144 static type name (type1 arg1) \
145 { \
146 return syscall(__NR_##name, arg1); \
147 }
148
149 #define _syscall2(type,name,type1,arg1,type2,arg2) \
150 static type name (type1 arg1,type2 arg2) \
151 { \
152 return syscall(__NR_##name, arg1, arg2); \
153 }
154
155 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
156 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 { \
158 return syscall(__NR_##name, arg1, arg2, arg3); \
159 }
160
161 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
162 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 { \
164 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
165 }
166
167 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 type5,arg5) \
169 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 { \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
172 }
173
174
175 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
176 type5,arg5,type6,arg6) \
177 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
178 type6 arg6) \
179 { \
180 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
181 }
182
183
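/*
 * Illustrative sketch (not part of the original file): the _syscallN
 * macros above turn a declaration such as
 *
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 *
 * into a thin static wrapper that forwards to the raw host syscall:
 *
 *     static int sys_tkill (int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 *
 * __NR_sys_tkill is mapped onto the host's __NR_tkill by the #define
 * block below, so the wrapper bypasses any glibc wrapper and returns
 * the host's raw error convention.
 */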
184 #define __NR_sys_uname __NR_uname
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
197
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
199 defined(__s390x__)
200 #define __NR__llseek __NR_lseek
201 #endif
202
203 #ifdef __NR_gettid
204 _syscall0(int, gettid)
205 #else
206 /* This is a replacement for the host gettid() and must return a host
207 errno. */
208 static int gettid(void) {
209 return -ENOSYS;
210 }
211 #endif
212 #ifdef __NR_getdents
213 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
214 #endif
215 #if !defined(__NR_getdents) || \
216 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
217 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
218 #endif
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #endif
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #endif
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #endif
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
250 void *, arg);
251
252 static bitmask_transtbl fcntl_flags_tbl[] = {
253 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
254 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
255 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
256 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
257 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
258 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
259 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
260 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
261 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
262 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
263 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
264 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
265 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
266 #if defined(O_DIRECT)
267 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
268 #endif
269 #if defined(O_NOATIME)
270 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
271 #endif
272 #if defined(O_CLOEXEC)
273 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
274 #endif
275 #if defined(O_PATH)
276 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
277 #endif
278 /* Don't terminate the list prematurely on 64-bit host+guest. */
279 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
280 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
281 #endif
282 { 0, 0, 0, 0 }
283 };
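/*
 * A minimal sketch of how a bitmask_transtbl such as fcntl_flags_tbl is
 * consumed; the real helper lives elsewhere in QEMU, and the function
 * and field names below are illustrative only.  Each row carries
 * (target_mask, target_bits, host_mask, host_bits): the translator
 * tests the masked target value against target_bits and ORs in
 * host_bits on a match, stopping at the all-zero terminator row.
 *
 *     unsigned int flags_target_to_host(unsigned int tflags,
 *                                       const struct example_row *tbl)
 *     {
 *         unsigned int hflags = 0;
 *         for (; tbl->target_mask != 0; tbl++) {
 *             if ((tflags & tbl->target_mask) == tbl->target_bits) {
 *                 hflags |= tbl->host_bits;
 *             }
 *         }
 *         return hflags;
 *     }
 *
 * With fcntl_flags_tbl this maps, say, TARGET_O_CREAT|TARGET_O_NONBLOCK
 * onto the host's O_CREAT|O_NONBLOCK before calling open() or fcntl().
 */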
284
285 #define COPY_UTSNAME_FIELD(dest, src) \
286 do { \
287 /* __NEW_UTS_LEN doesn't include terminating null */ \
288 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
289 (dest)[__NEW_UTS_LEN] = '\0'; \
290 } while (0)
291
292 static int sys_uname(struct new_utsname *buf)
293 {
294 struct utsname uts_buf;
295
296 if (uname(&uts_buf) < 0)
297 return (-1);
298
299 /*
300 * Just in case these have some differences, we
301 * translate utsname to new_utsname (which is the
302 * struct the Linux kernel uses).
303 */
304
305 memset(buf, 0, sizeof(*buf));
306 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
307 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
308 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
309 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
310 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
311 #ifdef _GNU_SOURCE
312 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
313 #endif
314 return (0);
315
316 #undef COPY_UTSNAME_FIELD
317 }
318
319 static int sys_getcwd1(char *buf, size_t size)
320 {
321 if (getcwd(buf, size) == NULL) {
322 /* getcwd() sets errno */
323 return (-1);
324 }
325 return strlen(buf)+1;
326 }
327
328 #ifdef TARGET_NR_openat
329 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
330 {
331 /*
332 * open(2) has extra parameter 'mode' when called with
333 * flag O_CREAT.
334 */
335 if ((flags & O_CREAT) != 0) {
336 return (openat(dirfd, pathname, flags, mode));
337 }
338 return (openat(dirfd, pathname, flags));
339 }
340 #endif
341
342 #ifdef TARGET_NR_utimensat
343 #ifdef CONFIG_UTIMENSAT
344 static int sys_utimensat(int dirfd, const char *pathname,
345 const struct timespec times[2], int flags)
346 {
347 if (pathname == NULL)
348 return futimens(dirfd, times);
349 else
350 return utimensat(dirfd, pathname, times, flags);
351 }
352 #elif defined(__NR_utimensat)
353 #define __NR_sys_utimensat __NR_utimensat
354 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
355 const struct timespec *,tsp,int,flags)
356 #else
357 static int sys_utimensat(int dirfd, const char *pathname,
358 const struct timespec times[2], int flags)
359 {
360 errno = ENOSYS;
361 return -1;
362 }
363 #endif
364 #endif /* TARGET_NR_utimensat */
365
366 #ifdef CONFIG_INOTIFY
367 #include <sys/inotify.h>
368
369 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
370 static int sys_inotify_init(void)
371 {
372 return (inotify_init());
373 }
374 #endif
375 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
376 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
377 {
378 return (inotify_add_watch(fd, pathname, mask));
379 }
380 #endif
381 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
382 static int sys_inotify_rm_watch(int fd, int32_t wd)
383 {
384 return (inotify_rm_watch(fd, wd));
385 }
386 #endif
387 #ifdef CONFIG_INOTIFY1
388 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
389 static int sys_inotify_init1(int flags)
390 {
391 return (inotify_init1(flags));
392 }
393 #endif
394 #endif
395 #else
396 /* Userspace can usually survive runtime without inotify */
397 #undef TARGET_NR_inotify_init
398 #undef TARGET_NR_inotify_init1
399 #undef TARGET_NR_inotify_add_watch
400 #undef TARGET_NR_inotify_rm_watch
401 #endif /* CONFIG_INOTIFY */
402
403 #if defined(TARGET_NR_ppoll)
404 #ifndef __NR_ppoll
405 # define __NR_ppoll -1
406 #endif
407 #define __NR_sys_ppoll __NR_ppoll
408 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
409 struct timespec *, timeout, const __sigset_t *, sigmask,
410 size_t, sigsetsize)
411 #endif
412
413 #if defined(TARGET_NR_pselect6)
414 #ifndef __NR_pselect6
415 # define __NR_pselect6 -1
416 #endif
417 #define __NR_sys_pselect6 __NR_pselect6
418 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
419 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
420 #endif
421
422 #if defined(TARGET_NR_prlimit64)
423 #ifndef __NR_prlimit64
424 # define __NR_prlimit64 -1
425 #endif
426 #define __NR_sys_prlimit64 __NR_prlimit64
427 /* The glibc rlimit structure may not be that used by the underlying syscall */
428 struct host_rlimit64 {
429 uint64_t rlim_cur;
430 uint64_t rlim_max;
431 };
432 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
433 const struct host_rlimit64 *, new_limit,
434 struct host_rlimit64 *, old_limit)
435 #endif
436
437 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
438 #ifdef TARGET_ARM
439 static inline int regpairs_aligned(void *cpu_env) {
440 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
441 }
442 #elif defined(TARGET_MIPS)
443 static inline int regpairs_aligned(void *cpu_env) { return 1; }
444 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
445 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
446 * of registers which translates to the same as ARM/MIPS, because we start with
447 * r3 as arg1 */
448 static inline int regpairs_aligned(void *cpu_env) { return 1; }
449 #else
450 static inline int regpairs_aligned(void *cpu_env) { return 0; }
451 #endif
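/*
 * A hedged sketch of how regpairs_aligned() is used (the real call
 * sites appear much further down in this file; the variable names
 * follow the syscall argument convention used there): for a 64-bit file
 * offset split across two 32-bit arguments, ABIs that align register
 * pairs leave a hole before the pair, so the arguments are shifted down
 * before the halves are glued back together.
 *
 *     if (regpairs_aligned(cpu_env)) {
 *         // ARM EABI, MIPS, PPC32: arg4 is padding, pair is (arg5, arg6)
 *         arg4 = arg5;
 *         arg5 = arg6;
 *     }
 *     ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
 */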
452
453 #define ERRNO_TABLE_SIZE 1200
454
455 /* target_to_host_errno_table[] is initialized from
456 * host_to_target_errno_table[] in syscall_init(). */
457 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
458 };
459
460 /*
461 * This list is the union of errno values overridden in asm-<arch>/errno.h
462 * minus the errnos that are not actually generic to all archs.
463 */
464 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
465 [EIDRM] = TARGET_EIDRM,
466 [ECHRNG] = TARGET_ECHRNG,
467 [EL2NSYNC] = TARGET_EL2NSYNC,
468 [EL3HLT] = TARGET_EL3HLT,
469 [EL3RST] = TARGET_EL3RST,
470 [ELNRNG] = TARGET_ELNRNG,
471 [EUNATCH] = TARGET_EUNATCH,
472 [ENOCSI] = TARGET_ENOCSI,
473 [EL2HLT] = TARGET_EL2HLT,
474 [EDEADLK] = TARGET_EDEADLK,
475 [ENOLCK] = TARGET_ENOLCK,
476 [EBADE] = TARGET_EBADE,
477 [EBADR] = TARGET_EBADR,
478 [EXFULL] = TARGET_EXFULL,
479 [ENOANO] = TARGET_ENOANO,
480 [EBADRQC] = TARGET_EBADRQC,
481 [EBADSLT] = TARGET_EBADSLT,
482 [EBFONT] = TARGET_EBFONT,
483 [ENOSTR] = TARGET_ENOSTR,
484 [ENODATA] = TARGET_ENODATA,
485 [ETIME] = TARGET_ETIME,
486 [ENOSR] = TARGET_ENOSR,
487 [ENONET] = TARGET_ENONET,
488 [ENOPKG] = TARGET_ENOPKG,
489 [EREMOTE] = TARGET_EREMOTE,
490 [ENOLINK] = TARGET_ENOLINK,
491 [EADV] = TARGET_EADV,
492 [ESRMNT] = TARGET_ESRMNT,
493 [ECOMM] = TARGET_ECOMM,
494 [EPROTO] = TARGET_EPROTO,
495 [EDOTDOT] = TARGET_EDOTDOT,
496 [EMULTIHOP] = TARGET_EMULTIHOP,
497 [EBADMSG] = TARGET_EBADMSG,
498 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
499 [EOVERFLOW] = TARGET_EOVERFLOW,
500 [ENOTUNIQ] = TARGET_ENOTUNIQ,
501 [EBADFD] = TARGET_EBADFD,
502 [EREMCHG] = TARGET_EREMCHG,
503 [ELIBACC] = TARGET_ELIBACC,
504 [ELIBBAD] = TARGET_ELIBBAD,
505 [ELIBSCN] = TARGET_ELIBSCN,
506 [ELIBMAX] = TARGET_ELIBMAX,
507 [ELIBEXEC] = TARGET_ELIBEXEC,
508 [EILSEQ] = TARGET_EILSEQ,
509 [ENOSYS] = TARGET_ENOSYS,
510 [ELOOP] = TARGET_ELOOP,
511 [ERESTART] = TARGET_ERESTART,
512 [ESTRPIPE] = TARGET_ESTRPIPE,
513 [ENOTEMPTY] = TARGET_ENOTEMPTY,
514 [EUSERS] = TARGET_EUSERS,
515 [ENOTSOCK] = TARGET_ENOTSOCK,
516 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
517 [EMSGSIZE] = TARGET_EMSGSIZE,
518 [EPROTOTYPE] = TARGET_EPROTOTYPE,
519 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
520 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
521 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
522 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
523 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
524 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
525 [EADDRINUSE] = TARGET_EADDRINUSE,
526 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
527 [ENETDOWN] = TARGET_ENETDOWN,
528 [ENETUNREACH] = TARGET_ENETUNREACH,
529 [ENETRESET] = TARGET_ENETRESET,
530 [ECONNABORTED] = TARGET_ECONNABORTED,
531 [ECONNRESET] = TARGET_ECONNRESET,
532 [ENOBUFS] = TARGET_ENOBUFS,
533 [EISCONN] = TARGET_EISCONN,
534 [ENOTCONN] = TARGET_ENOTCONN,
535 [EUCLEAN] = TARGET_EUCLEAN,
536 [ENOTNAM] = TARGET_ENOTNAM,
537 [ENAVAIL] = TARGET_ENAVAIL,
538 [EISNAM] = TARGET_EISNAM,
539 [EREMOTEIO] = TARGET_EREMOTEIO,
540 [ESHUTDOWN] = TARGET_ESHUTDOWN,
541 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
542 [ETIMEDOUT] = TARGET_ETIMEDOUT,
543 [ECONNREFUSED] = TARGET_ECONNREFUSED,
544 [EHOSTDOWN] = TARGET_EHOSTDOWN,
545 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
546 [EALREADY] = TARGET_EALREADY,
547 [EINPROGRESS] = TARGET_EINPROGRESS,
548 [ESTALE] = TARGET_ESTALE,
549 [ECANCELED] = TARGET_ECANCELED,
550 [ENOMEDIUM] = TARGET_ENOMEDIUM,
551 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
552 #ifdef ENOKEY
553 [ENOKEY] = TARGET_ENOKEY,
554 #endif
555 #ifdef EKEYEXPIRED
556 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
557 #endif
558 #ifdef EKEYREVOKED
559 [EKEYREVOKED] = TARGET_EKEYREVOKED,
560 #endif
561 #ifdef EKEYREJECTED
562 [EKEYREJECTED] = TARGET_EKEYREJECTED,
563 #endif
564 #ifdef EOWNERDEAD
565 [EOWNERDEAD] = TARGET_EOWNERDEAD,
566 #endif
567 #ifdef ENOTRECOVERABLE
568 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
569 #endif
570 };
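/*
 * Sketch of the initialization mentioned above: syscall_init() (further
 * down in this file, outside this excerpt) builds the reverse table by
 * inverting host_to_target_errno_table[], so untranslated values fall
 * back to the identity mapping in both helpers below.  Roughly:
 *
 *     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
 *         if (host_to_target_errno_table[i] != 0) {
 *             target_to_host_errno_table[host_to_target_errno_table[i]] = i;
 *         }
 *     }
 */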
571
572 static inline int host_to_target_errno(int err)
573 {
574 if(host_to_target_errno_table[err])
575 return host_to_target_errno_table[err];
576 return err;
577 }
578
579 static inline int target_to_host_errno(int err)
580 {
581 if (target_to_host_errno_table[err])
582 return target_to_host_errno_table[err];
583 return err;
584 }
585
586 static inline abi_long get_errno(abi_long ret)
587 {
588 if (ret == -1)
589 return -host_to_target_errno(errno);
590 else
591 return ret;
592 }
593
594 static inline int is_error(abi_long ret)
595 {
596 return (abi_ulong)ret >= (abi_ulong)(-4096);
597 }
598
599 char *target_strerror(int err)
600 {
601 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
602 return NULL;
603 }
604 return strerror(target_to_host_errno(err));
605 }
606
607 static abi_ulong target_brk;
608 static abi_ulong target_original_brk;
609 static abi_ulong brk_page;
610
611 void target_set_brk(abi_ulong new_brk)
612 {
613 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
614 brk_page = HOST_PAGE_ALIGN(target_brk);
615 }
616
617 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
618 #define DEBUGF_BRK(message, args...)
619
620 /* do_brk() must return target values and target errnos. */
621 abi_long do_brk(abi_ulong new_brk)
622 {
623 abi_long mapped_addr;
624 int new_alloc_size;
625
626 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
627
628 if (!new_brk) {
629 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
630 return target_brk;
631 }
632 if (new_brk < target_original_brk) {
633 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
634 target_brk);
635 return target_brk;
636 }
637
638 /* If the new brk is less than the highest page reserved to the
639 * target heap allocation, set it and we're almost done... */
640 if (new_brk <= brk_page) {
641 /* Heap contents are initialized to zero, as for anonymous
642 * mapped pages. */
643 if (new_brk > target_brk) {
644 memset(g2h(target_brk), 0, new_brk - target_brk);
645 }
646 target_brk = new_brk;
647 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
648 return target_brk;
649 }
650
651 /* We need to allocate more memory after the brk... Note that
652 * we don't use MAP_FIXED because that will map over the top of
653 * any existing mapping (like the one with the host libc or qemu
654 * itself); instead we treat "mapped but at wrong address" as
655 * a failure and unmap again.
656 */
657 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
658 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
659 PROT_READ|PROT_WRITE,
660 MAP_ANON|MAP_PRIVATE, 0, 0));
661
662 if (mapped_addr == brk_page) {
663 /* Heap contents are initialized to zero, as for anonymous
664 * mapped pages. Technically the new pages are already
665 * initialized to zero since they *are* anonymous mapped
666 * pages, however we have to take care with the contents that
667 * come from the remaining part of the previous page: it may
668 * contain garbage data due to a previous heap usage (grown
669 * then shrunken). */
670 memset(g2h(target_brk), 0, brk_page - target_brk);
671
672 target_brk = new_brk;
673 brk_page = HOST_PAGE_ALIGN(target_brk);
674 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
675 target_brk);
676 return target_brk;
677 } else if (mapped_addr != -1) {
678 /* Mapped but at wrong address, meaning there wasn't actually
679 * enough space for this brk.
680 */
681 target_munmap(mapped_addr, new_alloc_size);
682 mapped_addr = -1;
683 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
684 }
685 else {
686 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
687 }
688
689 #if defined(TARGET_ALPHA)
690 /* We (partially) emulate OSF/1 on Alpha, which requires we
691 return a proper errno, not an unchanged brk value. */
692 return -TARGET_ENOMEM;
693 #endif
694 /* For everything else, return the previous break. */
695 return target_brk;
696 }
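/*
 * Worked example of the arithmetic above (addresses invented for
 * illustration, 4 KiB host pages): if target_set_brk() was called with
 * 0x40001234, both target_brk and brk_page start at 0x40002000.  A
 * later do_brk(0x40003800) does not fit below brk_page, so
 * new_alloc_size = HOST_PAGE_ALIGN(0x1800) = 0x2000 and target_mmap()
 * is asked for two pages at 0x40002000; on success target_brk becomes
 * 0x40003800 and brk_page advances to 0x40004000.
 */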
697
698 static inline abi_long copy_from_user_fdset(fd_set *fds,
699 abi_ulong target_fds_addr,
700 int n)
701 {
702 int i, nw, j, k;
703 abi_ulong b, *target_fds;
704
705 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
706 if (!(target_fds = lock_user(VERIFY_READ,
707 target_fds_addr,
708 sizeof(abi_ulong) * nw,
709 1)))
710 return -TARGET_EFAULT;
711
712 FD_ZERO(fds);
713 k = 0;
714 for (i = 0; i < nw; i++) {
715 /* grab the abi_ulong */
716 __get_user(b, &target_fds[i]);
717 for (j = 0; j < TARGET_ABI_BITS; j++) {
718 /* check the bit inside the abi_ulong */
719 if ((b >> j) & 1)
720 FD_SET(k, fds);
721 k++;
722 }
723 }
724
725 unlock_user(target_fds, target_fds_addr, 0);
726
727 return 0;
728 }
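/*
 * Small worked example for the repacking above: with TARGET_ABI_BITS ==
 * 32 and n == 70, nw = (70 + 31) / 32 = 3 abi_ulongs are locked.  Bit j
 * of word i describes descriptor k = i * 32 + j, so a guest that set
 * fds 1 and 33 hands over the words 0x00000002, 0x00000002, 0x00000000
 * (in target byte order, which __get_user() converts).
 */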
729
730 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
731 abi_ulong target_fds_addr,
732 int n)
733 {
734 if (target_fds_addr) {
735 if (copy_from_user_fdset(fds, target_fds_addr, n))
736 return -TARGET_EFAULT;
737 *fds_ptr = fds;
738 } else {
739 *fds_ptr = NULL;
740 }
741 return 0;
742 }
743
744 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
745 const fd_set *fds,
746 int n)
747 {
748 int i, nw, j, k;
749 abi_long v;
750 abi_ulong *target_fds;
751
752 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
753 if (!(target_fds = lock_user(VERIFY_WRITE,
754 target_fds_addr,
755 sizeof(abi_ulong) * nw,
756 0)))
757 return -TARGET_EFAULT;
758
759 k = 0;
760 for (i = 0; i < nw; i++) {
761 v = 0;
762 for (j = 0; j < TARGET_ABI_BITS; j++) {
763 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
764 k++;
765 }
766 __put_user(v, &target_fds[i]);
767 }
768
769 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
770
771 return 0;
772 }
773
774 #if defined(__alpha__)
775 #define HOST_HZ 1024
776 #else
777 #define HOST_HZ 100
778 #endif
779
780 static inline abi_long host_to_target_clock_t(long ticks)
781 {
782 #if HOST_HZ == TARGET_HZ
783 return ticks;
784 #else
785 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
786 #endif
787 }
788
789 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
790 const struct rusage *rusage)
791 {
792 struct target_rusage *target_rusage;
793
794 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
795 return -TARGET_EFAULT;
796 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
797 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
798 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
799 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
800 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
801 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
802 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
803 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
804 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
805 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
806 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
807 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
808 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
809 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
810 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
811 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
812 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
813 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
814 unlock_user_struct(target_rusage, target_addr, 1);
815
816 return 0;
817 }
818
819 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
820 {
821 abi_ulong target_rlim_swap;
822 rlim_t result;
823
824 target_rlim_swap = tswapal(target_rlim);
825 if (target_rlim_swap == TARGET_RLIM_INFINITY)
826 return RLIM_INFINITY;
827
828 result = target_rlim_swap;
829 if (target_rlim_swap != (rlim_t)result)
830 return RLIM_INFINITY;
831
832 return result;
833 }
834
835 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
836 {
837 abi_ulong target_rlim_swap;
838 abi_ulong result;
839
840 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
841 target_rlim_swap = TARGET_RLIM_INFINITY;
842 else
843 target_rlim_swap = rlim;
844 result = tswapal(target_rlim_swap);
845
846 return result;
847 }
848
849 static inline int target_to_host_resource(int code)
850 {
851 switch (code) {
852 case TARGET_RLIMIT_AS:
853 return RLIMIT_AS;
854 case TARGET_RLIMIT_CORE:
855 return RLIMIT_CORE;
856 case TARGET_RLIMIT_CPU:
857 return RLIMIT_CPU;
858 case TARGET_RLIMIT_DATA:
859 return RLIMIT_DATA;
860 case TARGET_RLIMIT_FSIZE:
861 return RLIMIT_FSIZE;
862 case TARGET_RLIMIT_LOCKS:
863 return RLIMIT_LOCKS;
864 case TARGET_RLIMIT_MEMLOCK:
865 return RLIMIT_MEMLOCK;
866 case TARGET_RLIMIT_MSGQUEUE:
867 return RLIMIT_MSGQUEUE;
868 case TARGET_RLIMIT_NICE:
869 return RLIMIT_NICE;
870 case TARGET_RLIMIT_NOFILE:
871 return RLIMIT_NOFILE;
872 case TARGET_RLIMIT_NPROC:
873 return RLIMIT_NPROC;
874 case TARGET_RLIMIT_RSS:
875 return RLIMIT_RSS;
876 case TARGET_RLIMIT_RTPRIO:
877 return RLIMIT_RTPRIO;
878 case TARGET_RLIMIT_SIGPENDING:
879 return RLIMIT_SIGPENDING;
880 case TARGET_RLIMIT_STACK:
881 return RLIMIT_STACK;
882 default:
883 return code;
884 }
885 }
886
887 static inline abi_long copy_from_user_timeval(struct timeval *tv,
888 abi_ulong target_tv_addr)
889 {
890 struct target_timeval *target_tv;
891
892 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
893 return -TARGET_EFAULT;
894
895 __get_user(tv->tv_sec, &target_tv->tv_sec);
896 __get_user(tv->tv_usec, &target_tv->tv_usec);
897
898 unlock_user_struct(target_tv, target_tv_addr, 0);
899
900 return 0;
901 }
902
903 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
904 const struct timeval *tv)
905 {
906 struct target_timeval *target_tv;
907
908 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
909 return -TARGET_EFAULT;
910
911 __put_user(tv->tv_sec, &target_tv->tv_sec);
912 __put_user(tv->tv_usec, &target_tv->tv_usec);
913
914 unlock_user_struct(target_tv, target_tv_addr, 1);
915
916 return 0;
917 }
918
919 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
920 #include <mqueue.h>
921
922 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
923 abi_ulong target_mq_attr_addr)
924 {
925 struct target_mq_attr *target_mq_attr;
926
927 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
928 target_mq_attr_addr, 1))
929 return -TARGET_EFAULT;
930
931 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
932 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
933 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
934 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
935
936 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
937
938 return 0;
939 }
940
941 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
942 const struct mq_attr *attr)
943 {
944 struct target_mq_attr *target_mq_attr;
945
946 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
947 target_mq_attr_addr, 0))
948 return -TARGET_EFAULT;
949
950 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
951 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
952 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
953 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
954
955 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
956
957 return 0;
958 }
959 #endif
960
961 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
962 /* do_select() must return target values and target errnos. */
963 static abi_long do_select(int n,
964 abi_ulong rfd_addr, abi_ulong wfd_addr,
965 abi_ulong efd_addr, abi_ulong target_tv_addr)
966 {
967 fd_set rfds, wfds, efds;
968 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
969 struct timeval tv, *tv_ptr;
970 abi_long ret;
971
972 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
973 if (ret) {
974 return ret;
975 }
976 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
977 if (ret) {
978 return ret;
979 }
980 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
981 if (ret) {
982 return ret;
983 }
984
985 if (target_tv_addr) {
986 if (copy_from_user_timeval(&tv, target_tv_addr))
987 return -TARGET_EFAULT;
988 tv_ptr = &tv;
989 } else {
990 tv_ptr = NULL;
991 }
992
993 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
994
995 if (!is_error(ret)) {
996 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
997 return -TARGET_EFAULT;
998 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
999 return -TARGET_EFAULT;
1000 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1001 return -TARGET_EFAULT;
1002
1003 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1004 return -TARGET_EFAULT;
1005 }
1006
1007 return ret;
1008 }
1009 #endif
1010
1011 static abi_long do_pipe2(int host_pipe[], int flags)
1012 {
1013 #ifdef CONFIG_PIPE2
1014 return pipe2(host_pipe, flags);
1015 #else
1016 return -ENOSYS;
1017 #endif
1018 }
1019
1020 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1021 int flags, int is_pipe2)
1022 {
1023 int host_pipe[2];
1024 abi_long ret;
1025 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1026
1027 if (is_error(ret))
1028 return get_errno(ret);
1029
1030 /* Several targets have special calling conventions for the original
1031 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1032 if (!is_pipe2) {
1033 #if defined(TARGET_ALPHA)
1034 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1035 return host_pipe[0];
1036 #elif defined(TARGET_MIPS)
1037 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1038 return host_pipe[0];
1039 #elif defined(TARGET_SH4)
1040 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1041 return host_pipe[0];
1042 #endif
1043 }
1044
1045 if (put_user_s32(host_pipe[0], pipedes)
1046 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1047 return -TARGET_EFAULT;
1048 return get_errno(ret);
1049 }
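/*
 * Concrete illustration of the convention handled above: on a MIPS
 * guest the legacy pipe() syscall returns the read end as the normal
 * return value and the write end in the v1 register (active_tc.gpr[3]
 * here), instead of storing both descriptors through the user pointer;
 * Alpha (IR_A4) and SH4 (gregs[1]) behave analogously.  pipe2() always
 * uses the memory-based interface, hence the is_pipe2 check.
 */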
1050
1051 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1052 abi_ulong target_addr,
1053 socklen_t len)
1054 {
1055 struct target_ip_mreqn *target_smreqn;
1056
1057 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1058 if (!target_smreqn)
1059 return -TARGET_EFAULT;
1060 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1061 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1062 if (len == sizeof(struct target_ip_mreqn))
1063 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1064 unlock_user(target_smreqn, target_addr, 0);
1065
1066 return 0;
1067 }
1068
1069 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1070 abi_ulong target_addr,
1071 socklen_t len)
1072 {
1073 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1074 sa_family_t sa_family;
1075 struct target_sockaddr *target_saddr;
1076
1077 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1078 if (!target_saddr)
1079 return -TARGET_EFAULT;
1080
1081 sa_family = tswap16(target_saddr->sa_family);
1082
1083 /* Oops. The caller might send an incomplete sun_path; sun_path
1084 * must be terminated by \0 (see the manual page), but
1085 * unfortunately it is quite common to specify sockaddr_un
1086 * length as "strlen(x->sun_path)" while it should be
1087 * "strlen(...) + 1". We'll fix that here if needed.
1088 * Linux kernel has a similar feature.
1089 */
1090
1091 if (sa_family == AF_UNIX) {
1092 if (len < unix_maxlen && len > 0) {
1093 char *cp = (char*)target_saddr;
1094
1095 if ( cp[len-1] && !cp[len] )
1096 len++;
1097 }
1098 if (len > unix_maxlen)
1099 len = unix_maxlen;
1100 }
1101
1102 memcpy(addr, target_saddr, len);
1103 addr->sa_family = sa_family;
1104 unlock_user(target_saddr, target_addr, 0);
1105
1106 return 0;
1107 }
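/*
 * Worked example of the AF_UNIX length fixup above: for the path
 * "/tmp/sock" a guest commonly passes len == offsetof(struct
 * sockaddr_un, sun_path) + 9, i.e. without room for the trailing '\0'.
 * Since cp[len-1] is 'k' and the following guest byte is already 0,
 * len is bumped by one so the host bind()/connect() sees a
 * NUL-terminated sun_path, and it is finally capped at
 * sizeof(struct sockaddr_un).
 */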
1108
1109 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1110 struct sockaddr *addr,
1111 socklen_t len)
1112 {
1113 struct target_sockaddr *target_saddr;
1114
1115 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1116 if (!target_saddr)
1117 return -TARGET_EFAULT;
1118 memcpy(target_saddr, addr, len);
1119 target_saddr->sa_family = tswap16(addr->sa_family);
1120 unlock_user(target_saddr, target_addr, len);
1121
1122 return 0;
1123 }
1124
1125 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1126 struct target_msghdr *target_msgh)
1127 {
1128 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1129 abi_long msg_controllen;
1130 abi_ulong target_cmsg_addr;
1131 struct target_cmsghdr *target_cmsg;
1132 socklen_t space = 0;
1133
1134 msg_controllen = tswapal(target_msgh->msg_controllen);
1135 if (msg_controllen < sizeof (struct target_cmsghdr))
1136 goto the_end;
1137 target_cmsg_addr = tswapal(target_msgh->msg_control);
1138 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1139 if (!target_cmsg)
1140 return -TARGET_EFAULT;
1141
1142 while (cmsg && target_cmsg) {
1143 void *data = CMSG_DATA(cmsg);
1144 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1145
1146 int len = tswapal(target_cmsg->cmsg_len)
1147 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1148
1149 space += CMSG_SPACE(len);
1150 if (space > msgh->msg_controllen) {
1151 space -= CMSG_SPACE(len);
1152 gemu_log("Host cmsg overflow\n");
1153 break;
1154 }
1155
1156 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1157 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1158 cmsg->cmsg_len = CMSG_LEN(len);
1159
1160 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1161 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1162 memcpy(data, target_data, len);
1163 } else {
1164 int *fd = (int *)data;
1165 int *target_fd = (int *)target_data;
1166 int i, numfds = len / sizeof(int);
1167
1168 for (i = 0; i < numfds; i++)
1169 fd[i] = tswap32(target_fd[i]);
1170 }
1171
1172 cmsg = CMSG_NXTHDR(msgh, cmsg);
1173 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1174 }
1175 unlock_user(target_cmsg, target_cmsg_addr, 0);
1176 the_end:
1177 msgh->msg_controllen = space;
1178 return 0;
1179 }
1180
1181 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1182 struct msghdr *msgh)
1183 {
1184 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1185 abi_long msg_controllen;
1186 abi_ulong target_cmsg_addr;
1187 struct target_cmsghdr *target_cmsg;
1188 socklen_t space = 0;
1189
1190 msg_controllen = tswapal(target_msgh->msg_controllen);
1191 if (msg_controllen < sizeof (struct target_cmsghdr))
1192 goto the_end;
1193 target_cmsg_addr = tswapal(target_msgh->msg_control);
1194 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1195 if (!target_cmsg)
1196 return -TARGET_EFAULT;
1197
1198 while (cmsg && target_cmsg) {
1199 void *data = CMSG_DATA(cmsg);
1200 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1201
1202 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1203
1204 space += TARGET_CMSG_SPACE(len);
1205 if (space > msg_controllen) {
1206 space -= TARGET_CMSG_SPACE(len);
1207 gemu_log("Target cmsg overflow\n");
1208 break;
1209 }
1210
1211 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1212 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1213 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1214
1215 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1216 (cmsg->cmsg_type == SCM_RIGHTS)) {
1217 int *fd = (int *)data;
1218 int *target_fd = (int *)target_data;
1219 int i, numfds = len / sizeof(int);
1220
1221 for (i = 0; i < numfds; i++)
1222 target_fd[i] = tswap32(fd[i]);
1223 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1224 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1225 (len == sizeof(struct timeval))) {
1226 /* copy struct timeval to target */
1227 struct timeval *tv = (struct timeval *)data;
1228 struct target_timeval *target_tv =
1229 (struct target_timeval *)target_data;
1230
1231 target_tv->tv_sec = tswapal(tv->tv_sec);
1232 target_tv->tv_usec = tswapal(tv->tv_usec);
1233 } else {
1234 gemu_log("Unsupported ancillary data: %d/%d\n",
1235 cmsg->cmsg_level, cmsg->cmsg_type);
1236 memcpy(target_data, data, len);
1237 }
1238
1239 cmsg = CMSG_NXTHDR(msgh, cmsg);
1240 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1241 }
1242 unlock_user(target_cmsg, target_cmsg_addr, space);
1243 the_end:
1244 target_msgh->msg_controllen = tswapal(space);
1245 return 0;
1246 }
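/*
 * Example of the ancillary-data handling above, from the guest side
 * (a hedged sketch using the standard CMSG_* macros): a guest passing
 * one descriptor over a Unix socket builds
 *
 *     char cbuf[CMSG_SPACE(sizeof(int))];
 *     struct msghdr msg = { 0 };
 *     struct cmsghdr *c;
 *     msg.msg_control = cbuf;
 *     msg.msg_controllen = sizeof(cbuf);
 *     c = CMSG_FIRSTHDR(&msg);
 *     c->cmsg_level = SOL_SOCKET;
 *     c->cmsg_type = SCM_RIGHTS;
 *     c->cmsg_len = CMSG_LEN(sizeof(int));
 *     memcpy(CMSG_DATA(c), &fd, sizeof(int));
 *
 * and the converters above rewrite the level/type/len headers between
 * target and host layout and tswap32() the descriptor payload itself;
 * anything other than SCM_RIGHTS (or, on receive, SO_TIMESTAMP) is
 * copied verbatim with a warning.
 */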
1247
1248 /* do_setsockopt() Must return target values and target errnos. */
1249 static abi_long do_setsockopt(int sockfd, int level, int optname,
1250 abi_ulong optval_addr, socklen_t optlen)
1251 {
1252 abi_long ret;
1253 int val;
1254 struct ip_mreqn *ip_mreq;
1255 struct ip_mreq_source *ip_mreq_source;
1256
1257 switch(level) {
1258 case SOL_TCP:
1259 /* TCP options all take an 'int' value. */
1260 if (optlen < sizeof(uint32_t))
1261 return -TARGET_EINVAL;
1262
1263 if (get_user_u32(val, optval_addr))
1264 return -TARGET_EFAULT;
1265 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1266 break;
1267 case SOL_IP:
1268 switch(optname) {
1269 case IP_TOS:
1270 case IP_TTL:
1271 case IP_HDRINCL:
1272 case IP_ROUTER_ALERT:
1273 case IP_RECVOPTS:
1274 case IP_RETOPTS:
1275 case IP_PKTINFO:
1276 case IP_MTU_DISCOVER:
1277 case IP_RECVERR:
1278 case IP_RECVTOS:
1279 #ifdef IP_FREEBIND
1280 case IP_FREEBIND:
1281 #endif
1282 case IP_MULTICAST_TTL:
1283 case IP_MULTICAST_LOOP:
1284 val = 0;
1285 if (optlen >= sizeof(uint32_t)) {
1286 if (get_user_u32(val, optval_addr))
1287 return -TARGET_EFAULT;
1288 } else if (optlen >= 1) {
1289 if (get_user_u8(val, optval_addr))
1290 return -TARGET_EFAULT;
1291 }
1292 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1293 break;
1294 case IP_ADD_MEMBERSHIP:
1295 case IP_DROP_MEMBERSHIP:
1296 if (optlen < sizeof (struct target_ip_mreq) ||
1297 optlen > sizeof (struct target_ip_mreqn))
1298 return -TARGET_EINVAL;
1299
1300 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1301 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1302 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1303 break;
1304
1305 case IP_BLOCK_SOURCE:
1306 case IP_UNBLOCK_SOURCE:
1307 case IP_ADD_SOURCE_MEMBERSHIP:
1308 case IP_DROP_SOURCE_MEMBERSHIP:
1309 if (optlen != sizeof (struct target_ip_mreq_source))
1310 return -TARGET_EINVAL;
1311
1312 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1313 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1314 unlock_user (ip_mreq_source, optval_addr, 0);
1315 break;
1316
1317 default:
1318 goto unimplemented;
1319 }
1320 break;
1321 case SOL_RAW:
1322 switch (optname) {
1323 case ICMP_FILTER:
1324 /* struct icmp_filter takes an u32 value */
1325 if (optlen < sizeof(uint32_t)) {
1326 return -TARGET_EINVAL;
1327 }
1328
1329 if (get_user_u32(val, optval_addr)) {
1330 return -TARGET_EFAULT;
1331 }
1332 ret = get_errno(setsockopt(sockfd, level, optname,
1333 &val, sizeof(val)));
1334 break;
1335
1336 default:
1337 goto unimplemented;
1338 }
1339 break;
1340 case TARGET_SOL_SOCKET:
1341 switch (optname) {
1342 case TARGET_SO_RCVTIMEO:
1343 {
1344 struct timeval tv;
1345
1346 optname = SO_RCVTIMEO;
1347
1348 set_timeout:
1349 if (optlen != sizeof(struct target_timeval)) {
1350 return -TARGET_EINVAL;
1351 }
1352
1353 if (copy_from_user_timeval(&tv, optval_addr)) {
1354 return -TARGET_EFAULT;
1355 }
1356
1357 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1358 &tv, sizeof(tv)));
1359 return ret;
1360 }
1361 case TARGET_SO_SNDTIMEO:
1362 optname = SO_SNDTIMEO;
1363 goto set_timeout;
1364 /* Options with 'int' argument. */
1365 case TARGET_SO_DEBUG:
1366 optname = SO_DEBUG;
1367 break;
1368 case TARGET_SO_REUSEADDR:
1369 optname = SO_REUSEADDR;
1370 break;
1371 case TARGET_SO_TYPE:
1372 optname = SO_TYPE;
1373 break;
1374 case TARGET_SO_ERROR:
1375 optname = SO_ERROR;
1376 break;
1377 case TARGET_SO_DONTROUTE:
1378 optname = SO_DONTROUTE;
1379 break;
1380 case TARGET_SO_BROADCAST:
1381 optname = SO_BROADCAST;
1382 break;
1383 case TARGET_SO_SNDBUF:
1384 optname = SO_SNDBUF;
1385 break;
1386 case TARGET_SO_RCVBUF:
1387 optname = SO_RCVBUF;
1388 break;
1389 case TARGET_SO_KEEPALIVE:
1390 optname = SO_KEEPALIVE;
1391 break;
1392 case TARGET_SO_OOBINLINE:
1393 optname = SO_OOBINLINE;
1394 break;
1395 case TARGET_SO_NO_CHECK:
1396 optname = SO_NO_CHECK;
1397 break;
1398 case TARGET_SO_PRIORITY:
1399 optname = SO_PRIORITY;
1400 break;
1401 #ifdef SO_BSDCOMPAT
1402 case TARGET_SO_BSDCOMPAT:
1403 optname = SO_BSDCOMPAT;
1404 break;
1405 #endif
1406 case TARGET_SO_PASSCRED:
1407 optname = SO_PASSCRED;
1408 break;
1409 case TARGET_SO_TIMESTAMP:
1410 optname = SO_TIMESTAMP;
1411 break;
1412 case TARGET_SO_RCVLOWAT:
1413 optname = SO_RCVLOWAT;
1414 break;
1416 default:
1417 goto unimplemented;
1418 }
1419 if (optlen < sizeof(uint32_t))
1420 return -TARGET_EINVAL;
1421
1422 if (get_user_u32(val, optval_addr))
1423 return -TARGET_EFAULT;
1424 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1425 break;
1426 default:
1427 unimplemented:
1428 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1429 ret = -TARGET_ENOPROTOOPT;
1430 }
1431 return ret;
1432 }
1433
1434 /* do_getsockopt() Must return target values and target errnos. */
1435 static abi_long do_getsockopt(int sockfd, int level, int optname,
1436 abi_ulong optval_addr, abi_ulong optlen)
1437 {
1438 abi_long ret;
1439 int len, val;
1440 socklen_t lv;
1441
1442 switch(level) {
1443 case TARGET_SOL_SOCKET:
1444 level = SOL_SOCKET;
1445 switch (optname) {
1446 /* These don't just return a single integer */
1447 case TARGET_SO_LINGER:
1448 case TARGET_SO_RCVTIMEO:
1449 case TARGET_SO_SNDTIMEO:
1450 case TARGET_SO_PEERNAME:
1451 goto unimplemented;
1452 case TARGET_SO_PEERCRED: {
1453 struct ucred cr;
1454 socklen_t crlen;
1455 struct target_ucred *tcr;
1456
1457 if (get_user_u32(len, optlen)) {
1458 return -TARGET_EFAULT;
1459 }
1460 if (len < 0) {
1461 return -TARGET_EINVAL;
1462 }
1463
1464 crlen = sizeof(cr);
1465 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1466 &cr, &crlen));
1467 if (ret < 0) {
1468 return ret;
1469 }
1470 if (len > crlen) {
1471 len = crlen;
1472 }
1473 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1474 return -TARGET_EFAULT;
1475 }
1476 __put_user(cr.pid, &tcr->pid);
1477 __put_user(cr.uid, &tcr->uid);
1478 __put_user(cr.gid, &tcr->gid);
1479 unlock_user_struct(tcr, optval_addr, 1);
1480 if (put_user_u32(len, optlen)) {
1481 return -TARGET_EFAULT;
1482 }
1483 break;
1484 }
1485 /* Options with 'int' argument. */
1486 case TARGET_SO_DEBUG:
1487 optname = SO_DEBUG;
1488 goto int_case;
1489 case TARGET_SO_REUSEADDR:
1490 optname = SO_REUSEADDR;
1491 goto int_case;
1492 case TARGET_SO_TYPE:
1493 optname = SO_TYPE;
1494 goto int_case;
1495 case TARGET_SO_ERROR:
1496 optname = SO_ERROR;
1497 goto int_case;
1498 case TARGET_SO_DONTROUTE:
1499 optname = SO_DONTROUTE;
1500 goto int_case;
1501 case TARGET_SO_BROADCAST:
1502 optname = SO_BROADCAST;
1503 goto int_case;
1504 case TARGET_SO_SNDBUF:
1505 optname = SO_SNDBUF;
1506 goto int_case;
1507 case TARGET_SO_RCVBUF:
1508 optname = SO_RCVBUF;
1509 goto int_case;
1510 case TARGET_SO_KEEPALIVE:
1511 optname = SO_KEEPALIVE;
1512 goto int_case;
1513 case TARGET_SO_OOBINLINE:
1514 optname = SO_OOBINLINE;
1515 goto int_case;
1516 case TARGET_SO_NO_CHECK:
1517 optname = SO_NO_CHECK;
1518 goto int_case;
1519 case TARGET_SO_PRIORITY:
1520 optname = SO_PRIORITY;
1521 goto int_case;
1522 #ifdef SO_BSDCOMPAT
1523 case TARGET_SO_BSDCOMPAT:
1524 optname = SO_BSDCOMPAT;
1525 goto int_case;
1526 #endif
1527 case TARGET_SO_PASSCRED:
1528 optname = SO_PASSCRED;
1529 goto int_case;
1530 case TARGET_SO_TIMESTAMP:
1531 optname = SO_TIMESTAMP;
1532 goto int_case;
1533 case TARGET_SO_RCVLOWAT:
1534 optname = SO_RCVLOWAT;
1535 goto int_case;
1536 default:
1537 goto int_case;
1538 }
1539 break;
1540 case SOL_TCP:
1541 /* TCP options all take an 'int' value. */
1542 int_case:
1543 if (get_user_u32(len, optlen))
1544 return -TARGET_EFAULT;
1545 if (len < 0)
1546 return -TARGET_EINVAL;
1547 lv = sizeof(lv);
1548 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1549 if (ret < 0)
1550 return ret;
1551 if (len > lv)
1552 len = lv;
1553 if (len == 4) {
1554 if (put_user_u32(val, optval_addr))
1555 return -TARGET_EFAULT;
1556 } else {
1557 if (put_user_u8(val, optval_addr))
1558 return -TARGET_EFAULT;
1559 }
1560 if (put_user_u32(len, optlen))
1561 return -TARGET_EFAULT;
1562 break;
1563 case SOL_IP:
1564 switch(optname) {
1565 case IP_TOS:
1566 case IP_TTL:
1567 case IP_HDRINCL:
1568 case IP_ROUTER_ALERT:
1569 case IP_RECVOPTS:
1570 case IP_RETOPTS:
1571 case IP_PKTINFO:
1572 case IP_MTU_DISCOVER:
1573 case IP_RECVERR:
1574 case IP_RECVTOS:
1575 #ifdef IP_FREEBIND
1576 case IP_FREEBIND:
1577 #endif
1578 case IP_MULTICAST_TTL:
1579 case IP_MULTICAST_LOOP:
1580 if (get_user_u32(len, optlen))
1581 return -TARGET_EFAULT;
1582 if (len < 0)
1583 return -TARGET_EINVAL;
1584 lv = sizeof(lv);
1585 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1586 if (ret < 0)
1587 return ret;
1588 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1589 len = 1;
1590 if (put_user_u32(len, optlen)
1591 || put_user_u8(val, optval_addr))
1592 return -TARGET_EFAULT;
1593 } else {
1594 if (len > sizeof(int))
1595 len = sizeof(int);
1596 if (put_user_u32(len, optlen)
1597 || put_user_u32(val, optval_addr))
1598 return -TARGET_EFAULT;
1599 }
1600 break;
1601 default:
1602 ret = -TARGET_ENOPROTOOPT;
1603 break;
1604 }
1605 break;
1606 default:
1607 unimplemented:
1608 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1609 level, optname);
1610 ret = -TARGET_EOPNOTSUPP;
1611 break;
1612 }
1613 return ret;
1614 }
1615
1616 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1617 int count, int copy)
1618 {
1619 struct target_iovec *target_vec;
1620 struct iovec *vec;
1621 abi_ulong total_len, max_len;
1622 int i;
1623
1624 if (count == 0) {
1625 errno = 0;
1626 return NULL;
1627 }
1628 if (count < 0 || count > IOV_MAX) {
1629 errno = EINVAL;
1630 return NULL;
1631 }
1632
1633 vec = calloc(count, sizeof(struct iovec));
1634 if (vec == NULL) {
1635 errno = ENOMEM;
1636 return NULL;
1637 }
1638
1639 target_vec = lock_user(VERIFY_READ, target_addr,
1640 count * sizeof(struct target_iovec), 1);
1641 if (target_vec == NULL) {
1642 errno = EFAULT;
1643 goto fail2;
1644 }
1645
1646 /* ??? If host page size > target page size, this will result in a
1647 value larger than what we can actually support. */
1648 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1649 total_len = 0;
1650
1651 for (i = 0; i < count; i++) {
1652 abi_ulong base = tswapal(target_vec[i].iov_base);
1653 abi_long len = tswapal(target_vec[i].iov_len);
1654
1655 if (len < 0) {
1656 errno = EINVAL;
1657 goto fail;
1658 } else if (len == 0) {
1659 /* Zero length pointer is ignored. */
1660 vec[i].iov_base = 0;
1661 } else {
1662 vec[i].iov_base = lock_user(type, base, len, copy);
1663 if (!vec[i].iov_base) {
1664 errno = EFAULT;
1665 goto fail;
1666 }
1667 if (len > max_len - total_len) {
1668 len = max_len - total_len;
1669 }
1670 }
1671 vec[i].iov_len = len;
1672 total_len += len;
1673 }
1674
1675 unlock_user(target_vec, target_addr, 0);
1676 return vec;
1677
1678 fail:
1679 free(vec);
1680 fail2:
1681 unlock_user(target_vec, target_addr, 0);
1682 return NULL;
1683 }
1684
1685 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1686 int count, int copy)
1687 {
1688 struct target_iovec *target_vec;
1689 int i;
1690
1691 target_vec = lock_user(VERIFY_READ, target_addr,
1692 count * sizeof(struct target_iovec), 1);
1693 if (target_vec) {
1694 for (i = 0; i < count; i++) {
1695 abi_ulong base = tswapal(target_vec[i].iov_base);
1696 abi_long len = tswapal(target_vec[i].iov_len);
1697 if (len < 0) {
1698 break;
1699 }
1700 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1701 }
1702 unlock_user(target_vec, target_addr, 0);
1703 }
1704
1705 free(vec);
1706 }
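/*
 * Typical pairing of the two helpers above, as the readv emulation
 * later in this file uses them (simplified sketch; error handling
 * trimmed):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_WRITE, target_iov_addr, count, 0);
 *     if (vec != NULL) {
 *         ret = get_errno(readv(fd, vec, count));
 *         unlock_iovec(vec, target_iov_addr, count, 1);
 *     } else {
 *         ret = -host_to_target_errno(errno);
 *     }
 *
 * For writev the buffers are locked with VERIFY_READ and copy=1, and
 * unlocked with copy=0 since nothing needs copying back to the guest.
 */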
1707
1708 static inline void target_to_host_sock_type(int *type)
1709 {
1710 int host_type = 0;
1711 int target_type = *type;
1712
1713 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1714 case TARGET_SOCK_DGRAM:
1715 host_type = SOCK_DGRAM;
1716 break;
1717 case TARGET_SOCK_STREAM:
1718 host_type = SOCK_STREAM;
1719 break;
1720 default:
1721 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1722 break;
1723 }
1724 if (target_type & TARGET_SOCK_CLOEXEC) {
1725 host_type |= SOCK_CLOEXEC;
1726 }
1727 if (target_type & TARGET_SOCK_NONBLOCK) {
1728 host_type |= SOCK_NONBLOCK;
1729 }
1730 *type = host_type;
1731 }
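/*
 * Example of the translation above: a guest call such as
 * socket(AF_INET, TARGET_SOCK_DGRAM | TARGET_SOCK_NONBLOCK, 0) has its
 * base type mapped through the switch and the TARGET_SOCK_NONBLOCK /
 * TARGET_SOCK_CLOEXEC modifier bits re-ORed as the host's SOCK_NONBLOCK
 * and SOCK_CLOEXEC, since those flag values are not guaranteed to match
 * between guest and host ABIs.
 */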
1732
1733 /* do_socket() Must return target values and target errnos. */
1734 static abi_long do_socket(int domain, int type, int protocol)
1735 {
1736 target_to_host_sock_type(&type);
1737
1738 if (domain == PF_NETLINK)
1739 return -TARGET_EAFNOSUPPORT; /* NETLINK sockets are not supported */
1740 return get_errno(socket(domain, type, protocol));
1741 }
1742
1743 /* do_bind() Must return target values and target errnos. */
1744 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1745 socklen_t addrlen)
1746 {
1747 void *addr;
1748 abi_long ret;
1749
1750 if ((int)addrlen < 0) {
1751 return -TARGET_EINVAL;
1752 }
1753
1754 addr = alloca(addrlen+1);
1755
1756 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1757 if (ret)
1758 return ret;
1759
1760 return get_errno(bind(sockfd, addr, addrlen));
1761 }
1762
1763 /* do_connect() Must return target values and target errnos. */
1764 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1765 socklen_t addrlen)
1766 {
1767 void *addr;
1768 abi_long ret;
1769
1770 if ((int)addrlen < 0) {
1771 return -TARGET_EINVAL;
1772 }
1773
1774 addr = alloca(addrlen);
1775
1776 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1777 if (ret)
1778 return ret;
1779
1780 return get_errno(connect(sockfd, addr, addrlen));
1781 }
1782
1783 /* do_sendrecvmsg() Must return target values and target errnos. */
1784 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1785 int flags, int send)
1786 {
1787 abi_long ret, len;
1788 struct target_msghdr *msgp;
1789 struct msghdr msg;
1790 int count;
1791 struct iovec *vec;
1792 abi_ulong target_vec;
1793
1794 /* FIXME */
1795 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1796 msgp,
1797 target_msg,
1798 send ? 1 : 0))
1799 return -TARGET_EFAULT;
1800 if (msgp->msg_name) {
1801 msg.msg_namelen = tswap32(msgp->msg_namelen);
1802 msg.msg_name = alloca(msg.msg_namelen);
1803 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1804 msg.msg_namelen);
1805 if (ret) {
1806 goto out2;
1807 }
1808 } else {
1809 msg.msg_name = NULL;
1810 msg.msg_namelen = 0;
1811 }
1812 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1813 msg.msg_control = alloca(msg.msg_controllen);
1814 msg.msg_flags = tswap32(msgp->msg_flags);
1815
1816 count = tswapal(msgp->msg_iovlen);
1817 target_vec = tswapal(msgp->msg_iov);
1818 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1819 target_vec, count, send);
1820 if (vec == NULL) {
1821 ret = -host_to_target_errno(errno);
1822 goto out2;
1823 }
1824 msg.msg_iovlen = count;
1825 msg.msg_iov = vec;
1826
1827 if (send) {
1828 ret = target_to_host_cmsg(&msg, msgp);
1829 if (ret == 0)
1830 ret = get_errno(sendmsg(fd, &msg, flags));
1831 } else {
1832 ret = get_errno(recvmsg(fd, &msg, flags));
1833 if (!is_error(ret)) {
1834 len = ret;
1835 ret = host_to_target_cmsg(msgp, &msg);
1836 if (!is_error(ret)) {
1837 msgp->msg_namelen = tswap32(msg.msg_namelen);
1838 if (msg.msg_name != NULL) {
1839 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1840 msg.msg_name, msg.msg_namelen);
1841 if (ret) {
1842 goto out;
1843 }
1844 }
1845
1846 ret = len;
1847 }
1848 }
1849 }
1850
1851 out:
1852 unlock_iovec(vec, target_vec, count, !send);
1853 out2:
1854 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1855 return ret;
1856 }
1857
1858 /* If we don't have a system accept4() then just call accept.
1859 * The callsites to do_accept4() will ensure that they don't
1860 * pass a non-zero flags argument in this config.
1861 */
1862 #ifndef CONFIG_ACCEPT4
1863 static inline int accept4(int sockfd, struct sockaddr *addr,
1864 socklen_t *addrlen, int flags)
1865 {
1866 assert(flags == 0);
1867 return accept(sockfd, addr, addrlen);
1868 }
1869 #endif
1870
1871 /* do_accept4() Must return target values and target errnos. */
1872 static abi_long do_accept4(int fd, abi_ulong target_addr,
1873 abi_ulong target_addrlen_addr, int flags)
1874 {
1875 socklen_t addrlen;
1876 void *addr;
1877 abi_long ret;
1878
1879 if (target_addr == 0) {
1880 return get_errno(accept4(fd, NULL, NULL, flags));
1881 }
1882
1883 /* linux returns EINVAL if addrlen pointer is invalid */
1884 if (get_user_u32(addrlen, target_addrlen_addr))
1885 return -TARGET_EINVAL;
1886
1887 if ((int)addrlen < 0) {
1888 return -TARGET_EINVAL;
1889 }
1890
1891 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1892 return -TARGET_EINVAL;
1893
1894 addr = alloca(addrlen);
1895
1896 ret = get_errno(accept4(fd, addr, &addrlen, flags));
1897 if (!is_error(ret)) {
1898 host_to_target_sockaddr(target_addr, addr, addrlen);
1899 if (put_user_u32(addrlen, target_addrlen_addr))
1900 ret = -TARGET_EFAULT;
1901 }
1902 return ret;
1903 }
1904
1905 /* do_getpeername() Must return target values and target errnos. */
1906 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1907 abi_ulong target_addrlen_addr)
1908 {
1909 socklen_t addrlen;
1910 void *addr;
1911 abi_long ret;
1912
1913 if (get_user_u32(addrlen, target_addrlen_addr))
1914 return -TARGET_EFAULT;
1915
1916 if ((int)addrlen < 0) {
1917 return -TARGET_EINVAL;
1918 }
1919
1920 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1921 return -TARGET_EFAULT;
1922
1923 addr = alloca(addrlen);
1924
1925 ret = get_errno(getpeername(fd, addr, &addrlen));
1926 if (!is_error(ret)) {
1927 host_to_target_sockaddr(target_addr, addr, addrlen);
1928 if (put_user_u32(addrlen, target_addrlen_addr))
1929 ret = -TARGET_EFAULT;
1930 }
1931 return ret;
1932 }
1933
1934 /* do_getsockname() Must return target values and target errnos. */
1935 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1936 abi_ulong target_addrlen_addr)
1937 {
1938 socklen_t addrlen;
1939 void *addr;
1940 abi_long ret;
1941
1942 if (get_user_u32(addrlen, target_addrlen_addr))
1943 return -TARGET_EFAULT;
1944
1945 if ((int)addrlen < 0) {
1946 return -TARGET_EINVAL;
1947 }
1948
1949 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1950 return -TARGET_EFAULT;
1951
1952 addr = alloca(addrlen);
1953
1954 ret = get_errno(getsockname(fd, addr, &addrlen));
1955 if (!is_error(ret)) {
1956 host_to_target_sockaddr(target_addr, addr, addrlen);
1957 if (put_user_u32(addrlen, target_addrlen_addr))
1958 ret = -TARGET_EFAULT;
1959 }
1960 return ret;
1961 }
1962
1963 /* do_socketpair() must return target values and target errnos. */
1964 static abi_long do_socketpair(int domain, int type, int protocol,
1965 abi_ulong target_tab_addr)
1966 {
1967 int tab[2];
1968 abi_long ret;
1969
1970 target_to_host_sock_type(&type);
1971
1972 ret = get_errno(socketpair(domain, type, protocol, tab));
1973 if (!is_error(ret)) {
1974 if (put_user_s32(tab[0], target_tab_addr)
1975 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1976 ret = -TARGET_EFAULT;
1977 }
1978 return ret;
1979 }
1980
1981 /* do_sendto() must return target values and target errnos. */
1982 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1983 abi_ulong target_addr, socklen_t addrlen)
1984 {
1985 void *addr;
1986 void *host_msg;
1987 abi_long ret;
1988
1989 if ((int)addrlen < 0) {
1990 return -TARGET_EINVAL;
1991 }
1992
1993 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1994 if (!host_msg)
1995 return -TARGET_EFAULT;
1996 if (target_addr) {
1997 addr = alloca(addrlen);
1998 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1999 if (ret) {
2000 unlock_user(host_msg, msg, 0);
2001 return ret;
2002 }
2003 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2004 } else {
2005 ret = get_errno(send(fd, host_msg, len, flags));
2006 }
2007 unlock_user(host_msg, msg, 0);
2008 return ret;
2009 }
2010
2011 /* do_recvfrom() must return target values and target errnos. */
2012 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2013 abi_ulong target_addr,
2014 abi_ulong target_addrlen)
2015 {
2016 socklen_t addrlen;
2017 void *addr;
2018 void *host_msg;
2019 abi_long ret;
2020
2021 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2022 if (!host_msg)
2023 return -TARGET_EFAULT;
2024 if (target_addr) {
2025 if (get_user_u32(addrlen, target_addrlen)) {
2026 ret = -TARGET_EFAULT;
2027 goto fail;
2028 }
2029 if ((int)addrlen < 0) {
2030 ret = -TARGET_EINVAL;
2031 goto fail;
2032 }
2033 addr = alloca(addrlen);
2034 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2035 } else {
2036 addr = NULL; /* To keep compiler quiet. */
2037 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2038 }
2039 if (!is_error(ret)) {
2040 if (target_addr) {
2041 host_to_target_sockaddr(target_addr, addr, addrlen);
2042 if (put_user_u32(addrlen, target_addrlen)) {
2043 ret = -TARGET_EFAULT;
2044 goto fail;
2045 }
2046 }
2047 unlock_user(host_msg, msg, len);
2048 } else {
2049 fail:
2050 unlock_user(host_msg, msg, 0);
2051 }
2052 return ret;
2053 }
2054
2055 #ifdef TARGET_NR_socketcall
2056 /* do_socketcall() must return target values and target errnos. */
2057 static abi_long do_socketcall(int num, abi_ulong vptr)
2058 {
2059 abi_long ret;
2060 const int n = sizeof(abi_ulong);
2061
2062 switch(num) {
2063 case SOCKOP_socket:
2064 {
2065 abi_ulong domain, type, protocol;
2066
2067 if (get_user_ual(domain, vptr)
2068 || get_user_ual(type, vptr + n)
2069 || get_user_ual(protocol, vptr + 2 * n))
2070 return -TARGET_EFAULT;
2071
2072 ret = do_socket(domain, type, protocol);
2073 }
2074 break;
2075 case SOCKOP_bind:
2076 {
2077 abi_ulong sockfd;
2078 abi_ulong target_addr;
2079 socklen_t addrlen;
2080
2081 if (get_user_ual(sockfd, vptr)
2082 || get_user_ual(target_addr, vptr + n)
2083 || get_user_ual(addrlen, vptr + 2 * n))
2084 return -TARGET_EFAULT;
2085
2086 ret = do_bind(sockfd, target_addr, addrlen);
2087 }
2088 break;
2089 case SOCKOP_connect:
2090 {
2091 abi_ulong sockfd;
2092 abi_ulong target_addr;
2093 socklen_t addrlen;
2094
2095 if (get_user_ual(sockfd, vptr)
2096 || get_user_ual(target_addr, vptr + n)
2097 || get_user_ual(addrlen, vptr + 2 * n))
2098 return -TARGET_EFAULT;
2099
2100 ret = do_connect(sockfd, target_addr, addrlen);
2101 }
2102 break;
2103 case SOCKOP_listen:
2104 {
2105 abi_ulong sockfd, backlog;
2106
2107 if (get_user_ual(sockfd, vptr)
2108 || get_user_ual(backlog, vptr + n))
2109 return -TARGET_EFAULT;
2110
2111 ret = get_errno(listen(sockfd, backlog));
2112 }
2113 break;
2114 case SOCKOP_accept:
2115 {
2116 abi_ulong sockfd;
2117 abi_ulong target_addr, target_addrlen;
2118
2119 if (get_user_ual(sockfd, vptr)
2120 || get_user_ual(target_addr, vptr + n)
2121 || get_user_ual(target_addrlen, vptr + 2 * n))
2122 return -TARGET_EFAULT;
2123
2124 ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2125 }
2126 break;
2127 case SOCKOP_getsockname:
2128 {
2129 abi_ulong sockfd;
2130 abi_ulong target_addr, target_addrlen;
2131
2132 if (get_user_ual(sockfd, vptr)
2133 || get_user_ual(target_addr, vptr + n)
2134 || get_user_ual(target_addrlen, vptr + 2 * n))
2135 return -TARGET_EFAULT;
2136
2137 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2138 }
2139 break;
2140 case SOCKOP_getpeername:
2141 {
2142 abi_ulong sockfd;
2143 abi_ulong target_addr, target_addrlen;
2144
2145 if (get_user_ual(sockfd, vptr)
2146 || get_user_ual(target_addr, vptr + n)
2147 || get_user_ual(target_addrlen, vptr + 2 * n))
2148 return -TARGET_EFAULT;
2149
2150 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2151 }
2152 break;
2153 case SOCKOP_socketpair:
2154 {
2155 abi_ulong domain, type, protocol;
2156 abi_ulong tab;
2157
2158 if (get_user_ual(domain, vptr)
2159 || get_user_ual(type, vptr + n)
2160 || get_user_ual(protocol, vptr + 2 * n)
2161 || get_user_ual(tab, vptr + 3 * n))
2162 return -TARGET_EFAULT;
2163
2164 ret = do_socketpair(domain, type, protocol, tab);
2165 }
2166 break;
2167 case SOCKOP_send:
2168 {
2169 abi_ulong sockfd;
2170 abi_ulong msg;
2171 size_t len;
2172 abi_ulong flags;
2173
2174 if (get_user_ual(sockfd, vptr)
2175 || get_user_ual(msg, vptr + n)
2176 || get_user_ual(len, vptr + 2 * n)
2177 || get_user_ual(flags, vptr + 3 * n))
2178 return -TARGET_EFAULT;
2179
2180 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2181 }
2182 break;
2183 case SOCKOP_recv:
2184 {
2185 abi_ulong sockfd;
2186 abi_ulong msg;
2187 size_t len;
2188 abi_ulong flags;
2189
2190 if (get_user_ual(sockfd, vptr)
2191 || get_user_ual(msg, vptr + n)
2192 || get_user_ual(len, vptr + 2 * n)
2193 || get_user_ual(flags, vptr + 3 * n))
2194 return -TARGET_EFAULT;
2195
2196 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2197 }
2198 break;
2199 case SOCKOP_sendto:
2200 {
2201 abi_ulong sockfd;
2202 abi_ulong msg;
2203 size_t len;
2204 abi_ulong flags;
2205 abi_ulong addr;
2206 socklen_t addrlen;
2207
2208 if (get_user_ual(sockfd, vptr)
2209 || get_user_ual(msg, vptr + n)
2210 || get_user_ual(len, vptr + 2 * n)
2211 || get_user_ual(flags, vptr + 3 * n)
2212 || get_user_ual(addr, vptr + 4 * n)
2213 || get_user_ual(addrlen, vptr + 5 * n))
2214 return -TARGET_EFAULT;
2215
2216 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2217 }
2218 break;
2219 case SOCKOP_recvfrom:
2220 {
2221 abi_ulong sockfd;
2222 abi_ulong msg;
2223 size_t len;
2224 abi_ulong flags;
2225 abi_ulong addr;
2226 socklen_t addrlen;
2227
2228 if (get_user_ual(sockfd, vptr)
2229 || get_user_ual(msg, vptr + n)
2230 || get_user_ual(len, vptr + 2 * n)
2231 || get_user_ual(flags, vptr + 3 * n)
2232 || get_user_ual(addr, vptr + 4 * n)
2233 || get_user_ual(addrlen, vptr + 5 * n))
2234 return -TARGET_EFAULT;
2235
2236 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2237 }
2238 break;
2239 case SOCKOP_shutdown:
2240 {
2241 abi_ulong sockfd, how;
2242
2243 if (get_user_ual(sockfd, vptr)
2244 || get_user_ual(how, vptr + n))
2245 return -TARGET_EFAULT;
2246
2247 ret = get_errno(shutdown(sockfd, how));
2248 }
2249 break;
2250 case SOCKOP_sendmsg:
2251 case SOCKOP_recvmsg:
2252 {
2253 abi_ulong fd;
2254 abi_ulong target_msg;
2255 abi_ulong flags;
2256
2257 if (get_user_ual(fd, vptr)
2258 || get_user_ual(target_msg, vptr + n)
2259 || get_user_ual(flags, vptr + 2 * n))
2260 return -TARGET_EFAULT;
2261
2262 ret = do_sendrecvmsg(fd, target_msg, flags,
2263 (num == SOCKOP_sendmsg));
2264 }
2265 break;
2266 case SOCKOP_setsockopt:
2267 {
2268 abi_ulong sockfd;
2269 abi_ulong level;
2270 abi_ulong optname;
2271 abi_ulong optval;
2272 socklen_t optlen;
2273
2274 if (get_user_ual(sockfd, vptr)
2275 || get_user_ual(level, vptr + n)
2276 || get_user_ual(optname, vptr + 2 * n)
2277 || get_user_ual(optval, vptr + 3 * n)
2278 || get_user_ual(optlen, vptr + 4 * n))
2279 return -TARGET_EFAULT;
2280
2281 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2282 }
2283 break;
2284 case SOCKOP_getsockopt:
2285 {
2286 abi_ulong sockfd;
2287 abi_ulong level;
2288 abi_ulong optname;
2289 abi_ulong optval;
2290 socklen_t optlen;
2291
2292 if (get_user_ual(sockfd, vptr)
2293 || get_user_ual(level, vptr + n)
2294 || get_user_ual(optname, vptr + 2 * n)
2295 || get_user_ual(optval, vptr + 3 * n)
2296 || get_user_ual(optlen, vptr + 4 * n))
2297 return -TARGET_EFAULT;
2298
2299 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2300 }
2301 break;
2302 default:
2303 gemu_log("Unsupported socketcall: %d\n", num);
2304 ret = -TARGET_ENOSYS;
2305 break;
2306 }
2307 return ret;
2308 }
2309 #endif
2310
2311 #define N_SHM_REGIONS 32
2312
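/* Book-keeping for guest shmat() mappings, so that do_shmdt() can find the
 * segment size and clear the guest page flags again. */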
2313 static struct shm_region {
2314 abi_ulong start;
2315 abi_ulong size;
2316 } shm_regions[N_SHM_REGIONS];
2317
2318 struct target_ipc_perm
2319 {
2320 abi_long __key;
2321 abi_ulong uid;
2322 abi_ulong gid;
2323 abi_ulong cuid;
2324 abi_ulong cgid;
2325 unsigned short int mode;
2326 unsigned short int __pad1;
2327 unsigned short int __seq;
2328 unsigned short int __pad2;
2329 abi_ulong __unused1;
2330 abi_ulong __unused2;
2331 };
2332
2333 struct target_semid_ds
2334 {
2335 struct target_ipc_perm sem_perm;
2336 abi_ulong sem_otime;
2337 abi_ulong __unused1;
2338 abi_ulong sem_ctime;
2339 abi_ulong __unused2;
2340 abi_ulong sem_nsems;
2341 abi_ulong __unused3;
2342 abi_ulong __unused4;
2343 };
2344
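/* Conversion helpers for the ipc_perm header that starts every SysV IPC
 * descriptor; only the fields common to target and host are copied. */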
2345 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2346 abi_ulong target_addr)
2347 {
2348 struct target_ipc_perm *target_ip;
2349 struct target_semid_ds *target_sd;
2350
2351 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2352 return -TARGET_EFAULT;
2353 target_ip = &(target_sd->sem_perm);
2354 host_ip->__key = tswapal(target_ip->__key);
2355 host_ip->uid = tswapal(target_ip->uid);
2356 host_ip->gid = tswapal(target_ip->gid);
2357 host_ip->cuid = tswapal(target_ip->cuid);
2358 host_ip->cgid = tswapal(target_ip->cgid);
2359 host_ip->mode = tswap16(target_ip->mode);
2360 unlock_user_struct(target_sd, target_addr, 0);
2361 return 0;
2362 }
2363
2364 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2365 struct ipc_perm *host_ip)
2366 {
2367 struct target_ipc_perm *target_ip;
2368 struct target_semid_ds *target_sd;
2369
2370 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2371 return -TARGET_EFAULT;
2372 target_ip = &(target_sd->sem_perm);
2373 target_ip->__key = tswapal(host_ip->__key);
2374 target_ip->uid = tswapal(host_ip->uid);
2375 target_ip->gid = tswapal(host_ip->gid);
2376 target_ip->cuid = tswapal(host_ip->cuid);
2377 target_ip->cgid = tswapal(host_ip->cgid);
2378 target_ip->mode = tswap16(host_ip->mode);
2379 unlock_user_struct(target_sd, target_addr, 1);
2380 return 0;
2381 }
2382
2383 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2384 abi_ulong target_addr)
2385 {
2386 struct target_semid_ds *target_sd;
2387
2388 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2389 return -TARGET_EFAULT;
2390 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2391 return -TARGET_EFAULT;
2392 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2393 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2394 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2395 unlock_user_struct(target_sd, target_addr, 0);
2396 return 0;
2397 }
2398
2399 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2400 struct semid_ds *host_sd)
2401 {
2402 struct target_semid_ds *target_sd;
2403
2404 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2405 return -TARGET_EFAULT;
2406 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2407 return -TARGET_EFAULT;
2408 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2409 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2410 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2411 unlock_user_struct(target_sd, target_addr, 1);
2412 return 0;
2413 }
2414
2415 struct target_seminfo {
2416 int semmap;
2417 int semmni;
2418 int semmns;
2419 int semmnu;
2420 int semmsl;
2421 int semopm;
2422 int semume;
2423 int semusz;
2424 int semvmx;
2425 int semaem;
2426 };
2427
2428 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2429 struct seminfo *host_seminfo)
2430 {
2431 struct target_seminfo *target_seminfo;
2432 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2433 return -TARGET_EFAULT;
2434 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2435 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2436 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2437 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2438 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2439 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2440 __put_user(host_seminfo->semume, &target_seminfo->semume);
2441 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2442 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2443 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2444 unlock_user_struct(target_seminfo, target_addr, 1);
2445 return 0;
2446 }
2447
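/* glibc does not define union semun; callers of semctl() must declare it
 * themselves, so provide the host and target variants here. */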
2448 union semun {
2449 int val;
2450 struct semid_ds *buf;
2451 unsigned short *array;
2452 struct seminfo *__buf;
2453 };
2454
2455 union target_semun {
2456 int val;
2457 abi_ulong buf;
2458 abi_ulong array;
2459 abi_ulong __buf;
2460 };
2461
2462 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2463 abi_ulong target_addr)
2464 {
2465 int nsems;
2466 unsigned short *array;
2467 union semun semun;
2468 struct semid_ds semid_ds;
2469 int i, ret;
2470
2471 semun.buf = &semid_ds;
2472
2473 ret = semctl(semid, 0, IPC_STAT, semun);
2474 if (ret == -1)
2475 return get_errno(ret);
2476
2477 nsems = semid_ds.sem_nsems;
2478
2479 *host_array = malloc(nsems*sizeof(unsigned short));
if (!*host_array) {
return -TARGET_ENOMEM;
}
2480 array = lock_user(VERIFY_READ, target_addr,
2481 nsems*sizeof(unsigned short), 1);
2482 if (!array) {
free(*host_array);
2483 return -TARGET_EFAULT;
}
2484
2485 for(i=0; i<nsems; i++) {
2486 __get_user((*host_array)[i], &array[i]);
2487 }
2488 unlock_user(array, target_addr, 0);
2489
2490 return 0;
2491 }
2492
2493 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2494 unsigned short **host_array)
2495 {
2496 int nsems;
2497 unsigned short *array;
2498 union semun semun;
2499 struct semid_ds semid_ds;
2500 int i, ret;
2501
2502 semun.buf = &semid_ds;
2503
2504 ret = semctl(semid, 0, IPC_STAT, semun);
2505 if (ret == -1)
2506 return get_errno(ret);
2507
2508 nsems = semid_ds.sem_nsems;
2509
2510 array = lock_user(VERIFY_WRITE, target_addr,
2511 nsems*sizeof(unsigned short), 0);
2512 if (!array) {
free(*host_array);
2513 return -TARGET_EFAULT;
}
2514
2515 for(i=0; i<nsems; i++) {
2516 __put_user((*host_array)[i], &array[i]);
2517 }
2518 free(*host_array);
2519 unlock_user(array, target_addr, 1);
2520
2521 return 0;
2522 }
2523
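/* Emulate semctl(): marshal the semun argument for the requested command,
 * issue the host call and copy any results back to the guest. */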
2524 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2525 union target_semun target_su)
2526 {
2527 union semun arg;
2528 struct semid_ds dsarg;
2529 unsigned short *array = NULL;
2530 struct seminfo seminfo;
2531 abi_long ret = -TARGET_EINVAL;
2532 abi_long err;
2533 cmd &= 0xff;
2534
2535 switch( cmd ) {
2536 case GETVAL:
2537 case SETVAL:
2538 arg.val = tswap32(target_su.val);
2539 ret = get_errno(semctl(semid, semnum, cmd, arg));
2540 target_su.val = tswap32(arg.val);
2541 break;
2542 case GETALL:
2543 case SETALL:
2544 err = target_to_host_semarray(semid, &array, target_su.array);
2545 if (err)
2546 return err;
2547 arg.array = array;
2548 ret = get_errno(semctl(semid, semnum, cmd, arg));
2549 err = host_to_target_semarray(semid, target_su.array, &array);
2550 if (err)
2551 return err;
2552 break;
2553 case IPC_STAT:
2554 case IPC_SET:
2555 case SEM_STAT:
2556 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2557 if (err)
2558 return err;
2559 arg.buf = &dsarg;
2560 ret = get_errno(semctl(semid, semnum, cmd, arg));
2561 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2562 if (err)
2563 return err;
2564 break;
2565 case IPC_INFO:
2566 case SEM_INFO:
2567 arg.__buf = &seminfo;
2568 ret = get_errno(semctl(semid, semnum, cmd, arg));
2569 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2570 if (err)
2571 return err;
2572 break;
2573 case IPC_RMID:
2574 case GETPID:
2575 case GETNCNT:
2576 case GETZCNT:
2577 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2578 break;
2579 }
2580
2581 return ret;
2582 }
2583
2584 struct target_sembuf {
2585 unsigned short sem_num;
2586 short sem_op;
2587 short sem_flg;
2588 };
2589
2590 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2591 abi_ulong target_addr,
2592 unsigned nsops)
2593 {
2594 struct target_sembuf *target_sembuf;
2595 int i;
2596
2597 target_sembuf = lock_user(VERIFY_READ, target_addr,
2598 nsops*sizeof(struct target_sembuf), 1);
2599 if (!target_sembuf)
2600 return -TARGET_EFAULT;
2601
2602 for(i=0; i<nsops; i++) {
2603 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2604 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2605 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2606 }
2607
2608 unlock_user(target_sembuf, target_addr, 0);
2609
2610 return 0;
2611 }
2612
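/* Emulate semop() by converting the guest sembuf array to host format. */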
2613 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2614 {
2615 struct sembuf sops[nsops];
2616
2617 if (target_to_host_sembuf(sops, ptr, nsops))
2618 return -TARGET_EFAULT;
2619
2620 return get_errno(semop(semid, sops, nsops));
2621 }
2622
2623 struct target_msqid_ds
2624 {
2625 struct target_ipc_perm msg_perm;
2626 abi_ulong msg_stime;
2627 #if TARGET_ABI_BITS == 32
2628 abi_ulong __unused1;
2629 #endif
2630 abi_ulong msg_rtime;
2631 #if TARGET_ABI_BITS == 32
2632 abi_ulong __unused2;
2633 #endif
2634 abi_ulong msg_ctime;
2635 #if TARGET_ABI_BITS == 32
2636 abi_ulong __unused3;
2637 #endif
2638 abi_ulong __msg_cbytes;
2639 abi_ulong msg_qnum;
2640 abi_ulong msg_qbytes;
2641 abi_ulong msg_lspid;
2642 abi_ulong msg_lrpid;
2643 abi_ulong __unused4;
2644 abi_ulong __unused5;
2645 };
2646
2647 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2648 abi_ulong target_addr)
2649 {
2650 struct target_msqid_ds *target_md;
2651
2652 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2653 return -TARGET_EFAULT;
2654 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2655 return -TARGET_EFAULT;
2656 host_md->msg_stime = tswapal(target_md->msg_stime);
2657 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2658 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2659 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2660 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2661 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2662 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2663 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2664 unlock_user_struct(target_md, target_addr, 0);
2665 return 0;
2666 }
2667
2668 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2669 struct msqid_ds *host_md)
2670 {
2671 struct target_msqid_ds *target_md;
2672
2673 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2674 return -TARGET_EFAULT;
2675 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2676 return -TARGET_EFAULT;
2677 target_md->msg_stime = tswapal(host_md->msg_stime);
2678 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2679 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2680 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2681 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2682 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2683 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2684 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2685 unlock_user_struct(target_md, target_addr, 1);
2686 return 0;
2687 }
2688
2689 struct target_msginfo {
2690 int msgpool;
2691 int msgmap;
2692 int msgmax;
2693 int msgmnb;
2694 int msgmni;
2695 int msgssz;
2696 int msgtql;
2697 unsigned short int msgseg;
2698 };
2699
2700 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2701 struct msginfo *host_msginfo)
2702 {
2703 struct target_msginfo *target_msginfo;
2704 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2705 return -TARGET_EFAULT;
2706 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2707 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2708 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2709 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2710 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2711 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2712 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2713 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2714 unlock_user_struct(target_msginfo, target_addr, 1);
2715 return 0;
2716 }
2717
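/* Emulate msgctl(): convert msqid_ds/msginfo structures as required by the
 * command before and after the host call. */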
2718 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2719 {
2720 struct msqid_ds dsarg;
2721 struct msginfo msginfo;
2722 abi_long ret = -TARGET_EINVAL;
2723
2724 cmd &= 0xff;
2725
2726 switch (cmd) {
2727 case IPC_STAT:
2728 case IPC_SET:
2729 case MSG_STAT:
2730 if (target_to_host_msqid_ds(&dsarg,ptr))
2731 return -TARGET_EFAULT;
2732 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2733 if (host_to_target_msqid_ds(ptr,&dsarg))
2734 return -TARGET_EFAULT;
2735 break;
2736 case IPC_RMID:
2737 ret = get_errno(msgctl(msgid, cmd, NULL));
2738 break;
2739 case IPC_INFO:
2740 case MSG_INFO:
2741 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2742 if (host_to_target_msginfo(ptr, &msginfo))
2743 return -TARGET_EFAULT;
2744 break;
2745 }
2746
2747 return ret;
2748 }
2749
2750 struct target_msgbuf {
2751 abi_long mtype;
2752 char mtext[1];
2753 };
2754
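/* Emulate msgsnd(): copy the guest message into a host msgbuf and send it. */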
2755 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2756 unsigned int msgsz, int msgflg)
2757 {
2758 struct target_msgbuf *target_mb;
2759 struct msgbuf *host_mb;
2760 abi_long ret = 0;
2761
2762 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2763 return -TARGET_EFAULT;
2764 host_mb = malloc(msgsz+sizeof(long));
if (!host_mb) {
unlock_user_struct(target_mb, msgp, 0);
return -TARGET_ENOMEM;
}
2765 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2766 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2767 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2768 free(host_mb);
2769 unlock_user_struct(target_mb, msgp, 0);
2770
2771 return ret;
2772 }
2773
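/* Emulate msgrcv(): receive into a host buffer, then copy the message type
 * and text back to the guest. */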
2774 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2775 unsigned int msgsz, abi_long msgtyp,
2776 int msgflg)
2777 {
2778 struct target_msgbuf *target_mb;
2779 char *target_mtext;
2780 struct msgbuf *host_mb;
2781 abi_long ret = 0;
2782
2783 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2784 return -TARGET_EFAULT;
2785
2786 host_mb = g_malloc(msgsz+sizeof(long));
2787 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2788
2789 if (ret > 0) {
2790 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2791 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2792 if (!target_mtext) {
2793 ret = -TARGET_EFAULT;
2794 goto end;
2795 }
2796 memcpy(target_mb->mtext, host_mb->mtext, ret);
2797 unlock_user(target_mtext, target_mtext_addr, ret);
2798 }
2799
2800 target_mb->mtype = tswapal(host_mb->mtype);
2801
2802 end:
2803 if (target_mb)
2804 unlock_user_struct(target_mb, msgp, 1);
2805 g_free(host_mb);
2806 return ret;
2807 }
2808
2809 struct target_shmid_ds
2810 {
2811 struct target_ipc_perm shm_perm;
2812 abi_ulong shm_segsz;
2813 abi_ulong shm_atime;
2814 #if TARGET_ABI_BITS == 32
2815 abi_ulong __unused1;
2816 #endif
2817 abi_ulong shm_dtime;
2818 #if TARGET_ABI_BITS == 32
2819 abi_ulong __unused2;
2820 #endif
2821 abi_ulong shm_ctime;
2822 #if TARGET_ABI_BITS == 32
2823 abi_ulong __unused3;
2824 #endif
2825 int shm_cpid;
2826 int shm_lpid;
2827 abi_ulong shm_nattch;
2828 unsigned long int __unused4;
2829 unsigned long int __unused5;
2830 };
2831
2832 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2833 abi_ulong target_addr)
2834 {
2835 struct target_shmid_ds *target_sd;
2836
2837 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2838 return -TARGET_EFAULT;
2839 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2840 return -TARGET_EFAULT;
2841 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2842 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2843 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2844 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2845 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2846 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2847 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2848 unlock_user_struct(target_sd, target_addr, 0);
2849 return 0;
2850 }
2851
2852 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2853 struct shmid_ds *host_sd)
2854 {
2855 struct target_shmid_ds *target_sd;
2856
2857 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2858 return -TARGET_EFAULT;
2859 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2860 return -TARGET_EFAULT;
2861 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2862 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2863 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2864 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2865 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2866 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2867 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2868 unlock_user_struct(target_sd, target_addr, 1);
2869 return 0;
2870 }
2871
2872 struct target_shminfo {
2873 abi_ulong shmmax;
2874 abi_ulong shmmin;
2875 abi_ulong shmmni;
2876 abi_ulong shmseg;
2877 abi_ulong shmall;
2878 };
2879
2880 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2881 struct shminfo *host_shminfo)
2882 {
2883 struct target_shminfo *target_shminfo;
2884 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2885 return -TARGET_EFAULT;
2886 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2887 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2888 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2889 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2890 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2891 unlock_user_struct(target_shminfo, target_addr, 1);
2892 return 0;
2893 }
2894
2895 struct target_shm_info {
2896 int used_ids;
2897 abi_ulong shm_tot;
2898 abi_ulong shm_rss;
2899 abi_ulong shm_swp;
2900 abi_ulong swap_attempts;
2901 abi_ulong swap_successes;
2902 };
2903
2904 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2905 struct shm_info *host_shm_info)
2906 {
2907 struct target_shm_info *target_shm_info;
2908 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2909 return -TARGET_EFAULT;
2910 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2911 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2912 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2913 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2914 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2915 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2916 unlock_user_struct(target_shm_info, target_addr, 1);
2917 return 0;
2918 }
2919
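/* Emulate shmctl(): convert shmid_ds/shminfo/shm_info structures as needed
 * for the given command. */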
2920 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2921 {
2922 struct shmid_ds dsarg;
2923 struct shminfo shminfo;
2924 struct shm_info shm_info;
2925 abi_long ret = -TARGET_EINVAL;
2926
2927 cmd &= 0xff;
2928
2929 switch(cmd) {
2930 case IPC_STAT:
2931 case IPC_SET:
2932 case SHM_STAT:
2933 if (target_to_host_shmid_ds(&dsarg, buf))
2934 return -TARGET_EFAULT;
2935 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2936 if (host_to_target_shmid_ds(buf, &dsarg))
2937 return -TARGET_EFAULT;
2938 break;
2939 case IPC_INFO:
2940 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2941 if (host_to_target_shminfo(buf, &shminfo))
2942 return -TARGET_EFAULT;
2943 break;
2944 case SHM_INFO:
2945 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2946 if (host_to_target_shm_info(buf, &shm_info))
2947 return -TARGET_EFAULT;
2948 break;
2949 case IPC_RMID:
2950 case SHM_LOCK:
2951 case SHM_UNLOCK:
2952 ret = get_errno(shmctl(shmid, cmd, NULL));
2953 break;
2954 }
2955
2956 return ret;
2957 }
2958
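/* Emulate shmat(): attach the segment on the host, update the guest page
 * flags and remember the mapping so do_shmdt() can undo it later. */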
2959 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2960 {
2961 abi_long raddr;
2962 void *host_raddr;
2963 struct shmid_ds shm_info;
2964 int i,ret;
2965
2966 /* find out the length of the shared memory segment */
2967 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2968 if (is_error(ret)) {
2969 /* can't get length, bail out */
2970 return ret;
2971 }
2972
2973 mmap_lock();
2974
2975 if (shmaddr)
2976 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2977 else {
2978 abi_ulong mmap_start;
2979
2980 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2981
2982 if (mmap_start == -1) {
2983 errno = ENOMEM;
2984 host_raddr = (void *)-1;
2985 } else
2986 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2987 }
2988
2989 if (host_raddr == (void *)-1) {
2990 mmap_unlock();
2991 return get_errno((long)host_raddr);
2992 }
2993 raddr=h2g((unsigned long)host_raddr);
2994
2995 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2996 PAGE_VALID | PAGE_READ |
2997 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2998
2999 for (i = 0; i < N_SHM_REGIONS; i++) {
3000 if (shm_regions[i].start == 0) {
3001 shm_regions[i].start = raddr;
3002 shm_regions[i].size = shm_info.shm_segsz;
3003 break;
3004 }
3005 }
3006
3007 mmap_unlock();
3008 return raddr;
3009
3010 }
3011
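/* Emulate shmdt(): drop the recorded mapping, clear the page flags and
 * detach the host segment. */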
3012 static inline abi_long do_shmdt(abi_ulong shmaddr)
3013 {
3014 int i;
3015
3016 for (i = 0; i < N_SHM_REGIONS; ++i) {
3017 if (shm_regions[i].start == shmaddr) {
3018 shm_regions[i].start = 0;
3019 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3020 break;
3021 }
3022 }
3023
3024 return get_errno(shmdt(g2h(shmaddr)));
3025 }
3026
3027 #ifdef TARGET_NR_ipc
3028 /* ??? This only works with linear mappings. */
3029 /* do_ipc() must return target values and target errnos. */
3030 static abi_long do_ipc(unsigned int call, int first,
3031 int second, int third,
3032 abi_long ptr, abi_long fifth)
3033 {
3034 int version;
3035 abi_long ret = 0;
3036
3037 version = call >> 16;
3038 call &= 0xffff;
3039
3040 switch (call) {
3041 case IPCOP_semop:
3042 ret = do_semop(first, ptr, second);
3043 break;
3044
3045 case IPCOP_semget:
3046 ret = get_errno(semget(first, second, third));
3047 break;
3048
3049 case IPCOP_semctl:
3050 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3051 break;
3052
3053 case IPCOP_msgget:
3054 ret = get_errno(msgget(first, second));
3055 break;
3056
3057 case IPCOP_msgsnd:
3058 ret = do_msgsnd(first, ptr, second, third);
3059 break;
3060
3061 case IPCOP_msgctl:
3062 ret = do_msgctl(first, second, ptr);
3063 break;
3064
3065 case IPCOP_msgrcv:
3066 switch (version) {
3067 case 0:
3068 {
3069 struct target_ipc_kludge {
3070 abi_long msgp;
3071 abi_long msgtyp;
3072 } *tmp;
3073
3074 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3075 ret = -TARGET_EFAULT;
3076 break;
3077 }
3078
3079 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3080
3081 unlock_user_struct(tmp, ptr, 0);
3082 break;
3083 }
3084 default:
3085 ret = do_msgrcv(first, ptr, second, fifth, third);
3086 }
3087 break;
3088
3089 case IPCOP_shmat:
3090 switch (version) {
3091 default:
3092 {
3093 abi_ulong raddr;
3094 raddr = do_shmat(first, ptr, second);
3095 if (is_error(raddr))
3096 return get_errno(raddr);
3097 if (put_user_ual(raddr, third))
3098 return -TARGET_EFAULT;
3099 break;
3100 }
3101 case 1:
3102 ret = -TARGET_EINVAL;
3103 break;
3104 }
3105 break;
3106 case IPCOP_shmdt:
3107 ret = do_shmdt(ptr);
3108 break;
3109
3110 case IPCOP_shmget:
3111 /* IPC_* flag values are the same on all Linux platforms */
3112 ret = get_errno(shmget(first, second, third));
3113 break;
3114
3115 /* IPC_* and SHM_* command values are the same on all Linux platforms */
3116 case IPCOP_shmctl:
3117 ret = do_shmctl(first, second, third);
3118 break;
3119 default:
3120 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3121 ret = -TARGET_ENOSYS;
3122 break;
3123 }
3124 return ret;
3125 }
3126 #endif
3127
3128 /* kernel structure types definitions */
3129
3130 #define STRUCT(name, ...) STRUCT_ ## name,
3131 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3132 enum {
3133 #include "syscall_types.h"
3134 };
3135 #undef STRUCT
3136 #undef STRUCT_SPECIAL
3137
3138 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3139 #define STRUCT_SPECIAL(name)
3140 #include "syscall_types.h"
3141 #undef STRUCT
3142 #undef STRUCT_SPECIAL
3143
3144 typedef struct IOCTLEntry IOCTLEntry;
3145
3146 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3147 int fd, abi_long cmd, abi_long arg);
3148
3149 struct IOCTLEntry {
3150 unsigned int target_cmd;
3151 unsigned int host_cmd;
3152 const char *name;
3153 int access;
3154 do_ioctl_fn *do_ioctl;
3155 const argtype arg_type[5];
3156 };
3157
3158 #define IOC_R 0x0001
3159 #define IOC_W 0x0002
3160 #define IOC_RW (IOC_R | IOC_W)
3161
3162 #define MAX_STRUCT_SIZE 4096
3163
3164 #ifdef CONFIG_FIEMAP
3165 /* So fiemap access checks don't overflow on 32-bit systems.
3166 * This is very slightly smaller than the limit imposed by
3167 * the underlying kernel.
3168 */
3169 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3170 / sizeof(struct fiemap_extent))
3171
3172 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3173 int fd, abi_long cmd, abi_long arg)
3174 {
3175 /* The parameter for this ioctl is a struct fiemap followed
3176 * by an array of struct fiemap_extent whose size is set
3177 * in fiemap->fm_extent_count. The array is filled in by the
3178 * ioctl.
3179 */
3180 int target_size_in, target_size_out;
3181 struct fiemap *fm;
3182 const argtype *arg_type = ie->arg_type;
3183 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3184 void *argptr, *p;
3185 abi_long ret;
3186 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3187 uint32_t outbufsz;
3188 int free_fm = 0;
3189
3190 assert(arg_type[0] == TYPE_PTR);
3191 assert(ie->access == IOC_RW);
3192 arg_type++;
3193 target_size_in = thunk_type_size(arg_type, 0);
3194 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3195 if (!argptr) {
3196 return -TARGET_EFAULT;
3197 }
3198 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3199 unlock_user(argptr, arg, 0);
3200 fm = (struct fiemap *)buf_temp;
3201 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3202 return -TARGET_EINVAL;
3203 }
3204
3205 outbufsz = sizeof (*fm) +
3206 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3207
3208 if (outbufsz > MAX_STRUCT_SIZE) {
3209 /* We can't fit all the extents into the fixed-size buffer.
3210 * Allocate one that is large enough and use it instead.
3211 */
3212 fm = malloc(outbufsz);
3213 if (!fm) {
3214 return -TARGET_ENOMEM;
3215 }
3216 memcpy(fm, buf_temp, sizeof(struct fiemap));
3217 free_fm = 1;
3218 }
3219 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3220 if (!is_error(ret)) {
3221 target_size_out = target_size_in;
3222 /* An extent_count of 0 means we were only counting the extents
3223 * so there are no structs to copy
3224 */
3225 if (fm->fm_extent_count != 0) {
3226 target_size_out += fm->fm_mapped_extents * extent_size;
3227 }
3228 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3229 if (!argptr) {
3230 ret = -TARGET_EFAULT;
3231 } else {
3232 /* Convert the struct fiemap */
3233 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3234 if (fm->fm_extent_count != 0) {
3235 p = argptr + target_size_in;
3236 /* ...and then all the struct fiemap_extents */
3237 for (i = 0; i < fm->fm_mapped_extents; i++) {
3238 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3239 THUNK_TARGET);
3240 p += extent_size;
3241 }
3242 }
3243 unlock_user(argptr, arg, target_size_out);
3244 }
3245 }
3246 if (free_fm) {
3247 free(fm);
3248 }
3249 return ret;
3250 }
3251 #endif
3252
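/* Special handler for ioctls taking a struct ifconf: the embedded ifreq
 * array has to be converted element by element and ifc_len/ifc_buf fixed up
 * for the target. */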
3253 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3254 int fd, abi_long cmd, abi_long arg)
3255 {
3256 const argtype *arg_type = ie->arg_type;
3257 int target_size;
3258 void *argptr;
3259 int ret;
3260 struct ifconf *host_ifconf;
3261 uint32_t outbufsz;
3262 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3263 int target_ifreq_size;
3264 int nb_ifreq;
3265 int free_buf = 0;
3266 int i;
3267 int target_ifc_len;
3268 abi_long target_ifc_buf;
3269 int host_ifc_len;
3270 char *host_ifc_buf;
3271
3272 assert(arg_type[0] == TYPE_PTR);
3273 assert(ie->access == IOC_RW);
3274
3275 arg_type++;
3276 target_size = thunk_type_size(arg_type, 0);
3277
3278 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3279 if (!argptr)
3280 return -TARGET_EFAULT;
3281 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3282 unlock_user(argptr, arg, 0);
3283
3284 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3285 target_ifc_len = host_ifconf->ifc_len;
3286 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3287
3288 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3289 nb_ifreq = target_ifc_len / target_ifreq_size;
3290 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3291
3292 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3293 if (outbufsz > MAX_STRUCT_SIZE) {
3294 /* We can't fit all the ifreq entries into the fixed-size buffer.
3295 * Allocate one that is large enough and use it instead.
3296 */
3297 host_ifconf = malloc(outbufsz);
3298 if (!host_ifconf) {
3299 return -TARGET_ENOMEM;
3300 }
3301 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3302 free_buf = 1;
3303 }
3304 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3305
3306 host_ifconf->ifc_len = host_ifc_len;
3307 host_ifconf->ifc_buf = host_ifc_buf;
3308
3309 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3310 if (!is_error(ret)) {
3311 /* convert host ifc_len to target ifc_len */
3312
3313 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3314 target_ifc_len = nb_ifreq * target_ifreq_size;
3315 host_ifconf->ifc_len = target_ifc_len;
3316
3317 /* restore target ifc_buf */
3318
3319 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3320
3321 /* copy struct ifconf to target user */
3322
3323 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3324 if (!argptr)
3325 return -TARGET_EFAULT;
3326 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3327 unlock_user(argptr, arg, target_size);
3328
3329 /* copy ifreq[] to target user */
3330
3331 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3332 for (i = 0; i < nb_ifreq ; i++) {
3333 thunk_convert(argptr + i * target_ifreq_size,
3334 host_ifc_buf + i * sizeof(struct ifreq),
3335 ifreq_arg_type, THUNK_TARGET);
3336 }
3337 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3338 }
3339
3340 if (free_buf) {
3341 free(host_ifconf);
3342 }
3343
3344 return ret;
3345 }
3346
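/* Special handler for device-mapper ioctls: the payload is a variable-sized
 * blob that follows struct dm_ioctl and whose layout depends on the command. */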
3347 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3348 abi_long cmd, abi_long arg)
3349 {
3350 void *argptr;
3351 struct dm_ioctl *host_dm;
3352 abi_long guest_data;
3353 uint32_t guest_data_size;
3354 int target_size;
3355 const argtype *arg_type = ie->arg_type;
3356 abi_long ret;
3357 void *big_buf = NULL;
3358 char *host_data;
3359
3360 arg_type++;
3361 target_size = thunk_type_size(arg_type, 0);
3362 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3363 if (!argptr) {
3364 ret = -TARGET_EFAULT;
3365 goto out;
3366 }
3367 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3368 unlock_user(argptr, arg, 0);
3369
3370 /* buf_temp is too small, so fetch things into a bigger buffer */
3371 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3372 memcpy(big_buf, buf_temp, target_size);
3373 buf_temp = big_buf;
3374 host_dm = big_buf;
3375
3376 guest_data = arg + host_dm->data_start;
3377 if ((guest_data - arg) < 0) {
3378 ret = -EINVAL;
3379 goto out;
3380 }
3381 guest_data_size = host_dm->data_size - host_dm->data_start;
3382 host_data = (char*)host_dm + host_dm->data_start;
3383
3384 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3385 switch (ie->host_cmd) {
3386 case DM_REMOVE_ALL:
3387 case DM_LIST_DEVICES:
3388 case DM_DEV_CREATE:
3389 case DM_DEV_REMOVE:
3390 case DM_DEV_SUSPEND:
3391 case DM_DEV_STATUS:
3392 case DM_DEV_WAIT:
3393 case DM_TABLE_STATUS:
3394 case DM_TABLE_CLEAR:
3395 case DM_TABLE_DEPS:
3396 case DM_LIST_VERSIONS:
3397 /* no input data */
3398 break;
3399 case DM_DEV_RENAME:
3400 case DM_DEV_SET_GEOMETRY:
3401 /* data contains only strings */
3402 memcpy(host_data, argptr, guest_data_size);
3403 break;
3404 case DM_TARGET_MSG:
3405 memcpy(host_data, argptr, guest_data_size);
3406 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3407 break;
3408 case DM_TABLE_LOAD:
3409 {
3410 void *gspec = argptr;
3411 void *cur_data = host_data;
3412 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3413 int spec_size = thunk_type_size(arg_type, 0);
3414 int i;
3415
3416 for (i = 0; i < host_dm->target_count; i++) {
3417 struct dm_target_spec *spec = cur_data;
3418 uint32_t next;
3419 int slen;
3420
3421 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3422 slen = strlen((char*)gspec + spec_size) + 1;
3423 next = spec->next;
3424 spec->next = sizeof(*spec) + slen;
3425 strcpy((char*)&spec[1], gspec + spec_size);
3426 gspec += next;
3427 cur_data += spec->next;
3428 }
3429 break;
3430 }
3431 default:
3432 ret = -TARGET_EINVAL;
3433 goto out;
3434 }
3435 unlock_user(argptr, guest_data, 0);
3436
3437 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3438 if (!is_error(ret)) {
3439 guest_data = arg + host_dm->data_start;
3440 guest_data_size = host_dm->data_size - host_dm->data_start;
3441 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3442 switch (ie->host_cmd) {
3443 case DM_REMOVE_ALL:
3444 case DM_DEV_CREATE:
3445 case DM_DEV_REMOVE:
3446 case DM_DEV_RENAME:
3447 case DM_DEV_SUSPEND:
3448 case DM_DEV_STATUS:
3449 case DM_TABLE_LOAD:
3450 case DM_TABLE_CLEAR:
3451 case DM_TARGET_MSG:
3452 case DM_DEV_SET_GEOMETRY:
3453 /* no return data */
3454 break;
3455 case DM_LIST_DEVICES:
3456 {
3457 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3458 uint32_t remaining_data = guest_data_size;
3459 void *cur_data = argptr;
3460 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3461 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
3462
3463 while (1) {
3464 uint32_t next = nl->next;
3465 if (next) {
3466 nl->next = nl_size + (strlen(nl->name) + 1);
3467 }
3468 if (remaining_data < nl->next) {
3469 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3470 break;
3471 }
3472 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3473 strcpy(cur_data + nl_size, nl->name);
3474 cur_data += nl->next;
3475 remaining_data -= nl->next;
3476 if (!next) {
3477 break;
3478 }
3479 nl = (void*)nl + next;
3480 }
3481 break;
3482 }
3483 case DM_DEV_WAIT:
3484 case DM_TABLE_STATUS:
3485 {
3486 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3487 void *cur_data = argptr;
3488 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3489 int spec_size = thunk_type_size(arg_type, 0);
3490 int i;
3491
3492 for (i = 0; i < host_dm->target_count; i++) {
3493 uint32_t next = spec->next;
3494 int slen = strlen((char*)&spec[1]) + 1;
3495 spec->next = (cur_data - argptr) + spec_size + slen;
3496 if (guest_data_size < spec->next) {
3497 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3498 break;
3499 }
3500 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3501 strcpy(cur_data + spec_size, (char*)&spec[1]);
3502 cur_data = argptr + spec->next;
3503 spec = (void*)host_dm + host_dm->data_start + next;
3504 }
3505 break;
3506 }
3507 case DM_TABLE_DEPS:
3508 {
3509 void *hdata = (void*)host_dm + host_dm->data_start;
3510 int count = *(uint32_t*)hdata;
3511 uint64_t *hdev = hdata + 8;
3512 uint64_t *gdev = argptr + 8;
3513 int i;
3514
3515 *(uint32_t*)argptr = tswap32(count);
3516 for (i = 0; i < count; i++) {
3517 *gdev = tswap64(*hdev);
3518 gdev++;
3519 hdev++;
3520 }
3521 break;
3522 }
3523 case DM_LIST_VERSIONS:
3524 {
3525 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3526 uint32_t remaining_data = guest_data_size;
3527 void *cur_data = argptr;
3528 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3529 int vers_size = thunk_type_size(arg_type, 0);
3530
3531 while (1) {
3532 uint32_t next = vers->next;
3533 if (next) {
3534 vers->next = vers_size + (strlen(vers->name) + 1);
3535 }
3536 if (remaining_data < vers->next) {
3537 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3538 break;
3539 }
3540 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3541 strcpy(cur_data + vers_size, vers->name);
3542 cur_data += vers->next;
3543 remaining_data -= vers->next;
3544 if (!next) {
3545 break;
3546 }
3547 vers = (void*)vers + next;
3548 }
3549 break;
3550 }
3551 default:
3552 ret = -TARGET_EINVAL;
3553 goto out;
3554 }
3555 unlock_user(argptr, guest_data, guest_data_size);
3556
3557 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3558 if (!argptr) {
3559 ret = -TARGET_EFAULT;
3560 goto out;
3561 }
3562 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3563 unlock_user(argptr, arg, target_size);
3564 }
3565 out:
3566 g_free(big_buf);
3567 return ret;
3568 }
3569
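/* Special handler for routing ioctls taking a struct rtentry: the rt_dev
 * string pointer embedded in the structure must be locked and translated
 * separately from the rest of the fields. */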
3570 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3571 int fd, abi_long cmd, abi_long arg)
3572 {
3573 const argtype *arg_type = ie->arg_type;
3574 const StructEntry *se;
3575 const argtype *field_types;
3576 const int *dst_offsets, *src_offsets;
3577 int target_size;
3578 void *argptr;
3579 abi_ulong *target_rt_dev_ptr;
3580 unsigned long *host_rt_dev_ptr;
3581 abi_long ret;
3582 int i;
3583
3584 assert(ie->access == IOC_W);
3585 assert(*arg_type == TYPE_PTR);
3586 arg_type++;
3587 assert(*arg_type == TYPE_STRUCT);
3588 target_size = thunk_type_size(arg_type, 0);
3589 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3590 if (!argptr) {
3591 return -TARGET_EFAULT;
3592 }
3593 arg_type++;
3594 assert(*arg_type == (int)STRUCT_rtentry);
3595 se = struct_entries + *arg_type++;
3596 assert(se->convert[0] == NULL);
3597 /* Convert the struct field by field here so we can catch the rt_dev string pointer. */
3598 field_types = se->field_types;
3599 dst_offsets = se->field_offsets[THUNK_HOST];
3600 src_offsets = se->field_offsets[THUNK_TARGET];
3601 for (i = 0; i < se->nb_fields; i++) {
3602 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3603 assert(*field_types == TYPE_PTRVOID);
3604 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3605 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3606 if (*target_rt_dev_ptr != 0) {
3607 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3608 tswapal(*target_rt_dev_ptr));
3609 if (!*host_rt_dev_ptr) {
3610 unlock_user(argptr, arg, 0);
3611 return -TARGET_EFAULT;
3612 }
3613 } else {
3614 *host_rt_dev_ptr = 0;
3615 }
3616 field_types++;
3617 continue;
3618 }
3619 field_types = thunk_convert(buf_temp + dst_offsets[i],
3620 argptr + src_offsets[i],
3621 field_types, THUNK_HOST);
3622 }
3623 unlock_user(argptr, arg, 0);
3624
3625 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3626 if (*host_rt_dev_ptr != 0) {
3627 unlock_user((void *)*host_rt_dev_ptr,
3628 *target_rt_dev_ptr, 0);
3629 }
3630 return ret;
3631 }
3632
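/* Table of emulated ioctls, generated from ioctls.h; entries that need
 * special argument handling provide a do_ioctl callback. */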
3633 static IOCTLEntry ioctl_entries[] = {
3634 #define IOCTL(cmd, access, ...) \
3635 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3636 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3637 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3638 #include "ioctls.h"
3639 { 0, 0, },
3640 };
3641
3642 /* ??? Implement proper locking for ioctls. */
3643 /* do_ioctl() must return target values and target errnos. */
3644 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3645 {
3646 const IOCTLEntry *ie;
3647 const argtype *arg_type;
3648 abi_long ret;
3649 uint8_t buf_temp[MAX_STRUCT_SIZE];
3650 int target_size;
3651 void *argptr;
3652
3653 ie = ioctl_entries;
3654 for(;;) {
3655 if (ie->target_cmd == 0) {
3656 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3657 return -TARGET_ENOSYS;
3658 }
3659 if (ie->target_cmd == cmd)
3660 break;
3661 ie++;
3662 }
3663 arg_type = ie->arg_type;
3664 #if defined(DEBUG)
3665 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3666 #endif
3667 if (ie->do_ioctl) {
3668 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3669 }
3670
3671 switch(arg_type[0]) {
3672 case TYPE_NULL:
3673 /* no argument */
3674 ret = get_errno(ioctl(fd, ie->host_cmd));
3675 break;
3676 case TYPE_PTRVOID:
3677 case TYPE_INT:
3678 /* int argument */
3679 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3680 break;
3681 case TYPE_PTR:
3682 arg_type++;
3683 target_size = thunk_type_size(arg_type, 0);
3684 switch(ie->access) {
3685 case IOC_R:
3686 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3687 if (!is_error(ret)) {
3688 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3689 if (!argptr)
3690 return -TARGET_EFAULT;
3691 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3692 unlock_user(argptr, arg, target_size);
3693 }
3694 break;
3695 case IOC_W:
3696 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3697 if (!argptr)
3698 return -TARGET_EFAULT;
3699 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3700 unlock_user(argptr, arg, 0);
3701 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3702 break;
3703 default:
3704 case IOC_RW:
3705 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3706 if (!argptr)
3707 return -TARGET_EFAULT;
3708 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3709 unlock_user(argptr, arg, 0);
3710 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3711 if (!is_error(ret)) {
3712 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3713 if (!argptr)
3714 return -TARGET_EFAULT;
3715 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3716 unlock_user(argptr, arg, target_size);
3717 }
3718 break;
3719 }
3720 break;
3721 default:
3722 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3723 (long)cmd, arg_type[0]);
3724 ret = -TARGET_ENOSYS;
3725 break;
3726 }
3727 return ret;
3728 }
3729
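/* Translation tables between target and host termios flag bits. */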
3730 static const bitmask_transtbl iflag_tbl[] = {
3731 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3732 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3733 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3734 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3735 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3736 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3737 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3738 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3739 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3740 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3741 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3742 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3743 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3744 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3745 { 0, 0, 0, 0 }
3746 };
3747
3748 static const bitmask_transtbl oflag_tbl[] = {
3749 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3750 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3751 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3752 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3753 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3754 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3755 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3756 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3757 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3758 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3759 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3760 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3761 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3762 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3763 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3764 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3765 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3766 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3767 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3768 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3769 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3770 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3771 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3772 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3773 { 0, 0, 0, 0 }
3774 };
3775
3776 static const bitmask_transtbl cflag_tbl[] = {
3777 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3778 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3779 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3780 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3781 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3782 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3783 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3784 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3785 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3786 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3787 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3788 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3789 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3790 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3791 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3792 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3793 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3794 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3795 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3796 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3797 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3798 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3799 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3800 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3801 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3802 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3803 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3804 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3805 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3806 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3807 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3808 { 0, 0, 0, 0 }
3809 };
3810
3811 static const bitmask_transtbl lflag_tbl[] = {
3812 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3813 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3814 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3815 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3816 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3817 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3818 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3819 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3820 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3821 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3822 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3823 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3824 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3825 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3826 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3827 { 0, 0, 0, 0 }
3828 };
3829
3830 static void target_to_host_termios (void *dst, const void *src)
3831 {
3832 struct host_termios *host = dst;
3833 const struct target_termios *target = src;
3834
3835 host->c_iflag =
3836 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3837 host->c_oflag =
3838 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3839 host->c_cflag =
3840 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3841 host->c_lflag =
3842 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3843 host->c_line = target->c_line;
3844
3845 memset(host->c_cc, 0, sizeof(host->c_cc));
3846 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3847 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3848 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3849 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3850 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3851 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3852 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3853 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3854 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3855 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3856 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3857 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3858 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3859 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3860 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3861 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3862 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3863 }
3864
3865 static void host_to_target_termios (void *dst, const void *src)
3866 {
3867 struct target_termios *target = dst;
3868 const struct host_termios *host = src;
3869
3870 target->c_iflag =
3871 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3872 target->c_oflag =
3873 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3874 target->c_cflag =
3875 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3876 target->c_lflag =
3877 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3878 target->c_line = host->c_line;
3879
3880 memset(target->c_cc, 0, sizeof(target->c_cc));
3881 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3882 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3883 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3884 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3885 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3886 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3887 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3888 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3889 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3890 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3891 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3892 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3893 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3894 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3895 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3896 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3897 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3898 }
3899
3900 static const StructEntry struct_termios_def = {
3901 .convert = { host_to_target_termios, target_to_host_termios },
3902 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3903 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3904 };
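/* This StructEntry is presumably registered through the STRUCT_SPECIAL()
   entries pulled in from "syscall_types.h" by syscall_init() below, which
   lets the generic ioctl thunking code convert struct termios payloads
   (e.g. TCGETS/TCSETS) in both directions automatically. */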
3905
3906 static bitmask_transtbl mmap_flags_tbl[] = {
3907 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3908 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3909 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3910 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3911 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3912 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3913 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3914 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3915 { 0, 0, 0, 0 }
3916 };
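/* The mmap flags map one-to-one between target and host here; any target
   flag bit that has no entry in the table above is silently dropped by the
   translation. */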
3917
3918 #if defined(TARGET_I386)
3919
3920 /* NOTE: there is really only one LDT, shared by all the threads */
3921 static uint8_t *ldt_table;
3922
3923 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3924 {
3925 int size;
3926 void *p;
3927
3928 if (!ldt_table)
3929 return 0;
3930 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3931 if (size > bytecount)
3932 size = bytecount;
3933 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3934 if (!p)
3935 return -TARGET_EFAULT;
3936 /* ??? Should this be byteswapped? */
3937 memcpy(p, ldt_table, size);
3938 unlock_user(p, ptr, size);
3939 return size;
3940 }
3941
3942 /* XXX: add locking support */
3943 static abi_long write_ldt(CPUX86State *env,
3944 abi_ulong ptr, unsigned long bytecount, int oldmode)
3945 {
3946 struct target_modify_ldt_ldt_s ldt_info;
3947 struct target_modify_ldt_ldt_s *target_ldt_info;
3948 int seg_32bit, contents, read_exec_only, limit_in_pages;
3949 int seg_not_present, useable, lm;
3950 uint32_t *lp, entry_1, entry_2;
3951
3952 if (bytecount != sizeof(ldt_info))
3953 return -TARGET_EINVAL;
3954 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3955 return -TARGET_EFAULT;
3956 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3957 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3958 ldt_info.limit = tswap32(target_ldt_info->limit);
3959 ldt_info.flags = tswap32(target_ldt_info->flags);
3960 unlock_user_struct(target_ldt_info, ptr, 0);
3961
3962 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3963 return -TARGET_EINVAL;
3964 seg_32bit = ldt_info.flags & 1;
3965 contents = (ldt_info.flags >> 1) & 3;
3966 read_exec_only = (ldt_info.flags >> 3) & 1;
3967 limit_in_pages = (ldt_info.flags >> 4) & 1;
3968 seg_not_present = (ldt_info.flags >> 5) & 1;
3969 useable = (ldt_info.flags >> 6) & 1;
3970 #ifdef TARGET_ABI32
3971 lm = 0;
3972 #else
3973 lm = (ldt_info.flags >> 7) & 1;
3974 #endif
3975 if (contents == 3) {
3976 if (oldmode)
3977 return -TARGET_EINVAL;
3978 if (seg_not_present == 0)
3979 return -TARGET_EINVAL;
3980 }
3981 /* allocate the LDT */
3982 if (!ldt_table) {
3983 env->ldt.base = target_mmap(0,
3984 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3985 PROT_READ|PROT_WRITE,
3986 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3987 if (env->ldt.base == -1)
3988 return -TARGET_ENOMEM;
3989 memset(g2h(env->ldt.base), 0,
3990 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3991 env->ldt.limit = 0xffff;
3992 ldt_table = g2h(env->ldt.base);
3993 }
3994
3995 /* NOTE: same code as Linux kernel */
3996 /* Allow LDTs to be cleared by the user. */
3997 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3998 if (oldmode ||
3999 (contents == 0 &&
4000 read_exec_only == 1 &&
4001 seg_32bit == 0 &&
4002 limit_in_pages == 0 &&
4003 seg_not_present == 1 &&
4004 useable == 0 )) {
4005 entry_1 = 0;
4006 entry_2 = 0;
4007 goto install;
4008 }
4009 }
4010
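    /* Pack the fields into the two 32-bit words of an x86 segment
       descriptor, much as the kernel's own LDT code does:
         entry_1 (low word):  base[15:0] << 16 | limit[15:0]
         entry_2 (high word): base[31:24] | limit[19:16]
                              | (G) limit_in_pages << 23
                              | (D/B) seg_32bit << 22 | (L) lm << 21
                              | (AVL) useable << 20 (new mode only)
                              | (P) !seg_not_present << 15
                              | DPL=3, S=1 (the 0x7000 constant)
                              | type bits 9-11 from contents/read_exec_only */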
4011 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4012 (ldt_info.limit & 0x0ffff);
4013 entry_2 = (ldt_info.base_addr & 0xff000000) |
4014 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4015 (ldt_info.limit & 0xf0000) |
4016 ((read_exec_only ^ 1) << 9) |
4017 (contents << 10) |
4018 ((seg_not_present ^ 1) << 15) |
4019 (seg_32bit << 22) |
4020 (limit_in_pages << 23) |
4021 (lm << 21) |
4022 0x7000;
4023 if (!oldmode)
4024 entry_2 |= (useable << 20);
4025
4026 /* Install the new entry ... */
4027 install:
4028 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4029 lp[0] = tswap32(entry_1);
4030 lp[1] = tswap32(entry_2);
4031 return 0;
4032 }
4033
4034 /* specific and weird i386 syscalls */
4035 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4036 unsigned long bytecount)
4037 {
4038 abi_long ret;
4039
4040 switch (func) {
4041 case 0:
4042 ret = read_ldt(ptr, bytecount);
4043 break;
4044 case 1:
4045 ret = write_ldt(env, ptr, bytecount, 1);
4046 break;
4047 case 0x11:
4048 ret = write_ldt(env, ptr, bytecount, 0);
4049 break;
4050 default:
4051 ret = -TARGET_ENOSYS;
4052 break;
4053 }
4054 return ret;
4055 }
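/* The func argument follows the modify_ldt(2) convention: 0 reads the LDT,
   1 writes an entry in the legacy format and 0x11 writes in the newer
   format that honours the "useable" (AVL) bit.  Illustrative guest-side
   call only (ldt_entry is a hypothetical struct user_desc-style argument):
       modify_ldt(0x11, &ldt_entry, sizeof(ldt_entry));
   which lands in write_ldt() above with oldmode == 0. */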
4056
4057 #if defined(TARGET_I386) && defined(TARGET_ABI32)
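/* set_thread_area: install a TLS descriptor in the guest's GDT.  If the
   guest passes entry_number == -1 it is asking for a free slot; the loop
   below picks the first empty TLS entry and writes the chosen index back
   into the guest structure before the descriptor is built, mirroring the
   native syscall's behaviour. */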
4058 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4059 {
4060 uint64_t *gdt_table = g2h(env->gdt.base);
4061 struct target_modify_ldt_ldt_s ldt_info;
4062 struct target_modify_ldt_ldt_s *target_ldt_info;
4063 int seg_32bit, contents, read_exec_only, limit_in_pages;
4064 int seg_not_present, useable, lm;
4065 uint32_t *lp, entry_1, entry_2;
4066 int i;
4067
4068 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4069 if (!target_ldt_info)
4070 return -TARGET_EFAULT;
4071 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4072 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4073 ldt_info.limit = tswap32(target_ldt_info->limit);
4074 ldt_info.flags = tswap32(target_ldt_info->flags);
4075 if (ldt_info.entry_number == -1) {
4076 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4077 if (gdt_table[i] == 0) {
4078 ldt_info.entry_number = i;
4079 target_ldt_info->entry_number = tswap32(i);
4080 break;
4081 }
4082 }
4083 }
4084 unlock_user_struct(target_ldt_info, ptr, 1);
4085
4086 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4087 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4088 return -TARGET_EINVAL;
4089 seg_32bit = ldt_info.flags & 1;
4090 contents = (ldt_info.flags >> 1) & 3;
4091 read_exec_only = (ldt_info.flags >> 3) & 1;
4092 limit_in_pages = (ldt_info.flags >> 4) & 1;
4093 seg_not_present = (ldt_info.flags >> 5) & 1;
4094 useable = (ldt_info.flags >> 6) & 1;
4095 #ifdef TARGET_ABI32
4096 lm = 0;
4097 #else
4098 lm = (ldt_info.flags >> 7) & 1;
4099 #endif
4100
4101 if (contents == 3) {
4102 if (seg_not_present == 0)
4103 return -TARGET_EINVAL;
4104 }
4105
4106 /* NOTE: same code as Linux kernel */
4107 /* Allow LDTs to be cleared by the user. */
4108 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4109 if ((contents == 0 &&
4110 read_exec_only == 1 &&
4111 seg_32bit == 0 &&
4112 limit_in_pages == 0 &&
4113 seg_not_present == 1 &&
4114 useable == 0 )) {
4115 entry_1 = 0;
4116 entry_2 = 0;
4117 goto install;
4118 }
4119 }
4120
4121 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4122 (ldt_info.limit & 0x0ffff);
4123 entry_2 = (ldt_info.base_addr & 0xff000000) |
4124 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4125 (ldt_info.limit & 0xf0000) |
4126 ((read_exec_only ^ 1) << 9) |
4127 (contents << 10) |
4128 ((seg_not_present ^ 1) << 15) |
4129 (seg_32bit << 22) |
4130 (limit_in_pages << 23) |
4131 (useable << 20) |
4132 (lm << 21) |
4133 0x7000;
4134
4135 /* Install the new entry ... */
4136 install:
4137 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4138 lp[0] = tswap32(entry_1);
4139 lp[1] = tswap32(entry_2);
4140 return 0;
4141 }
4142
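/* get_thread_area: the inverse of the above - decode the packed GDT
   descriptor back into the base/limit/flags layout the guest expects. */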
4143 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4144 {
4145 struct target_modify_ldt_ldt_s *target_ldt_info;
4146 uint64_t *gdt_table = g2h(env->gdt.base);
4147 uint32_t base_addr, limit, flags;
4148 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4149 int seg_not_present, useable, lm;
4150 uint32_t *lp, entry_1, entry_2;
4151
4152 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4153 if (!target_ldt_info)
4154 return -TARGET_EFAULT;
4155 idx = tswap32(target_ldt_info->entry_number);
4156 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4157 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4158 unlock_user_struct(target_ldt_info, ptr, 1);
4159 return -TARGET_EINVAL;
4160 }
4161 lp = (uint32_t *)(gdt_table + idx);
4162 entry_1 = tswap32(lp[0]);
4163 entry_2 = tswap32(lp[1]);
4164
4165 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4166 contents = (entry_2 >> 10) & 3;
4167 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4168 seg_32bit = (entry_2 >> 22) & 1;
4169 limit_in_pages = (entry_2 >> 23) & 1;
4170 useable = (entry_2 >> 20) & 1;
4171 #ifdef TARGET_ABI32
4172 lm = 0;
4173 #else
4174 lm = (entry_2 >> 21) & 1;
4175 #endif
4176 flags = (seg_32bit << 0) | (contents << 1) |
4177 (read_exec_only << 3) | (limit_in_pages << 4) |
4178 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4179 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4180 base_addr = (entry_1 >> 16) |
4181 (entry_2 & 0xff000000) |
4182 ((entry_2 & 0xff) << 16);
4183 target_ldt_info->base_addr = tswapal(base_addr);
4184 target_ldt_info->limit = tswap32(limit);
4185 target_ldt_info->flags = tswap32(flags);
4186 unlock_user_struct(target_ldt_info, ptr, 1);
4187 return 0;
4188 }
4189 #endif /* TARGET_I386 && TARGET_ABI32 */
4190
4191 #ifndef TARGET_ABI32
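/* x86-64 arch_prctl: ARCH_SET_FS/ARCH_SET_GS change the segment base used
   for TLS, while ARCH_GET_FS/ARCH_GET_GS copy the current base back out to
   guest memory. */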
4192 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4193 {
4194 abi_long ret = 0;
4195 abi_ulong val;
4196 int idx;
4197
4198 switch(code) {
4199 case TARGET_ARCH_SET_GS:
4200 case TARGET_ARCH_SET_FS:
4201 if (code == TARGET_ARCH_SET_GS)
4202 idx = R_GS;
4203 else
4204 idx = R_FS;
4205 cpu_x86_load_seg(env, idx, 0);
4206 env->segs[idx].base = addr;
4207 break;
4208 case TARGET_ARCH_GET_GS:
4209 case TARGET_ARCH_GET_FS:
4210 if (code == TARGET_ARCH_GET_GS)
4211 idx = R_GS;
4212 else
4213 idx = R_FS;
4214 val = env->segs[idx].base;
4215 if (put_user(val, addr, abi_ulong))
4216 ret = -TARGET_EFAULT;
4217 break;
4218 default:
4219 ret = -TARGET_EINVAL;
4220 break;
4221 }
4222 return ret;
4223 }
4224 #endif
4225
4226 #endif /* defined(TARGET_I386) */
4227
4228 #define NEW_STACK_SIZE 0x40000
4229
4230 #if defined(CONFIG_USE_NPTL)
4231
4232 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4233 typedef struct {
4234 CPUArchState *env;
4235 pthread_mutex_t mutex;
4236 pthread_cond_t cond;
4237 pthread_t thread;
4238 uint32_t tid;
4239 abi_ulong child_tidptr;
4240 abi_ulong parent_tidptr;
4241 sigset_t sigmask;
4242 } new_thread_info;
4243
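/* Child-side entry point for CLONE_VM clones.  The parent holds clone_lock
   while it finishes setting up the new CPU/TLS state; the child announces
   its TID via info->cond and then briefly takes clone_lock itself so that
   it cannot enter cpu_loop() before that setup is complete. */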
4244 static void *clone_func(void *arg)
4245 {
4246 new_thread_info *info = arg;
4247 CPUArchState *env;
4248 CPUState *cpu;
4249 TaskState *ts;
4250
4251 env = info->env;
4252 cpu = ENV_GET_CPU(env);
4253 thread_cpu = cpu;
4254 ts = (TaskState *)env->opaque;
4255 info->tid = gettid();
4256 cpu->host_tid = info->tid;
4257 task_settid(ts);
4258 if (info->child_tidptr)
4259 put_user_u32(info->tid, info->child_tidptr);
4260 if (info->parent_tidptr)
4261 put_user_u32(info->tid, info->parent_tidptr);
4262 /* Enable signals. */
4263 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4264 /* Signal to the parent that we're ready. */
4265 pthread_mutex_lock(&info->mutex);
4266 pthread_cond_broadcast(&info->cond);
4267 pthread_mutex_unlock(&info->mutex);
4268 /* Wait until the parent has finished initializing the TLS state. */
4269 pthread_mutex_lock(&clone_lock);
4270 pthread_mutex_unlock(&clone_lock);
4271 cpu_loop(env);
4272 /* never exits */
4273 return NULL;
4274 }
4275 #else
4276
4277 static int clone_func(void *arg)
4278 {
4279 CPUArchState *env = arg;
4280 cpu_loop(env);
4281 /* never exits */
4282 return 0;
4283 }
4284 #endif
4285
4286 /* do_fork() must return host values and target errnos (unlike most
4287 do_*() functions). */
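/* Two paths: with CLONE_VM the "fork" becomes a new host thread (NPTL
   case) or a host clone() sharing the address space; without CLONE_VM it
   degrades to a plain host fork(), and vfork() is emulated as fork(). */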
4288 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4289 abi_ulong parent_tidptr, target_ulong newtls,
4290 abi_ulong child_tidptr)
4291 {
4292 int ret;
4293 TaskState *ts;
4294 CPUArchState *new_env;
4295 #if defined(CONFIG_USE_NPTL)
4296 unsigned int nptl_flags;
4297 sigset_t sigmask;
4298 #else
4299 uint8_t *new_stack;
4300 #endif
4301
4302 /* Emulate vfork() with fork() */
4303 if (flags & CLONE_VFORK)
4304 flags &= ~(CLONE_VFORK | CLONE_VM);
4305
4306 if (flags & CLONE_VM) {
4307 TaskState *parent_ts = (TaskState *)env->opaque;
4308 #if defined(CONFIG_USE_NPTL)
4309 new_thread_info info;
4310 pthread_attr_t attr;
4311 #endif
4312 ts = g_malloc0(sizeof(TaskState));
4313 init_task_state(ts);
4314 /* we create a new CPU instance. */
4315 new_env = cpu_copy(env);
4316 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4317 cpu_reset(ENV_GET_CPU(new_env));
4318 #endif
4319 /* Init regs that differ from the parent. */
4320 cpu_clone_regs(new_env, newsp);
4321 new_env->opaque = ts;
4322 ts->bprm = parent_ts->bprm;
4323 ts->info = parent_ts->info;
4324 #if defined(CONFIG_USE_NPTL)
4325 nptl_flags = flags;
4326 flags &= ~CLONE_NPTL_FLAGS2;
4327
4328 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4329 ts->child_tidptr = child_tidptr;
4330 }
4331
4332 if (nptl_flags & CLONE_SETTLS)
4333 cpu_set_tls (new_env, newtls);
4334
4335 /* Grab a mutex so that thread setup appears atomic. */
4336 pthread_mutex_lock(&clone_lock);
4337
4338 memset(&info, 0, sizeof(info));
4339 pthread_mutex_init(&info.mutex, NULL);
4340 pthread_mutex_lock(&info.mutex);
4341 pthread_cond_init(&info.cond, NULL);
4342 info.env = new_env;
4343 if (nptl_flags & CLONE_CHILD_SETTID)
4344 info.child_tidptr = child_tidptr;
4345 if (nptl_flags & CLONE_PARENT_SETTID)
4346 info.parent_tidptr = parent_tidptr;
4347
4348 ret = pthread_attr_init(&attr);
4349 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4350 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4351 /* It is not safe to deliver signals until the child has finished
4352 initializing, so temporarily block all signals. */
4353 sigfillset(&sigmask);
4354 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4355
4356 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4357 /* TODO: Free new CPU state if thread creation failed. */
4358
4359 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4360 pthread_attr_destroy(&attr);
4361 if (ret == 0) {
4362 /* Wait for the child to initialize. */
4363 pthread_cond_wait(&info.cond, &info.mutex);
4364 ret = info.tid;
4365 if (flags & CLONE_PARENT_SETTID)
4366 put_user_u32(ret, parent_tidptr);
4367 } else {
4368 ret = -1;
4369 }
4370 pthread_mutex_unlock(&info.mutex);
4371 pthread_cond_destroy(&info.cond);
4372 pthread_mutex_destroy(&info.mutex);
4373 pthread_mutex_unlock(&clone_lock);
4374 #else
4375 if (flags & CLONE_NPTL_FLAGS2)
4376 return -EINVAL;
4377 /* This is probably going to die very quickly, but do it anyway. */
4378 new_stack = g_malloc0 (NEW_STACK_SIZE);
4379 #ifdef __ia64__
4380 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4381 #else
4382 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4383 #endif
4384 #endif
4385 } else {
4386 /* if there is no CLONE_VM, we treat it as a fork */
4387 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4388 return -EINVAL;
4389 fork_start();
4390 ret = fork();
4391 if (ret == 0) {
4392 /* Child Process. */
4393 cpu_clone_regs(env, newsp);
4394 fork_end(1);
4395 #if defined(CONFIG_USE_NPTL)
4396 /* There is a race condition here. The parent process could
4397 theoretically read the TID in the child process before the child
4398 tid is set. This would require using either ptrace
4399 (not implemented) or having *_tidptr point at a shared memory
4400 mapping. We can't repeat the spinlock hack used above because
4401 the child process gets its own copy of the lock. */
4402 if (flags & CLONE_CHILD_SETTID)
4403 put_user_u32(gettid(), child_tidptr);
4404 if (flags & CLONE_PARENT_SETTID)
4405 put_user_u32(gettid(), parent_tidptr);
4406 ts = (TaskState *)env->opaque;
4407 if (flags & CLONE_SETTLS)
4408 cpu_set_tls (env, newtls);
4409 if (flags & CLONE_CHILD_CLEARTID)
4410 ts->child_tidptr = child_tidptr;
4411 #endif
4412 } else {
4413 fork_end(0);
4414 }
4415 }
4416 return ret;
4417 }
4418
4419 /* warning: does not handle Linux-specific flags... */
4420 static int target_to_host_fcntl_cmd(int cmd)
4421 {
4422 switch(cmd) {
4423 case TARGET_F_DUPFD:
4424 case TARGET_F_GETFD:
4425 case TARGET_F_SETFD:
4426 case TARGET_F_GETFL:
4427 case TARGET_F_SETFL:
4428 return cmd;
4429 case TARGET_F_GETLK:
4430 return F_GETLK;
4431 case TARGET_F_SETLK:
4432 return F_SETLK;
4433 case TARGET_F_SETLKW:
4434 return F_SETLKW;
4435 case TARGET_F_GETOWN:
4436 return F_GETOWN;
4437 case TARGET_F_SETOWN:
4438 return F_SETOWN;
4439 case TARGET_F_GETSIG:
4440 return F_GETSIG;
4441 case TARGET_F_SETSIG:
4442 return F_SETSIG;
4443 #if TARGET_ABI_BITS == 32
4444 case TARGET_F_GETLK64:
4445 return F_GETLK64;
4446 case TARGET_F_SETLK64:
4447 return F_SETLK64;
4448 case TARGET_F_SETLKW64:
4449 return F_SETLKW64;
4450 #endif
4451 case TARGET_F_SETLEASE:
4452 return F_SETLEASE;
4453 case TARGET_F_GETLEASE:
4454 return F_GETLEASE;
4455 #ifdef F_DUPFD_CLOEXEC
4456 case TARGET_F_DUPFD_CLOEXEC:
4457 return F_DUPFD_CLOEXEC;
4458 #endif
4459 case TARGET_F_NOTIFY:
4460 return F_NOTIFY;
4461 default:
4462 return -TARGET_EINVAL;
4463 }
4464 return -TARGET_EINVAL;
4465 }
4466
4467 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4468 static const bitmask_transtbl flock_tbl[] = {
4469 TRANSTBL_CONVERT(F_RDLCK),
4470 TRANSTBL_CONVERT(F_WRLCK),
4471 TRANSTBL_CONVERT(F_UNLCK),
4472 TRANSTBL_CONVERT(F_EXLCK),
4473 TRANSTBL_CONVERT(F_SHLCK),
4474 { 0, 0, 0, 0 }
4475 };
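/* A mask of -1 turns the translation into an exact-value match rather than
   a bit test, which is what the small l_type enumeration needs. */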
4476
4477 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4478 {
4479 struct flock fl;
4480 struct target_flock *target_fl;
4481 struct flock64 fl64;
4482 struct target_flock64 *target_fl64;
4483 abi_long ret;
4484 int host_cmd = target_to_host_fcntl_cmd(cmd);
4485
4486 if (host_cmd == -TARGET_EINVAL)
4487 return host_cmd;
4488
4489 switch(cmd) {
4490 case TARGET_F_GETLK:
4491 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4492 return -TARGET_EFAULT;
4493 fl.l_type =
4494 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4495 fl.l_whence = tswap16(target_fl->l_whence);
4496 fl.l_start = tswapal(target_fl->l_start);
4497 fl.l_len = tswapal(target_fl->l_len);
4498 fl.l_pid = tswap32(target_fl->l_pid);
4499 unlock_user_struct(target_fl, arg, 0);
4500 ret = get_errno(fcntl(fd, host_cmd, &fl));
4501 if (ret == 0) {
4502 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4503 return -TARGET_EFAULT;
4504 target_fl->l_type =
4505 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4506 target_fl->l_whence = tswap16(fl.l_whence);
4507 target_fl->l_start = tswapal(fl.l_start);
4508 target_fl->l_len = tswapal(fl.l_len);
4509 target_fl->l_pid = tswap32(fl.l_pid);
4510 unlock_user_struct(target_fl, arg, 1);
4511 }
4512 break;
4513
4514 case TARGET_F_SETLK:
4515 case TARGET_F_SETLKW:
4516 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4517 return -TARGET_EFAULT;
4518 fl.l_type =
4519 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4520 fl.l_whence = tswap16(target_fl->l_whence);
4521 fl.l_start = tswapal(target_fl->l_start);
4522 fl.l_len = tswapal(target_fl->l_len);
4523 fl.l_pid = tswap32(target_fl->l_pid);
4524 unlock_user_struct(target_fl, arg, 0);
4525 ret = get_errno(fcntl(fd, host_cmd, &fl));
4526 break;
4527
4528 case TARGET_F_GETLK64:
4529 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4530 return -TARGET_EFAULT;
4531 fl64.l_type =
4532 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4533 fl64.l_whence = tswap16(target_fl64->l_whence);
4534 fl64.l_start = tswap64(target_fl64->l_start);
4535 fl64.l_len = tswap64(target_fl64->l_len);
4536 fl64.l_pid = tswap32(target_fl64->l_pid);
4537 unlock_user_struct(target_fl64, arg, 0);
4538 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4539 if (ret == 0) {
4540 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4541 return -TARGET_EFAULT;
4542 target_fl64->l_type =
4543 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4544 target_fl64->l_whence = tswap16(fl64.l_whence);
4545 target_fl64->l_start = tswap64(fl64.l_start);
4546 target_fl64->l_len = tswap64(fl64.l_len);
4547 target_fl64->l_pid = tswap32(fl64.l_pid);
4548 unlock_user_struct(target_fl64, arg, 1);
4549 }
4550 break;
4551 case TARGET_F_SETLK64:
4552 case TARGET_F_SETLKW64:
4553 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4554 return -TARGET_EFAULT;
4555 fl64.l_type =
4556 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4557 fl64.l_whence = tswap16(target_fl64->l_whence);
4558 fl64.l_start = tswap64(target_fl64->l_start);
4559 fl64.l_len = tswap64(target_fl64->l_len);
4560 fl64.l_pid = tswap32(target_fl64->l_pid);
4561 unlock_user_struct(target_fl64, arg, 0);
4562 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4563 break;
4564
4565 case TARGET_F_GETFL:
4566 ret = get_errno(fcntl(fd, host_cmd, arg));
4567 if (ret >= 0) {
4568 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4569 }
4570 break;
4571
4572 case TARGET_F_SETFL:
4573 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4574 break;
4575
4576 case TARGET_F_SETOWN:
4577 case TARGET_F_GETOWN:
4578 case TARGET_F_SETSIG:
4579 case TARGET_F_GETSIG:
4580 case TARGET_F_SETLEASE:
4581 case TARGET_F_GETLEASE:
4582 ret = get_errno(fcntl(fd, host_cmd, arg));
4583 break;
4584
4585 default:
4586 ret = get_errno(fcntl(fd, cmd, arg));
4587 break;
4588 }
4589 return ret;
4590 }
4591
4592 #ifdef USE_UID16
4593
4594 static inline int high2lowuid(int uid)
4595 {
4596 if (uid > 65535)
4597 return 65534;
4598 else
4599 return uid;
4600 }
4601
4602 static inline int high2lowgid(int gid)
4603 {
4604 if (gid > 65535)
4605 return 65534;
4606 else
4607 return gid;
4608 }
4609
4610 static inline int low2highuid(int uid)
4611 {
4612 if ((int16_t)uid == -1)
4613 return -1;
4614 else
4615 return uid;
4616 }
4617
4618 static inline int low2highgid(int gid)
4619 {
4620 if ((int16_t)gid == -1)
4621 return -1;
4622 else
4623 return gid;
4624 }
4625 static inline int tswapid(int id)
4626 {
4627 return tswap16(id);
4628 }
4629 #else /* !USE_UID16 */
4630 static inline int high2lowuid(int uid)
4631 {
4632 return uid;
4633 }
4634 static inline int high2lowgid(int gid)
4635 {
4636 return gid;
4637 }
4638 static inline int low2highuid(int uid)
4639 {
4640 return uid;
4641 }
4642 static inline int low2highgid(int gid)
4643 {
4644 return gid;
4645 }
4646 static inline int tswapid(int id)
4647 {
4648 return tswap32(id);
4649 }
4650 #endif /* USE_UID16 */
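/* With 16-bit target uids (USE_UID16), ids that do not fit are reported as
   the overflow id 65534 (matching the kernel's default overflowuid), and
   0xffff must widen back to -1 so the "leave unchanged" convention of the
   set*id() calls keeps working; the helpers above reproduce that. */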
4651
4652 void syscall_init(void)
4653 {
4654 IOCTLEntry *ie;
4655 const argtype *arg_type;
4656 int size;
4657 int i;
4658
4659 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4660 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4661 #include "syscall_types.h"
4662 #undef STRUCT
4663 #undef STRUCT_SPECIAL
4664
4665 /* Build the target_to_host_errno_table[] from
4666 * host_to_target_errno_table[]. */
4667 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4668 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4669 }
4670
4671 /* We patch the ioctl size if necessary. We rely on the fact that
4672 no ioctl has all bits set to '1' in the size field. */
4673 ie = ioctl_entries;
4674 while (ie->target_cmd != 0) {
4675 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4676 TARGET_IOC_SIZEMASK) {
4677 arg_type = ie->arg_type;
4678 if (arg_type[0] != TYPE_PTR) {
4679 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4680 ie->target_cmd);
4681 exit(1);
4682 }
4683 arg_type++;
4684 size = thunk_type_size(arg_type, 0);
4685 ie->target_cmd = (ie->target_cmd &
4686 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4687 (size << TARGET_IOC_SIZESHIFT);
4688 }
4689
4690 /* automatic consistency check if same arch */
4691 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4692 (defined(__x86_64__) && defined(TARGET_X86_64))
4693 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4694 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4695 ie->name, ie->target_cmd, ie->host_cmd);
4696 }
4697 #endif
4698 ie++;
4699 }
4700 }
4701
4702 #if TARGET_ABI_BITS == 32
4703 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4704 {
4705 #ifdef TARGET_WORDS_BIGENDIAN
4706 return ((uint64_t)word0 << 32) | word1;
4707 #else
4708 return ((uint64_t)word1 << 32) | word0;
4709 #endif
4710 }
4711 #else /* TARGET_ABI_BITS == 32 */
4712 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4713 {
4714 return word0;
4715 }
4716 #endif /* TARGET_ABI_BITS != 32 */
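/* On 32-bit ABIs a 64-bit file offset arrives as two registers; the helper
   above reassembles it respecting the target's endianness, while
   regpairs_aligned() (used below) skips the padding register that some
   ABIs insert so that such pairs start on an even-numbered register. */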
4717
4718 #ifdef TARGET_NR_truncate64
4719 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4720 abi_long arg2,
4721 abi_long arg3,
4722 abi_long arg4)
4723 {
4724 if (regpairs_aligned(cpu_env)) {
4725 arg2 = arg3;
4726 arg3 = arg4;
4727 }
4728 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4729 }
4730 #endif
4731
4732 #ifdef TARGET_NR_ftruncate64
4733 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4734 abi_long arg2,
4735 abi_long arg3,
4736 abi_long arg4)
4737 {
4738 if (regpairs_aligned(cpu_env)) {
4739 arg2 = arg3;
4740 arg3 = arg4;
4741 }
4742 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4743 }
4744 #endif
4745
4746 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4747 abi_ulong target_addr)
4748 {
4749 struct target_timespec *target_ts;
4750
4751 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4752 return -TARGET_EFAULT;
4753 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4754 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4755 unlock_user_struct(target_ts, target_addr, 0);
4756 return 0;
4757 }
4758
4759 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4760 struct timespec *host_ts)
4761 {
4762 struct target_timespec *target_ts;
4763
4764 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4765 return -TARGET_EFAULT;
4766 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4767 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4768 unlock_user_struct(target_ts, target_addr, 1);
4769 return 0;
4770 }
4771
4772 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4773 static inline abi_long host_to_target_stat64(void *cpu_env,
4774 abi_ulong target_addr,
4775 struct stat *host_st)
4776 {
4777 #ifdef TARGET_ARM
4778 if (((CPUARMState *)cpu_env)->eabi) {
4779 struct target_eabi_stat64 *target_st;
4780
4781 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4782 return -TARGET_EFAULT;
4783 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4784 __put_user(host_st->st_dev, &target_st->st_dev);
4785 __put_user(host_st->st_ino, &target_st->st_ino);
4786 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4787 __put_user(host_st->st_ino, &target_st->__st_ino);
4788 #endif
4789 __put_user(host_st->st_mode, &target_st->st_mode);
4790 __put_user(host_st->st_nlink, &target_st->st_nlink);
4791 __put_user(host_st->st_uid, &target_st->st_uid);
4792 __put_user(host_st->st_gid, &target_st->st_gid);
4793 __put_user(host_st->st_rdev, &target_st->st_rdev);
4794 __put_user(host_st->st_size, &target_st->st_size);
4795 __put_user(host_st->st_blksize, &target_st->st_blksize);
4796 __put_user(host_st->st_blocks, &target_st->st_blocks);
4797 __put_user(host_st->st_atime, &target_st->target_st_atime);
4798 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4799 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4800 unlock_user_struct(target_st, target_addr, 1);
4801 } else
4802 #endif
4803 {
4804 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4805 struct target_stat *target_st;
4806 #else
4807 struct target_stat64 *target_st;
4808 #endif
4809
4810 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4811 return -TARGET_EFAULT;
4812 memset(target_st, 0, sizeof(*target_st));
4813 __put_user(host_st->st_dev, &target_st->st_dev);
4814 __put_user(host_st->st_ino, &target_st->st_ino);
4815 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4816 __put_user(host_st->st_ino, &target_st->__st_ino);
4817 #endif
4818 __put_user(host_st->st_mode, &target_st->st_mode);
4819 __put_user(host_st->st_nlink, &target_st->st_nlink);
4820 __put_user(host_st->st_uid, &target_st->st_uid);
4821 __put_user(host_st->st_gid, &target_st->st_gid);
4822 __put_user(host_st->st_rdev, &target_st->st_rdev);
4823 /* XXX: better use of kernel struct */
4824 __put_user(host_st->st_size, &target_st->st_size);
4825 __put_user(host_st->st_blksize, &target_st->st_blksize);
4826 __put_user(host_st->st_blocks, &target_st->st_blocks);
4827 __put_user(host_st->st_atime, &target_st->target_st_atime);
4828 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4829 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4830 unlock_user_struct(target_st, target_addr, 1);
4831 }
4832
4833 return 0;
4834 }
4835 #endif
4836
4837 #if defined(CONFIG_USE_NPTL)
4838 /* ??? Using host futex calls even when target atomic operations
4839 are not really atomic probably breaks things. However, implementing
4840 futexes locally would make it tricky to support futexes shared between
4841 multiple processes. Those are probably useless anyway, because guest
4842 atomic operations won't work either. */
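/* Note on byte order below: for FUTEX_WAIT the val argument is compared by
   the kernel against the guest-endian word at uaddr, so it is tswap32()ed;
   for FUTEX_WAKE it is just a wake count and is passed through unchanged.
   The same distinction applies to val3 for FUTEX_CMP_REQUEUE. */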
4843 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4844 target_ulong uaddr2, int val3)
4845 {
4846 struct timespec ts, *pts;
4847 int base_op;
4848
4849 /* ??? We assume FUTEX_* constants are the same on both host
4850 and target. */
4851 #ifdef FUTEX_CMD_MASK
4852 base_op = op & FUTEX_CMD_MASK;
4853 #else
4854 base_op = op;
4855 #endif
4856 switch (base_op) {
4857 case FUTEX_WAIT:
4858 case FUTEX_WAIT_BITSET:
4859 if (timeout) {
4860 pts = &ts;
4861 target_to_host_timespec(pts, timeout);
4862 } else {
4863 pts = NULL;
4864 }
4865 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4866 pts, NULL, val3));
4867 case FUTEX_WAKE:
4868 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4869 case FUTEX_FD:
4870 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4871 case FUTEX_REQUEUE:
4872 case FUTEX_CMP_REQUEUE:
4873 case FUTEX_WAKE_OP:
4874 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4875 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4876 But the prototype takes a `struct timespec *'; insert casts
4877 to satisfy the compiler. We do not need to tswap TIMEOUT
4878 since it's not compared to guest memory. */
4879 pts = (struct timespec *)(uintptr_t) timeout;
4880 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4881 g2h(uaddr2),
4882 (base_op == FUTEX_CMP_REQUEUE
4883 ? tswap32(val3)
4884 : val3)));
4885 default:
4886 return -TARGET_ENOSYS;
4887 }
4888 }
4889 #endif
4890
4891 /* Map host to target signal numbers for the wait family of syscalls.
4892 Assume all other status bits are the same. */
4893 int host_to_target_waitstatus(int status)
4894 {
4895 if (WIFSIGNALED(status)) {
4896 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4897 }
4898 if (WIFSTOPPED(status)) {
4899 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4900 | (status & 0xff);
4901 }
4902 return status;
4903 }
4904
4905 int get_osversion(void)
4906 {
4907 static int osversion;
4908 struct new_utsname buf;
4909 const char *s;
4910 int i, n, tmp;
4911 if (osversion)
4912 return osversion;
4913 if (qemu_uname_release && *qemu_uname_release) {
4914 s = qemu_uname_release;
4915 } else {
4916 if (sys_uname(&buf))
4917 return 0;
4918 s = buf.release;
4919 }
4920 tmp = 0;
4921 for (i = 0; i < 3; i++) {
4922 n = 0;
4923 while (*s >= '0' && *s <= '9') {
4924 n *= 10;
4925 n += *s - '0';
4926 s++;
4927 }
4928 tmp = (tmp << 8) + n;
4929 if (*s == '.')
4930 s++;
4931 }
4932 osversion = tmp;
4933 return osversion;
4934 }
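/* The result packs the first three dot-separated components of the kernel
   release into one integer, e.g. a release of "3.2.0" yields 0x030200 -
   the same packing as KERNEL_VERSION(3,2,0). */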
4935
4936
4937 static int open_self_maps(void *cpu_env, int fd)
4938 {
4939 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4940 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4941 #endif
4942 FILE *fp;
4943 char *line = NULL;
4944 size_t len = 0;
4945 ssize_t read;
4946
4947 fp = fopen("/proc/self/maps", "r");
4948 if (fp == NULL) {
4949 return -EACCES;
4950 }
4951
4952 while ((read = getline(&line, &len, fp)) != -1) {
4953 int fields, dev_maj, dev_min, inode;
4954 uint64_t min, max, offset;
4955 char flag_r, flag_w, flag_x, flag_p;
4956 char path[512] = "";
4957 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4958 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4959 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4960
4961 if ((fields < 10) || (fields > 11)) {
4962 continue;
4963 }
4964 if (!strncmp(path, "[stack]", 7)) {
4965 continue;
4966 }
4967 if (h2g_valid(min) && h2g_valid(max)) {
4968 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4969 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
4970 h2g(min), h2g(max), flag_r, flag_w,
4971 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4972 path[0] ? " " : "", path);
4973 }
4974 }
4975
4976 free(line);
4977 fclose(fp);
4978
4979 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4980 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4981 (unsigned long long)ts->info->stack_limit,
4982 (unsigned long long)(ts->info->start_stack +
4983 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
4984 (unsigned long long)0);
4985 #endif
4986
4987 return 0;
4988 }
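/* The faked /proc/self/maps above only emits host mappings that translate
   cleanly into guest addresses (h2g_valid), rewritten through h2g(); host
   [stack] lines are dropped, and on targets that track the guest stack in
   TaskState a synthetic [stack] line is appended instead. */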
4989
4990 static int open_self_stat(void *cpu_env, int fd)
4991 {
4992 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4993 abi_ulong start_stack = ts->info->start_stack;
4994 int i;
4995
4996 for (i = 0; i < 44; i++) {
4997 char buf[128];
4998 int len;
4999 uint64_t val = 0;
5000
5001 if (i == 0) {
5002 /* pid */
5003 val = getpid();
5004 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5005 } else if (i == 1) {
5006 /* app name */
5007 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5008 } else if (i == 27) {
5009 /* stack bottom */
5010 val = start_stack;
5011 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5012 } else {
5013 /* for the rest, there is MasterCard: fake every remaining field as zero */
5014 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5015 }
5016
5017 len = strlen(buf);
5018 if (write(fd, buf, len) != len) {
5019 return -1;
5020 }
5021 }
5022
5023 return 0;
5024 }
5025
5026 static int open_self_auxv(void *cpu_env, int fd)
5027 {
5028 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5029 abi_ulong auxv = ts->info->saved_auxv;
5030 abi_ulong len = ts->info->auxv_len;
5031 char *ptr;
5032
5033 /*
5034 * The auxiliary vector is stored on the target process stack.
5035 * Read the whole auxv vector in and copy it to the file.
5036 */
5037 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5038 if (ptr != NULL) {
5039 while (len > 0) {
5040 ssize_t r;
5041 r = write(fd, ptr, len);
5042 if (r <= 0) {
5043 break;
5044 }
5045 len -= r;
5046 ptr += r;
5047 }
5048 lseek(fd, 0, SEEK_SET);
5049 unlock_user(ptr, auxv, len);
5050 }
5051
5052 return 0;
5053 }
5054
5055 static int is_proc_myself(const char *filename, const char *entry)
5056 {
5057 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5058 filename += strlen("/proc/");
5059 if (!strncmp(filename, "self/", strlen("self/"))) {
5060 filename += strlen("self/");
5061 } else if (*filename >= '1' && *filename <= '9') {
5062 char myself[80];
5063 snprintf(myself, sizeof(myself), "%d/", getpid());
5064 if (!strncmp(filename, myself, strlen(myself))) {
5065 filename += strlen(myself);
5066 } else {
5067 return 0;
5068 }
5069 } else {
5070 return 0;
5071 }
5072 if (!strcmp(filename, entry)) {
5073 return 1;
5074 }
5075 }
5076 return 0;
5077 }
5078
5079 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5080 {
5081 struct fake_open {
5082 const char *filename;
5083 int (*fill)(void *cpu_env, int fd);
5084 };
5085 const struct fake_open *fake_open;
5086 static const struct fake_open fakes[] = {
5087 { "maps", open_self_maps },
5088 { "stat", open_self_stat },
5089 { "auxv", open_self_auxv },
5090 { NULL, NULL }
5091 };
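    /* If the path names one of our own /proc entries, synthesize its
       contents into an anonymous (immediately unlinked) temp file and hand
       that fd back instead of opening the real file. */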
5092
5093 for (fake_open = fakes; fake_open->filename; fake_open++) {
5094 if (is_proc_myself(pathname, fake_open->filename)) {
5095 break;
5096 }
5097 }
5098
5099 if (fake_open->filename) {
5100 const char *tmpdir;
5101 char filename[PATH_MAX];
5102 int fd, r;
5103
5104 /* create a temporary file to hold the synthesized /proc contents */
5105 tmpdir = getenv("TMPDIR");
5106 if (!tmpdir)
5107 tmpdir = "/tmp";
5108 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5109 fd = mkstemp(filename);
5110 if (fd < 0) {
5111 return fd;
5112 }
5113 unlink(filename);
5114
5115 if ((r = fake_open->fill(cpu_env, fd))) {
5116 close(fd);
5117 return r;
5118 }
5119 lseek(fd, 0, SEEK_SET);
5120
5121 return fd;
5122 }
5123
5124 return get_errno(open(path(pathname), flags, mode));
5125 }
5126
5127 /* do_syscall() should always have a single exit point at the end so
5128 that actions, such as logging of syscall results, can be performed.
5129 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5130 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5131 abi_long arg2, abi_long arg3, abi_long arg4,
5132 abi_long arg5, abi_long arg6, abi_long arg7,
5133 abi_long arg8)
5134 {
5135 #ifdef CONFIG_USE_NPTL
5136 CPUState *cpu = ENV_GET_CPU(cpu_env);
5137 #endif
5138 abi_long ret;
5139 struct stat st;
5140 struct statfs stfs;
5141 void *p;
5142
5143 #ifdef DEBUG
5144 gemu_log("syscall %d", num);
5145 #endif
5146 if(do_strace)
5147 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5148
5149 switch(num) {
5150 case TARGET_NR_exit:
5151 #ifdef CONFIG_USE_NPTL
5152 /* In old applications this may be used to implement _exit(2).
5153 However in threaded applications it is used for thread termination,
5154 and _exit_group is used for application termination.
5155 Do thread termination if we have more than one thread. */
5156 /* FIXME: This probably breaks if a signal arrives. We should probably
5157 be disabling signals. */
5158 if (first_cpu->next_cpu) {
5159 TaskState *ts;
5160 CPUState **lastp;
5161 CPUState *p;
5162
5163 cpu_list_lock();
5164 lastp = &first_cpu;
5165 p = first_cpu;
5166 while (p && p != cpu) {
5167 lastp = &p->next_cpu;
5168 p = p->next_cpu;
5169 }
5170 /* If we didn't find the CPU for this thread then something is
5171 horribly wrong. */
5172 if (!p) {
5173 abort();
5174 }
5175 /* Remove the CPU from the list. */
5176 *lastp = p->next_cpu;
5177 cpu_list_unlock();
5178 ts = ((CPUArchState *)cpu_env)->opaque;
5179 if (ts->child_tidptr) {
5180 put_user_u32(0, ts->child_tidptr);
5181 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5182 NULL, NULL, 0);
5183 }
5184 thread_cpu = NULL;
5185 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5186 g_free(ts);
5187 pthread_exit(NULL);
5188 }
5189 #endif
5190 #ifdef TARGET_GPROF
5191 _mcleanup();
5192 #endif
5193 gdb_exit(cpu_env, arg1);
5194 _exit(arg1);
5195 ret = 0; /* avoid warning */
5196 break;
5197 case TARGET_NR_read:
5198 if (arg3 == 0)
5199 ret = 0;
5200 else {
5201 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5202 goto efault;
5203 ret = get_errno(read(arg1, p, arg3));
5204 unlock_user(p, arg2, ret);
5205 }
5206 break;
5207 case TARGET_NR_write:
5208 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5209 goto efault;
5210 ret = get_errno(write(arg1, p, arg3));
5211 unlock_user(p, arg2, 0);
5212 break;
5213 case TARGET_NR_open:
5214 if (!(p = lock_user_string(arg1)))
5215 goto efault;
5216 ret = get_errno(do_open(cpu_env, p,
5217 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5218 arg3));
5219 unlock_user(p, arg1, 0);
5220 break;
5221 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5222 case TARGET_NR_openat:
5223 if (!(p = lock_user_string(arg2)))
5224 goto efault;
5225 ret = get_errno(sys_openat(arg1,
5226 path(p),
5227 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5228 arg4));
5229 unlock_user(p, arg2, 0);
5230 break;
5231 #endif
5232 case TARGET_NR_close:
5233 ret = get_errno(close(arg1));
5234 break;
5235 case TARGET_NR_brk:
5236 ret = do_brk(arg1);
5237 break;
5238 case TARGET_NR_fork:
5239 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5240 break;
5241 #ifdef TARGET_NR_waitpid
5242 case TARGET_NR_waitpid:
5243 {
5244 int status;
5245 ret = get_errno(waitpid(arg1, &status, arg3));
5246 if (!is_error(ret) && arg2 && ret
5247 && put_user_s32(host_to_target_waitstatus(status), arg2))
5248 goto efault;
5249 }
5250 break;
5251 #endif
5252 #ifdef TARGET_NR_waitid
5253 case TARGET_NR_waitid:
5254 {
5255 siginfo_t info;
5256 info.si_pid = 0;
5257 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5258 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5259 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5260 goto efault;
5261 host_to_target_siginfo(p, &info);
5262 unlock_user(p, arg3, sizeof(target_siginfo_t));
5263 }
5264 }
5265 break;
5266 #endif
5267 #ifdef TARGET_NR_creat /* not on alpha */
5268 case TARGET_NR_creat:
5269 if (!(p = lock_user_string(arg1)))
5270 goto efault;
5271 ret = get_errno(creat(p, arg2));
5272 unlock_user(p, arg1, 0);
5273 break;
5274 #endif
5275 case TARGET_NR_link:
5276 {
5277 void * p2;
5278 p = lock_user_string(arg1);
5279 p2 = lock_user_string(arg2);
5280 if (!p || !p2)
5281 ret = -TARGET_EFAULT;
5282 else
5283 ret = get_errno(link(p, p2));
5284 unlock_user(p2, arg2, 0);
5285 unlock_user(p, arg1, 0);
5286 }
5287 break;
5288 #if defined(TARGET_NR_linkat)
5289 case TARGET_NR_linkat:
5290 {
5291 void * p2 = NULL;
5292 if (!arg2 || !arg4)
5293 goto efault;
5294 p = lock_user_string(arg2);
5295 p2 = lock_user_string(arg4);
5296 if (!p || !p2)
5297 ret = -TARGET_EFAULT;
5298 else
5299 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5300 unlock_user(p, arg2, 0);
5301 unlock_user(p2, arg4, 0);
5302 }
5303 break;
5304 #endif
5305 case TARGET_NR_unlink:
5306 if (!(p = lock_user_string(arg1)))
5307 goto efault;
5308 ret = get_errno(unlink(p));
5309 unlock_user(p, arg1, 0);
5310 break;
5311 #if defined(TARGET_NR_unlinkat)
5312 case TARGET_NR_unlinkat:
5313 if (!(p = lock_user_string(arg2)))
5314 goto efault;
5315 ret = get_errno(unlinkat(arg1, p, arg3));
5316 unlock_user(p, arg2, 0);
5317 break;
5318 #endif
5319 case TARGET_NR_execve:
5320 {
5321 char **argp, **envp;
5322 int argc, envc;
5323 abi_ulong gp;
5324 abi_ulong guest_argp;
5325 abi_ulong guest_envp;
5326 abi_ulong addr;
5327 char **q;
5328 int total_size = 0;
5329
5330 argc = 0;
5331 guest_argp = arg2;
5332 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5333 if (get_user_ual(addr, gp))
5334 goto efault;
5335 if (!addr)
5336 break;
5337 argc++;
5338 }
5339 envc = 0;
5340 guest_envp = arg3;
5341 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5342 if (get_user_ual(addr, gp))
5343 goto efault;
5344 if (!addr)
5345 break;
5346 envc++;
5347 }
5348
5349 argp = alloca((argc + 1) * sizeof(void *));
5350 envp = alloca((envc + 1) * sizeof(void *));
5351
5352 for (gp = guest_argp, q = argp; gp;
5353 gp += sizeof(abi_ulong), q++) {
5354 if (get_user_ual(addr, gp))
5355 goto execve_efault;
5356 if (!addr)
5357 break;
5358 if (!(*q = lock_user_string(addr)))
5359 goto execve_efault;
5360 total_size += strlen(*q) + 1;
5361 }
5362 *q = NULL;
5363
5364 for (gp = guest_envp, q = envp; gp;
5365 gp += sizeof(abi_ulong), q++) {
5366 if (get_user_ual(addr, gp))
5367 goto execve_efault;
5368 if (!addr)
5369 break;
5370 if (!(*q = lock_user_string(addr)))
5371 goto execve_efault;
5372 total_size += strlen(*q) + 1;
5373 }
5374 *q = NULL;
5375
5376 /* This case will not be caught by the host's execve() if its
5377 page size is bigger than the target's. */
5378 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5379 ret = -TARGET_E2BIG;
5380 goto execve_end;
5381 }
5382 if (!(p = lock_user_string(arg1)))
5383 goto execve_efault;
5384 ret = get_errno(execve(p, argp, envp));
5385 unlock_user(p, arg1, 0);
5386
5387 goto execve_end;
5388
5389 execve_efault:
5390 ret = -TARGET_EFAULT;
5391
5392 execve_end:
5393 for (gp = guest_argp, q = argp; *q;
5394 gp += sizeof(abi_ulong), q++) {
5395 if (get_user_ual(addr, gp)
5396 || !addr)
5397 break;
5398 unlock_user(*q, addr, 0);
5399 }
5400 for (gp = guest_envp, q = envp; *q;
5401 gp += sizeof(abi_ulong), q++) {
5402 if (get_user_ual(addr, gp)
5403 || !addr)
5404 break;
5405 unlock_user(*q, addr, 0);
5406 }
5407 }
5408 break;
5409 case TARGET_NR_chdir:
5410 if (!(p = lock_user_string(arg1)))
5411 goto efault;
5412 ret = get_errno(chdir(p));
5413 unlock_user(p, arg1, 0);
5414 break;
5415 #ifdef TARGET_NR_time
5416 case TARGET_NR_time:
5417 {
5418 time_t host_time;
5419 ret = get_errno(time(&host_time));
5420 if (!is_error(ret)
5421 && arg1
5422 && put_user_sal(host_time, arg1))
5423 goto efault;
5424 }
5425 break;
5426 #endif
5427 case TARGET_NR_mknod:
5428 if (!(p = lock_user_string(arg1)))
5429 goto efault;
5430 ret = get_errno(mknod(p, arg2, arg3));
5431 unlock_user(p, arg1, 0);
5432 break;
5433 #if defined(TARGET_NR_mknodat)
5434 case TARGET_NR_mknodat:
5435 if (!(p = lock_user_string(arg2)))
5436 goto efault;
5437 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5438 unlock_user(p, arg2, 0);
5439 break;
5440 #endif
5441 case TARGET_NR_chmod:
5442 if (!(p = lock_user_string(arg1)))
5443 goto efault;
5444 ret = get_errno(chmod(p, arg2));
5445 unlock_user(p, arg1, 0);
5446 break;
5447 #ifdef TARGET_NR_break
5448 case TARGET_NR_break:
5449 goto unimplemented;
5450 #endif
5451 #ifdef TARGET_NR_oldstat
5452 case TARGET_NR_oldstat:
5453 goto unimplemented;
5454 #endif
5455 case TARGET_NR_lseek:
5456 ret = get_errno(lseek(arg1, arg2, arg3));
5457 break;
5458 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5459 /* Alpha specific */
5460 case TARGET_NR_getxpid:
5461 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5462 ret = get_errno(getpid());
5463 break;
5464 #endif
5465 #ifdef TARGET_NR_getpid
5466 case TARGET_NR_getpid:
5467 ret = get_errno(getpid());
5468 break;
5469 #endif
5470 case TARGET_NR_mount:
5471 {
5472 /* need to look at the data field */
5473 void *p2, *p3;
5474 p = lock_user_string(arg1);
5475 p2 = lock_user_string(arg2);
5476 p3 = lock_user_string(arg3);
5477 if (!p || !p2 || !p3)
5478 ret = -TARGET_EFAULT;
5479 else {
5480 /* FIXME - arg5 should be locked, but it isn't clear how to
5481 * do that since it's not guaranteed to be a NULL-terminated
5482 * string.
5483 */
5484 if ( ! arg5 )
5485 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5486 else
5487 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5488 }
5489 unlock_user(p, arg1, 0);
5490 unlock_user(p2, arg2, 0);
5491 unlock_user(p3, arg3, 0);
5492 break;
5493 }
5494 #ifdef TARGET_NR_umount
5495 case TARGET_NR_umount:
5496 if (!(p = lock_user_string(arg1)))
5497 goto efault;
5498 ret = get_errno(umount(p));
5499 unlock_user(p, arg1, 0);
5500 break;
5501 #endif
5502 #ifdef TARGET_NR_stime /* not on alpha */
5503 case TARGET_NR_stime:
5504 {
5505 time_t host_time;
5506 if (get_user_sal(host_time, arg1))
5507 goto efault;
5508 ret = get_errno(stime(&host_time));
5509 }
5510 break;
5511 #endif
5512 case TARGET_NR_ptrace:
5513 goto unimplemented;
5514 #ifdef TARGET_NR_alarm /* not on alpha */
5515 case TARGET_NR_alarm:
5516 ret = alarm(arg1);
5517 break;
5518 #endif
5519 #ifdef TARGET_NR_oldfstat
5520 case TARGET_NR_oldfstat:
5521 goto unimplemented;
5522 #endif
5523 #ifdef TARGET_NR_pause /* not on alpha */
5524 case TARGET_NR_pause:
5525 ret = get_errno(pause());
5526 break;
5527 #endif
5528 #ifdef TARGET_NR_utime
5529 case TARGET_NR_utime:
5530 {
5531 struct utimbuf tbuf, *host_tbuf;
5532 struct target_utimbuf *target_tbuf;
5533 if (arg2) {
5534 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5535 goto efault;
5536 tbuf.actime = tswapal(target_tbuf->actime);
5537 tbuf.modtime = tswapal(target_tbuf->modtime);
5538 unlock_user_struct(target_tbuf, arg2, 0);
5539 host_tbuf = &tbuf;
5540 } else {
5541 host_tbuf = NULL;
5542 }
5543 if (!(p = lock_user_string(arg1)))
5544 goto efault;
5545 ret = get_errno(utime(p, host_tbuf));
5546 unlock_user(p, arg1, 0);
5547 }
5548 break;
5549 #endif
5550 case TARGET_NR_utimes:
5551 {
5552 struct timeval *tvp, tv[2];
5553 if (arg2) {
5554 if (copy_from_user_timeval(&tv[0], arg2)
5555 || copy_from_user_timeval(&tv[1],
5556 arg2 + sizeof(struct target_timeval)))
5557 goto efault;
5558 tvp = tv;
5559 } else {
5560 tvp = NULL;
5561 }
5562 if (!(p = lock_user_string(arg1)))
5563 goto efault;
5564 ret = get_errno(utimes(p, tvp));
5565 unlock_user(p, arg1, 0);
5566 }
5567 break;
5568 #if defined(TARGET_NR_futimesat)
5569 case TARGET_NR_futimesat:
5570 {
5571 struct timeval *tvp, tv[2];
5572 if (arg3) {
5573 if (copy_from_user_timeval(&tv[0], arg3)
5574 || copy_from_user_timeval(&tv[1],
5575 arg3 + sizeof(struct target_timeval)))
5576 goto efault;
5577 tvp = tv;
5578 } else {
5579 tvp = NULL;
5580 }
5581 if (!(p = lock_user_string(arg2)))
5582 goto efault;
5583 ret = get_errno(futimesat(arg1, path(p), tvp));
5584 unlock_user(p, arg2, 0);
5585 }
5586 break;
5587 #endif
5588 #ifdef TARGET_NR_stty
5589 case TARGET_NR_stty:
5590 goto unimplemented;
5591 #endif
5592 #ifdef TARGET_NR_gtty
5593 case TARGET_NR_gtty:
5594 goto unimplemented;
5595 #endif
5596 case TARGET_NR_access:
5597 if (!(p = lock_user_string(arg1)))
5598 goto efault;
5599 ret = get_errno(access(path(p), arg2));
5600 unlock_user(p, arg1, 0);
5601 break;
5602 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5603 case TARGET_NR_faccessat:
5604 if (!(p = lock_user_string(arg2)))
5605 goto efault;
5606 ret = get_errno(faccessat(arg1, p, arg3, 0));
5607 unlock_user(p, arg2, 0);
5608 break;
5609 #endif
5610 #ifdef TARGET_NR_nice /* not on alpha */
5611 case TARGET_NR_nice:
5612 ret = get_errno(nice(arg1));
5613 break;
5614 #endif
5615 #ifdef TARGET_NR_ftime
5616 case TARGET_NR_ftime:
5617 goto unimplemented;
5618 #endif
5619 case TARGET_NR_sync:
5620 sync();
5621 ret = 0;
5622 break;
5623 case TARGET_NR_kill:
5624 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5625 break;
5626 case TARGET_NR_rename:
5627 {
5628 void *p2;
5629 p = lock_user_string(arg1);
5630 p2 = lock_user_string(arg2);
5631 if (!p || !p2)
5632 ret = -TARGET_EFAULT;
5633 else
5634 ret = get_errno(rename(p, p2));
5635 unlock_user(p2, arg2, 0);
5636 unlock_user(p, arg1, 0);
5637 }
5638 break;
5639 #if defined(TARGET_NR_renameat)
5640 case TARGET_NR_renameat:
5641 {
5642 void *p2;
5643 p = lock_user_string(arg2);
5644 p2 = lock_user_string(arg4);
5645 if (!p || !p2)
5646 ret = -TARGET_EFAULT;
5647 else
5648 ret = get_errno(renameat(arg1, p, arg3, p2));
5649 unlock_user(p2, arg4, 0);
5650 unlock_user(p, arg2, 0);
5651 }
5652 break;
5653 #endif
5654 case TARGET_NR_mkdir:
5655 if (!(p = lock_user_string(arg1)))
5656 goto efault;
5657 ret = get_errno(mkdir(p, arg2));
5658 unlock_user(p, arg1, 0);
5659 break;
5660 #if defined(TARGET_NR_mkdirat)
5661 case TARGET_NR_mkdirat:
5662 if (!(p = lock_user_string(arg2)))
5663 goto efault;
5664 ret = get_errno(mkdirat(arg1, p, arg3));
5665 unlock_user(p, arg2, 0);
5666 break;
5667 #endif
5668 case TARGET_NR_rmdir:
5669 if (!(p = lock_user_string(arg1)))
5670 goto efault;
5671 ret = get_errno(rmdir(p));
5672 unlock_user(p, arg1, 0);
5673 break;
5674 case TARGET_NR_dup:
5675 ret = get_errno(dup(arg1));
5676 break;
5677 case TARGET_NR_pipe:
5678 ret = do_pipe(cpu_env, arg1, 0, 0);
5679 break;
5680 #ifdef TARGET_NR_pipe2
5681 case TARGET_NR_pipe2:
5682 ret = do_pipe(cpu_env, arg1,
5683 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5684 break;
5685 #endif
5686 case TARGET_NR_times:
5687 {
5688 struct target_tms *tmsp;
5689 struct tms tms;
5690 ret = get_errno(times(&tms));
5691 if (arg1) {
5692 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5693 if (!tmsp)
5694 goto efault;
5695 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5696 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5697 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5698 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5699 }
5700 if (!is_error(ret))
5701 ret = host_to_target_clock_t(ret);
5702 }
5703 break;
5704 #ifdef TARGET_NR_prof
5705 case TARGET_NR_prof:
5706 goto unimplemented;
5707 #endif
5708 #ifdef TARGET_NR_signal
5709 case TARGET_NR_signal:
5710 goto unimplemented;
5711 #endif
5712 case TARGET_NR_acct:
5713 if (arg1 == 0) {
5714 ret = get_errno(acct(NULL));
5715 } else {
5716 if (!(p = lock_user_string(arg1)))
5717 goto efault;
5718 ret = get_errno(acct(path(p)));
5719 unlock_user(p, arg1, 0);
5720 }
5721 break;
5722 #ifdef TARGET_NR_umount2 /* not on alpha */
5723 case TARGET_NR_umount2:
5724 if (!(p = lock_user_string(arg1)))
5725 goto efault;
5726 ret = get_errno(umount2(p, arg2));
5727 unlock_user(p, arg1, 0);
5728 break;
5729 #endif
5730 #ifdef TARGET_NR_lock
5731 case TARGET_NR_lock:
5732 goto unimplemented;
5733 #endif
5734 case TARGET_NR_ioctl:
5735 ret = do_ioctl(arg1, arg2, arg3);
5736 break;
5737 case TARGET_NR_fcntl:
5738 ret = do_fcntl(arg1, arg2, arg3);
5739 break;
5740 #ifdef TARGET_NR_mpx
5741 case TARGET_NR_mpx:
5742 goto unimplemented;
5743 #endif
5744 case TARGET_NR_setpgid:
5745 ret = get_errno(setpgid(arg1, arg2));
5746 break;
5747 #ifdef TARGET_NR_ulimit
5748 case TARGET_NR_ulimit:
5749 goto unimplemented;
5750 #endif
5751 #ifdef TARGET_NR_oldolduname
5752 case TARGET_NR_oldolduname:
5753 goto unimplemented;
5754 #endif
5755 case TARGET_NR_umask:
5756 ret = get_errno(umask(arg1));
5757 break;
5758 case TARGET_NR_chroot:
5759 if (!(p = lock_user_string(arg1)))
5760 goto efault;
5761 ret = get_errno(chroot(p));
5762 unlock_user(p, arg1, 0);
5763 break;
5764 case TARGET_NR_ustat:
5765 goto unimplemented;
5766 case TARGET_NR_dup2:
5767 ret = get_errno(dup2(arg1, arg2));
5768 break;
5769 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5770 case TARGET_NR_dup3:
5771 ret = get_errno(dup3(arg1, arg2, arg3));
5772 break;
5773 #endif
5774 #ifdef TARGET_NR_getppid /* not on alpha */
5775 case TARGET_NR_getppid:
5776 ret = get_errno(getppid());
5777 break;
5778 #endif
5779 case TARGET_NR_getpgrp:
5780 ret = get_errno(getpgrp());
5781 break;
5782 case TARGET_NR_setsid:
5783 ret = get_errno(setsid());
5784 break;
5785 #ifdef TARGET_NR_sigaction
5786 case TARGET_NR_sigaction:
5787 {
5788 #if defined(TARGET_ALPHA)
5789 struct target_sigaction act, oact, *pact = 0;
5790 struct target_old_sigaction *old_act;
5791 if (arg2) {
5792 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5793 goto efault;
5794 act._sa_handler = old_act->_sa_handler;
5795 target_siginitset(&act.sa_mask, old_act->sa_mask);
5796 act.sa_flags = old_act->sa_flags;
5797 act.sa_restorer = 0;
5798 unlock_user_struct(old_act, arg2, 0);
5799 pact = &act;
5800 }
5801 ret = get_errno(do_sigaction(arg1, pact, &oact));
5802 if (!is_error(ret) && arg3) {
5803 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5804 goto efault;
5805 old_act->_sa_handler = oact._sa_handler;
5806 old_act->sa_mask = oact.sa_mask.sig[0];
5807 old_act->sa_flags = oact.sa_flags;
5808 unlock_user_struct(old_act, arg3, 1);
5809 }
5810 #elif defined(TARGET_MIPS)
5811 struct target_sigaction act, oact, *pact, *old_act;
5812
5813 if (arg2) {
5814 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5815 goto efault;
5816 act._sa_handler = old_act->_sa_handler;
5817 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5818 act.sa_flags = old_act->sa_flags;
5819 unlock_user_struct(old_act, arg2, 0);
5820 pact = &act;
5821 } else {
5822 pact = NULL;
5823 }
5824
5825 ret = get_errno(do_sigaction(arg1, pact, &oact));
5826
5827 if (!is_error(ret) && arg3) {
5828 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5829 goto efault;
5830 old_act->_sa_handler = oact._sa_handler;
5831 old_act->sa_flags = oact.sa_flags;
5832 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5833 old_act->sa_mask.sig[1] = 0;
5834 old_act->sa_mask.sig[2] = 0;
5835 old_act->sa_mask.sig[3] = 0;
5836 unlock_user_struct(old_act, arg3, 1);
5837 }
5838 #else
5839 struct target_old_sigaction *old_act;
5840 struct target_sigaction act, oact, *pact;
5841 if (arg2) {
5842 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5843 goto efault;
5844 act._sa_handler = old_act->_sa_handler;
5845 target_siginitset(&act.sa_mask, old_act->sa_mask);
5846 act.sa_flags = old_act->sa_flags;
5847 act.sa_restorer = old_act->sa_restorer;
5848 unlock_user_struct(old_act, arg2, 0);
5849 pact = &act;
5850 } else {
5851 pact = NULL;
5852 }
5853 ret = get_errno(do_sigaction(arg1, pact, &oact));
5854 if (!is_error(ret) && arg3) {
5855 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5856 goto efault;
5857 old_act->_sa_handler = oact._sa_handler;
5858 old_act->sa_mask = oact.sa_mask.sig[0];
5859 old_act->sa_flags = oact.sa_flags;
5860 old_act->sa_restorer = oact.sa_restorer;
5861 unlock_user_struct(old_act, arg3, 1);
5862 }
5863 #endif
5864 }
5865 break;
5866 #endif
5867 case TARGET_NR_rt_sigaction:
5868 {
5869 #if defined(TARGET_ALPHA)
5870 struct target_sigaction act, oact, *pact = 0;
5871 struct target_rt_sigaction *rt_act;
5872 /* ??? arg4 == sizeof(sigset_t). */
5873 if (arg2) {
5874 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5875 goto efault;
5876 act._sa_handler = rt_act->_sa_handler;
5877 act.sa_mask = rt_act->sa_mask;
5878 act.sa_flags = rt_act->sa_flags;
5879 act.sa_restorer = arg5;
5880 unlock_user_struct(rt_act, arg2, 0);
5881 pact = &act;
5882 }
5883 ret = get_errno(do_sigaction(arg1, pact, &oact));
5884 if (!is_error(ret) && arg3) {
5885 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5886 goto efault;
5887 rt_act->_sa_handler = oact._sa_handler;
5888 rt_act->sa_mask = oact.sa_mask;
5889 rt_act->sa_flags = oact.sa_flags;
5890 unlock_user_struct(rt_act, arg3, 1);
5891 }
5892 #else
5893 struct target_sigaction *act;
5894 struct target_sigaction *oact;
5895
5896 if (arg2) {
5897 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5898 goto efault;
5899 } else
5900 act = NULL;
5901 if (arg3) {
5902 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5903 ret = -TARGET_EFAULT;
5904 goto rt_sigaction_fail;
5905 }
5906 } else
5907 oact = NULL;
5908 ret = get_errno(do_sigaction(arg1, act, oact));
5909 rt_sigaction_fail:
5910 if (act)
5911 unlock_user_struct(act, arg2, 0);
5912 if (oact)
5913 unlock_user_struct(oact, arg3, 1);
5914 #endif
5915 }
5916 break;
5917 #ifdef TARGET_NR_sgetmask /* not on alpha */
5918 case TARGET_NR_sgetmask:
5919 {
5920 sigset_t cur_set;
5921 abi_ulong target_set;
5922 sigprocmask(0, NULL, &cur_set);
5923 host_to_target_old_sigset(&target_set, &cur_set);
5924 ret = target_set;
5925 }
5926 break;
5927 #endif
5928 #ifdef TARGET_NR_ssetmask /* not on alpha */
5929 case TARGET_NR_ssetmask:
5930 {
5931 sigset_t set, oset, cur_set;
5932 abi_ulong target_set = arg1;
5933 sigprocmask(0, NULL, &cur_set);
5934 target_to_host_old_sigset(&set, &target_set);
5935 sigorset(&set, &set, &cur_set);
5936 sigprocmask(SIG_SETMASK, &set, &oset);
5937 host_to_target_old_sigset(&target_set, &oset);
5938 ret = target_set;
5939 }
5940 break;
5941 #endif
5942 #ifdef TARGET_NR_sigprocmask
5943 case TARGET_NR_sigprocmask:
5944 {
5945 #if defined(TARGET_ALPHA)
5946 sigset_t set, oldset;
5947 abi_ulong mask;
5948 int how;
5949
5950 switch (arg1) {
5951 case TARGET_SIG_BLOCK:
5952 how = SIG_BLOCK;
5953 break;
5954 case TARGET_SIG_UNBLOCK:
5955 how = SIG_UNBLOCK;
5956 break;
5957 case TARGET_SIG_SETMASK:
5958 how = SIG_SETMASK;
5959 break;
5960 default:
5961 ret = -TARGET_EINVAL;
5962 goto fail;
5963 }
5964 mask = arg2;
5965 target_to_host_old_sigset(&set, &mask);
5966
5967 ret = get_errno(sigprocmask(how, &set, &oldset));
5968 if (!is_error(ret)) {
5969 host_to_target_old_sigset(&mask, &oldset);
5970 ret = mask;
5971 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5972 }
5973 #else
5974 sigset_t set, oldset, *set_ptr;
5975 int how;
5976
5977 if (arg2) {
5978 switch (arg1) {
5979 case TARGET_SIG_BLOCK:
5980 how = SIG_BLOCK;
5981 break;
5982 case TARGET_SIG_UNBLOCK:
5983 how = SIG_UNBLOCK;
5984 break;
5985 case TARGET_SIG_SETMASK:
5986 how = SIG_SETMASK;
5987 break;
5988 default:
5989 ret = -TARGET_EINVAL;
5990 goto fail;
5991 }
5992 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5993 goto efault;
5994 target_to_host_old_sigset(&set, p);
5995 unlock_user(p, arg2, 0);
5996 set_ptr = &set;
5997 } else {
5998 how = 0;
5999 set_ptr = NULL;
6000 }
6001 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6002 if (!is_error(ret) && arg3) {
6003 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6004 goto efault;
6005 host_to_target_old_sigset(p, &oldset);
6006 unlock_user(p, arg3, sizeof(target_sigset_t));
6007 }
6008 #endif
6009 }
6010 break;
6011 #endif
6012 case TARGET_NR_rt_sigprocmask:
6013 {
6014 int how = arg1;
6015 sigset_t set, oldset, *set_ptr;
6016
6017 if (arg2) {
6018 switch(how) {
6019 case TARGET_SIG_BLOCK:
6020 how = SIG_BLOCK;
6021 break;
6022 case TARGET_SIG_UNBLOCK:
6023 how = SIG_UNBLOCK;
6024 break;
6025 case TARGET_SIG_SETMASK:
6026 how = SIG_SETMASK;
6027 break;
6028 default:
6029 ret = -TARGET_EINVAL;
6030 goto fail;
6031 }
6032 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6033 goto efault;
6034 target_to_host_sigset(&set, p);
6035 unlock_user(p, arg2, 0);
6036 set_ptr = &set;
6037 } else {
6038 how = 0;
6039 set_ptr = NULL;
6040 }
6041 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6042 if (!is_error(ret) && arg3) {
6043 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6044 goto efault;
6045 host_to_target_sigset(p, &oldset);
6046 unlock_user(p, arg3, sizeof(target_sigset_t));
6047 }
6048 }
6049 break;
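    /* Editor's note -- sketch, assuming the usual guest libc wrapper: a
     * guest call such as
     *
     *     sigset_t s;
     *     sigemptyset(&s);
     *     sigaddset(&s, SIGUSR1);
     *     sigprocmask(SIG_BLOCK, &s, NULL);     (becomes rt_sigprocmask)
     *
     * arrives here with arg2 pointing at a target_sigset_t in guest
     * memory; target_to_host_sigset() rebuilds a host sigset_t from it
     * (byte order plus signal-number translation) before the host
     * sigprocmask() runs, and the previous mask is converted back with
     * host_to_target_sigset() only when arg3 was supplied.
     */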
6050 #ifdef TARGET_NR_sigpending
6051 case TARGET_NR_sigpending:
6052 {
6053 sigset_t set;
6054 ret = get_errno(sigpending(&set));
6055 if (!is_error(ret)) {
6056 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6057 goto efault;
6058 host_to_target_old_sigset(p, &set);
6059 unlock_user(p, arg1, sizeof(target_sigset_t));
6060 }
6061 }
6062 break;
6063 #endif
6064 case TARGET_NR_rt_sigpending:
6065 {
6066 sigset_t set;
6067 ret = get_errno(sigpending(&set));
6068 if (!is_error(ret)) {
6069 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6070 goto efault;
6071 host_to_target_sigset(p, &set);
6072 unlock_user(p, arg1, sizeof(target_sigset_t));
6073 }
6074 }
6075 break;
6076 #ifdef TARGET_NR_sigsuspend
6077 case TARGET_NR_sigsuspend:
6078 {
6079 sigset_t set;
6080 #if defined(TARGET_ALPHA)
6081 abi_ulong mask = arg1;
6082 target_to_host_old_sigset(&set, &mask);
6083 #else
6084 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6085 goto efault;
6086 target_to_host_old_sigset(&set, p);
6087 unlock_user(p, arg1, 0);
6088 #endif
6089 ret = get_errno(sigsuspend(&set));
6090 }
6091 break;
6092 #endif
6093 case TARGET_NR_rt_sigsuspend:
6094 {
6095 sigset_t set;
6096 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6097 goto efault;
6098 target_to_host_sigset(&set, p);
6099 unlock_user(p, arg1, 0);
6100 ret = get_errno(sigsuspend(&set));
6101 }
6102 break;
6103 case TARGET_NR_rt_sigtimedwait:
6104 {
6105 sigset_t set;
6106 struct timespec uts, *puts;
6107 siginfo_t uinfo;
6108
6109 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6110 goto efault;
6111 target_to_host_sigset(&set, p);
6112 unlock_user(p, arg1, 0);
6113 if (arg3) {
6114 puts = &uts;
6115 target_to_host_timespec(puts, arg3);
6116 } else {
6117 puts = NULL;
6118 }
6119 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6120 if (!is_error(ret) && arg2) {
6121 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6122 goto efault;
6123 host_to_target_siginfo(p, &uinfo);
6124 unlock_user(p, arg2, sizeof(target_siginfo_t));
6125 }
6126 }
6127 break;
6128 case TARGET_NR_rt_sigqueueinfo:
6129 {
6130 siginfo_t uinfo;
6131 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6132 goto efault;
6133 target_to_host_siginfo(&uinfo, p);
6134 unlock_user(p, arg3, 0);
6135 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6136 }
6137 break;
6138 #ifdef TARGET_NR_sigreturn
6139 case TARGET_NR_sigreturn:
6140 /* NOTE: ret is eax, so no transcoding needs to be done */
6141 ret = do_sigreturn(cpu_env);
6142 break;
6143 #endif
6144 case TARGET_NR_rt_sigreturn:
6145 /* NOTE: ret is eax, so no transcoding needs to be done */
6146 ret = do_rt_sigreturn(cpu_env);
6147 break;
6148 case TARGET_NR_sethostname:
6149 if (!(p = lock_user_string(arg1)))
6150 goto efault;
6151 ret = get_errno(sethostname(p, arg2));
6152 unlock_user(p, arg1, 0);
6153 break;
6154 case TARGET_NR_setrlimit:
6155 {
6156 int resource = target_to_host_resource(arg1);
6157 struct target_rlimit *target_rlim;
6158 struct rlimit rlim;
6159 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6160 goto efault;
6161 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6162 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6163 unlock_user_struct(target_rlim, arg2, 0);
6164 ret = get_errno(setrlimit(resource, &rlim));
6165 }
6166 break;
6167 case TARGET_NR_getrlimit:
6168 {
6169 int resource = target_to_host_resource(arg1);
6170 struct target_rlimit *target_rlim;
6171 struct rlimit rlim;
6172
6173 ret = get_errno(getrlimit(resource, &rlim));
6174 if (!is_error(ret)) {
6175 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6176 goto efault;
6177 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6178 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6179 unlock_user_struct(target_rlim, arg2, 1);
6180 }
6181 }
6182 break;
6183 case TARGET_NR_getrusage:
6184 {
6185 struct rusage rusage;
6186 ret = get_errno(getrusage(arg1, &rusage));
6187 if (!is_error(ret)) {
6188 host_to_target_rusage(arg2, &rusage);
6189 }
6190 }
6191 break;
6192 case TARGET_NR_gettimeofday:
6193 {
6194 struct timeval tv;
6195 ret = get_errno(gettimeofday(&tv, NULL));
6196 if (!is_error(ret)) {
6197 if (copy_to_user_timeval(arg1, &tv))
6198 goto efault;
6199 }
6200 }
6201 break;
6202 case TARGET_NR_settimeofday:
6203 {
6204 struct timeval tv;
6205 if (copy_from_user_timeval(&tv, arg1))
6206 goto efault;
6207 ret = get_errno(settimeofday(&tv, NULL));
6208 }
6209 break;
6210 #if defined(TARGET_NR_select)
6211 case TARGET_NR_select:
6212 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6213 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6214 #else
6215 {
6216 struct target_sel_arg_struct *sel;
6217 abi_ulong inp, outp, exp, tvp;
6218 long nsel;
6219
6220 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6221 goto efault;
6222 nsel = tswapal(sel->n);
6223 inp = tswapal(sel->inp);
6224 outp = tswapal(sel->outp);
6225 exp = tswapal(sel->exp);
6226 tvp = tswapal(sel->tvp);
6227 unlock_user_struct(sel, arg1, 0);
6228 ret = do_select(nsel, inp, outp, exp, tvp);
6229 }
6230 #endif
6231 break;
6232 #endif
6233 #ifdef TARGET_NR_pselect6
6234 case TARGET_NR_pselect6:
6235 {
6236 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6237 fd_set rfds, wfds, efds;
6238 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6239 struct timespec ts, *ts_ptr;
6240
6241 /*
6242 * The 6th arg is actually two args smashed together,
6243 * so we cannot use the C library.
6244 */
6245 sigset_t set;
6246 struct {
6247 sigset_t *set;
6248 size_t size;
6249 } sig, *sig_ptr;
6250
6251 abi_ulong arg_sigset, arg_sigsize, *arg7;
6252 target_sigset_t *target_sigset;
6253
6254 n = arg1;
6255 rfd_addr = arg2;
6256 wfd_addr = arg3;
6257 efd_addr = arg4;
6258 ts_addr = arg5;
6259
6260 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6261 if (ret) {
6262 goto fail;
6263 }
6264 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6265 if (ret) {
6266 goto fail;
6267 }
6268 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6269 if (ret) {
6270 goto fail;
6271 }
6272
6273 /*
6274 * This takes a timespec, and not a timeval, so we cannot
6275 * use the do_select() helper ...
6276 */
6277 if (ts_addr) {
6278 if (target_to_host_timespec(&ts, ts_addr)) {
6279 goto efault;
6280 }
6281 ts_ptr = &ts;
6282 } else {
6283 ts_ptr = NULL;
6284 }
6285
6286 /* Extract the two packed args for the sigset */
6287 if (arg6) {
6288 sig_ptr = &sig;
6289 sig.size = _NSIG / 8;
6290
6291 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6292 if (!arg7) {
6293 goto efault;
6294 }
6295 arg_sigset = tswapal(arg7[0]);
6296 arg_sigsize = tswapal(arg7[1]);
6297 unlock_user(arg7, arg6, 0);
6298
6299 if (arg_sigset) {
6300 sig.set = &set;
6301 if (arg_sigsize != sizeof(*target_sigset)) {
6302 /* Like the kernel, we enforce correct size sigsets */
6303 ret = -TARGET_EINVAL;
6304 goto fail;
6305 }
6306 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6307 sizeof(*target_sigset), 1);
6308 if (!target_sigset) {
6309 goto efault;
6310 }
6311 target_to_host_sigset(&set, target_sigset);
6312 unlock_user(target_sigset, arg_sigset, 0);
6313 } else {
6314 sig.set = NULL;
6315 }
6316 } else {
6317 sig_ptr = NULL;
6318 }
6319
6320 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6321 ts_ptr, sig_ptr));
6322
6323 if (!is_error(ret)) {
6324 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6325 goto efault;
6326 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6327 goto efault;
6328 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6329 goto efault;
6330
6331 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6332 goto efault;
6333 }
6334 }
6335 break;
6336 #endif
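    /* Editor's note -- illustration of the packed sixth argument handled
     * above (the struct and field names are descriptive, not taken from
     * any header): the guest effectively passes
     *
     *     struct {
     *         abi_ulong sigset_addr;    guest pointer to a target_sigset_t
     *         abi_ulong sigset_size;    must equal sizeof(target_sigset_t)
     *     };
     *
     * which is why arg6 is read back as two abi_ulongs (arg7[0] and
     * arg7[1]) instead of being forwarded to the C library's pselect().
     */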
6337 case TARGET_NR_symlink:
6338 {
6339 void *p2;
6340 p = lock_user_string(arg1);
6341 p2 = lock_user_string(arg2);
6342 if (!p || !p2)
6343 ret = -TARGET_EFAULT;
6344 else
6345 ret = get_errno(symlink(p, p2));
6346 unlock_user(p2, arg2, 0);
6347 unlock_user(p, arg1, 0);
6348 }
6349 break;
6350 #if defined(TARGET_NR_symlinkat)
6351 case TARGET_NR_symlinkat:
6352 {
6353 void *p2;
6354 p = lock_user_string(arg1);
6355 p2 = lock_user_string(arg3);
6356 if (!p || !p2)
6357 ret = -TARGET_EFAULT;
6358 else
6359 ret = get_errno(symlinkat(p, arg2, p2));
6360 unlock_user(p2, arg3, 0);
6361 unlock_user(p, arg1, 0);
6362 }
6363 break;
6364 #endif
6365 #ifdef TARGET_NR_oldlstat
6366 case TARGET_NR_oldlstat:
6367 goto unimplemented;
6368 #endif
6369 case TARGET_NR_readlink:
6370 {
6371 void *p2;
6372 p = lock_user_string(arg1);
6373 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6374 if (!p || !p2) {
6375 ret = -TARGET_EFAULT;
6376 } else if (is_proc_myself((const char *)p, "exe")) {
6377 char real[PATH_MAX], *temp;
6378 temp = realpath(exec_path, real);
6379 ret = temp == NULL ? get_errno(-1) : strlen(real);
6380 if (temp != NULL) snprintf((char *)p2, arg3, "%s", real);
6381 } else {
6382 ret = get_errno(readlink(path(p), p2, arg3));
6383 }
6384 unlock_user(p2, arg2, ret);
6385 unlock_user(p, arg1, 0);
6386 }
6387 break;
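    /* Editor's note: a guest readlink("/proc/self/exe") is intercepted by
     * is_proc_myself() above, so the guest is given the path of the
     * emulated binary (exec_path run through realpath()) rather than the
     * path of the qemu user-mode binary itself; every other path falls
     * through to the host readlink().
     */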
6388 #if defined(TARGET_NR_readlinkat)
6389 case TARGET_NR_readlinkat:
6390 {
6391 void *p2;
6392 p = lock_user_string(arg2);
6393 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6394 if (!p || !p2) {
6395 ret = -TARGET_EFAULT;
6396 } else if (is_proc_myself((const char *)p, "exe")) {
6397 char real[PATH_MAX], *temp;
6398 temp = realpath(exec_path, real);
6399 ret = temp == NULL ? get_errno(-1) : strlen(real);
6400 if (temp != NULL) snprintf((char *)p2, arg4, "%s", real);
6401 } else {
6402 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6403 }
6404 unlock_user(p2, arg3, ret);
6405 unlock_user(p, arg2, 0);
6406 }
6407 break;
6408 #endif
6409 #ifdef TARGET_NR_uselib
6410 case TARGET_NR_uselib:
6411 goto unimplemented;
6412 #endif
6413 #ifdef TARGET_NR_swapon
6414 case TARGET_NR_swapon:
6415 if (!(p = lock_user_string(arg1)))
6416 goto efault;
6417 ret = get_errno(swapon(p, arg2));
6418 unlock_user(p, arg1, 0);
6419 break;
6420 #endif
6421 case TARGET_NR_reboot:
6422 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6423 /* arg4 must be ignored in all other cases */
6424 p = lock_user_string(arg4);
6425 if (!p) {
6426 goto efault;
6427 }
6428 ret = get_errno(reboot(arg1, arg2, arg3, p));
6429 unlock_user(p, arg4, 0);
6430 } else {
6431 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6432 }
6433 break;
6434 #ifdef TARGET_NR_readdir
6435 case TARGET_NR_readdir:
6436 goto unimplemented;
6437 #endif
6438 #ifdef TARGET_NR_mmap
6439 case TARGET_NR_mmap:
6440 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6441 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6442 || defined(TARGET_S390X)
6443 {
6444 abi_ulong *v;
6445 abi_ulong v1, v2, v3, v4, v5, v6;
6446 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6447 goto efault;
6448 v1 = tswapal(v[0]);
6449 v2 = tswapal(v[1]);
6450 v3 = tswapal(v[2]);
6451 v4 = tswapal(v[3]);
6452 v5 = tswapal(v[4]);
6453 v6 = tswapal(v[5]);
6454 unlock_user(v, arg1, 0);
6455 ret = get_errno(target_mmap(v1, v2, v3,
6456 target_to_host_bitmask(v4, mmap_flags_tbl),
6457 v5, v6));
6458 }
6459 #else
6460 ret = get_errno(target_mmap(arg1, arg2, arg3,
6461 target_to_host_bitmask(arg4, mmap_flags_tbl),
6462 arg5,
6463 arg6));
6464 #endif
6465 break;
6466 #endif
6467 #ifdef TARGET_NR_mmap2
6468 case TARGET_NR_mmap2:
6469 #ifndef MMAP_SHIFT
6470 #define MMAP_SHIFT 12
6471 #endif
6472 ret = get_errno(target_mmap(arg1, arg2, arg3,
6473 target_to_host_bitmask(arg4, mmap_flags_tbl),
6474 arg5,
6475 arg6 << MMAP_SHIFT));
6476 break;
6477 #endif
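    /* Editor's note: mmap2 takes its file offset in 4096-byte units,
     * which is why arg6 is shifted left by MMAP_SHIFT (12) before being
     * handed to target_mmap(); e.g. a guest offset argument of 3 means
     * byte offset 3 * 4096 = 12288.
     */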
6478 case TARGET_NR_munmap:
6479 ret = get_errno(target_munmap(arg1, arg2));
6480 break;
6481 case TARGET_NR_mprotect:
6482 {
6483 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6484 /* Special hack to detect libc making the stack executable. */
6485 if ((arg3 & PROT_GROWSDOWN)
6486 && arg1 >= ts->info->stack_limit
6487 && arg1 <= ts->info->start_stack) {
6488 arg3 &= ~PROT_GROWSDOWN;
6489 arg2 = arg2 + arg1 - ts->info->stack_limit;
6490 arg1 = ts->info->stack_limit;
6491 }
6492 }
6493 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6494 break;
6495 #ifdef TARGET_NR_mremap
6496 case TARGET_NR_mremap:
6497 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6498 break;
6499 #endif
6500 /* ??? msync/mlock/munlock are broken for softmmu. */
6501 #ifdef TARGET_NR_msync
6502 case TARGET_NR_msync:
6503 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6504 break;
6505 #endif
6506 #ifdef TARGET_NR_mlock
6507 case TARGET_NR_mlock:
6508 ret = get_errno(mlock(g2h(arg1), arg2));
6509 break;
6510 #endif
6511 #ifdef TARGET_NR_munlock
6512 case TARGET_NR_munlock:
6513 ret = get_errno(munlock(g2h(arg1), arg2));
6514 break;
6515 #endif
6516 #ifdef TARGET_NR_mlockall
6517 case TARGET_NR_mlockall:
6518 ret = get_errno(mlockall(arg1));
6519 break;
6520 #endif
6521 #ifdef TARGET_NR_munlockall
6522 case TARGET_NR_munlockall:
6523 ret = get_errno(munlockall());
6524 break;
6525 #endif
6526 case TARGET_NR_truncate:
6527 if (!(p = lock_user_string(arg1)))
6528 goto efault;
6529 ret = get_errno(truncate(p, arg2));
6530 unlock_user(p, arg1, 0);
6531 break;
6532 case TARGET_NR_ftruncate:
6533 ret = get_errno(ftruncate(arg1, arg2));
6534 break;
6535 case TARGET_NR_fchmod:
6536 ret = get_errno(fchmod(arg1, arg2));
6537 break;
6538 #if defined(TARGET_NR_fchmodat)
6539 case TARGET_NR_fchmodat:
6540 if (!(p = lock_user_string(arg2)))
6541 goto efault;
6542 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6543 unlock_user(p, arg2, 0);
6544 break;
6545 #endif
6546 case TARGET_NR_getpriority:
6547 /* Note that negative values are valid for getpriority, so we must
6548 differentiate based on errno settings. */
6549 errno = 0;
6550 ret = getpriority(arg1, arg2);
6551 if (ret == -1 && errno != 0) {
6552 ret = -host_to_target_errno(errno);
6553 break;
6554 }
6555 #ifdef TARGET_ALPHA
6556 /* Return value is the unbiased priority. Signal no error. */
6557 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6558 #else
6559 /* Return value is a biased priority to avoid negative numbers. */
6560 ret = 20 - ret;
6561 #endif
6562 break;
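    /* Editor's note: the raw getpriority syscall ABI returns 20 - nice so
     * that the result is never negative; the non-Alpha path above
     * re-applies that bias (a process at nice -5 is reported as 25) and
     * the guest libc subtracts 20 again, while the Alpha path returns the
     * unbiased value and clears the error flag in IR_V0 instead.
     */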
6563 case TARGET_NR_setpriority:
6564 ret = get_errno(setpriority(arg1, arg2, arg3));
6565 break;
6566 #ifdef TARGET_NR_profil
6567 case TARGET_NR_profil:
6568 goto unimplemented;
6569 #endif
6570 case TARGET_NR_statfs:
6571 if (!(p = lock_user_string(arg1)))
6572 goto efault;
6573 ret = get_errno(statfs(path(p), &stfs));
6574 unlock_user(p, arg1, 0);
6575 convert_statfs:
6576 if (!is_error(ret)) {
6577 struct target_statfs *target_stfs;
6578
6579 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6580 goto efault;
6581 __put_user(stfs.f_type, &target_stfs->f_type);
6582 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6583 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6584 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6585 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6586 __put_user(stfs.f_files, &target_stfs->f_files);
6587 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6588 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6589 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6590 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6591 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6592 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6593 unlock_user_struct(target_stfs, arg2, 1);
6594 }
6595 break;
6596 case TARGET_NR_fstatfs:
6597 ret = get_errno(fstatfs(arg1, &stfs));
6598 goto convert_statfs;
6599 #ifdef TARGET_NR_statfs64
6600 case TARGET_NR_statfs64:
6601 if (!(p = lock_user_string(arg1)))
6602 goto efault;
6603 ret = get_errno(statfs(path(p), &stfs));
6604 unlock_user(p, arg1, 0);
6605 convert_statfs64:
6606 if (!is_error(ret)) {
6607 struct target_statfs64 *target_stfs;
6608
6609 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6610 goto efault;
6611 __put_user(stfs.f_type, &target_stfs->f_type);
6612 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6613 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6614 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6615 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6616 __put_user(stfs.f_files, &target_stfs->f_files);
6617 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6618 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6619 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6620 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6621 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6622 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6623 unlock_user_struct(target_stfs, arg3, 1);
6624 }
6625 break;
6626 case TARGET_NR_fstatfs64:
6627 ret = get_errno(fstatfs(arg1, &stfs));
6628 goto convert_statfs64;
6629 #endif
6630 #ifdef TARGET_NR_ioperm
6631 case TARGET_NR_ioperm:
6632 goto unimplemented;
6633 #endif
6634 #ifdef TARGET_NR_socketcall
6635 case TARGET_NR_socketcall:
6636 ret = do_socketcall(arg1, arg2);
6637 break;
6638 #endif
6639 #ifdef TARGET_NR_accept
6640 case TARGET_NR_accept:
6641 ret = do_accept4(arg1, arg2, arg3, 0);
6642 break;
6643 #endif
6644 #ifdef TARGET_NR_accept4
6645 case TARGET_NR_accept4:
6646 #ifdef CONFIG_ACCEPT4
6647 ret = do_accept4(arg1, arg2, arg3, arg4);
6648 #else
6649 goto unimplemented;
6650 #endif
6651 break;
6652 #endif
6653 #ifdef TARGET_NR_bind
6654 case TARGET_NR_bind:
6655 ret = do_bind(arg1, arg2, arg3);
6656 break;
6657 #endif
6658 #ifdef TARGET_NR_connect
6659 case TARGET_NR_connect:
6660 ret = do_connect(arg1, arg2, arg3);
6661 break;
6662 #endif
6663 #ifdef TARGET_NR_getpeername
6664 case TARGET_NR_getpeername:
6665 ret = do_getpeername(arg1, arg2, arg3);
6666 break;
6667 #endif
6668 #ifdef TARGET_NR_getsockname
6669 case TARGET_NR_getsockname:
6670 ret = do_getsockname(arg1, arg2, arg3);
6671 break;
6672 #endif
6673 #ifdef TARGET_NR_getsockopt
6674 case TARGET_NR_getsockopt:
6675 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6676 break;
6677 #endif
6678 #ifdef TARGET_NR_listen
6679 case TARGET_NR_listen:
6680 ret = get_errno(listen(arg1, arg2));
6681 break;
6682 #endif
6683 #ifdef TARGET_NR_recv
6684 case TARGET_NR_recv:
6685 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6686 break;
6687 #endif
6688 #ifdef TARGET_NR_recvfrom
6689 case TARGET_NR_recvfrom:
6690 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6691 break;
6692 #endif
6693 #ifdef TARGET_NR_recvmsg
6694 case TARGET_NR_recvmsg:
6695 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6696 break;
6697 #endif
6698 #ifdef TARGET_NR_send
6699 case TARGET_NR_send:
6700 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6701 break;
6702 #endif
6703 #ifdef TARGET_NR_sendmsg
6704 case TARGET_NR_sendmsg:
6705 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6706 break;
6707 #endif
6708 #ifdef TARGET_NR_sendto
6709 case TARGET_NR_sendto:
6710 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6711 break;
6712 #endif
6713 #ifdef TARGET_NR_shutdown
6714 case TARGET_NR_shutdown:
6715 ret = get_errno(shutdown(arg1, arg2));
6716 break;
6717 #endif
6718 #ifdef TARGET_NR_socket
6719 case TARGET_NR_socket:
6720 ret = do_socket(arg1, arg2, arg3);
6721 break;
6722 #endif
6723 #ifdef TARGET_NR_socketpair
6724 case TARGET_NR_socketpair:
6725 ret = do_socketpair(arg1, arg2, arg3, arg4);
6726 break;
6727 #endif
6728 #ifdef TARGET_NR_setsockopt
6729 case TARGET_NR_setsockopt:
6730 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6731 break;
6732 #endif
6733
6734 case TARGET_NR_syslog:
6735 if (!(p = lock_user_string(arg2)))
6736 goto efault;
6737 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6738 unlock_user(p, arg2, 0);
6739 break;
6740
6741 case TARGET_NR_setitimer:
6742 {
6743 struct itimerval value, ovalue, *pvalue;
6744
6745 if (arg2) {
6746 pvalue = &value;
6747 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6748 || copy_from_user_timeval(&pvalue->it_value,
6749 arg2 + sizeof(struct target_timeval)))
6750 goto efault;
6751 } else {
6752 pvalue = NULL;
6753 }
6754 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6755 if (!is_error(ret) && arg3) {
6756 if (copy_to_user_timeval(arg3,
6757 &ovalue.it_interval)
6758 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6759 &ovalue.it_value))
6760 goto efault;
6761 }
6762 }
6763 break;
6764 case TARGET_NR_getitimer:
6765 {
6766 struct itimerval value;
6767
6768 ret = get_errno(getitimer(arg1, &value));
6769 if (!is_error(ret) && arg2) {
6770 if (copy_to_user_timeval(arg2,
6771 &value.it_interval)
6772 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6773 &value.it_value))
6774 goto efault;
6775 }
6776 }
6777 break;
6778 case TARGET_NR_stat:
6779 if (!(p = lock_user_string(arg1)))
6780 goto efault;
6781 ret = get_errno(stat(path(p), &st));
6782 unlock_user(p, arg1, 0);
6783 goto do_stat;
6784 case TARGET_NR_lstat:
6785 if (!(p = lock_user_string(arg1)))
6786 goto efault;
6787 ret = get_errno(lstat(path(p), &st));
6788 unlock_user(p, arg1, 0);
6789 goto do_stat;
6790 case TARGET_NR_fstat:
6791 {
6792 ret = get_errno(fstat(arg1, &st));
6793 do_stat:
6794 if (!is_error(ret)) {
6795 struct target_stat *target_st;
6796
6797 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6798 goto efault;
6799 memset(target_st, 0, sizeof(*target_st));
6800 __put_user(st.st_dev, &target_st->st_dev);
6801 __put_user(st.st_ino, &target_st->st_ino);
6802 __put_user(st.st_mode, &target_st->st_mode);
6803 __put_user(st.st_uid, &target_st->st_uid);
6804 __put_user(st.st_gid, &target_st->st_gid);
6805 __put_user(st.st_nlink, &target_st->st_nlink);
6806 __put_user(st.st_rdev, &target_st->st_rdev);
6807 __put_user(st.st_size, &target_st->st_size);
6808 __put_user(st.st_blksize, &target_st->st_blksize);
6809 __put_user(st.st_blocks, &target_st->st_blocks);
6810 __put_user(st.st_atime, &target_st->target_st_atime);
6811 __put_user(st.st_mtime, &target_st->target_st_mtime);
6812 __put_user(st.st_ctime, &target_st->target_st_ctime);
6813 unlock_user_struct(target_st, arg2, 1);
6814 }
6815 }
6816 break;
6817 #ifdef TARGET_NR_olduname
6818 case TARGET_NR_olduname:
6819 goto unimplemented;
6820 #endif
6821 #ifdef TARGET_NR_iopl
6822 case TARGET_NR_iopl:
6823 goto unimplemented;
6824 #endif
6825 case TARGET_NR_vhangup:
6826 ret = get_errno(vhangup());
6827 break;
6828 #ifdef TARGET_NR_idle
6829 case TARGET_NR_idle:
6830 goto unimplemented;
6831 #endif
6832 #ifdef TARGET_NR_syscall
6833 case TARGET_NR_syscall:
6834 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6835 arg6, arg7, arg8, 0);
6836 break;
6837 #endif
6838 case TARGET_NR_wait4:
6839 {
6840 int status;
6841 abi_long status_ptr = arg2;
6842 struct rusage rusage, *rusage_ptr;
6843 abi_ulong target_rusage = arg4;
6844 if (target_rusage)
6845 rusage_ptr = &rusage;
6846 else
6847 rusage_ptr = NULL;
6848 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6849 if (!is_error(ret)) {
6850 if (status_ptr && ret) {
6851 status = host_to_target_waitstatus(status);
6852 if (put_user_s32(status, status_ptr))
6853 goto efault;
6854 }
6855 if (target_rusage)
6856 host_to_target_rusage(target_rusage, &rusage);
6857 }
6858 }
6859 break;
6860 #ifdef TARGET_NR_swapoff
6861 case TARGET_NR_swapoff:
6862 if (!(p = lock_user_string(arg1)))
6863 goto efault;
6864 ret = get_errno(swapoff(p));
6865 unlock_user(p, arg1, 0);
6866 break;
6867 #endif
6868 case TARGET_NR_sysinfo:
6869 {
6870 struct target_sysinfo *target_value;
6871 struct sysinfo value;
6872 ret = get_errno(sysinfo(&value));
6873 if (!is_error(ret) && arg1)
6874 {
6875 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6876 goto efault;
6877 __put_user(value.uptime, &target_value->uptime);
6878 __put_user(value.loads[0], &target_value->loads[0]);
6879 __put_user(value.loads[1], &target_value->loads[1]);
6880 __put_user(value.loads[2], &target_value->loads[2]);
6881 __put_user(value.totalram, &target_value->totalram);
6882 __put_user(value.freeram, &target_value->freeram);
6883 __put_user(value.sharedram, &target_value->sharedram);
6884 __put_user(value.bufferram, &target_value->bufferram);
6885 __put_user(value.totalswap, &target_value->totalswap);
6886 __put_user(value.freeswap, &target_value->freeswap);
6887 __put_user(value.procs, &target_value->procs);
6888 __put_user(value.totalhigh, &target_value->totalhigh);
6889 __put_user(value.freehigh, &target_value->freehigh);
6890 __put_user(value.mem_unit, &target_value->mem_unit);
6891 unlock_user_struct(target_value, arg1, 1);
6892 }
6893 }
6894 break;
6895 #ifdef TARGET_NR_ipc
6896 case TARGET_NR_ipc:
6897 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6898 break;
6899 #endif
6900 #ifdef TARGET_NR_semget
6901 case TARGET_NR_semget:
6902 ret = get_errno(semget(arg1, arg2, arg3));
6903 break;
6904 #endif
6905 #ifdef TARGET_NR_semop
6906 case TARGET_NR_semop:
6907 ret = do_semop(arg1, arg2, arg3);
6908 break;
6909 #endif
6910 #ifdef TARGET_NR_semctl
6911 case TARGET_NR_semctl:
6912 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6913 break;
6914 #endif
6915 #ifdef TARGET_NR_msgctl
6916 case TARGET_NR_msgctl:
6917 ret = do_msgctl(arg1, arg2, arg3);
6918 break;
6919 #endif
6920 #ifdef TARGET_NR_msgget
6921 case TARGET_NR_msgget:
6922 ret = get_errno(msgget(arg1, arg2));
6923 break;
6924 #endif
6925 #ifdef TARGET_NR_msgrcv
6926 case TARGET_NR_msgrcv:
6927 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6928 break;
6929 #endif
6930 #ifdef TARGET_NR_msgsnd
6931 case TARGET_NR_msgsnd:
6932 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6933 break;
6934 #endif
6935 #ifdef TARGET_NR_shmget
6936 case TARGET_NR_shmget:
6937 ret = get_errno(shmget(arg1, arg2, arg3));
6938 break;
6939 #endif
6940 #ifdef TARGET_NR_shmctl
6941 case TARGET_NR_shmctl:
6942 ret = do_shmctl(arg1, arg2, arg3);
6943 break;
6944 #endif
6945 #ifdef TARGET_NR_shmat
6946 case TARGET_NR_shmat:
6947 ret = do_shmat(arg1, arg2, arg3);
6948 break;
6949 #endif
6950 #ifdef TARGET_NR_shmdt
6951 case TARGET_NR_shmdt:
6952 ret = do_shmdt(arg1);
6953 break;
6954 #endif
6955 case TARGET_NR_fsync:
6956 ret = get_errno(fsync(arg1));
6957 break;
6958 case TARGET_NR_clone:
6959 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6960 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6961 #elif defined(TARGET_CRIS)
6962 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6963 #elif defined(TARGET_MICROBLAZE)
6964 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6965 #elif defined(TARGET_S390X)
6966 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6967 #else
6968 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6969 #endif
6970 break;
6971 #ifdef __NR_exit_group
6972 /* new thread calls */
6973 case TARGET_NR_exit_group:
6974 #ifdef TARGET_GPROF
6975 _mcleanup();
6976 #endif
6977 gdb_exit(cpu_env, arg1);
6978 ret = get_errno(exit_group(arg1));
6979 break;
6980 #endif
6981 case TARGET_NR_setdomainname:
6982 if (!(p = lock_user_string(arg1)))
6983 goto efault;
6984 ret = get_errno(setdomainname(p, arg2));
6985 unlock_user(p, arg1, 0);
6986 break;
6987 case TARGET_NR_uname:
6988 /* no need to transcode because we use the linux syscall */
6989 {
6990 struct new_utsname * buf;
6991
6992 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6993 goto efault;
6994 ret = get_errno(sys_uname(buf));
6995 if (!is_error(ret)) {
6996 /* Overwrite the native machine name with whatever is being
6997 emulated. */
6998 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6999 /* Allow the user to override the reported release. */
7000 if (qemu_uname_release && *qemu_uname_release)
7001 strcpy (buf->release, qemu_uname_release);
7002 }
7003 unlock_user_struct(buf, arg1, 1);
7004 }
7005 break;
7006 #ifdef TARGET_I386
7007 case TARGET_NR_modify_ldt:
7008 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7009 break;
7010 #if !defined(TARGET_X86_64)
7011 case TARGET_NR_vm86old:
7012 goto unimplemented;
7013 case TARGET_NR_vm86:
7014 ret = do_vm86(cpu_env, arg1, arg2);
7015 break;
7016 #endif
7017 #endif
7018 case TARGET_NR_adjtimex:
7019 goto unimplemented;
7020 #ifdef TARGET_NR_create_module
7021 case TARGET_NR_create_module:
7022 #endif
7023 case TARGET_NR_init_module:
7024 case TARGET_NR_delete_module:
7025 #ifdef TARGET_NR_get_kernel_syms
7026 case TARGET_NR_get_kernel_syms:
7027 #endif
7028 goto unimplemented;
7029 case TARGET_NR_quotactl:
7030 goto unimplemented;
7031 case TARGET_NR_getpgid:
7032 ret = get_errno(getpgid(arg1));
7033 break;
7034 case TARGET_NR_fchdir:
7035 ret = get_errno(fchdir(arg1));
7036 break;
7037 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7038 case TARGET_NR_bdflush:
7039 goto unimplemented;
7040 #endif
7041 #ifdef TARGET_NR_sysfs
7042 case TARGET_NR_sysfs:
7043 goto unimplemented;
7044 #endif
7045 case TARGET_NR_personality:
7046 ret = get_errno(personality(arg1));
7047 break;
7048 #ifdef TARGET_NR_afs_syscall
7049 case TARGET_NR_afs_syscall:
7050 goto unimplemented;
7051 #endif
7052 #ifdef TARGET_NR__llseek /* Not on alpha */
7053 case TARGET_NR__llseek:
7054 {
7055 int64_t res;
7056 #if !defined(__NR_llseek)
7057 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7058 if (res == -1) {
7059 ret = get_errno(res);
7060 } else {
7061 ret = 0;
7062 }
7063 #else
7064 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7065 #endif
7066 if ((ret == 0) && put_user_s64(res, arg4)) {
7067 goto efault;
7068 }
7069 }
7070 break;
7071 #endif
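    /* Editor's note: _llseek splits the 64-bit offset across two
     * registers, arg2 carrying the high 32 bits and arg3 the low 32 bits,
     * which is why the fallback path above rebuilds it as
     * ((uint64_t)arg2 << 32) | arg3 and writes the resulting 64-bit
     * position back through the guest pointer in arg4.
     */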
7072 case TARGET_NR_getdents:
7073 #ifdef __NR_getdents
7074 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7075 {
7076 struct target_dirent *target_dirp;
7077 struct linux_dirent *dirp;
7078 abi_long count = arg3;
7079
7080 dirp = malloc(count);
7081 if (!dirp) {
7082 ret = -TARGET_ENOMEM;
7083 goto fail;
7084 }
7085
7086 ret = get_errno(sys_getdents(arg1, dirp, count));
7087 if (!is_error(ret)) {
7088 struct linux_dirent *de;
7089 struct target_dirent *tde;
7090 int len = ret;
7091 int reclen, treclen;
7092 int count1, tnamelen;
7093
7094 count1 = 0;
7095 de = dirp;
7096 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7097 goto efault;
7098 tde = target_dirp;
7099 while (len > 0) {
7100 reclen = de->d_reclen;
7101 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7102 assert(tnamelen >= 0);
7103 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7104 assert(count1 + treclen <= count);
7105 tde->d_reclen = tswap16(treclen);
7106 tde->d_ino = tswapal(de->d_ino);
7107 tde->d_off = tswapal(de->d_off);
7108 memcpy(tde->d_name, de->d_name, tnamelen);
7109 de = (struct linux_dirent *)((char *)de + reclen);
7110 len -= reclen;
7111 tde = (struct target_dirent *)((char *)tde + treclen);
7112 count1 += treclen;
7113 }
7114 ret = count1;
7115 unlock_user(target_dirp, arg2, ret);
7116 }
7117 free(dirp);
7118 }
7119 #else
7120 {
7121 struct linux_dirent *dirp;
7122 abi_long count = arg3;
7123
7124 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7125 goto efault;
7126 ret = get_errno(sys_getdents(arg1, dirp, count));
7127 if (!is_error(ret)) {
7128 struct linux_dirent *de;
7129 int len = ret;
7130 int reclen;
7131 de = dirp;
7132 while (len > 0) {
7133 reclen = de->d_reclen;
7134 if (reclen > len)
7135 break;
7136 de->d_reclen = tswap16(reclen);
7137 tswapls(&de->d_ino);
7138 tswapls(&de->d_off);
7139 de = (struct linux_dirent *)((char *)de + reclen);
7140 len -= reclen;
7141 }
7142 }
7143 unlock_user(dirp, arg2, ret);
7144 }
7145 #endif
7146 #else
7147 /* Implement getdents in terms of getdents64 */
7148 {
7149 struct linux_dirent64 *dirp;
7150 abi_long count = arg3;
7151
7152 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7153 if (!dirp) {
7154 goto efault;
7155 }
7156 ret = get_errno(sys_getdents64(arg1, dirp, count));
7157 if (!is_error(ret)) {
7158 /* Convert the dirent64 structs to target dirent. We do this
7159 * in-place, since we can guarantee that a target_dirent is no
7160 * larger than a dirent64; however this means we have to be
7161 * careful to read everything before writing in the new format.
7162 */
7163 struct linux_dirent64 *de;
7164 struct target_dirent *tde;
7165 int len = ret;
7166 int tlen = 0;
7167
7168 de = dirp;
7169 tde = (struct target_dirent *)dirp;
7170 while (len > 0) {
7171 int namelen, treclen;
7172 int reclen = de->d_reclen;
7173 uint64_t ino = de->d_ino;
7174 int64_t off = de->d_off;
7175 uint8_t type = de->d_type;
7176
7177 namelen = strlen(de->d_name);
7178 treclen = offsetof(struct target_dirent, d_name)
7179 + namelen + 2;
7180 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7181
7182 memmove(tde->d_name, de->d_name, namelen + 1);
7183 tde->d_ino = tswapal(ino);
7184 tde->d_off = tswapal(off);
7185 tde->d_reclen = tswap16(treclen);
7186 /* The target_dirent type is in what was formerly a padding
7187 * byte at the end of the structure:
7188 */
7189 *(((char *)tde) + treclen - 1) = type;
7190
7191 de = (struct linux_dirent64 *)((char *)de + reclen);
7192 tde = (struct target_dirent *)((char *)tde + treclen);
7193 len -= reclen;
7194 tlen += treclen;
7195 }
7196 ret = tlen;
7197 }
7198 unlock_user(dirp, arg2, ret);
7199 }
7200 #endif
7201 break;
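    /* Editor's note -- rough summary, not authoritative: all three
     * getdents paths above depend on a converted target record never
     * being larger than the host record it came from, so the directory
     * buffer can be rewritten in place (or, in the 32-bit-target on
     * 64-bit-host case, copied record by record with tswap16/tswapal
     * applied to d_reclen, d_ino and d_off).
     */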
7202 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7203 case TARGET_NR_getdents64:
7204 {
7205 struct linux_dirent64 *dirp;
7206 abi_long count = arg3;
7207 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7208 goto efault;
7209 ret = get_errno(sys_getdents64(arg1, dirp, count));
7210 if (!is_error(ret)) {
7211 struct linux_dirent64 *de;
7212 int len = ret;
7213 int reclen;
7214 de = dirp;
7215 while (len > 0) {
7216 reclen = de->d_reclen;
7217 if (reclen > len)
7218 break;
7219 de->d_reclen = tswap16(reclen);
7220 tswap64s((uint64_t *)&de->d_ino);
7221 tswap64s((uint64_t *)&de->d_off);
7222 de = (struct linux_dirent64 *)((char *)de + reclen);
7223 len -= reclen;
7224 }
7225 }
7226 unlock_user(dirp, arg2, ret);
7227 }
7228 break;
7229 #endif /* TARGET_NR_getdents64 */
7230 #if defined(TARGET_NR__newselect)
7231 case TARGET_NR__newselect:
7232 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7233 break;
7234 #endif
7235 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7236 # ifdef TARGET_NR_poll
7237 case TARGET_NR_poll:
7238 # endif
7239 # ifdef TARGET_NR_ppoll
7240 case TARGET_NR_ppoll:
7241 # endif
7242 {
7243 struct target_pollfd *target_pfd;
7244 unsigned int nfds = arg2;
7245 int timeout = arg3;
7246 struct pollfd *pfd;
7247 unsigned int i;
7248
7249 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7250 if (!target_pfd)
7251 goto efault;
7252
7253 pfd = alloca(sizeof(struct pollfd) * nfds);
7254 for(i = 0; i < nfds; i++) {
7255 pfd[i].fd = tswap32(target_pfd[i].fd);
7256 pfd[i].events = tswap16(target_pfd[i].events);
7257 }
7258
7259 # ifdef TARGET_NR_ppoll
7260 if (num == TARGET_NR_ppoll) {
7261 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7262 target_sigset_t *target_set;
7263 sigset_t _set, *set = &_set;
7264
7265 if (arg3) {
7266 if (target_to_host_timespec(timeout_ts, arg3)) {
7267 unlock_user(target_pfd, arg1, 0);
7268 goto efault;
7269 }
7270 } else {
7271 timeout_ts = NULL;
7272 }
7273
7274 if (arg4) {
7275 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7276 if (!target_set) {
7277 unlock_user(target_pfd, arg1, 0);
7278 goto efault;
7279 }
7280 target_to_host_sigset(set, target_set);
7281 } else {
7282 set = NULL;
7283 }
7284
7285 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7286
7287 if (!is_error(ret) && arg3) {
7288 host_to_target_timespec(arg3, timeout_ts);
7289 }
7290 if (arg4) {
7291 unlock_user(target_set, arg4, 0);
7292 }
7293 } else
7294 # endif
7295 ret = get_errno(poll(pfd, nfds, timeout));
7296
7297 if (!is_error(ret)) {
7298 for(i = 0; i < nfds; i++) {
7299 target_pfd[i].revents = tswap16(pfd[i].revents);
7300 }
7301 }
7302 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7303 }
7304 break;
7305 #endif
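    /* Editor's note: poll and ppoll share the conversion above -- each
     * target_pollfd is byte-swapped into a host struct pollfd on the way
     * in (fd via tswap32, events via tswap16) and only the revents field
     * is swapped back out after the host call returns.
     */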
7306 case TARGET_NR_flock:
7307 /* NOTE: the flock constant seems to be the same for every
7308 Linux platform */
7309 ret = get_errno(flock(arg1, arg2));
7310 break;
7311 case TARGET_NR_readv:
7312 {
7313 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7314 if (vec != NULL) {
7315 ret = get_errno(readv(arg1, vec, arg3));
7316 unlock_iovec(vec, arg2, arg3, 1);
7317 } else {
7318 ret = -host_to_target_errno(errno);
7319 }
7320 }
7321 break;
7322 case TARGET_NR_writev:
7323 {
7324 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7325 if (vec != NULL) {
7326 ret = get_errno(writev(arg1, vec, arg3));
7327 unlock_iovec(vec, arg2, arg3, 0);
7328 } else {
7329 ret = -host_to_target_errno(errno);
7330 }
7331 }
7332 break;
7333 case TARGET_NR_getsid:
7334 ret = get_errno(getsid(arg1));
7335 break;
7336 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7337 case TARGET_NR_fdatasync:
7338 ret = get_errno(fdatasync(arg1));
7339 break;
7340 #endif
7341 case TARGET_NR__sysctl:
7342 /* We don't implement this, but ENOTDIR is always a safe
7343 return value. */
7344 ret = -TARGET_ENOTDIR;
7345 break;
7346 case TARGET_NR_sched_getaffinity:
7347 {
7348 unsigned int mask_size;
7349 unsigned long *mask;
7350
7351 /*
7352 * sched_getaffinity needs multiples of ulong, so we need to take
7353 * care of mismatches between target ulong and host ulong sizes.
7354 */
7355 if (arg2 & (sizeof(abi_ulong) - 1)) {
7356 ret = -TARGET_EINVAL;
7357 break;
7358 }
7359 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7360
7361 mask = alloca(mask_size);
7362 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7363
7364 if (!is_error(ret)) {
7365 if (copy_to_user(arg3, mask, ret)) {
7366 goto efault;
7367 }
7368 }
7369 }
7370 break;
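    /* Editor's note: the kernel wants the affinity mask length in
     * multiples of sizeof(unsigned long), so arg2 is checked for
     * abi_ulong alignment and then rounded up to the host word size
     * before the raw syscall; only the `ret` bytes reported by the
     * kernel are copied back to the guest buffer at arg3.
     */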
7371 case TARGET_NR_sched_setaffinity:
7372 {
7373 unsigned int mask_size;
7374 unsigned long *mask;
7375
7376 /*
7377 * sched_setaffinity needs multiples of ulong, so we need to take
7378 * care of mismatches between target ulong and host ulong sizes.
7379 */
7380 if (arg2 & (sizeof(abi_ulong) - 1)) {
7381 ret = -TARGET_EINVAL;
7382 break;
7383 }
7384 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7385
7386 mask = alloca(mask_size);
7387 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7388 goto efault;
7389 }
7390 memcpy(mask, p, arg2);
7391 unlock_user_struct(p, arg3, 0);
7392
7393 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7394 }
7395 break;
7396 case TARGET_NR_sched_setparam:
7397 {
7398 struct sched_param *target_schp;
7399 struct sched_param schp;
7400
7401 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7402 goto efault;
7403 schp.sched_priority = tswap32(target_schp->sched_priority);
7404 unlock_user_struct(target_schp, arg2, 0);
7405 ret = get_errno(sched_setparam(arg1, &schp));
7406 }
7407 break;
7408 case TARGET_NR_sched_getparam:
7409 {
7410 struct sched_param *target_schp;
7411 struct sched_param schp;
7412 ret = get_errno(sched_getparam(arg1, &schp));
7413 if (!is_error(ret)) {
7414 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7415 goto efault;
7416 target_schp->sched_priority = tswap32(schp.sched_priority);
7417 unlock_user_struct(target_schp, arg2, 1);
7418 }
7419 }
7420 break;
7421 case TARGET_NR_sched_setscheduler:
7422 {
7423 struct sched_param *target_schp;
7424 struct sched_param schp;
7425 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7426 goto efault;
7427 schp.sched_priority = tswap32(target_schp->sched_priority);
7428 unlock_user_struct(target_schp, arg3, 0);
7429 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7430 }
7431 break;
7432 case TARGET_NR_sched_getscheduler:
7433 ret = get_errno(sched_getscheduler(arg1));
7434 break;
7435 case TARGET_NR_sched_yield:
7436 ret = get_errno(sched_yield());
7437 break;
7438 case TARGET_NR_sched_get_priority_max:
7439 ret = get_errno(sched_get_priority_max(arg1));
7440 break;
7441 case TARGET_NR_sched_get_priority_min:
7442 ret = get_errno(sched_get_priority_min(arg1));
7443 break;
7444 case TARGET_NR_sched_rr_get_interval:
7445 {
7446 struct timespec ts;
7447 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7448 if (!is_error(ret)) {
7449 host_to_target_timespec(arg2, &ts);
7450 }
7451 }
7452 break;
7453 case TARGET_NR_nanosleep:
7454 {
7455 struct timespec req, rem;
7456 target_to_host_timespec(&req, arg1);
7457 ret = get_errno(nanosleep(&req, &rem));
7458 if (is_error(ret) && arg2) {
7459 host_to_target_timespec(arg2, &rem);
7460 }
7461 }
7462 break;
7463 #ifdef TARGET_NR_query_module
7464 case TARGET_NR_query_module:
7465 goto unimplemented;
7466 #endif
7467 #ifdef TARGET_NR_nfsservctl
7468 case TARGET_NR_nfsservctl:
7469 goto unimplemented;
7470 #endif
7471 case TARGET_NR_prctl:
7472 switch (arg1) {
7473 case PR_GET_PDEATHSIG:
7474 {
7475 int deathsig;
7476 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7477 if (!is_error(ret) && arg2
7478 && put_user_ual(deathsig, arg2)) {
7479 goto efault;
7480 }
7481 break;
7482 }
7483 #ifdef PR_GET_NAME
7484 case PR_GET_NAME:
7485 {
7486 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7487 if (!name) {
7488 goto efault;
7489 }
7490 ret = get_errno(prctl(arg1, (unsigned long)name,
7491 arg3, arg4, arg5));
7492 unlock_user(name, arg2, 16);
7493 break;
7494 }
7495 case PR_SET_NAME:
7496 {
7497 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7498 if (!name) {
7499 goto efault;
7500 }
7501 ret = get_errno(prctl(arg1, (unsigned long)name,
7502 arg3, arg4, arg5));
7503 unlock_user(name, arg2, 0);
7504 break;
7505 }
7506 #endif
7507 default:
7508 /* Most prctl options have no pointer arguments */
7509 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7510 break;
7511 }
7512 break;
7513 #ifdef TARGET_NR_arch_prctl
7514 case TARGET_NR_arch_prctl:
7515 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7516 ret = do_arch_prctl(cpu_env, arg1, arg2);
7517 break;
7518 #else
7519 goto unimplemented;
7520 #endif
7521 #endif
7522 #ifdef TARGET_NR_pread64
7523 case TARGET_NR_pread64:
7524 if (regpairs_aligned(cpu_env)) {
7525 arg4 = arg5;
7526 arg5 = arg6;
7527 }
7528 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7529 goto efault;
7530 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7531 unlock_user(p, arg2, ret);
7532 break;
7533 case TARGET_NR_pwrite64:
7534 if (regpairs_aligned(cpu_env)) {
7535 arg4 = arg5;
7536 arg5 = arg6;
7537 }
7538 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7539 goto efault;
7540 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7541 unlock_user(p, arg2, 0);
7542 break;
7543 #endif
7544 case TARGET_NR_getcwd:
7545 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7546 goto efault;
7547 ret = get_errno(sys_getcwd1(p, arg2));
7548 unlock_user(p, arg1, ret);
7549 break;
7550 case TARGET_NR_capget:
7551 goto unimplemented;
7552 case TARGET_NR_capset:
7553 goto unimplemented;
7554 case TARGET_NR_sigaltstack:
7555 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7556 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7557 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7558 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7559 break;
7560 #else
7561 goto unimplemented;
7562 #endif
7563
7564 #ifdef CONFIG_SENDFILE
7565 case TARGET_NR_sendfile:
7566 {
7567 off_t *offp = NULL;
7568 off_t off;
7569 if (arg3) {
7570 ret = get_user_sal(off, arg3);
7571 if (is_error(ret)) {
7572 break;
7573 }
7574 offp = &off;
7575 }
7576 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7577 if (!is_error(ret) && arg3) {
7578 abi_long ret2 = put_user_sal(off, arg3);
7579 if (is_error(ret2)) {
7580 ret = ret2;
7581 }
7582 }
7583 break;
7584 }
7585 #ifdef TARGET_NR_sendfile64
7586 case TARGET_NR_sendfile64:
7587 {
7588 off_t *offp = NULL;
7589 off_t off;
7590 if (arg3) {
7591 ret = get_user_s64(off, arg3);
7592 if (is_error(ret)) {
7593 break;
7594 }
7595 offp = &off;
7596 }
7597 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7598 if (!is_error(ret) && arg3) {
7599 abi_long ret2 = put_user_s64(off, arg3);
7600 if (is_error(ret2)) {
7601 ret = ret2;
7602 }
7603 }
7604 break;
7605 }
7606 #endif
7607 #else
7608 case TARGET_NR_sendfile:
7609 #ifdef TARGET_NR_sendfile64
7610 case TARGET_NR_sendfile64:
7611 #endif
7612 goto unimplemented;
7613 #endif
7614
7615 #ifdef TARGET_NR_getpmsg
7616 case TARGET_NR_getpmsg:
7617 goto unimplemented;
7618 #endif
7619 #ifdef TARGET_NR_putpmsg
7620 case TARGET_NR_putpmsg:
7621 goto unimplemented;
7622 #endif
7623 #ifdef TARGET_NR_vfork
7624 case TARGET_NR_vfork:
7625 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7626 0, 0, 0, 0));
7627 break;
7628 #endif
7629 #ifdef TARGET_NR_ugetrlimit
7630 case TARGET_NR_ugetrlimit:
7631 {
7632 struct rlimit rlim;
7633 int resource = target_to_host_resource(arg1);
7634 ret = get_errno(getrlimit(resource, &rlim));
7635 if (!is_error(ret)) {
7636 struct target_rlimit *target_rlim;
7637 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7638 goto efault;
7639 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7640 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7641 unlock_user_struct(target_rlim, arg2, 1);
7642 }
7643 break;
7644 }
7645 #endif
7646 #ifdef TARGET_NR_truncate64
7647 case TARGET_NR_truncate64:
7648 if (!(p = lock_user_string(arg1)))
7649 goto efault;
7650 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7651 unlock_user(p, arg1, 0);
7652 break;
7653 #endif
7654 #ifdef TARGET_NR_ftruncate64
7655 case TARGET_NR_ftruncate64:
7656 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7657 break;
7658 #endif
7659 #ifdef TARGET_NR_stat64
7660 case TARGET_NR_stat64:
7661 if (!(p = lock_user_string(arg1)))
7662 goto efault;
7663 ret = get_errno(stat(path(p), &st));
7664 unlock_user(p, arg1, 0);
7665 if (!is_error(ret))
7666 ret = host_to_target_stat64(cpu_env, arg2, &st);
7667 break;
7668 #endif
7669 #ifdef TARGET_NR_lstat64
7670 case TARGET_NR_lstat64:
7671 if (!(p = lock_user_string(arg1)))
7672 goto efault;
7673 ret = get_errno(lstat(path(p), &st));
7674 unlock_user(p, arg1, 0);
7675 if (!is_error(ret))
7676 ret = host_to_target_stat64(cpu_env, arg2, &st);
7677 break;
7678 #endif
7679 #ifdef TARGET_NR_fstat64
7680 case TARGET_NR_fstat64:
7681 ret = get_errno(fstat(arg1, &st));
7682 if (!is_error(ret))
7683 ret = host_to_target_stat64(cpu_env, arg2, &st);
7684 break;
7685 #endif
7686 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7687 #ifdef TARGET_NR_fstatat64
7688 case TARGET_NR_fstatat64:
7689 #endif
7690 #ifdef TARGET_NR_newfstatat
7691 case TARGET_NR_newfstatat:
7692 #endif
7693 if (!(p = lock_user_string(arg2)))
7694 goto efault;
7695 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7696 if (!is_error(ret))
7697 ret = host_to_target_stat64(cpu_env, arg3, &st);
7698 break;
7699 #endif
7700 case TARGET_NR_lchown:
7701 if (!(p = lock_user_string(arg1)))
7702 goto efault;
7703 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7704 unlock_user(p, arg1, 0);
7705 break;
7706 #ifdef TARGET_NR_getuid
7707 case TARGET_NR_getuid:
7708 ret = get_errno(high2lowuid(getuid()));
7709 break;
7710 #endif
7711 #ifdef TARGET_NR_getgid
7712 case TARGET_NR_getgid:
7713 ret = get_errno(high2lowgid(getgid()));
7714 break;
7715 #endif
7716 #ifdef TARGET_NR_geteuid
7717 case TARGET_NR_geteuid:
7718 ret = get_errno(high2lowuid(geteuid()));
7719 break;
7720 #endif
7721 #ifdef TARGET_NR_getegid
7722 case TARGET_NR_getegid:
7723 ret = get_errno(high2lowgid(getegid()));
7724 break;
7725 #endif
7726 case TARGET_NR_setreuid:
7727 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7728 break;
7729 case TARGET_NR_setregid:
7730 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7731 break;
7732 case TARGET_NR_getgroups:
7733 {
7734 int gidsetsize = arg1;
7735 target_id *target_grouplist;
7736 gid_t *grouplist;
7737 int i;
7738
7739 grouplist = alloca(gidsetsize * sizeof(gid_t));
7740 ret = get_errno(getgroups(gidsetsize, grouplist));
7741 if (gidsetsize == 0)
7742 break;
7743 if (!is_error(ret)) {
7744 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7745 if (!target_grouplist)
7746 goto efault;
7747 for(i = 0;i < ret; i++)
7748 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7749 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7750 }
7751 }
7752 break;
7753 case TARGET_NR_setgroups:
7754 {
7755 int gidsetsize = arg1;
7756 target_id *target_grouplist;
7757 gid_t *grouplist = NULL;
7758 int i;
7759 if (gidsetsize) {
7760 grouplist = alloca(gidsetsize * sizeof(gid_t));
7761 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7762 if (!target_grouplist) {
7763 ret = -TARGET_EFAULT;
7764 goto fail;
7765 }
7766 for (i = 0; i < gidsetsize; i++) {
7767 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7768 }
7769 unlock_user(target_grouplist, arg2, 0);
7770 }
7771 ret = get_errno(setgroups(gidsetsize, grouplist));
7772 }
7773 break;
7774 case TARGET_NR_fchown:
7775 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7776 break;
7777 #if defined(TARGET_NR_fchownat)
7778 case TARGET_NR_fchownat:
7779 if (!(p = lock_user_string(arg2)))
7780 goto efault;
7781 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7782 low2highgid(arg4), arg5));
7783 unlock_user(p, arg2, 0);
7784 break;
7785 #endif
7786 #ifdef TARGET_NR_setresuid
7787 case TARGET_NR_setresuid:
7788 ret = get_errno(setresuid(low2highuid(arg1),
7789 low2highuid(arg2),
7790 low2highuid(arg3)));
7791 break;
7792 #endif
7793 #ifdef TARGET_NR_getresuid
7794 case TARGET_NR_getresuid:
7795 {
7796 uid_t ruid, euid, suid;
7797 ret = get_errno(getresuid(&ruid, &euid, &suid));
7798 if (!is_error(ret)) {
7799 if (put_user_u16(high2lowuid(ruid), arg1)
7800 || put_user_u16(high2lowuid(euid), arg2)
7801 || put_user_u16(high2lowuid(suid), arg3))
7802 goto efault;
7803 }
7804 }
7805 break;
7806 #endif
7807 #ifdef TARGET_NR_setresgid
7808 case TARGET_NR_setresgid:
7809 ret = get_errno(setresgid(low2highgid(arg1),
7810 low2highgid(arg2),
7811 low2highgid(arg3)));
7812 break;
7813 #endif
7814 #ifdef TARGET_NR_getresgid
7815 case TARGET_NR_getresgid:
7816 {
7817 gid_t rgid, egid, sgid;
7818 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7819 if (!is_error(ret)) {
7820 if (put_user_u16(high2lowgid(rgid), arg1)
7821 || put_user_u16(high2lowgid(egid), arg2)
7822 || put_user_u16(high2lowgid(sgid), arg3))
7823 goto efault;
7824 }
7825 }
7826 break;
7827 #endif
7828 case TARGET_NR_chown:
7829 if (!(p = lock_user_string(arg1)))
7830 goto efault;
7831 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7832 unlock_user(p, arg1, 0);
7833 break;
7834 case TARGET_NR_setuid:
7835 ret = get_errno(setuid(low2highuid(arg1)));
7836 break;
7837 case TARGET_NR_setgid:
7838 ret = get_errno(setgid(low2highgid(arg1)));
7839 break;
7840 case TARGET_NR_setfsuid:
7841 ret = get_errno(setfsuid(arg1));
7842 break;
7843 case TARGET_NR_setfsgid:
7844 ret = get_errno(setfsgid(arg1));
7845 break;
7846
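/* The *32 syscall variants below take full 32-bit uid/gid values, so
 * the arguments are passed to the host calls without conversion.
 */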
7847 #ifdef TARGET_NR_lchown32
7848 case TARGET_NR_lchown32:
7849 if (!(p = lock_user_string(arg1)))
7850 goto efault;
7851 ret = get_errno(lchown(p, arg2, arg3));
7852 unlock_user(p, arg1, 0);
7853 break;
7854 #endif
7855 #ifdef TARGET_NR_getuid32
7856 case TARGET_NR_getuid32:
7857 ret = get_errno(getuid());
7858 break;
7859 #endif
7860
7861 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7862 /* Alpha specific */
7863 case TARGET_NR_getxuid:
7864 {
7865 uid_t euid;
7866 euid=geteuid();
7867 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7868 }
7869 ret = get_errno(getuid());
7870 break;
7871 #endif
7872 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7873 /* Alpha specific */
7874 case TARGET_NR_getxgid:
7875 {
7876 gid_t egid;
7877 egid=getegid();
7878 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7879 }
7880 ret = get_errno(getgid());
7881 break;
7882 #endif
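/* osf_getsysinfo/osf_setsysinfo emulate the Alpha/OSF interface for
 * reading and writing the IEEE floating-point control state: the
 * software completion register (swcr) view is derived from, and
 * written back to, the CPU's FPCR using the same bit mapping as the
 * Linux kernel.
 */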
7883 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7884 /* Alpha specific */
7885 case TARGET_NR_osf_getsysinfo:
7886 ret = -TARGET_EOPNOTSUPP;
7887 switch (arg1) {
7888 case TARGET_GSI_IEEE_FP_CONTROL:
7889 {
7890 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7891
7892 /* Copied from linux ieee_fpcr_to_swcr. */
7893 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7894 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7895 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7896 | SWCR_TRAP_ENABLE_DZE
7897 | SWCR_TRAP_ENABLE_OVF);
7898 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7899 | SWCR_TRAP_ENABLE_INE);
7900 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7901 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7902
7903 if (put_user_u64 (swcr, arg2))
7904 goto efault;
7905 ret = 0;
7906 }
7907 break;
7908
7909 /* case GSI_IEEE_STATE_AT_SIGNAL:
7910 -- Not implemented in linux kernel.
7911 case GSI_UACPROC:
7912 -- Retrieves current unaligned access state; not much used.
7913 case GSI_PROC_TYPE:
7914 -- Retrieves implver information; surely not used.
7915 case GSI_GET_HWRPB:
7916 -- Grabs a copy of the HWRPB; surely not used.
7917 */
7918 }
7919 break;
7920 #endif
7921 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7922 /* Alpha specific */
7923 case TARGET_NR_osf_setsysinfo:
7924 ret = -TARGET_EOPNOTSUPP;
7925 switch (arg1) {
7926 case TARGET_SSI_IEEE_FP_CONTROL:
7927 {
7928 uint64_t swcr, fpcr, orig_fpcr;
7929
7930 if (get_user_u64 (swcr, arg2)) {
7931 goto efault;
7932 }
7933 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7934 fpcr = orig_fpcr & FPCR_DYN_MASK;
7935
7936 /* Copied from linux ieee_swcr_to_fpcr. */
7937 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7938 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7939 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7940 | SWCR_TRAP_ENABLE_DZE
7941 | SWCR_TRAP_ENABLE_OVF)) << 48;
7942 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7943 | SWCR_TRAP_ENABLE_INE)) << 57;
7944 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7945 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7946
7947 cpu_alpha_store_fpcr(cpu_env, fpcr);
7948 ret = 0;
7949 }
7950 break;
7951
7952 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7953 {
7954 uint64_t exc, fpcr, orig_fpcr;
7955 int si_code;
7956
7957 if (get_user_u64(exc, arg2)) {
7958 goto efault;
7959 }
7960
7961 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7962
7963 /* We only add to the exception status here. */
7964 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7965
7966 cpu_alpha_store_fpcr(cpu_env, fpcr);
7967 ret = 0;
7968
7969 /* Old exceptions are not signaled. */
7970 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7971
7972 /* If any exceptions were set by this call
7973 and are unmasked, send a signal. */
7974 si_code = 0;
7975 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7976 si_code = TARGET_FPE_FLTRES;
7977 }
7978 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7979 si_code = TARGET_FPE_FLTUND;
7980 }
7981 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7982 si_code = TARGET_FPE_FLTOVF;
7983 }
7984 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7985 si_code = TARGET_FPE_FLTDIV;
7986 }
7987 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7988 si_code = TARGET_FPE_FLTINV;
7989 }
7990 if (si_code != 0) {
7991 target_siginfo_t info;
7992 info.si_signo = SIGFPE;
7993 info.si_errno = 0;
7994 info.si_code = si_code;
7995 info._sifields._sigfault._addr
7996 = ((CPUArchState *)cpu_env)->pc;
7997 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7998 }
7999 }
8000 break;
8001
8002 /* case SSI_NVPAIRS:
8003 -- Used with SSIN_UACPROC to enable unaligned accesses.
8004 case SSI_IEEE_STATE_AT_SIGNAL:
8005 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8006 -- Not implemented in linux kernel
8007 */
8008 }
8009 break;
8010 #endif
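/* osf_sigprocmask behaves like sigprocmask, except that the old
 * signal mask is returned as the syscall result instead of being
 * written through a pointer.
 */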
8011 #ifdef TARGET_NR_osf_sigprocmask
8012 /* Alpha specific. */
8013 case TARGET_NR_osf_sigprocmask:
8014 {
8015 abi_ulong mask;
8016 int how;
8017 sigset_t set, oldset;
8018
8019 switch(arg1) {
8020 case TARGET_SIG_BLOCK:
8021 how = SIG_BLOCK;
8022 break;
8023 case TARGET_SIG_UNBLOCK:
8024 how = SIG_UNBLOCK;
8025 break;
8026 case TARGET_SIG_SETMASK:
8027 how = SIG_SETMASK;
8028 break;
8029 default:
8030 ret = -TARGET_EINVAL;
8031 goto fail;
8032 }
8033 mask = arg2;
8034 target_to_host_old_sigset(&set, &mask);
8035 sigprocmask(how, &set, &oldset);
8036 host_to_target_old_sigset(&mask, &oldset);
8037 ret = mask;
8038 }
8039 break;
8040 #endif
8041
8042 #ifdef TARGET_NR_getgid32
8043 case TARGET_NR_getgid32:
8044 ret = get_errno(getgid());
8045 break;
8046 #endif
8047 #ifdef TARGET_NR_geteuid32
8048 case TARGET_NR_geteuid32:
8049 ret = get_errno(geteuid());
8050 break;
8051 #endif
8052 #ifdef TARGET_NR_getegid32
8053 case TARGET_NR_getegid32:
8054 ret = get_errno(getegid());
8055 break;
8056 #endif
8057 #ifdef TARGET_NR_setreuid32
8058 case TARGET_NR_setreuid32:
8059 ret = get_errno(setreuid(arg1, arg2));
8060 break;
8061 #endif
8062 #ifdef TARGET_NR_setregid32
8063 case TARGET_NR_setregid32:
8064 ret = get_errno(setregid(arg1, arg2));
8065 break;
8066 #endif
8067 #ifdef TARGET_NR_getgroups32
8068 case TARGET_NR_getgroups32:
8069 {
8070 int gidsetsize = arg1;
8071 uint32_t *target_grouplist;
8072 gid_t *grouplist;
8073 int i;
8074
8075 grouplist = alloca(gidsetsize * sizeof(gid_t));
8076 ret = get_errno(getgroups(gidsetsize, grouplist));
8077 if (gidsetsize == 0)
8078 break;
8079 if (!is_error(ret)) {
8080 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8081 if (!target_grouplist) {
8082 ret = -TARGET_EFAULT;
8083 goto fail;
8084 }
8085 for(i = 0;i < ret; i++)
8086 target_grouplist[i] = tswap32(grouplist[i]);
8087 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8088 }
8089 }
8090 break;
8091 #endif
8092 #ifdef TARGET_NR_setgroups32
8093 case TARGET_NR_setgroups32:
8094 {
8095 int gidsetsize = arg1;
8096 uint32_t *target_grouplist;
8097 gid_t *grouplist;
8098 int i;
8099
8100 grouplist = alloca(gidsetsize * sizeof(gid_t));
8101 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8102 if (!target_grouplist) {
8103 ret = -TARGET_EFAULT;
8104 goto fail;
8105 }
8106 for(i = 0;i < gidsetsize; i++)
8107 grouplist[i] = tswap32(target_grouplist[i]);
8108 unlock_user(target_grouplist, arg2, 0);
8109 ret = get_errno(setgroups(gidsetsize, grouplist));
8110 }
8111 break;
8112 #endif
8113 #ifdef TARGET_NR_fchown32
8114 case TARGET_NR_fchown32:
8115 ret = get_errno(fchown(arg1, arg2, arg3));
8116 break;
8117 #endif
8118 #ifdef TARGET_NR_setresuid32
8119 case TARGET_NR_setresuid32:
8120 ret = get_errno(setresuid(arg1, arg2, arg3));
8121 break;
8122 #endif
8123 #ifdef TARGET_NR_getresuid32
8124 case TARGET_NR_getresuid32:
8125 {
8126 uid_t ruid, euid, suid;
8127 ret = get_errno(getresuid(&ruid, &euid, &suid));
8128 if (!is_error(ret)) {
8129 if (put_user_u32(ruid, arg1)
8130 || put_user_u32(euid, arg2)
8131 || put_user_u32(suid, arg3))
8132 goto efault;
8133 }
8134 }
8135 break;
8136 #endif
8137 #ifdef TARGET_NR_setresgid32
8138 case TARGET_NR_setresgid32:
8139 ret = get_errno(setresgid(arg1, arg2, arg3));
8140 break;
8141 #endif
8142 #ifdef TARGET_NR_getresgid32
8143 case TARGET_NR_getresgid32:
8144 {
8145 gid_t rgid, egid, sgid;
8146 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8147 if (!is_error(ret)) {
8148 if (put_user_u32(rgid, arg1)
8149 || put_user_u32(egid, arg2)
8150 || put_user_u32(sgid, arg3))
8151 goto efault;
8152 }
8153 }
8154 break;
8155 #endif
8156 #ifdef TARGET_NR_chown32
8157 case TARGET_NR_chown32:
8158 if (!(p = lock_user_string(arg1)))
8159 goto efault;
8160 ret = get_errno(chown(p, arg2, arg3));
8161 unlock_user(p, arg1, 0);
8162 break;
8163 #endif
8164 #ifdef TARGET_NR_setuid32
8165 case TARGET_NR_setuid32:
8166 ret = get_errno(setuid(arg1));
8167 break;
8168 #endif
8169 #ifdef TARGET_NR_setgid32
8170 case TARGET_NR_setgid32:
8171 ret = get_errno(setgid(arg1));
8172 break;
8173 #endif
8174 #ifdef TARGET_NR_setfsuid32
8175 case TARGET_NR_setfsuid32:
8176 ret = get_errno(setfsuid(arg1));
8177 break;
8178 #endif
8179 #ifdef TARGET_NR_setfsgid32
8180 case TARGET_NR_setfsgid32:
8181 ret = get_errno(setfsgid(arg1));
8182 break;
8183 #endif
8184
8185 case TARGET_NR_pivot_root:
8186 goto unimplemented;
8187 #ifdef TARGET_NR_mincore
8188 case TARGET_NR_mincore:
8189 {
8190 void *a;
8191 ret = -TARGET_EFAULT;
8192 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8193 goto efault;
8194 if (!(p = lock_user_string(arg3)))
8195 goto mincore_fail;
8196 ret = get_errno(mincore(a, arg2, p));
8197 unlock_user(p, arg3, ret);
8198 mincore_fail:
8199 unlock_user(a, arg1, 0);
8200 }
8201 break;
8202 #endif
8203 #ifdef TARGET_NR_arm_fadvise64_64
8204 case TARGET_NR_arm_fadvise64_64:
8205 {
8206 /*
8207 * arm_fadvise64_64 looks like fadvise64_64 but
8208 * with a different argument order
8209 */
8210 abi_long temp;
8211 temp = arg3;
8212 arg3 = arg4;
8213 arg4 = temp;
8214 }
8215 #endif
8216 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8217 #ifdef TARGET_NR_fadvise64_64
8218 case TARGET_NR_fadvise64_64:
8219 #endif
8220 #ifdef TARGET_NR_fadvise64
8221 case TARGET_NR_fadvise64:
8222 #endif
8223 #ifdef TARGET_S390X
8224 switch (arg4) {
8225 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8226 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8227 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8228 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8229 default: break;
8230 }
8231 #endif
8232 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
8233 break;
8234 #endif
8235 #ifdef TARGET_NR_madvise
8236 case TARGET_NR_madvise:
8237 /* A straight passthrough may not be safe because qemu sometimes
8238 turns private file-backed mappings into anonymous mappings.
8239 This will break MADV_DONTNEED.
8240 This is a hint, so ignoring and returning success is ok. */
8241 ret = get_errno(0);
8242 break;
8243 #endif
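/* fcntl64 on 32-bit targets: the guest's struct flock64 is converted
 * to the host layout and back. ARM EABI needs its own conversion
 * because its 64-bit members are 8-byte aligned, which gives the
 * structure a different layout from the old ABI.
 */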
8244 #if TARGET_ABI_BITS == 32
8245 case TARGET_NR_fcntl64:
8246 {
8247 int cmd;
8248 struct flock64 fl;
8249 struct target_flock64 *target_fl;
8250 #ifdef TARGET_ARM
8251 struct target_eabi_flock64 *target_efl;
8252 #endif
8253
8254 cmd = target_to_host_fcntl_cmd(arg2);
8255 if (cmd == -TARGET_EINVAL) {
8256 ret = cmd;
8257 break;
8258 }
8259
8260 switch(arg2) {
8261 case TARGET_F_GETLK64:
8262 #ifdef TARGET_ARM
8263 if (((CPUARMState *)cpu_env)->eabi) {
8264 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8265 goto efault;
8266 fl.l_type = tswap16(target_efl->l_type);
8267 fl.l_whence = tswap16(target_efl->l_whence);
8268 fl.l_start = tswap64(target_efl->l_start);
8269 fl.l_len = tswap64(target_efl->l_len);
8270 fl.l_pid = tswap32(target_efl->l_pid);
8271 unlock_user_struct(target_efl, arg3, 0);
8272 } else
8273 #endif
8274 {
8275 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8276 goto efault;
8277 fl.l_type = tswap16(target_fl->l_type);
8278 fl.l_whence = tswap16(target_fl->l_whence);
8279 fl.l_start = tswap64(target_fl->l_start);
8280 fl.l_len = tswap64(target_fl->l_len);
8281 fl.l_pid = tswap32(target_fl->l_pid);
8282 unlock_user_struct(target_fl, arg3, 0);
8283 }
8284 ret = get_errno(fcntl(arg1, cmd, &fl));
8285 if (ret == 0) {
8286 #ifdef TARGET_ARM
8287 if (((CPUARMState *)cpu_env)->eabi) {
8288 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8289 goto efault;
8290 target_efl->l_type = tswap16(fl.l_type);
8291 target_efl->l_whence = tswap16(fl.l_whence);
8292 target_efl->l_start = tswap64(fl.l_start);
8293 target_efl->l_len = tswap64(fl.l_len);
8294 target_efl->l_pid = tswap32(fl.l_pid);
8295 unlock_user_struct(target_efl, arg3, 1);
8296 } else
8297 #endif
8298 {
8299 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8300 goto efault;
8301 target_fl->l_type = tswap16(fl.l_type);
8302 target_fl->l_whence = tswap16(fl.l_whence);
8303 target_fl->l_start = tswap64(fl.l_start);
8304 target_fl->l_len = tswap64(fl.l_len);
8305 target_fl->l_pid = tswap32(fl.l_pid);
8306 unlock_user_struct(target_fl, arg3, 1);
8307 }
8308 }
8309 break;
8310
8311 case TARGET_F_SETLK64:
8312 case TARGET_F_SETLKW64:
8313 #ifdef TARGET_ARM
8314 if (((CPUARMState *)cpu_env)->eabi) {
8315 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8316 goto efault;
8317 fl.l_type = tswap16(target_efl->l_type);
8318 fl.l_whence = tswap16(target_efl->l_whence);
8319 fl.l_start = tswap64(target_efl->l_start);
8320 fl.l_len = tswap64(target_efl->l_len);
8321 fl.l_pid = tswap32(target_efl->l_pid);
8322 unlock_user_struct(target_efl, arg3, 0);
8323 } else
8324 #endif
8325 {
8326 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8327 goto efault;
8328 fl.l_type = tswap16(target_fl->l_type);
8329 fl.l_whence = tswap16(target_fl->l_whence);
8330 fl.l_start = tswap64(target_fl->l_start);
8331 fl.l_len = tswap64(target_fl->l_len);
8332 fl.l_pid = tswap32(target_fl->l_pid);
8333 unlock_user_struct(target_fl, arg3, 0);
8334 }
8335 ret = get_errno(fcntl(arg1, cmd, &fl));
8336 break;
8337 default:
8338 ret = do_fcntl(arg1, arg2, arg3);
8339 break;
8340 }
8341 break;
8342 }
8343 #endif
8344 #ifdef TARGET_NR_cacheflush
8345 case TARGET_NR_cacheflush:
8346 /* self-modifying code is handled automatically, so nothing needed */
8347 ret = 0;
8348 break;
8349 #endif
8350 #ifdef TARGET_NR_security
8351 case TARGET_NR_security:
8352 goto unimplemented;
8353 #endif
8354 #ifdef TARGET_NR_getpagesize
8355 case TARGET_NR_getpagesize:
8356 ret = TARGET_PAGE_SIZE;
8357 break;
8358 #endif
8359 case TARGET_NR_gettid:
8360 ret = get_errno(gettid());
8361 break;
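/* readahead: on 32-bit ABIs the 64-bit offset arrives split across
 * two registers; regpairs_aligned() detects ABIs that require such
 * pairs to start on an even register, which shifts the remaining
 * arguments along by one.
 */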
8362 #ifdef TARGET_NR_readahead
8363 case TARGET_NR_readahead:
8364 #if TARGET_ABI_BITS == 32
8365 if (regpairs_aligned(cpu_env)) {
8366 arg2 = arg3;
8367 arg3 = arg4;
8368 arg4 = arg5;
8369 }
8370 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8371 #else
8372 ret = get_errno(readahead(arg1, arg2, arg3));
8373 #endif
8374 break;
8375 #endif
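/* Extended attribute syscalls: guest name/value buffers are locked
 * into host memory, the host *xattr call is made, and the buffers
 * are unlocked (copying results back where needed). A zero guest
 * pointer is passed through as NULL so size-query calls still work.
 */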
8376 #ifdef CONFIG_ATTR
8377 #ifdef TARGET_NR_setxattr
8378 case TARGET_NR_listxattr:
8379 case TARGET_NR_llistxattr:
8380 {
8381 void *p, *b = 0;
8382 if (arg2) {
8383 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8384 if (!b) {
8385 ret = -TARGET_EFAULT;
8386 break;
8387 }
8388 }
8389 p = lock_user_string(arg1);
8390 if (p) {
8391 if (num == TARGET_NR_listxattr) {
8392 ret = get_errno(listxattr(p, b, arg3));
8393 } else {
8394 ret = get_errno(llistxattr(p, b, arg3));
8395 }
8396 } else {
8397 ret = -TARGET_EFAULT;
8398 }
8399 unlock_user(p, arg1, 0);
8400 unlock_user(b, arg2, arg3);
8401 break;
8402 }
8403 case TARGET_NR_flistxattr:
8404 {
8405 void *b = 0;
8406 if (arg2) {
8407 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8408 if (!b) {
8409 ret = -TARGET_EFAULT;
8410 break;
8411 }
8412 }
8413 ret = get_errno(flistxattr(arg1, b, arg3));
8414 unlock_user(b, arg2, arg3);
8415 break;
8416 }
8417 case TARGET_NR_setxattr:
8418 case TARGET_NR_lsetxattr:
8419 {
8420 void *p, *n, *v = 0;
8421 if (arg3) {
8422 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8423 if (!v) {
8424 ret = -TARGET_EFAULT;
8425 break;
8426 }
8427 }
8428 p = lock_user_string(arg1);
8429 n = lock_user_string(arg2);
8430 if (p && n) {
8431 if (num == TARGET_NR_setxattr) {
8432 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8433 } else {
8434 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8435 }
8436 } else {
8437 ret = -TARGET_EFAULT;
8438 }
8439 unlock_user(p, arg1, 0);
8440 unlock_user(n, arg2, 0);
8441 unlock_user(v, arg3, 0);
8442 }
8443 break;
8444 case TARGET_NR_fsetxattr:
8445 {
8446 void *n, *v = 0;
8447 if (arg3) {
8448 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8449 if (!v) {
8450 ret = -TARGET_EFAULT;
8451 break;
8452 }
8453 }
8454 n = lock_user_string(arg2);
8455 if (n) {
8456 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8457 } else {
8458 ret = -TARGET_EFAULT;
8459 }
8460 unlock_user(n, arg2, 0);
8461 unlock_user(v, arg3, 0);
8462 }
8463 break;
8464 case TARGET_NR_getxattr:
8465 case TARGET_NR_lgetxattr:
8466 {
8467 void *p, *n, *v = 0;
8468 if (arg3) {
8469 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8470 if (!v) {
8471 ret = -TARGET_EFAULT;
8472 break;
8473 }
8474 }
8475 p = lock_user_string(arg1);
8476 n = lock_user_string(arg2);
8477 if (p && n) {
8478 if (num == TARGET_NR_getxattr) {
8479 ret = get_errno(getxattr(p, n, v, arg4));
8480 } else {
8481 ret = get_errno(lgetxattr(p, n, v, arg4));
8482 }
8483 } else {
8484 ret = -TARGET_EFAULT;
8485 }
8486 unlock_user(p, arg1, 0);
8487 unlock_user(n, arg2, 0);
8488 unlock_user(v, arg3, arg4);
8489 }
8490 break;
8491 case TARGET_NR_fgetxattr:
8492 {
8493 void *n, *v = 0;
8494 if (arg3) {
8495 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8496 if (!v) {
8497 ret = -TARGET_EFAULT;
8498 break;
8499 }
8500 }
8501 n = lock_user_string(arg2);
8502 if (n) {
8503 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8504 } else {
8505 ret = -TARGET_EFAULT;
8506 }
8507 unlock_user(n, arg2, 0);
8508 unlock_user(v, arg3, arg4);
8509 }
8510 break;
8511 case TARGET_NR_removexattr:
8512 case TARGET_NR_lremovexattr:
8513 {
8514 void *p, *n;
8515 p = lock_user_string(arg1);
8516 n = lock_user_string(arg2);
8517 if (p && n) {
8518 if (num == TARGET_NR_removexattr) {
8519 ret = get_errno(removexattr(p, n));
8520 } else {
8521 ret = get_errno(lremovexattr(p, n));
8522 }
8523 } else {
8524 ret = -TARGET_EFAULT;
8525 }
8526 unlock_user(p, arg1, 0);
8527 unlock_user(n, arg2, 0);
8528 }
8529 break;
8530 case TARGET_NR_fremovexattr:
8531 {
8532 void *n;
8533 n = lock_user_string(arg2);
8534 if (n) {
8535 ret = get_errno(fremovexattr(arg1, n));
8536 } else {
8537 ret = -TARGET_EFAULT;
8538 }
8539 unlock_user(n, arg2, 0);
8540 }
8541 break;
8542 #endif
8543 #endif /* CONFIG_ATTR */
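/* set/get_thread_area: the TLS pointer is stored in a per-architecture
 * location (the MIPS tls_value field, the CRIS PR_PID register, an
 * i386 GDT entry managed by do_set_thread_area(), or the m68k
 * TaskState tp_value).
 */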
8544 #ifdef TARGET_NR_set_thread_area
8545 case TARGET_NR_set_thread_area:
8546 #if defined(TARGET_MIPS)
8547 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8548 ret = 0;
8549 break;
8550 #elif defined(TARGET_CRIS)
8551 if (arg1 & 0xff)
8552 ret = -TARGET_EINVAL;
8553 else {
8554 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8555 ret = 0;
8556 }
8557 break;
8558 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8559 ret = do_set_thread_area(cpu_env, arg1);
8560 break;
8561 #elif defined(TARGET_M68K)
8562 {
8563 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8564 ts->tp_value = arg1;
8565 break;
8566 }
8567 #else
8568 goto unimplemented_nowarn;
8569 #endif
8570 #endif
8571 #ifdef TARGET_NR_get_thread_area
8572 case TARGET_NR_get_thread_area:
8573 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8574 ret = do_get_thread_area(cpu_env, arg1);
8575 break;
8576 #elif defined(TARGET_M68K)
8577 {
8578 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8579 ret = ts->tp_value;
8580 break;
8581 }
8582 #else
8583 goto unimplemented_nowarn;
8584 #endif
8585 #endif
8586 #ifdef TARGET_NR_getdomainname
8587 case TARGET_NR_getdomainname:
8588 goto unimplemented_nowarn;
8589 #endif
8590
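/* The clock_* syscalls convert struct timespec between guest and host
 * layouts with target_to_host_timespec()/host_to_target_timespec().
 */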
8591 #ifdef TARGET_NR_clock_gettime
8592 case TARGET_NR_clock_gettime:
8593 {
8594 struct timespec ts;
8595 ret = get_errno(clock_gettime(arg1, &ts));
8596 if (!is_error(ret)) {
8597 host_to_target_timespec(arg2, &ts);
8598 }
8599 break;
8600 }
8601 #endif
8602 #ifdef TARGET_NR_clock_getres
8603 case TARGET_NR_clock_getres:
8604 {
8605 struct timespec ts;
8606 ret = get_errno(clock_getres(arg1, &ts));
8607 if (!is_error(ret)) {
8608 host_to_target_timespec(arg2, &ts);
8609 }
8610 break;
8611 }
8612 #endif
8613 #ifdef TARGET_NR_clock_nanosleep
8614 case TARGET_NR_clock_nanosleep:
8615 {
8616 struct timespec ts;
8617 target_to_host_timespec(&ts, arg3);
8618 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8619 if (arg4)
8620 host_to_target_timespec(arg4, &ts);
8621 break;
8622 }
8623 #endif
8624
8625 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8626 case TARGET_NR_set_tid_address:
8627 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8628 break;
8629 #endif
8630
8631 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8632 case TARGET_NR_tkill:
8633 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8634 break;
8635 #endif
8636
8637 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8638 case TARGET_NR_tgkill:
8639 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8640 target_to_host_signal(arg3)));
8641 break;
8642 #endif
8643
8644 #ifdef TARGET_NR_set_robust_list
8645 case TARGET_NR_set_robust_list:
8646 case TARGET_NR_get_robust_list:
8647 /* The ABI for supporting robust futexes has userspace pass
8648 * the kernel a pointer to a linked list which is updated by
8649 * userspace after the syscall; the list is walked by the kernel
8650 * when the thread exits. Since the linked list in QEMU guest
8651 * memory isn't a valid linked list for the host and we have
8652 * no way to reliably intercept the thread-death event, we can't
8653 * support these. Silently return ENOSYS so that guest userspace
8654 * falls back to a non-robust futex implementation (which should
8655 * be OK except in the corner case of the guest crashing while
8656 * holding a mutex that is shared with another process via
8657 * shared memory).
8658 */
8659 goto unimplemented_nowarn;
8660 #endif
8661
8662 #if defined(TARGET_NR_utimensat)
8663 case TARGET_NR_utimensat:
8664 {
8665 struct timespec *tsp, ts[2];
8666 if (!arg3) {
8667 tsp = NULL;
8668 } else {
8669 target_to_host_timespec(ts, arg3);
8670 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8671 tsp = ts;
8672 }
8673 if (!arg2)
8674 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8675 else {
8676 if (!(p = lock_user_string(arg2))) {
8677 ret = -TARGET_EFAULT;
8678 goto fail;
8679 }
8680 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8681 unlock_user(p, arg2, 0);
8682 }
8683 }
8684 break;
8685 #endif
8686 #if defined(CONFIG_USE_NPTL)
8687 case TARGET_NR_futex:
8688 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8689 break;
8690 #endif
8691 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8692 case TARGET_NR_inotify_init:
8693 ret = get_errno(sys_inotify_init());
8694 break;
8695 #endif
8696 #ifdef CONFIG_INOTIFY1
8697 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8698 case TARGET_NR_inotify_init1:
8699 ret = get_errno(sys_inotify_init1(arg1));
8700 break;
8701 #endif
8702 #endif
8703 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8704 case TARGET_NR_inotify_add_watch:
8705 p = lock_user_string(arg2);
8706 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8707 unlock_user(p, arg2, 0);
8708 break;
8709 #endif
8710 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8711 case TARGET_NR_inotify_rm_watch:
8712 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8713 break;
8714 #endif
8715
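/* POSIX message queue syscalls are forwarded to the host mq_* API,
 * converting struct mq_attr and struct timespec between guest and
 * host representations on the way.
 */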
8716 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8717 case TARGET_NR_mq_open:
8718 {
8719 struct mq_attr posix_mq_attr;
8720
8721 p = lock_user_string(arg1 - 1);
8722 if (arg4 != 0)
8723 copy_from_user_mq_attr (&posix_mq_attr, arg4);
/* Only pass an attr struct to the host if the guest supplied one;
 * otherwise mq_open() would read uninitialized memory. */
8724 ret = get_errno(mq_open(p, arg2, arg3,
arg4 != 0 ? &posix_mq_attr : NULL));
8725 unlock_user (p, arg1, 0);
8726 }
8727 break;
8728
8729 case TARGET_NR_mq_unlink:
8730 p = lock_user_string(arg1 - 1);
8731 ret = get_errno(mq_unlink(p));
8732 unlock_user (p, arg1, 0);
8733 break;
8734
8735 case TARGET_NR_mq_timedsend:
8736 {
8737 struct timespec ts;
8738
8739 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8740 if (arg5 != 0) {
8741 target_to_host_timespec(&ts, arg5);
8742 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8743 host_to_target_timespec(arg5, &ts);
8744 }
8745 else
8746 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8747 unlock_user (p, arg2, arg3);
8748 }
8749 break;
8750
8751 case TARGET_NR_mq_timedreceive:
8752 {
8753 struct timespec ts;
8754 unsigned int prio;
8755
8756 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8757 if (arg5 != 0) {
8758 target_to_host_timespec(&ts, arg5);
8759 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8760 host_to_target_timespec(arg5, &ts);
8761 }
8762 else
8763 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8764 unlock_user (p, arg2, arg3);
8765 if (arg4 != 0)
8766 put_user_u32(prio, arg4);
8767 }
8768 break;
8769
8770 /* Not implemented for now... */
8771 /* case TARGET_NR_mq_notify: */
8772 /* break; */
8773
8774 case TARGET_NR_mq_getsetattr:
8775 {
8776 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8777 ret = 0;
8778 if (arg3 != 0) {
8779 ret = mq_getattr(arg1, &posix_mq_attr_out);
8780 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8781 }
8782 if (arg2 != 0) {
8783 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8784 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8785 }
8786
8787 }
8788 break;
8789 #endif
8790
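/* tee/splice/vmsplice: descriptors and lengths pass straight through;
 * splice's optional 64-bit offsets are read into host loff_t
 * variables, and vmsplice's iovec array is converted with lock_iovec().
 */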
8791 #ifdef CONFIG_SPLICE
8792 #ifdef TARGET_NR_tee
8793 case TARGET_NR_tee:
8794 {
8795 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8796 }
8797 break;
8798 #endif
8799 #ifdef TARGET_NR_splice
8800 case TARGET_NR_splice:
8801 {
8802 loff_t loff_in, loff_out;
8803 loff_t *ploff_in = NULL, *ploff_out = NULL;
8804 if(arg2) {
8805 get_user_u64(loff_in, arg2);
8806 ploff_in = &loff_in;
8807 }
8808 if(arg4) {
8809 get_user_u64(loff_out, arg4);
8810 ploff_out = &loff_out;
8811 }
8812 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8813 }
8814 break;
8815 #endif
8816 #ifdef TARGET_NR_vmsplice
8817 case TARGET_NR_vmsplice:
8818 {
8819 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8820 if (vec != NULL) {
8821 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8822 unlock_iovec(vec, arg2, arg3, 0);
8823 } else {
8824 ret = -host_to_target_errno(errno);
8825 }
8826 }
8827 break;
8828 #endif
8829 #endif /* CONFIG_SPLICE */
8830 #ifdef CONFIG_EVENTFD
8831 #if defined(TARGET_NR_eventfd)
8832 case TARGET_NR_eventfd:
8833 ret = get_errno(eventfd(arg1, 0));
8834 break;
8835 #endif
8836 #if defined(TARGET_NR_eventfd2)
8837 case TARGET_NR_eventfd2:
8838 {
8839 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8840 if (arg2 & TARGET_O_NONBLOCK) {
8841 host_flags |= O_NONBLOCK;
8842 }
8843 if (arg2 & TARGET_O_CLOEXEC) {
8844 host_flags |= O_CLOEXEC;
8845 }
8846 ret = get_errno(eventfd(arg1, host_flags));
8847 break;
8848 }
8849 #endif
8850 #endif /* CONFIG_EVENTFD */
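/* fallocate and sync_file_range take 64-bit offsets/lengths; on
 * 32-bit ABIs these arrive as register pairs and are reassembled
 * with target_offset64().
 */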
8851 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8852 case TARGET_NR_fallocate:
8853 #if TARGET_ABI_BITS == 32
8854 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8855 target_offset64(arg5, arg6)));
8856 #else
8857 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8858 #endif
8859 break;
8860 #endif
8861 #if defined(CONFIG_SYNC_FILE_RANGE)
8862 #if defined(TARGET_NR_sync_file_range)
8863 case TARGET_NR_sync_file_range:
8864 #if TARGET_ABI_BITS == 32
8865 #if defined(TARGET_MIPS)
8866 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8867 target_offset64(arg5, arg6), arg7));
8868 #else
8869 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8870 target_offset64(arg4, arg5), arg6));
8871 #endif /* !TARGET_MIPS */
8872 #else
8873 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8874 #endif
8875 break;
8876 #endif
8877 #if defined(TARGET_NR_sync_file_range2)
8878 case TARGET_NR_sync_file_range2:
8879 /* This is like sync_file_range but the arguments are reordered */
8880 #if TARGET_ABI_BITS == 32
8881 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8882 target_offset64(arg5, arg6), arg2));
8883 #else
8884 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8885 #endif
8886 break;
8887 #endif
8888 #endif
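/* epoll: struct epoll_event is converted between guest and host
 * layouts; for epoll_wait/epoll_pwait a host-side event array is
 * allocated on the stack and the results are swapped back into the
 * guest's buffer.
 */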
8889 #if defined(CONFIG_EPOLL)
8890 #if defined(TARGET_NR_epoll_create)
8891 case TARGET_NR_epoll_create:
8892 ret = get_errno(epoll_create(arg1));
8893 break;
8894 #endif
8895 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8896 case TARGET_NR_epoll_create1:
8897 ret = get_errno(epoll_create1(arg1));
8898 break;
8899 #endif
8900 #if defined(TARGET_NR_epoll_ctl)
8901 case TARGET_NR_epoll_ctl:
8902 {
8903 struct epoll_event ep;
8904 struct epoll_event *epp = 0;
8905 if (arg4) {
8906 struct target_epoll_event *target_ep;
8907 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8908 goto efault;
8909 }
8910 ep.events = tswap32(target_ep->events);
8911 /* The epoll_data_t union is just opaque data to the kernel,
8912 * so we transfer all 64 bits across and need not worry what
8913 * actual data type it is.
8914 */
8915 ep.data.u64 = tswap64(target_ep->data.u64);
8916 unlock_user_struct(target_ep, arg4, 0);
8917 epp = &ep;
8918 }
8919 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8920 break;
8921 }
8922 #endif
8923
8924 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8925 #define IMPLEMENT_EPOLL_PWAIT
8926 #endif
8927 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8928 #if defined(TARGET_NR_epoll_wait)
8929 case TARGET_NR_epoll_wait:
8930 #endif
8931 #if defined(IMPLEMENT_EPOLL_PWAIT)
8932 case TARGET_NR_epoll_pwait:
8933 #endif
8934 {
8935 struct target_epoll_event *target_ep;
8936 struct epoll_event *ep;
8937 int epfd = arg1;
8938 int maxevents = arg3;
8939 int timeout = arg4;
8940
8941 target_ep = lock_user(VERIFY_WRITE, arg2,
8942 maxevents * sizeof(struct target_epoll_event), 1);
8943 if (!target_ep) {
8944 goto efault;
8945 }
8946
8947 ep = alloca(maxevents * sizeof(struct epoll_event));
8948
8949 switch (num) {
8950 #if defined(IMPLEMENT_EPOLL_PWAIT)
8951 case TARGET_NR_epoll_pwait:
8952 {
8953 target_sigset_t *target_set;
8954 sigset_t _set, *set = &_set;
8955
8956 if (arg5) {
8957 target_set = lock_user(VERIFY_READ, arg5,
8958 sizeof(target_sigset_t), 1);
8959 if (!target_set) {
8960 unlock_user(target_ep, arg2, 0);
8961 goto efault;
8962 }
8963 target_to_host_sigset(set, target_set);
8964 unlock_user(target_set, arg5, 0);
8965 } else {
8966 set = NULL;
8967 }
8968
8969 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8970 break;
8971 }
8972 #endif
8973 #if defined(TARGET_NR_epoll_wait)
8974 case TARGET_NR_epoll_wait:
8975 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8976 break;
8977 #endif
8978 default:
8979 ret = -TARGET_ENOSYS;
8980 }
8981 if (!is_error(ret)) {
8982 int i;
8983 for (i = 0; i < ret; i++) {
8984 target_ep[i].events = tswap32(ep[i].events);
8985 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8986 }
8987 }
8988 unlock_user(target_ep, arg2,
is_error(ret) ? 0 : ret * sizeof(struct target_epoll_event));
8989 break;
8990 }
8991 #endif
8992 #endif
8993 #ifdef TARGET_NR_prlimit64
8994 case TARGET_NR_prlimit64:
8995 {
8996 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8997 struct target_rlimit64 *target_rnew, *target_rold;
8998 struct host_rlimit64 rnew, rold, *rnewp = 0;
8999 if (arg3) {
9000 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9001 goto efault;
9002 }
9003 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9004 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9005 unlock_user_struct(target_rnew, arg3, 0);
9006 rnewp = &rnew;
9007 }
9008
9009 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9010 if (!is_error(ret) && arg4) {
9011 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9012 goto efault;
9013 }
9014 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9015 target_rold->rlim_max = tswap64(rold.rlim_max);
9016 unlock_user_struct(target_rold, arg4, 1);
9017 }
9018 break;
9019 }
9020 #endif
9021 #ifdef TARGET_NR_gethostname
9022 case TARGET_NR_gethostname:
9023 {
9024 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9025 if (name) {
9026 ret = get_errno(gethostname(name, arg2));
9027 unlock_user(name, arg1, arg2);
9028 } else {
9029 ret = -TARGET_EFAULT;
9030 }
9031 break;
9032 }
9033 #endif
9034 default:
9035 unimplemented:
9036 gemu_log("qemu: Unsupported syscall: %d\n", num);
9037 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9038 unimplemented_nowarn:
9039 #endif
9040 ret = -TARGET_ENOSYS;
9041 break;
9042 }
9043 fail:
9044 #ifdef DEBUG
9045 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9046 #endif
9047 if(do_strace)
9048 print_syscall_ret(num, ret);
9049 return ret;
9050 efault:
9051 ret = -TARGET_EFAULT;
9052 goto fail;
9053 }