qemu.git: linux-user/syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu-xattr.h"
75 #endif
76
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include "linux_loop.h"
100 #include "cpu-uname.h"
101
102 #include "qemu.h"
103
104 #if defined(CONFIG_USE_NPTL)
105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
107 #else
108 /* XXX: Hardcode the above values. */
109 #define CLONE_NPTL_FLAGS2 0
110 #endif
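/*
 * These clone(2) flags cover the thread bookkeeping NPTL relies on:
 * CLONE_SETTLS installs the new thread's TLS area, CLONE_PARENT_SETTID and
 * CLONE_CHILD_SETTID store the child's TID at caller-supplied addresses,
 * and CLONE_CHILD_CLEARTID clears that TID (and wakes futex waiters) when
 * the thread exits.
 */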
111
112 //#define DEBUG
113
114 //#include <linux/msdos_fs.h>
115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
117
118
119 #undef _syscall0
120 #undef _syscall1
121 #undef _syscall2
122 #undef _syscall3
123 #undef _syscall4
124 #undef _syscall5
125 #undef _syscall6
126
127 #define _syscall0(type,name) \
128 static type name (void) \
129 { \
130 return syscall(__NR_##name); \
131 }
132
133 #define _syscall1(type,name,type1,arg1) \
134 static type name (type1 arg1) \
135 { \
136 return syscall(__NR_##name, arg1); \
137 }
138
139 #define _syscall2(type,name,type1,arg1,type2,arg2) \
140 static type name (type1 arg1,type2 arg2) \
141 { \
142 return syscall(__NR_##name, arg1, arg2); \
143 }
144
145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
146 static type name (type1 arg1,type2 arg2,type3 arg3) \
147 { \
148 return syscall(__NR_##name, arg1, arg2, arg3); \
149 }
150
151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
153 { \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
155 }
156
157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
158 type5,arg5) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
160 { \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 }
163
164
165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 type5,arg5,type6,arg6) \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
168 type6 arg6) \
169 { \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 }
172
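/*
 * The _syscallN macros above generate thin static wrappers around
 * syscall(2). For example, _syscall2(int, sys_getpriority, int, which,
 * int, who) expands to:
 *
 *     static int sys_getpriority(int which, int who)
 *     {
 *         return syscall(__NR_sys_getpriority, which, who);
 *     }
 *
 * which is why each wrapper also needs a matching __NR_sys_* define below.
 */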
173
174 #define __NR_sys_uname __NR_uname
175 #define __NR_sys_faccessat __NR_faccessat
176 #define __NR_sys_fchmodat __NR_fchmodat
177 #define __NR_sys_fchownat __NR_fchownat
178 #define __NR_sys_fstatat64 __NR_fstatat64
179 #define __NR_sys_futimesat __NR_futimesat
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_linkat __NR_linkat
185 #define __NR_sys_mkdirat __NR_mkdirat
186 #define __NR_sys_mknodat __NR_mknodat
187 #define __NR_sys_newfstatat __NR_newfstatat
188 #define __NR_sys_openat __NR_openat
189 #define __NR_sys_readlinkat __NR_readlinkat
190 #define __NR_sys_renameat __NR_renameat
191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
192 #define __NR_sys_symlinkat __NR_symlinkat
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_tgkill __NR_tgkill
195 #define __NR_sys_tkill __NR_tkill
196 #define __NR_sys_unlinkat __NR_unlinkat
197 #define __NR_sys_utimensat __NR_utimensat
198 #define __NR_sys_futex __NR_futex
199 #define __NR_sys_inotify_init __NR_inotify_init
200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
202
203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
204 defined(__s390x__)
205 #define __NR__llseek __NR_lseek
206 #endif
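/*
 * These 64-bit hosts provide no _llseek syscall; their plain lseek already
 * takes a 64-bit offset, so the _llseek number is aliased to it here.
 */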
207
208 #ifdef __NR_gettid
209 _syscall0(int, gettid)
210 #else
211 /* This is a replacement for the host gettid() and must return a host
212 errno. */
213 static int gettid(void) {
214 return -ENOSYS;
215 }
216 #endif
217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #endif
221 _syscall2(int, sys_getpriority, int, which, int, who);
222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
223 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
224 loff_t *, res, uint, wh);
225 #endif
226 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
227 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
229 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
230 #endif
231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
232 _syscall2(int,sys_tkill,int,tid,int,sig)
233 #endif
234 #ifdef __NR_exit_group
235 _syscall1(int,exit_group,int,error_code)
236 #endif
237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
238 _syscall1(int,set_tid_address,int *,tidptr)
239 #endif
240 #if defined(CONFIG_USE_NPTL)
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
243 const struct timespec *,timeout,int *,uaddr2,int,val3)
244 #endif
245 #endif
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
253 void *, arg);
254
255 static bitmask_transtbl fcntl_flags_tbl[] = {
256 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
257 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
258 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
259 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
260 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
261 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
262 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
263 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
264 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
265 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
266 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
267 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
268 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
271 #endif
272 { 0, 0, 0, 0 }
273 };
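/*
 * Each row of this table pairs a (target mask, target value) with the
 * corresponding (host mask, host value); the all-zero row terminates it.
 * The generic bitmask translation helpers (target_to_host_bitmask() and
 * its inverse) walk this table to convert open(2)/fcntl(2) flag words
 * between target and host encodings.
 */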
274
275 #define COPY_UTSNAME_FIELD(dest, src) \
276 do { \
277 /* __NEW_UTS_LEN doesn't include terminating null */ \
278 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
279 (dest)[__NEW_UTS_LEN] = '\0'; \
280 } while (0)
281
282 static int sys_uname(struct new_utsname *buf)
283 {
284 struct utsname uts_buf;
285
286 if (uname(&uts_buf) < 0)
287 return (-1);
288
289 /*
290 * Just in case these have some differences, we
291 * translate utsname to new_utsname (which is the
292  * struct the Linux kernel uses).
293 */
294
295 memset(buf, 0, sizeof(*buf));
296 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
297 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
298 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
299 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
300 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
301 #ifdef _GNU_SOURCE
302 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
303 #endif
304 return (0);
305
306 #undef COPY_UTSNAME_FIELD
307 }
308
309 static int sys_getcwd1(char *buf, size_t size)
310 {
311 if (getcwd(buf, size) == NULL) {
312 /* getcwd() sets errno */
313 return (-1);
314 }
315 return strlen(buf)+1;
316 }
317
318 #ifdef CONFIG_ATFILE
319 /*
320  * The host system provides the *at() syscall wrappers. Enable them
321  * one by one as required by the target's syscall_nr.h.
322 */
323
324 #ifdef TARGET_NR_faccessat
325 static int sys_faccessat(int dirfd, const char *pathname, int mode)
326 {
327 return (faccessat(dirfd, pathname, mode, 0));
328 }
329 #endif
330 #ifdef TARGET_NR_fchmodat
331 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
332 {
333 return (fchmodat(dirfd, pathname, mode, 0));
334 }
335 #endif
336 #if defined(TARGET_NR_fchownat)
337 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
338 gid_t group, int flags)
339 {
340 return (fchownat(dirfd, pathname, owner, group, flags));
341 }
342 #endif
343 #ifdef __NR_fstatat64
344 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
345 int flags)
346 {
347 return (fstatat(dirfd, pathname, buf, flags));
348 }
349 #endif
350 #ifdef __NR_newfstatat
351 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
352 int flags)
353 {
354 return (fstatat(dirfd, pathname, buf, flags));
355 }
356 #endif
357 #ifdef TARGET_NR_futimesat
358 static int sys_futimesat(int dirfd, const char *pathname,
359 const struct timeval times[2])
360 {
361 return (futimesat(dirfd, pathname, times));
362 }
363 #endif
364 #ifdef TARGET_NR_linkat
365 static int sys_linkat(int olddirfd, const char *oldpath,
366 int newdirfd, const char *newpath, int flags)
367 {
368 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
369 }
370 #endif
371 #ifdef TARGET_NR_mkdirat
372 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
373 {
374 return (mkdirat(dirfd, pathname, mode));
375 }
376 #endif
377 #ifdef TARGET_NR_mknodat
378 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
379 dev_t dev)
380 {
381 return (mknodat(dirfd, pathname, mode, dev));
382 }
383 #endif
384 #ifdef TARGET_NR_openat
385 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
386 {
387 /*
388  * open(2) takes an extra 'mode' parameter when it is called
389  * with the O_CREAT flag.
390 */
391 if ((flags & O_CREAT) != 0) {
392 return (openat(dirfd, pathname, flags, mode));
393 }
394 return (openat(dirfd, pathname, flags));
395 }
396 #endif
397 #ifdef TARGET_NR_readlinkat
398 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
399 {
400 return (readlinkat(dirfd, pathname, buf, bufsiz));
401 }
402 #endif
403 #ifdef TARGET_NR_renameat
404 static int sys_renameat(int olddirfd, const char *oldpath,
405 int newdirfd, const char *newpath)
406 {
407 return (renameat(olddirfd, oldpath, newdirfd, newpath));
408 }
409 #endif
410 #ifdef TARGET_NR_symlinkat
411 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
412 {
413 return (symlinkat(oldpath, newdirfd, newpath));
414 }
415 #endif
416 #ifdef TARGET_NR_unlinkat
417 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
418 {
419 return (unlinkat(dirfd, pathname, flags));
420 }
421 #endif
422 #else /* !CONFIG_ATFILE */
423
424 /*
425 * Try direct syscalls instead
426 */
427 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
428 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
429 #endif
430 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
431 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
432 #endif
433 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
434 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
435 uid_t,owner,gid_t,group,int,flags)
436 #endif
437 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
438 defined(__NR_fstatat64)
439 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
440 struct stat *,buf,int,flags)
441 #endif
442 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
443 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
444 const struct timeval *,times)
445 #endif
446 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
447 defined(__NR_newfstatat)
448 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
449 struct stat *,buf,int,flags)
450 #endif
451 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
452 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
453 int,newdirfd,const char *,newpath,int,flags)
454 #endif
455 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
456 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
457 #endif
458 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
459 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
460 mode_t,mode,dev_t,dev)
461 #endif
462 #if defined(TARGET_NR_openat) && defined(__NR_openat)
463 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
464 #endif
465 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
466 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
467 char *,buf,size_t,bufsize)
468 #endif
469 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
470 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
471 int,newdirfd,const char *,newpath)
472 #endif
473 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
474 _syscall3(int,sys_symlinkat,const char *,oldpath,
475 int,newdirfd,const char *,newpath)
476 #endif
477 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
478 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
479 #endif
480
481 #endif /* CONFIG_ATFILE */
482
483 #ifdef CONFIG_UTIMENSAT
484 static int sys_utimensat(int dirfd, const char *pathname,
485 const struct timespec times[2], int flags)
486 {
487 if (pathname == NULL)
488 return futimens(dirfd, times);
489 else
490 return utimensat(dirfd, pathname, times, flags);
491 }
492 #else
493 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
494 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
495 const struct timespec *,tsp,int,flags)
496 #endif
497 #endif /* CONFIG_UTIMENSAT */
498
499 #ifdef CONFIG_INOTIFY
500 #include <sys/inotify.h>
501
502 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
503 static int sys_inotify_init(void)
504 {
505 return (inotify_init());
506 }
507 #endif
508 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
509 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
510 {
511 return (inotify_add_watch(fd, pathname, mask));
512 }
513 #endif
514 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
515 static int sys_inotify_rm_watch(int fd, int32_t wd)
516 {
517 return (inotify_rm_watch(fd, wd));
518 }
519 #endif
520 #ifdef CONFIG_INOTIFY1
521 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
522 static int sys_inotify_init1(int flags)
523 {
524 return (inotify_init1(flags));
525 }
526 #endif
527 #endif
528 #else
529 /* Userspace can usually get by at runtime without inotify */
530 #undef TARGET_NR_inotify_init
531 #undef TARGET_NR_inotify_init1
532 #undef TARGET_NR_inotify_add_watch
533 #undef TARGET_NR_inotify_rm_watch
534 #endif /* CONFIG_INOTIFY */
535
536 #if defined(TARGET_NR_ppoll)
537 #ifndef __NR_ppoll
538 # define __NR_ppoll -1
539 #endif
540 #define __NR_sys_ppoll __NR_ppoll
541 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
542 struct timespec *, timeout, const __sigset_t *, sigmask,
543 size_t, sigsetsize)
544 #endif
545
546 #if defined(TARGET_NR_pselect6)
547 #ifndef __NR_pselect6
548 # define __NR_pselect6 -1
549 #endif
550 #define __NR_sys_pselect6 __NR_pselect6
551 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
552 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
553 #endif
554
555 #if defined(TARGET_NR_prlimit64)
556 #ifndef __NR_prlimit64
557 # define __NR_prlimit64 -1
558 #endif
559 #define __NR_sys_prlimit64 __NR_prlimit64
560 /* The glibc rlimit structure may not match the one used by the underlying syscall */
561 struct host_rlimit64 {
562 uint64_t rlim_cur;
563 uint64_t rlim_max;
564 };
565 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
566 const struct host_rlimit64 *, new_limit,
567 struct host_rlimit64 *, old_limit)
568 #endif
569
570 extern int personality(int);
571 extern int flock(int, int);
572 extern int setfsuid(int);
573 extern int setfsgid(int);
574 extern int setgroups(int, gid_t *);
575
576 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
577 #ifdef TARGET_ARM
578 static inline int regpairs_aligned(void *cpu_env) {
579 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
580 }
581 #elif defined(TARGET_MIPS)
582 static inline int regpairs_aligned(void *cpu_env) { return 1; }
583 #else
584 static inline int regpairs_aligned(void *cpu_env) { return 0; }
585 #endif
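/*
 * Example: on ARM EABI a 64-bit syscall argument must occupy an even/odd
 * register pair (r0/r1, r2/r3, ...), so calls such as pread64() carry a
 * padding argument before the 64-bit offset. regpairs_aligned() tells the
 * argument-unpacking code below whether to expect that padding.
 */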
586
587 #define ERRNO_TABLE_SIZE 1200
588
589 /* target_to_host_errno_table[] is initialized from
590 * host_to_target_errno_table[] in syscall_init(). */
591 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
592 };
593
594 /*
595 * This list is the union of errno values overridden in asm-<arch>/errno.h
596 * minus the errnos that are not actually generic to all archs.
597 */
598 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
599 [EIDRM] = TARGET_EIDRM,
600 [ECHRNG] = TARGET_ECHRNG,
601 [EL2NSYNC] = TARGET_EL2NSYNC,
602 [EL3HLT] = TARGET_EL3HLT,
603 [EL3RST] = TARGET_EL3RST,
604 [ELNRNG] = TARGET_ELNRNG,
605 [EUNATCH] = TARGET_EUNATCH,
606 [ENOCSI] = TARGET_ENOCSI,
607 [EL2HLT] = TARGET_EL2HLT,
608 [EDEADLK] = TARGET_EDEADLK,
609 [ENOLCK] = TARGET_ENOLCK,
610 [EBADE] = TARGET_EBADE,
611 [EBADR] = TARGET_EBADR,
612 [EXFULL] = TARGET_EXFULL,
613 [ENOANO] = TARGET_ENOANO,
614 [EBADRQC] = TARGET_EBADRQC,
615 [EBADSLT] = TARGET_EBADSLT,
616 [EBFONT] = TARGET_EBFONT,
617 [ENOSTR] = TARGET_ENOSTR,
618 [ENODATA] = TARGET_ENODATA,
619 [ETIME] = TARGET_ETIME,
620 [ENOSR] = TARGET_ENOSR,
621 [ENONET] = TARGET_ENONET,
622 [ENOPKG] = TARGET_ENOPKG,
623 [EREMOTE] = TARGET_EREMOTE,
624 [ENOLINK] = TARGET_ENOLINK,
625 [EADV] = TARGET_EADV,
626 [ESRMNT] = TARGET_ESRMNT,
627 [ECOMM] = TARGET_ECOMM,
628 [EPROTO] = TARGET_EPROTO,
629 [EDOTDOT] = TARGET_EDOTDOT,
630 [EMULTIHOP] = TARGET_EMULTIHOP,
631 [EBADMSG] = TARGET_EBADMSG,
632 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
633 [EOVERFLOW] = TARGET_EOVERFLOW,
634 [ENOTUNIQ] = TARGET_ENOTUNIQ,
635 [EBADFD] = TARGET_EBADFD,
636 [EREMCHG] = TARGET_EREMCHG,
637 [ELIBACC] = TARGET_ELIBACC,
638 [ELIBBAD] = TARGET_ELIBBAD,
639 [ELIBSCN] = TARGET_ELIBSCN,
640 [ELIBMAX] = TARGET_ELIBMAX,
641 [ELIBEXEC] = TARGET_ELIBEXEC,
642 [EILSEQ] = TARGET_EILSEQ,
643 [ENOSYS] = TARGET_ENOSYS,
644 [ELOOP] = TARGET_ELOOP,
645 [ERESTART] = TARGET_ERESTART,
646 [ESTRPIPE] = TARGET_ESTRPIPE,
647 [ENOTEMPTY] = TARGET_ENOTEMPTY,
648 [EUSERS] = TARGET_EUSERS,
649 [ENOTSOCK] = TARGET_ENOTSOCK,
650 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
651 [EMSGSIZE] = TARGET_EMSGSIZE,
652 [EPROTOTYPE] = TARGET_EPROTOTYPE,
653 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
654 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
655 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
656 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
657 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
658 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
659 [EADDRINUSE] = TARGET_EADDRINUSE,
660 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
661 [ENETDOWN] = TARGET_ENETDOWN,
662 [ENETUNREACH] = TARGET_ENETUNREACH,
663 [ENETRESET] = TARGET_ENETRESET,
664 [ECONNABORTED] = TARGET_ECONNABORTED,
665 [ECONNRESET] = TARGET_ECONNRESET,
666 [ENOBUFS] = TARGET_ENOBUFS,
667 [EISCONN] = TARGET_EISCONN,
668 [ENOTCONN] = TARGET_ENOTCONN,
669 [EUCLEAN] = TARGET_EUCLEAN,
670 [ENOTNAM] = TARGET_ENOTNAM,
671 [ENAVAIL] = TARGET_ENAVAIL,
672 [EISNAM] = TARGET_EISNAM,
673 [EREMOTEIO] = TARGET_EREMOTEIO,
674 [ESHUTDOWN] = TARGET_ESHUTDOWN,
675 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
676 [ETIMEDOUT] = TARGET_ETIMEDOUT,
677 [ECONNREFUSED] = TARGET_ECONNREFUSED,
678 [EHOSTDOWN] = TARGET_EHOSTDOWN,
679 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
680 [EALREADY] = TARGET_EALREADY,
681 [EINPROGRESS] = TARGET_EINPROGRESS,
682 [ESTALE] = TARGET_ESTALE,
683 [ECANCELED] = TARGET_ECANCELED,
684 [ENOMEDIUM] = TARGET_ENOMEDIUM,
685 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
686 #ifdef ENOKEY
687 [ENOKEY] = TARGET_ENOKEY,
688 #endif
689 #ifdef EKEYEXPIRED
690 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
691 #endif
692 #ifdef EKEYREVOKED
693 [EKEYREVOKED] = TARGET_EKEYREVOKED,
694 #endif
695 #ifdef EKEYREJECTED
696 [EKEYREJECTED] = TARGET_EKEYREJECTED,
697 #endif
698 #ifdef EOWNERDEAD
699 [EOWNERDEAD] = TARGET_EOWNERDEAD,
700 #endif
701 #ifdef ENOTRECOVERABLE
702 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
703 #endif
704 };
705
706 static inline int host_to_target_errno(int err)
707 {
708 if(host_to_target_errno_table[err])
709 return host_to_target_errno_table[err];
710 return err;
711 }
712
713 static inline int target_to_host_errno(int err)
714 {
715 if (target_to_host_errno_table[err])
716 return target_to_host_errno_table[err];
717 return err;
718 }
719
720 static inline abi_long get_errno(abi_long ret)
721 {
722 if (ret == -1)
723 return -host_to_target_errno(errno);
724 else
725 return ret;
726 }
727
728 static inline int is_error(abi_long ret)
729 {
730 return (abi_ulong)ret >= (abi_ulong)(-4096);
731 }
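/*
 * Convention used throughout this file: get_errno() folds the libc
 * "return -1 and set errno" style into a single return value holding
 * -TARGET_Exxx on failure, and is_error() recognises such values by
 * checking for the small negative range Linux reserves for errnos.
 */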
732
733 char *target_strerror(int err)
734 {
735 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
736 return NULL;
737 }
738 return strerror(target_to_host_errno(err));
739 }
740
741 static abi_ulong target_brk;
742 static abi_ulong target_original_brk;
743 static abi_ulong brk_page;
744
745 void target_set_brk(abi_ulong new_brk)
746 {
747 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
748 brk_page = HOST_PAGE_ALIGN(target_brk);
749 }
750
751 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
752 #define DEBUGF_BRK(message, args...)
753
754 /* do_brk() must return target values and target errnos. */
755 abi_long do_brk(abi_ulong new_brk)
756 {
757 abi_long mapped_addr;
758 int new_alloc_size;
759
760 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
761
762 if (!new_brk) {
763 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
764 return target_brk;
765 }
766 if (new_brk < target_original_brk) {
767 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
768 target_brk);
769 return target_brk;
770 }
771
772 /* If the new brk is less than the highest page reserved to the
773 * target heap allocation, set it and we're almost done... */
774 if (new_brk <= brk_page) {
775 /* Heap contents are initialized to zero, as for anonymous
776 * mapped pages. */
777 if (new_brk > target_brk) {
778 memset(g2h(target_brk), 0, new_brk - target_brk);
779 }
780 target_brk = new_brk;
781 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
782 return target_brk;
783 }
784
785 /* We need to allocate more memory after the brk... Note that
786 * we don't use MAP_FIXED because that will map over the top of
787 * any existing mapping (like the one with the host libc or qemu
788 * itself); instead we treat "mapped but at wrong address" as
789 * a failure and unmap again.
790 */
791 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
792 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
793 PROT_READ|PROT_WRITE,
794 MAP_ANON|MAP_PRIVATE, 0, 0));
795
796 if (mapped_addr == brk_page) {
797 /* Heap contents are initialized to zero, as for anonymous
798 * mapped pages. Technically the new pages are already
799 * initialized to zero since they *are* anonymous mapped
800 * pages, however we have to take care with the contents that
801 * come from the remaining part of the previous page: it may
802  * contain garbage data due to a previous heap usage (grown
803  * then shrunk). */
804 memset(g2h(target_brk), 0, brk_page - target_brk);
805
806 target_brk = new_brk;
807 brk_page = HOST_PAGE_ALIGN(target_brk);
808 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
809 target_brk);
810 return target_brk;
811 } else if (mapped_addr != -1) {
812 /* Mapped but at wrong address, meaning there wasn't actually
813 * enough space for this brk.
814 */
815 target_munmap(mapped_addr, new_alloc_size);
816 mapped_addr = -1;
817 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
818 }
819 else {
820 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
821 }
822
823 #if defined(TARGET_ALPHA)
824 /* We (partially) emulate OSF/1 on Alpha, which requires we
825 return a proper errno, not an unchanged brk value. */
826 return -TARGET_ENOMEM;
827 #endif
828 /* For everything else, return the previous break. */
829 return target_brk;
830 }
831
832 static inline abi_long copy_from_user_fdset(fd_set *fds,
833 abi_ulong target_fds_addr,
834 int n)
835 {
836 int i, nw, j, k;
837 abi_ulong b, *target_fds;
838
839 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
840 if (!(target_fds = lock_user(VERIFY_READ,
841 target_fds_addr,
842 sizeof(abi_ulong) * nw,
843 1)))
844 return -TARGET_EFAULT;
845
846 FD_ZERO(fds);
847 k = 0;
848 for (i = 0; i < nw; i++) {
849 /* grab the abi_ulong */
850 __get_user(b, &target_fds[i]);
851 for (j = 0; j < TARGET_ABI_BITS; j++) {
852 /* check the bit inside the abi_ulong */
853 if ((b >> j) & 1)
854 FD_SET(k, fds);
855 k++;
856 }
857 }
858
859 unlock_user(target_fds, target_fds_addr, 0);
860
861 return 0;
862 }
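/*
 * The target fd_set is read as an array of abi_ulong words, least
 * significant bit first: bit j of word i represents descriptor
 * i * TARGET_ABI_BITS + j. copy_to_user_fdset() below performs the
 * inverse packing.
 */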
863
864 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
865 abi_ulong target_fds_addr,
866 int n)
867 {
868 if (target_fds_addr) {
869 if (copy_from_user_fdset(fds, target_fds_addr, n))
870 return -TARGET_EFAULT;
871 *fds_ptr = fds;
872 } else {
873 *fds_ptr = NULL;
874 }
875 return 0;
876 }
877
878 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
879 const fd_set *fds,
880 int n)
881 {
882 int i, nw, j, k;
883 abi_long v;
884 abi_ulong *target_fds;
885
886 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
887 if (!(target_fds = lock_user(VERIFY_WRITE,
888 target_fds_addr,
889 sizeof(abi_ulong) * nw,
890 0)))
891 return -TARGET_EFAULT;
892
893 k = 0;
894 for (i = 0; i < nw; i++) {
895 v = 0;
896 for (j = 0; j < TARGET_ABI_BITS; j++) {
897 v |= ((FD_ISSET(k, fds) != 0) << j);
898 k++;
899 }
900 __put_user(v, &target_fds[i]);
901 }
902
903 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
904
905 return 0;
906 }
907
908 #if defined(__alpha__)
909 #define HOST_HZ 1024
910 #else
911 #define HOST_HZ 100
912 #endif
913
914 static inline abi_long host_to_target_clock_t(long ticks)
915 {
916 #if HOST_HZ == TARGET_HZ
917 return ticks;
918 #else
919 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
920 #endif
921 }
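/*
 * Worked example: on an Alpha host (HOST_HZ 1024) reporting to a 100 Hz
 * target, 2048 host ticks become 2048 * 100 / 1024 = 200 target ticks.
 */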
922
923 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
924 const struct rusage *rusage)
925 {
926 struct target_rusage *target_rusage;
927
928 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
929 return -TARGET_EFAULT;
930 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
931 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
932 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
933 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
934 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
935 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
936 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
937 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
938 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
939 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
940 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
941 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
942 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
943 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
944 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
945 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
946 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
947 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
948 unlock_user_struct(target_rusage, target_addr, 1);
949
950 return 0;
951 }
952
953 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
954 {
955 abi_ulong target_rlim_swap;
956 rlim_t result;
957
958 target_rlim_swap = tswapal(target_rlim);
959 if (target_rlim_swap == TARGET_RLIM_INFINITY)
960 return RLIM_INFINITY;
961
962 result = target_rlim_swap;
963 if (target_rlim_swap != (rlim_t)result)
964 return RLIM_INFINITY;
965
966 return result;
967 }
968
969 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
970 {
971 abi_ulong target_rlim_swap;
972 abi_ulong result;
973
974 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
975 target_rlim_swap = TARGET_RLIM_INFINITY;
976 else
977 target_rlim_swap = rlim;
978 result = tswapal(target_rlim_swap);
979
980 return result;
981 }
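/*
 * Note the asymmetry in the two rlim conversions above: a host limit that
 * does not fit in the target's abi_ulong is reported as
 * TARGET_RLIM_INFINITY rather than being silently truncated, and an
 * out-of-range target value likewise maps to the host's RLIM_INFINITY.
 */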
982
983 static inline int target_to_host_resource(int code)
984 {
985 switch (code) {
986 case TARGET_RLIMIT_AS:
987 return RLIMIT_AS;
988 case TARGET_RLIMIT_CORE:
989 return RLIMIT_CORE;
990 case TARGET_RLIMIT_CPU:
991 return RLIMIT_CPU;
992 case TARGET_RLIMIT_DATA:
993 return RLIMIT_DATA;
994 case TARGET_RLIMIT_FSIZE:
995 return RLIMIT_FSIZE;
996 case TARGET_RLIMIT_LOCKS:
997 return RLIMIT_LOCKS;
998 case TARGET_RLIMIT_MEMLOCK:
999 return RLIMIT_MEMLOCK;
1000 case TARGET_RLIMIT_MSGQUEUE:
1001 return RLIMIT_MSGQUEUE;
1002 case TARGET_RLIMIT_NICE:
1003 return RLIMIT_NICE;
1004 case TARGET_RLIMIT_NOFILE:
1005 return RLIMIT_NOFILE;
1006 case TARGET_RLIMIT_NPROC:
1007 return RLIMIT_NPROC;
1008 case TARGET_RLIMIT_RSS:
1009 return RLIMIT_RSS;
1010 case TARGET_RLIMIT_RTPRIO:
1011 return RLIMIT_RTPRIO;
1012 case TARGET_RLIMIT_SIGPENDING:
1013 return RLIMIT_SIGPENDING;
1014 case TARGET_RLIMIT_STACK:
1015 return RLIMIT_STACK;
1016 default:
1017 return code;
1018 }
1019 }
1020
1021 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1022 abi_ulong target_tv_addr)
1023 {
1024 struct target_timeval *target_tv;
1025
1026 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1027 return -TARGET_EFAULT;
1028
1029 __get_user(tv->tv_sec, &target_tv->tv_sec);
1030 __get_user(tv->tv_usec, &target_tv->tv_usec);
1031
1032 unlock_user_struct(target_tv, target_tv_addr, 0);
1033
1034 return 0;
1035 }
1036
1037 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1038 const struct timeval *tv)
1039 {
1040 struct target_timeval *target_tv;
1041
1042 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1043 return -TARGET_EFAULT;
1044
1045 __put_user(tv->tv_sec, &target_tv->tv_sec);
1046 __put_user(tv->tv_usec, &target_tv->tv_usec);
1047
1048 unlock_user_struct(target_tv, target_tv_addr, 1);
1049
1050 return 0;
1051 }
1052
1053 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1054 #include <mqueue.h>
1055
1056 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1057 abi_ulong target_mq_attr_addr)
1058 {
1059 struct target_mq_attr *target_mq_attr;
1060
1061 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1062 target_mq_attr_addr, 1))
1063 return -TARGET_EFAULT;
1064
1065 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1066 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1067 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1068 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1069
1070 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1071
1072 return 0;
1073 }
1074
1075 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1076 const struct mq_attr *attr)
1077 {
1078 struct target_mq_attr *target_mq_attr;
1079
1080 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1081 target_mq_attr_addr, 0))
1082 return -TARGET_EFAULT;
1083
1084 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1085 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1086 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1087 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1088
1089 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1090
1091 return 0;
1092 }
1093 #endif
1094
1095 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1096 /* do_select() must return target values and target errnos. */
1097 static abi_long do_select(int n,
1098 abi_ulong rfd_addr, abi_ulong wfd_addr,
1099 abi_ulong efd_addr, abi_ulong target_tv_addr)
1100 {
1101 fd_set rfds, wfds, efds;
1102 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1103 struct timeval tv, *tv_ptr;
1104 abi_long ret;
1105
1106 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1107 if (ret) {
1108 return ret;
1109 }
1110 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1111 if (ret) {
1112 return ret;
1113 }
1114 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1115 if (ret) {
1116 return ret;
1117 }
1118
1119 if (target_tv_addr) {
1120 if (copy_from_user_timeval(&tv, target_tv_addr))
1121 return -TARGET_EFAULT;
1122 tv_ptr = &tv;
1123 } else {
1124 tv_ptr = NULL;
1125 }
1126
1127 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1128
1129 if (!is_error(ret)) {
1130 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1131 return -TARGET_EFAULT;
1132 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1133 return -TARGET_EFAULT;
1134 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1135 return -TARGET_EFAULT;
1136
1137 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1138 return -TARGET_EFAULT;
1139 }
1140
1141 return ret;
1142 }
1143 #endif
1144
1145 static abi_long do_pipe2(int host_pipe[], int flags)
1146 {
1147 #ifdef CONFIG_PIPE2
1148 return pipe2(host_pipe, flags);
1149 #else
1150 return -ENOSYS;
1151 #endif
1152 }
1153
1154 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1155 int flags, int is_pipe2)
1156 {
1157 int host_pipe[2];
1158 abi_long ret;
1159 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1160
1161 if (is_error(ret))
1162 return get_errno(ret);
1163
1164 /* Several targets have special calling conventions for the original
1165 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1166 if (!is_pipe2) {
1167 #if defined(TARGET_ALPHA)
1168 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1169 return host_pipe[0];
1170 #elif defined(TARGET_MIPS)
1171 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1172 return host_pipe[0];
1173 #elif defined(TARGET_SH4)
1174 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1175 return host_pipe[0];
1176 #endif
1177 }
1178
1179 if (put_user_s32(host_pipe[0], pipedes)
1180 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1181 return -TARGET_EFAULT;
1182 return get_errno(ret);
1183 }
1184
1185 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1186 abi_ulong target_addr,
1187 socklen_t len)
1188 {
1189 struct target_ip_mreqn *target_smreqn;
1190
1191 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1192 if (!target_smreqn)
1193 return -TARGET_EFAULT;
1194 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1195 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1196 if (len == sizeof(struct target_ip_mreqn))
1197 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1198 unlock_user(target_smreqn, target_addr, 0);
1199
1200 return 0;
1201 }
1202
1203 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1204 abi_ulong target_addr,
1205 socklen_t len)
1206 {
1207 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1208 sa_family_t sa_family;
1209 struct target_sockaddr *target_saddr;
1210
1211 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1212 if (!target_saddr)
1213 return -TARGET_EFAULT;
1214
1215 sa_family = tswap16(target_saddr->sa_family);
1216
1217     /* Oops. The caller might send an incomplete sun_path; sun_path
1218 * must be terminated by \0 (see the manual page), but
1219 * unfortunately it is quite common to specify sockaddr_un
1220 * length as "strlen(x->sun_path)" while it should be
1221 * "strlen(...) + 1". We'll fix that here if needed.
1222      * The Linux kernel applies a similar fixup.
1223 */
1224
1225 if (sa_family == AF_UNIX) {
1226 if (len < unix_maxlen && len > 0) {
1227 char *cp = (char*)target_saddr;
1228
1229 if ( cp[len-1] && !cp[len] )
1230 len++;
1231 }
1232 if (len > unix_maxlen)
1233 len = unix_maxlen;
1234 }
1235
1236 memcpy(addr, target_saddr, len);
1237 addr->sa_family = sa_family;
1238 unlock_user(target_saddr, target_addr, 0);
1239
1240 return 0;
1241 }
1242
1243 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1244 struct sockaddr *addr,
1245 socklen_t len)
1246 {
1247 struct target_sockaddr *target_saddr;
1248
1249 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1250 if (!target_saddr)
1251 return -TARGET_EFAULT;
1252 memcpy(target_saddr, addr, len);
1253 target_saddr->sa_family = tswap16(addr->sa_family);
1254 unlock_user(target_saddr, target_addr, len);
1255
1256 return 0;
1257 }
1258
1259 /* ??? Should this also swap msgh->name? */
1260 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1261 struct target_msghdr *target_msgh)
1262 {
1263 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1264 abi_long msg_controllen;
1265 abi_ulong target_cmsg_addr;
1266 struct target_cmsghdr *target_cmsg;
1267 socklen_t space = 0;
1268
1269 msg_controllen = tswapal(target_msgh->msg_controllen);
1270 if (msg_controllen < sizeof (struct target_cmsghdr))
1271 goto the_end;
1272 target_cmsg_addr = tswapal(target_msgh->msg_control);
1273 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1274 if (!target_cmsg)
1275 return -TARGET_EFAULT;
1276
1277 while (cmsg && target_cmsg) {
1278 void *data = CMSG_DATA(cmsg);
1279 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1280
1281 int len = tswapal(target_cmsg->cmsg_len)
1282 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1283
1284 space += CMSG_SPACE(len);
1285 if (space > msgh->msg_controllen) {
1286 space -= CMSG_SPACE(len);
1287 gemu_log("Host cmsg overflow\n");
1288 break;
1289 }
1290
1291 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1292 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1293 cmsg->cmsg_len = CMSG_LEN(len);
1294
1295 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1296 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1297 memcpy(data, target_data, len);
1298 } else {
1299 int *fd = (int *)data;
1300 int *target_fd = (int *)target_data;
1301 int i, numfds = len / sizeof(int);
1302
1303 for (i = 0; i < numfds; i++)
1304 fd[i] = tswap32(target_fd[i]);
1305 }
1306
1307 cmsg = CMSG_NXTHDR(msgh, cmsg);
1308 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1309 }
1310 unlock_user(target_cmsg, target_cmsg_addr, 0);
1311 the_end:
1312 msgh->msg_controllen = space;
1313 return 0;
1314 }
1315
1316 /* ??? Should this also swap msgh->name? */
1317 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1318 struct msghdr *msgh)
1319 {
1320 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1321 abi_long msg_controllen;
1322 abi_ulong target_cmsg_addr;
1323 struct target_cmsghdr *target_cmsg;
1324 socklen_t space = 0;
1325
1326 msg_controllen = tswapal(target_msgh->msg_controllen);
1327 if (msg_controllen < sizeof (struct target_cmsghdr))
1328 goto the_end;
1329 target_cmsg_addr = tswapal(target_msgh->msg_control);
1330 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1331 if (!target_cmsg)
1332 return -TARGET_EFAULT;
1333
1334 while (cmsg && target_cmsg) {
1335 void *data = CMSG_DATA(cmsg);
1336 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1337
1338 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1339
1340 space += TARGET_CMSG_SPACE(len);
1341 if (space > msg_controllen) {
1342 space -= TARGET_CMSG_SPACE(len);
1343 gemu_log("Target cmsg overflow\n");
1344 break;
1345 }
1346
1347 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1348 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1349 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1350
1351 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1352 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1353 memcpy(target_data, data, len);
1354 } else {
1355 int *fd = (int *)data;
1356 int *target_fd = (int *)target_data;
1357 int i, numfds = len / sizeof(int);
1358
1359 for (i = 0; i < numfds; i++)
1360 target_fd[i] = tswap32(fd[i]);
1361 }
1362
1363 cmsg = CMSG_NXTHDR(msgh, cmsg);
1364 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1365 }
1366 unlock_user(target_cmsg, target_cmsg_addr, space);
1367 the_end:
1368 target_msgh->msg_controllen = tswapal(space);
1369 return 0;
1370 }
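/*
 * Only SCM_RIGHTS (file-descriptor passing) payloads are translated
 * element by element in the two cmsg helpers above; any other ancillary
 * data is logged as unsupported and copied through verbatim.
 */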
1371
1372 /* do_setsockopt() Must return target values and target errnos. */
1373 static abi_long do_setsockopt(int sockfd, int level, int optname,
1374 abi_ulong optval_addr, socklen_t optlen)
1375 {
1376 abi_long ret;
1377 int val;
1378 struct ip_mreqn *ip_mreq;
1379 struct ip_mreq_source *ip_mreq_source;
1380
1381 switch(level) {
1382 case SOL_TCP:
1383 /* TCP options all take an 'int' value. */
1384 if (optlen < sizeof(uint32_t))
1385 return -TARGET_EINVAL;
1386
1387 if (get_user_u32(val, optval_addr))
1388 return -TARGET_EFAULT;
1389 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1390 break;
1391 case SOL_IP:
1392 switch(optname) {
1393 case IP_TOS:
1394 case IP_TTL:
1395 case IP_HDRINCL:
1396 case IP_ROUTER_ALERT:
1397 case IP_RECVOPTS:
1398 case IP_RETOPTS:
1399 case IP_PKTINFO:
1400 case IP_MTU_DISCOVER:
1401 case IP_RECVERR:
1402 case IP_RECVTOS:
1403 #ifdef IP_FREEBIND
1404 case IP_FREEBIND:
1405 #endif
1406 case IP_MULTICAST_TTL:
1407 case IP_MULTICAST_LOOP:
1408 val = 0;
1409 if (optlen >= sizeof(uint32_t)) {
1410 if (get_user_u32(val, optval_addr))
1411 return -TARGET_EFAULT;
1412 } else if (optlen >= 1) {
1413 if (get_user_u8(val, optval_addr))
1414 return -TARGET_EFAULT;
1415 }
1416 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1417 break;
1418 case IP_ADD_MEMBERSHIP:
1419 case IP_DROP_MEMBERSHIP:
1420 if (optlen < sizeof (struct target_ip_mreq) ||
1421 optlen > sizeof (struct target_ip_mreqn))
1422 return -TARGET_EINVAL;
1423
1424 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1425 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1426 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1427 break;
1428
1429 case IP_BLOCK_SOURCE:
1430 case IP_UNBLOCK_SOURCE:
1431 case IP_ADD_SOURCE_MEMBERSHIP:
1432 case IP_DROP_SOURCE_MEMBERSHIP:
1433 if (optlen != sizeof (struct target_ip_mreq_source))
1434 return -TARGET_EINVAL;
1435
1436 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1437 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1438 unlock_user (ip_mreq_source, optval_addr, 0);
1439 break;
1440
1441 default:
1442 goto unimplemented;
1443 }
1444 break;
1445 case TARGET_SOL_SOCKET:
1446 switch (optname) {
1447 /* Options with 'int' argument. */
1448 case TARGET_SO_DEBUG:
1449 optname = SO_DEBUG;
1450 break;
1451 case TARGET_SO_REUSEADDR:
1452 optname = SO_REUSEADDR;
1453 break;
1454 case TARGET_SO_TYPE:
1455 optname = SO_TYPE;
1456 break;
1457 case TARGET_SO_ERROR:
1458 optname = SO_ERROR;
1459 break;
1460 case TARGET_SO_DONTROUTE:
1461 optname = SO_DONTROUTE;
1462 break;
1463 case TARGET_SO_BROADCAST:
1464 optname = SO_BROADCAST;
1465 break;
1466 case TARGET_SO_SNDBUF:
1467 optname = SO_SNDBUF;
1468 break;
1469 case TARGET_SO_RCVBUF:
1470 optname = SO_RCVBUF;
1471 break;
1472 case TARGET_SO_KEEPALIVE:
1473 optname = SO_KEEPALIVE;
1474 break;
1475 case TARGET_SO_OOBINLINE:
1476 optname = SO_OOBINLINE;
1477 break;
1478 case TARGET_SO_NO_CHECK:
1479 optname = SO_NO_CHECK;
1480 break;
1481 case TARGET_SO_PRIORITY:
1482 optname = SO_PRIORITY;
1483 break;
1484 #ifdef SO_BSDCOMPAT
1485 case TARGET_SO_BSDCOMPAT:
1486 optname = SO_BSDCOMPAT;
1487 break;
1488 #endif
1489 case TARGET_SO_PASSCRED:
1490 optname = SO_PASSCRED;
1491 break;
1492 case TARGET_SO_TIMESTAMP:
1493 optname = SO_TIMESTAMP;
1494 break;
1495 case TARGET_SO_RCVLOWAT:
1496 optname = SO_RCVLOWAT;
1497 break;
1498 case TARGET_SO_RCVTIMEO:
1499 optname = SO_RCVTIMEO;
1500 break;
1501 case TARGET_SO_SNDTIMEO:
1502 optname = SO_SNDTIMEO;
1503 break;
1505 default:
1506 goto unimplemented;
1507 }
1508 if (optlen < sizeof(uint32_t))
1509 return -TARGET_EINVAL;
1510
1511 if (get_user_u32(val, optval_addr))
1512 return -TARGET_EFAULT;
1513 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1514 break;
1515 default:
1516 unimplemented:
1517 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1518 ret = -TARGET_ENOPROTOOPT;
1519 }
1520 return ret;
1521 }
1522
1523 /* do_getsockopt() Must return target values and target errnos. */
1524 static abi_long do_getsockopt(int sockfd, int level, int optname,
1525 abi_ulong optval_addr, abi_ulong optlen)
1526 {
1527 abi_long ret;
1528 int len, val;
1529 socklen_t lv;
1530
1531 switch(level) {
1532 case TARGET_SOL_SOCKET:
1533 level = SOL_SOCKET;
1534 switch (optname) {
1535 /* These don't just return a single integer */
1536 case TARGET_SO_LINGER:
1537 case TARGET_SO_RCVTIMEO:
1538 case TARGET_SO_SNDTIMEO:
1539 case TARGET_SO_PEERNAME:
1540 goto unimplemented;
1541 case TARGET_SO_PEERCRED: {
1542 struct ucred cr;
1543 socklen_t crlen;
1544 struct target_ucred *tcr;
1545
1546 if (get_user_u32(len, optlen)) {
1547 return -TARGET_EFAULT;
1548 }
1549 if (len < 0) {
1550 return -TARGET_EINVAL;
1551 }
1552
1553 crlen = sizeof(cr);
1554 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1555 &cr, &crlen));
1556 if (ret < 0) {
1557 return ret;
1558 }
1559 if (len > crlen) {
1560 len = crlen;
1561 }
1562 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1563 return -TARGET_EFAULT;
1564 }
1565 __put_user(cr.pid, &tcr->pid);
1566 __put_user(cr.uid, &tcr->uid);
1567 __put_user(cr.gid, &tcr->gid);
1568 unlock_user_struct(tcr, optval_addr, 1);
1569 if (put_user_u32(len, optlen)) {
1570 return -TARGET_EFAULT;
1571 }
1572 break;
1573 }
1574 /* Options with 'int' argument. */
1575 case TARGET_SO_DEBUG:
1576 optname = SO_DEBUG;
1577 goto int_case;
1578 case TARGET_SO_REUSEADDR:
1579 optname = SO_REUSEADDR;
1580 goto int_case;
1581 case TARGET_SO_TYPE:
1582 optname = SO_TYPE;
1583 goto int_case;
1584 case TARGET_SO_ERROR:
1585 optname = SO_ERROR;
1586 goto int_case;
1587 case TARGET_SO_DONTROUTE:
1588 optname = SO_DONTROUTE;
1589 goto int_case;
1590 case TARGET_SO_BROADCAST:
1591 optname = SO_BROADCAST;
1592 goto int_case;
1593 case TARGET_SO_SNDBUF:
1594 optname = SO_SNDBUF;
1595 goto int_case;
1596 case TARGET_SO_RCVBUF:
1597 optname = SO_RCVBUF;
1598 goto int_case;
1599 case TARGET_SO_KEEPALIVE:
1600 optname = SO_KEEPALIVE;
1601 goto int_case;
1602 case TARGET_SO_OOBINLINE:
1603 optname = SO_OOBINLINE;
1604 goto int_case;
1605 case TARGET_SO_NO_CHECK:
1606 optname = SO_NO_CHECK;
1607 goto int_case;
1608 case TARGET_SO_PRIORITY:
1609 optname = SO_PRIORITY;
1610 goto int_case;
1611 #ifdef SO_BSDCOMPAT
1612 case TARGET_SO_BSDCOMPAT:
1613 optname = SO_BSDCOMPAT;
1614 goto int_case;
1615 #endif
1616 case TARGET_SO_PASSCRED:
1617 optname = SO_PASSCRED;
1618 goto int_case;
1619 case TARGET_SO_TIMESTAMP:
1620 optname = SO_TIMESTAMP;
1621 goto int_case;
1622 case TARGET_SO_RCVLOWAT:
1623 optname = SO_RCVLOWAT;
1624 goto int_case;
1625 default:
1626 goto int_case;
1627 }
1628 break;
1629 case SOL_TCP:
1630 /* TCP options all take an 'int' value. */
1631 int_case:
1632 if (get_user_u32(len, optlen))
1633 return -TARGET_EFAULT;
1634 if (len < 0)
1635 return -TARGET_EINVAL;
1636 lv = sizeof(lv);
1637 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1638 if (ret < 0)
1639 return ret;
1640 if (len > lv)
1641 len = lv;
1642 if (len == 4) {
1643 if (put_user_u32(val, optval_addr))
1644 return -TARGET_EFAULT;
1645 } else {
1646 if (put_user_u8(val, optval_addr))
1647 return -TARGET_EFAULT;
1648 }
1649 if (put_user_u32(len, optlen))
1650 return -TARGET_EFAULT;
1651 break;
1652 case SOL_IP:
1653 switch(optname) {
1654 case IP_TOS:
1655 case IP_TTL:
1656 case IP_HDRINCL:
1657 case IP_ROUTER_ALERT:
1658 case IP_RECVOPTS:
1659 case IP_RETOPTS:
1660 case IP_PKTINFO:
1661 case IP_MTU_DISCOVER:
1662 case IP_RECVERR:
1663 case IP_RECVTOS:
1664 #ifdef IP_FREEBIND
1665 case IP_FREEBIND:
1666 #endif
1667 case IP_MULTICAST_TTL:
1668 case IP_MULTICAST_LOOP:
1669 if (get_user_u32(len, optlen))
1670 return -TARGET_EFAULT;
1671 if (len < 0)
1672 return -TARGET_EINVAL;
1673 lv = sizeof(lv);
1674 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1675 if (ret < 0)
1676 return ret;
1677 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1678 len = 1;
1679 if (put_user_u32(len, optlen)
1680 || put_user_u8(val, optval_addr))
1681 return -TARGET_EFAULT;
1682 } else {
1683 if (len > sizeof(int))
1684 len = sizeof(int);
1685 if (put_user_u32(len, optlen)
1686 || put_user_u32(val, optval_addr))
1687 return -TARGET_EFAULT;
1688 }
1689 break;
1690 default:
1691 ret = -TARGET_ENOPROTOOPT;
1692 break;
1693 }
1694 break;
1695 default:
1696 unimplemented:
1697 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1698 level, optname);
1699 ret = -TARGET_EOPNOTSUPP;
1700 break;
1701 }
1702 return ret;
1703 }
1704
1705 /* FIXME
1706 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1707 * other lock functions have a return code of 0 for failure.
1708 */
1709 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1710 int count, int copy)
1711 {
1712 struct target_iovec *target_vec;
1713 abi_ulong base;
1714 int i;
1715
1716 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1717 if (!target_vec)
1718 return -TARGET_EFAULT;
1719 for(i = 0;i < count; i++) {
1720 base = tswapal(target_vec[i].iov_base);
1721 vec[i].iov_len = tswapal(target_vec[i].iov_len);
1722 if (vec[i].iov_len != 0) {
1723 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1724 /* Don't check lock_user return value. We must call writev even
1725            if an element has an invalid base address. */
1726 } else {
1727 /* zero length pointer is ignored */
1728 vec[i].iov_base = NULL;
1729 }
1730 }
1731 unlock_user (target_vec, target_addr, 0);
1732 return 0;
1733 }
1734
1735 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1736 int count, int copy)
1737 {
1738 struct target_iovec *target_vec;
1739 abi_ulong base;
1740 int i;
1741
1742 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1743 if (!target_vec)
1744 return -TARGET_EFAULT;
1745 for(i = 0;i < count; i++) {
1746 if (target_vec[i].iov_base) {
1747 base = tswapal(target_vec[i].iov_base);
1748 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1749 }
1750 }
1751 unlock_user (target_vec, target_addr, 0);
1752
1753 return 0;
1754 }
1755
1756 /* do_socket() Must return target values and target errnos. */
1757 static abi_long do_socket(int domain, int type, int protocol)
1758 {
1759 #if defined(TARGET_MIPS)
1760 switch(type) {
1761 case TARGET_SOCK_DGRAM:
1762 type = SOCK_DGRAM;
1763 break;
1764 case TARGET_SOCK_STREAM:
1765 type = SOCK_STREAM;
1766 break;
1767 case TARGET_SOCK_RAW:
1768 type = SOCK_RAW;
1769 break;
1770 case TARGET_SOCK_RDM:
1771 type = SOCK_RDM;
1772 break;
1773 case TARGET_SOCK_SEQPACKET:
1774 type = SOCK_SEQPACKET;
1775 break;
1776 case TARGET_SOCK_PACKET:
1777 type = SOCK_PACKET;
1778 break;
1779 }
1780 #endif
1781 if (domain == PF_NETLINK)
1782         return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1783 return get_errno(socket(domain, type, protocol));
1784 }
1785
1786 /* do_bind() Must return target values and target errnos. */
1787 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1788 socklen_t addrlen)
1789 {
1790 void *addr;
1791 abi_long ret;
1792
1793 if ((int)addrlen < 0) {
1794 return -TARGET_EINVAL;
1795 }
1796
1797 addr = alloca(addrlen+1);
1798
1799 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1800 if (ret)
1801 return ret;
1802
1803 return get_errno(bind(sockfd, addr, addrlen));
1804 }
1805
1806 /* do_connect() Must return target values and target errnos. */
1807 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1808 socklen_t addrlen)
1809 {
1810 void *addr;
1811 abi_long ret;
1812
1813 if ((int)addrlen < 0) {
1814 return -TARGET_EINVAL;
1815 }
1816
1817 addr = alloca(addrlen);
1818
1819 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1820 if (ret)
1821 return ret;
1822
1823 return get_errno(connect(sockfd, addr, addrlen));
1824 }
1825
1826 /* do_sendrecvmsg() Must return target values and target errnos. */
1827 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1828 int flags, int send)
1829 {
1830 abi_long ret, len;
1831 struct target_msghdr *msgp;
1832 struct msghdr msg;
1833 int count;
1834 struct iovec *vec;
1835 abi_ulong target_vec;
1836
1837 /* FIXME */
1838 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1839 msgp,
1840 target_msg,
1841 send ? 1 : 0))
1842 return -TARGET_EFAULT;
1843 if (msgp->msg_name) {
1844 msg.msg_namelen = tswap32(msgp->msg_namelen);
1845 msg.msg_name = alloca(msg.msg_namelen);
1846 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1847 msg.msg_namelen);
1848 if (ret) {
1849 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1850 return ret;
1851 }
1852 } else {
1853 msg.msg_name = NULL;
1854 msg.msg_namelen = 0;
1855 }
1856 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1857 msg.msg_control = alloca(msg.msg_controllen);
1858 msg.msg_flags = tswap32(msgp->msg_flags);
1859
1860 count = tswapal(msgp->msg_iovlen);
1861 vec = alloca(count * sizeof(struct iovec));
1862 target_vec = tswapal(msgp->msg_iov);
1863 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1864 msg.msg_iovlen = count;
1865 msg.msg_iov = vec;
1866
1867 if (send) {
1868 ret = target_to_host_cmsg(&msg, msgp);
1869 if (ret == 0)
1870 ret = get_errno(sendmsg(fd, &msg, flags));
1871 } else {
1872 ret = get_errno(recvmsg(fd, &msg, flags));
1873 if (!is_error(ret)) {
1874 len = ret;
1875 ret = host_to_target_cmsg(msgp, &msg);
1876 if (!is_error(ret))
1877 ret = len;
1878 }
1879 }
1880 unlock_iovec(vec, target_vec, count, !send);
1881 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1882 return ret;
1883 }
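
/*
 * Illustration only (not part of the original file): a minimal host-side
 * sendmsg() call showing the msghdr/iovec layout that do_sendrecvmsg()
 * rebuilds from the guest's target_msghdr.  Plain POSIX API; nothing here
 * is QEMU-specific, and the helper name is made up for this sketch.
 */
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t sketch_send_two_parts(int fd, const char *hdr, size_t hlen,
                                     const char *body, size_t blen)
{
    struct iovec iov[2] = {
        { .iov_base = (void *)hdr,  .iov_len = hlen },
        { .iov_base = (void *)body, .iov_len = blen },
    };
    struct msghdr msg;

    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = iov;      /* scatter/gather list, like vec[] above */
    msg.msg_iovlen = 2;     /* like msgp->msg_iovlen after tswapal() */
    return sendmsg(fd, &msg, 0);
}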
1884
1885 /* do_accept() Must return target values and target errnos. */
1886 static abi_long do_accept(int fd, abi_ulong target_addr,
1887 abi_ulong target_addrlen_addr)
1888 {
1889 socklen_t addrlen;
1890 void *addr;
1891 abi_long ret;
1892
1893 if (target_addr == 0)
1894 return get_errno(accept(fd, NULL, NULL));
1895
1896 /* linux returns EINVAL if addrlen pointer is invalid */
1897 if (get_user_u32(addrlen, target_addrlen_addr))
1898 return -TARGET_EINVAL;
1899
1900 if ((int)addrlen < 0) {
1901 return -TARGET_EINVAL;
1902 }
1903
1904 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1905 return -TARGET_EINVAL;
1906
1907 addr = alloca(addrlen);
1908
1909 ret = get_errno(accept(fd, addr, &addrlen));
1910 if (!is_error(ret)) {
1911 host_to_target_sockaddr(target_addr, addr, addrlen);
1912 if (put_user_u32(addrlen, target_addrlen_addr))
1913 ret = -TARGET_EFAULT;
1914 }
1915 return ret;
1916 }
1917
1918 /* do_getpeername() Must return target values and target errnos. */
1919 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1920 abi_ulong target_addrlen_addr)
1921 {
1922 socklen_t addrlen;
1923 void *addr;
1924 abi_long ret;
1925
1926 if (get_user_u32(addrlen, target_addrlen_addr))
1927 return -TARGET_EFAULT;
1928
1929 if ((int)addrlen < 0) {
1930 return -TARGET_EINVAL;
1931 }
1932
1933 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1934 return -TARGET_EFAULT;
1935
1936 addr = alloca(addrlen);
1937
1938 ret = get_errno(getpeername(fd, addr, &addrlen));
1939 if (!is_error(ret)) {
1940 host_to_target_sockaddr(target_addr, addr, addrlen);
1941 if (put_user_u32(addrlen, target_addrlen_addr))
1942 ret = -TARGET_EFAULT;
1943 }
1944 return ret;
1945 }
1946
1947 /* do_getsockname() Must return target values and target errnos. */
1948 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1949 abi_ulong target_addrlen_addr)
1950 {
1951 socklen_t addrlen;
1952 void *addr;
1953 abi_long ret;
1954
1955 if (get_user_u32(addrlen, target_addrlen_addr))
1956 return -TARGET_EFAULT;
1957
1958 if ((int)addrlen < 0) {
1959 return -TARGET_EINVAL;
1960 }
1961
1962 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1963 return -TARGET_EFAULT;
1964
1965 addr = alloca(addrlen);
1966
1967 ret = get_errno(getsockname(fd, addr, &addrlen));
1968 if (!is_error(ret)) {
1969 host_to_target_sockaddr(target_addr, addr, addrlen);
1970 if (put_user_u32(addrlen, target_addrlen_addr))
1971 ret = -TARGET_EFAULT;
1972 }
1973 return ret;
1974 }
1975
1976 /* do_socketpair() Must return target values and target errnos. */
1977 static abi_long do_socketpair(int domain, int type, int protocol,
1978 abi_ulong target_tab_addr)
1979 {
1980 int tab[2];
1981 abi_long ret;
1982
1983 ret = get_errno(socketpair(domain, type, protocol, tab));
1984 if (!is_error(ret)) {
1985 if (put_user_s32(tab[0], target_tab_addr)
1986 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1987 ret = -TARGET_EFAULT;
1988 }
1989 return ret;
1990 }
1991
1992 /* do_sendto() Must return target values and target errnos. */
1993 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1994 abi_ulong target_addr, socklen_t addrlen)
1995 {
1996 void *addr;
1997 void *host_msg;
1998 abi_long ret;
1999
2000 if ((int)addrlen < 0) {
2001 return -TARGET_EINVAL;
2002 }
2003
2004 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2005 if (!host_msg)
2006 return -TARGET_EFAULT;
2007 if (target_addr) {
2008 addr = alloca(addrlen);
2009 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2010 if (ret) {
2011 unlock_user(host_msg, msg, 0);
2012 return ret;
2013 }
2014 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2015 } else {
2016 ret = get_errno(send(fd, host_msg, len, flags));
2017 }
2018 unlock_user(host_msg, msg, 0);
2019 return ret;
2020 }
2021
2022 /* do_recvfrom() Must return target values and target errnos. */
2023 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2024 abi_ulong target_addr,
2025 abi_ulong target_addrlen)
2026 {
2027 socklen_t addrlen;
2028 void *addr;
2029 void *host_msg;
2030 abi_long ret;
2031
2032 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2033 if (!host_msg)
2034 return -TARGET_EFAULT;
2035 if (target_addr) {
2036 if (get_user_u32(addrlen, target_addrlen)) {
2037 ret = -TARGET_EFAULT;
2038 goto fail;
2039 }
2040 if ((int)addrlen < 0) {
2041 ret = -TARGET_EINVAL;
2042 goto fail;
2043 }
2044 addr = alloca(addrlen);
2045 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2046 } else {
2047 addr = NULL; /* To keep compiler quiet. */
2048 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2049 }
2050 if (!is_error(ret)) {
2051 if (target_addr) {
2052 host_to_target_sockaddr(target_addr, addr, addrlen);
2053 if (put_user_u32(addrlen, target_addrlen)) {
2054 ret = -TARGET_EFAULT;
2055 goto fail;
2056 }
2057 }
2058 unlock_user(host_msg, msg, len);
2059 } else {
2060 fail:
2061 unlock_user(host_msg, msg, 0);
2062 }
2063 return ret;
2064 }
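
/*
 * Illustration only: the lock_user()/unlock_user() calls used throughout
 * these wrappers behave like a bounce buffer: obtain a host pointer for a
 * range of guest memory, optionally pre-filling it from the guest, and copy
 * back up to `len` bytes on release.  The sketch below is a deliberately
 * simplified model of that contract (flat guest memory, always copying),
 * not QEMU's actual implementation; all names are hypothetical.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint8_t sketch_guest_mem[1 << 20];  /* stand-in for guest address space */

static void *sketch_lock_user(uint64_t gaddr, size_t len, int copy_in)
{
    void *host = malloc(len);
    if (host && copy_in) {
        memcpy(host, sketch_guest_mem + gaddr, len);   /* guest -> bounce buffer */
    }
    return host;
}

static void sketch_unlock_user(void *host, uint64_t gaddr, size_t copy_back)
{
    if (host && copy_back) {
        memcpy(sketch_guest_mem + gaddr, host, copy_back); /* write back to guest */
    }
    free(host);
}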
2065
2066 #ifdef TARGET_NR_socketcall
2067 /* do_socketcall() Must return target values and target errnos. */
2068 static abi_long do_socketcall(int num, abi_ulong vptr)
2069 {
2070 abi_long ret;
2071 const int n = sizeof(abi_ulong);
2072
2073 switch(num) {
2074 case SOCKOP_socket:
2075 {
2076 abi_ulong domain, type, protocol;
2077
2078 if (get_user_ual(domain, vptr)
2079 || get_user_ual(type, vptr + n)
2080 || get_user_ual(protocol, vptr + 2 * n))
2081 return -TARGET_EFAULT;
2082
2083 ret = do_socket(domain, type, protocol);
2084 }
2085 break;
2086 case SOCKOP_bind:
2087 {
2088 abi_ulong sockfd;
2089 abi_ulong target_addr;
2090 socklen_t addrlen;
2091
2092 if (get_user_ual(sockfd, vptr)
2093 || get_user_ual(target_addr, vptr + n)
2094 || get_user_ual(addrlen, vptr + 2 * n))
2095 return -TARGET_EFAULT;
2096
2097 ret = do_bind(sockfd, target_addr, addrlen);
2098 }
2099 break;
2100 case SOCKOP_connect:
2101 {
2102 abi_ulong sockfd;
2103 abi_ulong target_addr;
2104 socklen_t addrlen;
2105
2106 if (get_user_ual(sockfd, vptr)
2107 || get_user_ual(target_addr, vptr + n)
2108 || get_user_ual(addrlen, vptr + 2 * n))
2109 return -TARGET_EFAULT;
2110
2111 ret = do_connect(sockfd, target_addr, addrlen);
2112 }
2113 break;
2114 case SOCKOP_listen:
2115 {
2116 abi_ulong sockfd, backlog;
2117
2118 if (get_user_ual(sockfd, vptr)
2119 || get_user_ual(backlog, vptr + n))
2120 return -TARGET_EFAULT;
2121
2122 ret = get_errno(listen(sockfd, backlog));
2123 }
2124 break;
2125 case SOCKOP_accept:
2126 {
2127 abi_ulong sockfd;
2128 abi_ulong target_addr, target_addrlen;
2129
2130 if (get_user_ual(sockfd, vptr)
2131 || get_user_ual(target_addr, vptr + n)
2132 || get_user_ual(target_addrlen, vptr + 2 * n))
2133 return -TARGET_EFAULT;
2134
2135 ret = do_accept(sockfd, target_addr, target_addrlen);
2136 }
2137 break;
2138 case SOCKOP_getsockname:
2139 {
2140 abi_ulong sockfd;
2141 abi_ulong target_addr, target_addrlen;
2142
2143 if (get_user_ual(sockfd, vptr)
2144 || get_user_ual(target_addr, vptr + n)
2145 || get_user_ual(target_addrlen, vptr + 2 * n))
2146 return -TARGET_EFAULT;
2147
2148 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2149 }
2150 break;
2151 case SOCKOP_getpeername:
2152 {
2153 abi_ulong sockfd;
2154 abi_ulong target_addr, target_addrlen;
2155
2156 if (get_user_ual(sockfd, vptr)
2157 || get_user_ual(target_addr, vptr + n)
2158 || get_user_ual(target_addrlen, vptr + 2 * n))
2159 return -TARGET_EFAULT;
2160
2161 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2162 }
2163 break;
2164 case SOCKOP_socketpair:
2165 {
2166 abi_ulong domain, type, protocol;
2167 abi_ulong tab;
2168
2169 if (get_user_ual(domain, vptr)
2170 || get_user_ual(type, vptr + n)
2171 || get_user_ual(protocol, vptr + 2 * n)
2172 || get_user_ual(tab, vptr + 3 * n))
2173 return -TARGET_EFAULT;
2174
2175 ret = do_socketpair(domain, type, protocol, tab);
2176 }
2177 break;
2178 case SOCKOP_send:
2179 {
2180 abi_ulong sockfd;
2181 abi_ulong msg;
2182 size_t len;
2183 abi_ulong flags;
2184
2185 if (get_user_ual(sockfd, vptr)
2186 || get_user_ual(msg, vptr + n)
2187 || get_user_ual(len, vptr + 2 * n)
2188 || get_user_ual(flags, vptr + 3 * n))
2189 return -TARGET_EFAULT;
2190
2191 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2192 }
2193 break;
2194 case SOCKOP_recv:
2195 {
2196 abi_ulong sockfd;
2197 abi_ulong msg;
2198 size_t len;
2199 abi_ulong flags;
2200
2201 if (get_user_ual(sockfd, vptr)
2202 || get_user_ual(msg, vptr + n)
2203 || get_user_ual(len, vptr + 2 * n)
2204 || get_user_ual(flags, vptr + 3 * n))
2205 return -TARGET_EFAULT;
2206
2207 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2208 }
2209 break;
2210 case SOCKOP_sendto:
2211 {
2212 abi_ulong sockfd;
2213 abi_ulong msg;
2214 size_t len;
2215 abi_ulong flags;
2216 abi_ulong addr;
2217 socklen_t addrlen;
2218
2219 if (get_user_ual(sockfd, vptr)
2220 || get_user_ual(msg, vptr + n)
2221 || get_user_ual(len, vptr + 2 * n)
2222 || get_user_ual(flags, vptr + 3 * n)
2223 || get_user_ual(addr, vptr + 4 * n)
2224 || get_user_ual(addrlen, vptr + 5 * n))
2225 return -TARGET_EFAULT;
2226
2227 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2228 }
2229 break;
2230 case SOCKOP_recvfrom:
2231 {
2232 abi_ulong sockfd;
2233 abi_ulong msg;
2234 size_t len;
2235 abi_ulong flags;
2236 abi_ulong addr;
2237 socklen_t addrlen;
2238
2239 if (get_user_ual(sockfd, vptr)
2240 || get_user_ual(msg, vptr + n)
2241 || get_user_ual(len, vptr + 2 * n)
2242 || get_user_ual(flags, vptr + 3 * n)
2243 || get_user_ual(addr, vptr + 4 * n)
2244 || get_user_ual(addrlen, vptr + 5 * n))
2245 return -TARGET_EFAULT;
2246
2247 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2248 }
2249 break;
2250 case SOCKOP_shutdown:
2251 {
2252 abi_ulong sockfd, how;
2253
2254 if (get_user_ual(sockfd, vptr)
2255 || get_user_ual(how, vptr + n))
2256 return -TARGET_EFAULT;
2257
2258 ret = get_errno(shutdown(sockfd, how));
2259 }
2260 break;
2261 case SOCKOP_sendmsg:
2262 case SOCKOP_recvmsg:
2263 {
2264 abi_ulong fd;
2265 abi_ulong target_msg;
2266 abi_ulong flags;
2267
2268 if (get_user_ual(fd, vptr)
2269 || get_user_ual(target_msg, vptr + n)
2270 || get_user_ual(flags, vptr + 2 * n))
2271 return -TARGET_EFAULT;
2272
2273 ret = do_sendrecvmsg(fd, target_msg, flags,
2274 (num == SOCKOP_sendmsg));
2275 }
2276 break;
2277 case SOCKOP_setsockopt:
2278 {
2279 abi_ulong sockfd;
2280 abi_ulong level;
2281 abi_ulong optname;
2282 abi_ulong optval;
2283 socklen_t optlen;
2284
2285 if (get_user_ual(sockfd, vptr)
2286 || get_user_ual(level, vptr + n)
2287 || get_user_ual(optname, vptr + 2 * n)
2288 || get_user_ual(optval, vptr + 3 * n)
2289 || get_user_ual(optlen, vptr + 4 * n))
2290 return -TARGET_EFAULT;
2291
2292 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2293 }
2294 break;
2295 case SOCKOP_getsockopt:
2296 {
2297 abi_ulong sockfd;
2298 abi_ulong level;
2299 abi_ulong optname;
2300 abi_ulong optval;
2301 socklen_t optlen;
2302
2303 if (get_user_ual(sockfd, vptr)
2304 || get_user_ual(level, vptr + n)
2305 || get_user_ual(optname, vptr + 2 * n)
2306 || get_user_ual(optval, vptr + 3 * n)
2307 || get_user_ual(optlen, vptr + 4 * n))
2308 return -TARGET_EFAULT;
2309
2310 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2311 }
2312 break;
2313 default:
2314 gemu_log("Unsupported socketcall: %d\n", num);
2315 ret = -TARGET_ENOSYS;
2316 break;
2317 }
2318 return ret;
2319 }
2320 #endif
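
/*
 * Illustration only: how a guest libc typically funnels socket(2) through
 * the single socketcall(2) entry point that do_socketcall() above
 * demultiplexes.  The arguments sit in guest memory as an array of longs,
 * which is exactly what the get_user_ual(vptr + i * n) reads above undo.
 * SYS_SOCKET, __NR_socketcall and the wrapper name are guest-side
 * assumptions for this sketch, not QEMU API.
 */
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SYS_SOCKET
#define SYS_SOCKET 1            /* matches SOCKOP_socket on the guest side */
#endif

static long sketch_socket_via_socketcall(int domain, int type, int protocol)
{
    long args[3] = { domain, type, protocol };  /* re-read by the kernel/QEMU */
#ifdef __NR_socketcall
    return syscall(__NR_socketcall, SYS_SOCKET, args);
#else
    (void)args;
    return -1;                  /* this architecture has real socket syscalls */
#endif
}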
2321
2322 #define N_SHM_REGIONS 32
2323
2324 static struct shm_region {
2325 abi_ulong start;
2326 abi_ulong size;
2327 } shm_regions[N_SHM_REGIONS];
2328
2329 struct target_ipc_perm
2330 {
2331 abi_long __key;
2332 abi_ulong uid;
2333 abi_ulong gid;
2334 abi_ulong cuid;
2335 abi_ulong cgid;
2336 unsigned short int mode;
2337 unsigned short int __pad1;
2338 unsigned short int __seq;
2339 unsigned short int __pad2;
2340 abi_ulong __unused1;
2341 abi_ulong __unused2;
2342 };
2343
2344 struct target_semid_ds
2345 {
2346 struct target_ipc_perm sem_perm;
2347 abi_ulong sem_otime;
2348 abi_ulong __unused1;
2349 abi_ulong sem_ctime;
2350 abi_ulong __unused2;
2351 abi_ulong sem_nsems;
2352 abi_ulong __unused3;
2353 abi_ulong __unused4;
2354 };
2355
2356 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2357 abi_ulong target_addr)
2358 {
2359 struct target_ipc_perm *target_ip;
2360 struct target_semid_ds *target_sd;
2361
2362 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2363 return -TARGET_EFAULT;
2364 target_ip = &(target_sd->sem_perm);
2365 host_ip->__key = tswapal(target_ip->__key);
2366 host_ip->uid = tswapal(target_ip->uid);
2367 host_ip->gid = tswapal(target_ip->gid);
2368 host_ip->cuid = tswapal(target_ip->cuid);
2369 host_ip->cgid = tswapal(target_ip->cgid);
2370 host_ip->mode = tswap16(target_ip->mode);
2371 unlock_user_struct(target_sd, target_addr, 0);
2372 return 0;
2373 }
2374
2375 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2376 struct ipc_perm *host_ip)
2377 {
2378 struct target_ipc_perm *target_ip;
2379 struct target_semid_ds *target_sd;
2380
2381 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2382 return -TARGET_EFAULT;
2383 target_ip = &(target_sd->sem_perm);
2384 target_ip->__key = tswapal(host_ip->__key);
2385 target_ip->uid = tswapal(host_ip->uid);
2386 target_ip->gid = tswapal(host_ip->gid);
2387 target_ip->cuid = tswapal(host_ip->cuid);
2388 target_ip->cgid = tswapal(host_ip->cgid);
2389 target_ip->mode = tswap16(host_ip->mode);
2390 unlock_user_struct(target_sd, target_addr, 1);
2391 return 0;
2392 }
2393
2394 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2395 abi_ulong target_addr)
2396 {
2397 struct target_semid_ds *target_sd;
2398
2399 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2400 return -TARGET_EFAULT;
2401 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2402 return -TARGET_EFAULT;
2403 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2404 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2405 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2406 unlock_user_struct(target_sd, target_addr, 0);
2407 return 0;
2408 }
2409
2410 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2411 struct semid_ds *host_sd)
2412 {
2413 struct target_semid_ds *target_sd;
2414
2415 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2416 return -TARGET_EFAULT;
2417 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2418 return -TARGET_EFAULT;
2419 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2420 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2421 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2422 unlock_user_struct(target_sd, target_addr, 1);
2423 return 0;
2424 }
2425
2426 struct target_seminfo {
2427 int semmap;
2428 int semmni;
2429 int semmns;
2430 int semmnu;
2431 int semmsl;
2432 int semopm;
2433 int semume;
2434 int semusz;
2435 int semvmx;
2436 int semaem;
2437 };
2438
2439 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2440 struct seminfo *host_seminfo)
2441 {
2442 struct target_seminfo *target_seminfo;
2443 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2444 return -TARGET_EFAULT;
2445 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2446 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2447 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2448 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2449 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2450 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2451 __put_user(host_seminfo->semume, &target_seminfo->semume);
2452 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2453 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2454 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2455 unlock_user_struct(target_seminfo, target_addr, 1);
2456 return 0;
2457 }
2458
2459 union semun {
2460 int val;
2461 struct semid_ds *buf;
2462 unsigned short *array;
2463 struct seminfo *__buf;
2464 };
2465
2466 union target_semun {
2467 int val;
2468 abi_ulong buf;
2469 abi_ulong array;
2470 abi_ulong __buf;
2471 };
2472
2473 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2474 abi_ulong target_addr)
2475 {
2476 int nsems;
2477 unsigned short *array;
2478 union semun semun;
2479 struct semid_ds semid_ds;
2480 int i, ret;
2481
2482 semun.buf = &semid_ds;
2483
2484 ret = semctl(semid, 0, IPC_STAT, semun);
2485 if (ret == -1)
2486 return get_errno(ret);
2487
2488 nsems = semid_ds.sem_nsems;
2489
2490 *host_array = malloc(nsems*sizeof(unsigned short));
2491 array = lock_user(VERIFY_READ, target_addr,
2492 nsems*sizeof(unsigned short), 1);
2493 if (!array)
2494 return -TARGET_EFAULT;
2495
2496 for(i=0; i<nsems; i++) {
2497 __get_user((*host_array)[i], &array[i]);
2498 }
2499 unlock_user(array, target_addr, 0);
2500
2501 return 0;
2502 }
2503
2504 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2505 unsigned short **host_array)
2506 {
2507 int nsems;
2508 unsigned short *array;
2509 union semun semun;
2510 struct semid_ds semid_ds;
2511 int i, ret;
2512
2513 semun.buf = &semid_ds;
2514
2515 ret = semctl(semid, 0, IPC_STAT, semun);
2516 if (ret == -1)
2517 return get_errno(ret);
2518
2519 nsems = semid_ds.sem_nsems;
2520
2521 array = lock_user(VERIFY_WRITE, target_addr,
2522 nsems*sizeof(unsigned short), 0);
2523 if (!array)
2524 return -TARGET_EFAULT;
2525
2526 for(i=0; i<nsems; i++) {
2527 __put_user((*host_array)[i], &array[i]);
2528 }
2529 free(*host_array);
2530 unlock_user(array, target_addr, 1);
2531
2532 return 0;
2533 }
2534
2535 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2536 union target_semun target_su)
2537 {
2538 union semun arg;
2539 struct semid_ds dsarg;
2540 unsigned short *array = NULL;
2541 struct seminfo seminfo;
2542 abi_long ret = -TARGET_EINVAL;
2543 abi_long err;
2544 cmd &= 0xff; /* mask off the IPC_64 version flag passed by the guest */
2545
2546 switch( cmd ) {
2547 case GETVAL:
2548 case SETVAL:
2549 arg.val = tswap32(target_su.val);
2550 ret = get_errno(semctl(semid, semnum, cmd, arg));
2551 target_su.val = tswap32(arg.val);
2552 break;
2553 case GETALL:
2554 case SETALL:
2555 err = target_to_host_semarray(semid, &array, target_su.array);
2556 if (err)
2557 return err;
2558 arg.array = array;
2559 ret = get_errno(semctl(semid, semnum, cmd, arg));
2560 err = host_to_target_semarray(semid, target_su.array, &array);
2561 if (err)
2562 return err;
2563 break;
2564 case IPC_STAT:
2565 case IPC_SET:
2566 case SEM_STAT:
2567 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2568 if (err)
2569 return err;
2570 arg.buf = &dsarg;
2571 ret = get_errno(semctl(semid, semnum, cmd, arg));
2572 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2573 if (err)
2574 return err;
2575 break;
2576 case IPC_INFO:
2577 case SEM_INFO:
2578 arg.__buf = &seminfo;
2579 ret = get_errno(semctl(semid, semnum, cmd, arg));
2580 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2581 if (err)
2582 return err;
2583 break;
2584 case IPC_RMID:
2585 case GETPID:
2586 case GETNCNT:
2587 case GETZCNT:
2588 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2589 break;
2590 }
2591
2592 return ret;
2593 }
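
/*
 * Illustration only: the caller-defined `union semun` that do_semctl()
 * mirrors.  On Linux the union is not declared by <sys/sem.h>; application
 * code defines it itself, exactly as this file does above.  The union and
 * helper names are made up for this sketch.
 */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union sketch_semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
};

static int sketch_set_first_sem(int semid)
{
    union sketch_semun arg;

    arg.val = 1;                          /* SETVAL consumes the .val member */
    return semctl(semid, 0, SETVAL, arg); /* semnum 0, like do_semctl's path */
}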
2594
2595 struct target_sembuf {
2596 unsigned short sem_num;
2597 short sem_op;
2598 short sem_flg;
2599 };
2600
2601 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2602 abi_ulong target_addr,
2603 unsigned nsops)
2604 {
2605 struct target_sembuf *target_sembuf;
2606 int i;
2607
2608 target_sembuf = lock_user(VERIFY_READ, target_addr,
2609 nsops*sizeof(struct target_sembuf), 1);
2610 if (!target_sembuf)
2611 return -TARGET_EFAULT;
2612
2613 for(i=0; i<nsops; i++) {
2614 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2615 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2616 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2617 }
2618
2619 unlock_user(target_sembuf, target_addr, 0);
2620
2621 return 0;
2622 }
2623
2624 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2625 {
2626 struct sembuf sops[nsops];
2627
2628 if (target_to_host_sembuf(sops, ptr, nsops))
2629 return -TARGET_EFAULT;
2630
2631 return semop(semid, sops, nsops);
2632 }
2633
2634 struct target_msqid_ds
2635 {
2636 struct target_ipc_perm msg_perm;
2637 abi_ulong msg_stime;
2638 #if TARGET_ABI_BITS == 32
2639 abi_ulong __unused1;
2640 #endif
2641 abi_ulong msg_rtime;
2642 #if TARGET_ABI_BITS == 32
2643 abi_ulong __unused2;
2644 #endif
2645 abi_ulong msg_ctime;
2646 #if TARGET_ABI_BITS == 32
2647 abi_ulong __unused3;
2648 #endif
2649 abi_ulong __msg_cbytes;
2650 abi_ulong msg_qnum;
2651 abi_ulong msg_qbytes;
2652 abi_ulong msg_lspid;
2653 abi_ulong msg_lrpid;
2654 abi_ulong __unused4;
2655 abi_ulong __unused5;
2656 };
2657
2658 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2659 abi_ulong target_addr)
2660 {
2661 struct target_msqid_ds *target_md;
2662
2663 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2664 return -TARGET_EFAULT;
2665 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2666 return -TARGET_EFAULT;
2667 host_md->msg_stime = tswapal(target_md->msg_stime);
2668 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2669 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2670 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2671 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2672 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2673 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2674 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2675 unlock_user_struct(target_md, target_addr, 0);
2676 return 0;
2677 }
2678
2679 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2680 struct msqid_ds *host_md)
2681 {
2682 struct target_msqid_ds *target_md;
2683
2684 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2685 return -TARGET_EFAULT;
2686 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2687 return -TARGET_EFAULT;
2688 target_md->msg_stime = tswapal(host_md->msg_stime);
2689 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2690 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2691 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2692 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2693 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2694 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2695 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2696 unlock_user_struct(target_md, target_addr, 1);
2697 return 0;
2698 }
2699
2700 struct target_msginfo {
2701 int msgpool;
2702 int msgmap;
2703 int msgmax;
2704 int msgmnb;
2705 int msgmni;
2706 int msgssz;
2707 int msgtql;
2708 unsigned short int msgseg;
2709 };
2710
2711 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2712 struct msginfo *host_msginfo)
2713 {
2714 struct target_msginfo *target_msginfo;
2715 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2716 return -TARGET_EFAULT;
2717 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2718 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2719 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2720 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2721 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2722 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2723 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2724 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2725 unlock_user_struct(target_msginfo, target_addr, 1);
2726 return 0;
2727 }
2728
2729 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2730 {
2731 struct msqid_ds dsarg;
2732 struct msginfo msginfo;
2733 abi_long ret = -TARGET_EINVAL;
2734
2735 cmd &= 0xff;
2736
2737 switch (cmd) {
2738 case IPC_STAT:
2739 case IPC_SET:
2740 case MSG_STAT:
2741 if (target_to_host_msqid_ds(&dsarg,ptr))
2742 return -TARGET_EFAULT;
2743 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2744 if (host_to_target_msqid_ds(ptr,&dsarg))
2745 return -TARGET_EFAULT;
2746 break;
2747 case IPC_RMID:
2748 ret = get_errno(msgctl(msgid, cmd, NULL));
2749 break;
2750 case IPC_INFO:
2751 case MSG_INFO:
2752 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2753 if (host_to_target_msginfo(ptr, &msginfo))
2754 return -TARGET_EFAULT;
2755 break;
2756 }
2757
2758 return ret;
2759 }
2760
2761 struct target_msgbuf {
2762 abi_long mtype;
2763 char mtext[1];
2764 };
2765
2766 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2767 unsigned int msgsz, int msgflg)
2768 {
2769 struct target_msgbuf *target_mb;
2770 struct msgbuf *host_mb;
2771 abi_long ret = 0;
2772
2773 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2774 return -TARGET_EFAULT;
2775 host_mb = malloc(msgsz+sizeof(long));
2776 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2777 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2778 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2779 free(host_mb);
2780 unlock_user_struct(target_mb, msgp, 0);
2781
2782 return ret;
2783 }
2784
2785 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2786 unsigned int msgsz, abi_long msgtyp,
2787 int msgflg)
2788 {
2789 struct target_msgbuf *target_mb;
2790 char *target_mtext;
2791 struct msgbuf *host_mb;
2792 abi_long ret = 0;
2793
2794 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2795 return -TARGET_EFAULT;
2796
2797 host_mb = malloc(msgsz+sizeof(long));
2798 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2799
2800 if (ret > 0) {
2801 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2802 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2803 if (!target_mtext) {
2804 ret = -TARGET_EFAULT;
2805 goto end;
2806 }
2807 memcpy(target_mb->mtext, host_mb->mtext, ret);
2808 unlock_user(target_mtext, target_mtext_addr, ret);
2809 }
2810
2811 target_mb->mtype = tswapal(host_mb->mtype);
2812 free(host_mb);
2813
2814 end:
2815 if (target_mb)
2816 unlock_user_struct(target_mb, msgp, 1);
2817 return ret;
2818 }
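
/*
 * Illustration only: the host msgbuf layout that do_msgsnd()/do_msgrcv()
 * convert target_msgbuf to and from: a long mtype followed by the payload
 * bytes, with msgsnd() told only the payload length.  Plain SysV API; the
 * struct and helper names are made up for this sketch.
 */
#include <string.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct sketch_msgbuf {
    long mtype;       /* message type, must be > 0 */
    char mtext[64];   /* payload buffer */
};

static int sketch_send_text(int msqid, const char *text)
{
    struct sketch_msgbuf mb;

    mb.mtype = 1;
    strncpy(mb.mtext, text, sizeof(mb.mtext) - 1);
    mb.mtext[sizeof(mb.mtext) - 1] = '\0';
    return msgsnd(msqid, &mb, sizeof(mb.mtext), 0);   /* length excludes mtype */
}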
2819
2820 struct target_shmid_ds
2821 {
2822 struct target_ipc_perm shm_perm;
2823 abi_ulong shm_segsz;
2824 abi_ulong shm_atime;
2825 #if TARGET_ABI_BITS == 32
2826 abi_ulong __unused1;
2827 #endif
2828 abi_ulong shm_dtime;
2829 #if TARGET_ABI_BITS == 32
2830 abi_ulong __unused2;
2831 #endif
2832 abi_ulong shm_ctime;
2833 #if TARGET_ABI_BITS == 32
2834 abi_ulong __unused3;
2835 #endif
2836 int shm_cpid;
2837 int shm_lpid;
2838 abi_ulong shm_nattch;
2839 unsigned long int __unused4;
2840 unsigned long int __unused5;
2841 };
2842
2843 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2844 abi_ulong target_addr)
2845 {
2846 struct target_shmid_ds *target_sd;
2847
2848 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2849 return -TARGET_EFAULT;
2850 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2851 return -TARGET_EFAULT;
2852 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2853 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2854 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2855 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2856 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2857 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2858 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2859 unlock_user_struct(target_sd, target_addr, 0);
2860 return 0;
2861 }
2862
2863 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2864 struct shmid_ds *host_sd)
2865 {
2866 struct target_shmid_ds *target_sd;
2867
2868 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2869 return -TARGET_EFAULT;
2870 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2871 return -TARGET_EFAULT;
2872 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2873 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2874 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2875 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2876 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2877 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2878 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2879 unlock_user_struct(target_sd, target_addr, 1);
2880 return 0;
2881 }
2882
2883 struct target_shminfo {
2884 abi_ulong shmmax;
2885 abi_ulong shmmin;
2886 abi_ulong shmmni;
2887 abi_ulong shmseg;
2888 abi_ulong shmall;
2889 };
2890
2891 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2892 struct shminfo *host_shminfo)
2893 {
2894 struct target_shminfo *target_shminfo;
2895 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2896 return -TARGET_EFAULT;
2897 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2898 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2899 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2900 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2901 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2902 unlock_user_struct(target_shminfo, target_addr, 1);
2903 return 0;
2904 }
2905
2906 struct target_shm_info {
2907 int used_ids;
2908 abi_ulong shm_tot;
2909 abi_ulong shm_rss;
2910 abi_ulong shm_swp;
2911 abi_ulong swap_attempts;
2912 abi_ulong swap_successes;
2913 };
2914
2915 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2916 struct shm_info *host_shm_info)
2917 {
2918 struct target_shm_info *target_shm_info;
2919 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2920 return -TARGET_EFAULT;
2921 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2922 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2923 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2924 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2925 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2926 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2927 unlock_user_struct(target_shm_info, target_addr, 1);
2928 return 0;
2929 }
2930
2931 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2932 {
2933 struct shmid_ds dsarg;
2934 struct shminfo shminfo;
2935 struct shm_info shm_info;
2936 abi_long ret = -TARGET_EINVAL;
2937
2938 cmd &= 0xff;
2939
2940 switch(cmd) {
2941 case IPC_STAT:
2942 case IPC_SET:
2943 case SHM_STAT:
2944 if (target_to_host_shmid_ds(&dsarg, buf))
2945 return -TARGET_EFAULT;
2946 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2947 if (host_to_target_shmid_ds(buf, &dsarg))
2948 return -TARGET_EFAULT;
2949 break;
2950 case IPC_INFO:
2951 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2952 if (host_to_target_shminfo(buf, &shminfo))
2953 return -TARGET_EFAULT;
2954 break;
2955 case SHM_INFO:
2956 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2957 if (host_to_target_shm_info(buf, &shm_info))
2958 return -TARGET_EFAULT;
2959 break;
2960 case IPC_RMID:
2961 case SHM_LOCK:
2962 case SHM_UNLOCK:
2963 ret = get_errno(shmctl(shmid, cmd, NULL));
2964 break;
2965 }
2966
2967 return ret;
2968 }
2969
2970 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2971 {
2972 abi_long raddr;
2973 void *host_raddr;
2974 struct shmid_ds shm_info;
2975 int i,ret;
2976
2977 /* find out the length of the shared memory segment */
2978 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2979 if (is_error(ret)) {
2980 /* can't get length, bail out */
2981 return ret;
2982 }
2983
2984 mmap_lock();
2985
2986 if (shmaddr)
2987 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2988 else {
2989 abi_ulong mmap_start;
2990
2991 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2992
2993 if (mmap_start == -1) {
2994 errno = ENOMEM;
2995 host_raddr = (void *)-1;
2996 } else
2997 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2998 }
2999
3000 if (host_raddr == (void *)-1) {
3001 mmap_unlock();
3002 return get_errno((long)host_raddr);
3003 }
3004 raddr=h2g((unsigned long)host_raddr);
3005
3006 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3007 PAGE_VALID | PAGE_READ |
3008 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3009
3010 for (i = 0; i < N_SHM_REGIONS; i++) {
3011 if (shm_regions[i].start == 0) {
3012 shm_regions[i].start = raddr;
3013 shm_regions[i].size = shm_info.shm_segsz;
3014 break;
3015 }
3016 }
3017
3018 mmap_unlock();
3019 return raddr;
3020
3021 }
3022
3023 static inline abi_long do_shmdt(abi_ulong shmaddr)
3024 {
3025 int i;
3026
3027 for (i = 0; i < N_SHM_REGIONS; ++i) {
3028 if (shm_regions[i].start == shmaddr) {
3029 shm_regions[i].start = 0;
3030 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3031 break;
3032 }
3033 }
3034
3035 return get_errno(shmdt(g2h(shmaddr)));
3036 }
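
/*
 * Illustration only: do_shmat() records (start, size) in shm_regions[]
 * because shmdt() receives only an address, so the emulator has to remember
 * each segment's length itself in order to clear the page flags again on
 * detach.  A minimal model of that bookkeeping, with hypothetical names:
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_N_REGIONS 32

static struct { uintptr_t start; size_t size; } sketch_regions[SKETCH_N_REGIONS];

static void sketch_record_attach(uintptr_t start, size_t size)
{
    int i;
    for (i = 0; i < SKETCH_N_REGIONS; i++) {
        if (sketch_regions[i].start == 0) {     /* first free slot */
            sketch_regions[i].start = start;
            sketch_regions[i].size = size;
            return;
        }
    }
}

static size_t sketch_lookup_detach(uintptr_t start)
{
    int i;
    for (i = 0; i < SKETCH_N_REGIONS; i++) {
        if (sketch_regions[i].start == start) {
            sketch_regions[i].start = 0;        /* free the slot */
            return sketch_regions[i].size;      /* caller clears page flags */
        }
    }
    return 0;                                   /* unknown address */
}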
3037
3038 #ifdef TARGET_NR_ipc
3039 /* ??? This only works with linear mappings. */
3040 /* do_ipc() must return target values and target errnos. */
3041 static abi_long do_ipc(unsigned int call, int first,
3042 int second, int third,
3043 abi_long ptr, abi_long fifth)
3044 {
3045 int version;
3046 abi_long ret = 0;
3047
3048 version = call >> 16;
3049 call &= 0xffff;
3050
3051 switch (call) {
3052 case IPCOP_semop:
3053 ret = do_semop(first, ptr, second);
3054 break;
3055
3056 case IPCOP_semget:
3057 ret = get_errno(semget(first, second, third));
3058 break;
3059
3060 case IPCOP_semctl:
3061 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3062 break;
3063
3064 case IPCOP_msgget:
3065 ret = get_errno(msgget(first, second));
3066 break;
3067
3068 case IPCOP_msgsnd:
3069 ret = do_msgsnd(first, ptr, second, third);
3070 break;
3071
3072 case IPCOP_msgctl:
3073 ret = do_msgctl(first, second, ptr);
3074 break;
3075
3076 case IPCOP_msgrcv:
3077 switch (version) {
3078 case 0:
3079 {
3080 struct target_ipc_kludge {
3081 abi_long msgp;
3082 abi_long msgtyp;
3083 } *tmp;
3084
3085 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3086 ret = -TARGET_EFAULT;
3087 break;
3088 }
3089
3090 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3091
3092 unlock_user_struct(tmp, ptr, 0);
3093 break;
3094 }
3095 default:
3096 ret = do_msgrcv(first, ptr, second, fifth, third);
3097 }
3098 break;
3099
3100 case IPCOP_shmat:
3101 switch (version) {
3102 default:
3103 {
3104 abi_ulong raddr;
3105 raddr = do_shmat(first, ptr, second);
3106 if (is_error(raddr))
3107 return get_errno(raddr);
3108 if (put_user_ual(raddr, third))
3109 return -TARGET_EFAULT;
3110 break;
3111 }
3112 case 1:
3113 ret = -TARGET_EINVAL;
3114 break;
3115 }
3116 break;
3117 case IPCOP_shmdt:
3118 ret = do_shmdt(ptr);
3119 break;
3120
3121 case IPCOP_shmget:
3122 /* IPC_* flag values are the same on all linux platforms */
3123 ret = get_errno(shmget(first, second, third));
3124 break;
3125
3126 /* IPC_* and SHM_* command values are the same on all linux platforms */
3127 case IPCOP_shmctl:
3128 ret = do_shmctl(first, second, third);
3129 break;
3130 default:
3131 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3132 ret = -TARGET_ENOSYS;
3133 break;
3134 }
3135 return ret;
3136 }
3137 #endif
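
/*
 * Illustration only: the ipc(2) multiplexer decoded above packs the
 * operation in the low 16 bits of `call` and an ABI version in the high
 * 16 bits, with the remaining syscall arguments mapped to first/second/
 * third/ptr/fifth.  The guest-side sketch below mirrors that encoding for
 * semop; the macro and constant names are assumptions, not QEMU code.
 */
#include <unistd.h>
#include <sys/syscall.h>

#define SKETCH_IPCCALL(version, op)  (((version) << 16) | (op))
#define SKETCH_SEMOP 1               /* IPCOP_semop on the guest side */

static long sketch_semop_via_ipc(int semid, void *sops, unsigned nsops)
{
#ifdef __NR_ipc
    /* first = semid, second = nsops, third unused, ptr = sops */
    return syscall(__NR_ipc, SKETCH_IPCCALL(0, SKETCH_SEMOP),
                   semid, (long)nsops, 0L, sops);
#else
    (void)semid; (void)sops; (void)nsops;
    return -1;                       /* architecture has direct IPC syscalls */
#endif
}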
3138
3139 /* kernel structure types definitions */
3140
3141 #define STRUCT(name, ...) STRUCT_ ## name,
3142 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3143 enum {
3144 #include "syscall_types.h"
3145 };
3146 #undef STRUCT
3147 #undef STRUCT_SPECIAL
3148
3149 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3150 #define STRUCT_SPECIAL(name)
3151 #include "syscall_types.h"
3152 #undef STRUCT
3153 #undef STRUCT_SPECIAL
3154
3155 typedef struct IOCTLEntry IOCTLEntry;
3156
3157 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3158 int fd, abi_long cmd, abi_long arg);
3159
3160 struct IOCTLEntry {
3161 unsigned int target_cmd;
3162 unsigned int host_cmd;
3163 const char *name;
3164 int access;
3165 do_ioctl_fn *do_ioctl;
3166 const argtype arg_type[5];
3167 };
3168
3169 #define IOC_R 0x0001
3170 #define IOC_W 0x0002
3171 #define IOC_RW (IOC_R | IOC_W)
3172
3173 #define MAX_STRUCT_SIZE 4096
3174
3175 #ifdef CONFIG_FIEMAP
3176 /* So fiemap access checks don't overflow on 32 bit systems.
3177 * This is very slightly smaller than the limit imposed by
3178 * the underlying kernel.
3179 */
3180 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3181 / sizeof(struct fiemap_extent))
3182
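/*
 * Rough numbers behind the bound above (illustration only, relying on the
 * headers already included at the top of this file): assuming the usual
 * Linux layouts of about 32 bytes for struct fiemap and 56 bytes for
 * struct fiemap_extent (sizes not guaranteed here), a 32-bit UINT_MAX of
 * 4294967295 gives roughly (4294967295 - 32) / 56, i.e. about 76 million
 * extents, so the outbufsz computation below cannot exceed UINT_MAX.
 * The typedef is a sketch of a compile-time sanity check.
 */
typedef char sketch_fiemap_bound_is_positive[
    ((UINT_MAX - sizeof(struct fiemap)) / sizeof(struct fiemap_extent)) > 0
        ? 1 : -1];
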
3183 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3184 int fd, abi_long cmd, abi_long arg)
3185 {
3186 /* The parameter for this ioctl is a struct fiemap followed
3187 * by an array of struct fiemap_extent whose size is set
3188 * in fiemap->fm_extent_count. The array is filled in by the
3189 * ioctl.
3190 */
3191 int target_size_in, target_size_out;
3192 struct fiemap *fm;
3193 const argtype *arg_type = ie->arg_type;
3194 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3195 void *argptr, *p;
3196 abi_long ret;
3197 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3198 uint32_t outbufsz;
3199 int free_fm = 0;
3200
3201 assert(arg_type[0] == TYPE_PTR);
3202 assert(ie->access == IOC_RW);
3203 arg_type++;
3204 target_size_in = thunk_type_size(arg_type, 0);
3205 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3206 if (!argptr) {
3207 return -TARGET_EFAULT;
3208 }
3209 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3210 unlock_user(argptr, arg, 0);
3211 fm = (struct fiemap *)buf_temp;
3212 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3213 return -TARGET_EINVAL;
3214 }
3215
3216 outbufsz = sizeof (*fm) +
3217 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3218
3219 if (outbufsz > MAX_STRUCT_SIZE) {
3220 /* We can't fit all the extents into the fixed size buffer.
3221 * Allocate one that is large enough and use it instead.
3222 */
3223 fm = malloc(outbufsz);
3224 if (!fm) {
3225 return -TARGET_ENOMEM;
3226 }
3227 memcpy(fm, buf_temp, sizeof(struct fiemap));
3228 free_fm = 1;
3229 }
3230 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3231 if (!is_error(ret)) {
3232 target_size_out = target_size_in;
3233 /* An extent_count of 0 means we were only counting the extents
3234 * so there are no structs to copy
3235 */
3236 if (fm->fm_extent_count != 0) {
3237 target_size_out += fm->fm_mapped_extents * extent_size;
3238 }
3239 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3240 if (!argptr) {
3241 ret = -TARGET_EFAULT;
3242 } else {
3243 /* Convert the struct fiemap */
3244 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3245 if (fm->fm_extent_count != 0) {
3246 p = argptr + target_size_in;
3247 /* ...and then all the struct fiemap_extents */
3248 for (i = 0; i < fm->fm_mapped_extents; i++) {
3249 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3250 THUNK_TARGET);
3251 p += extent_size;
3252 }
3253 }
3254 unlock_user(argptr, arg, target_size_out);
3255 }
3256 }
3257 if (free_fm) {
3258 free(fm);
3259 }
3260 return ret;
3261 }
3262 #endif
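
/*
 * Illustration only: what a guest program's FS_IOC_FIEMAP request looks
 * like on the other side of the wrapper above: a struct fiemap header with
 * fm_extent_count extent slots allocated directly behind it, which the
 * kernel fills in and reports via fm_mapped_extents.  Plain Linux API with
 * error handling trimmed; the helper name is made up for this sketch.
 */
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct fiemap *sketch_query_extents(int fd, unsigned int nextents)
{
    size_t sz = sizeof(struct fiemap)
                + nextents * sizeof(struct fiemap_extent);
    struct fiemap *fm = calloc(1, sz);

    if (!fm) {
        return NULL;
    }
    fm->fm_start = 0;
    fm->fm_length = ~0ULL;            /* map the whole file */
    fm->fm_extent_count = nextents;   /* number of slots behind the header */
    if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
        free(fm);
        return NULL;
    }
    return fm;                        /* fm->fm_mapped_extents entries valid */
}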
3263
3264 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3265 int fd, abi_long cmd, abi_long arg)
3266 {
3267 const argtype *arg_type = ie->arg_type;
3268 int target_size;
3269 void *argptr;
3270 int ret;
3271 struct ifconf *host_ifconf;
3272 uint32_t outbufsz;
3273 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3274 int target_ifreq_size;
3275 int nb_ifreq;
3276 int free_buf = 0;
3277 int i;
3278 int target_ifc_len;
3279 abi_long target_ifc_buf;
3280 int host_ifc_len;
3281 char *host_ifc_buf;
3282
3283 assert(arg_type[0] == TYPE_PTR);
3284 assert(ie->access == IOC_RW);
3285
3286 arg_type++;
3287 target_size = thunk_type_size(arg_type, 0);
3288
3289 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3290 if (!argptr)
3291 return -TARGET_EFAULT;
3292 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3293 unlock_user(argptr, arg, 0);
3294
3295 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3296 target_ifc_len = host_ifconf->ifc_len;
3297 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3298
3299 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3300 nb_ifreq = target_ifc_len / target_ifreq_size;
3301 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3302
3303 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3304 if (outbufsz > MAX_STRUCT_SIZE) {
3305 /* We can't fit all the ifreq entries into the fixed size buffer.
3306 * Allocate one that is large enough and use it instead.
3307 */
3308 host_ifconf = malloc(outbufsz);
3309 if (!host_ifconf) {
3310 return -TARGET_ENOMEM;
3311 }
3312 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3313 free_buf = 1;
3314 }
3315 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3316
3317 host_ifconf->ifc_len = host_ifc_len;
3318 host_ifconf->ifc_buf = host_ifc_buf;
3319
3320 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3321 if (!is_error(ret)) {
3322 /* convert host ifc_len to target ifc_len */
3323
3324 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3325 target_ifc_len = nb_ifreq * target_ifreq_size;
3326 host_ifconf->ifc_len = target_ifc_len;
3327
3328 /* restore target ifc_buf */
3329
3330 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3331
3332 /* copy struct ifconf to target user */
3333
3334 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3335 if (!argptr)
3336 return -TARGET_EFAULT;
3337 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3338 unlock_user(argptr, arg, target_size);
3339
3340 /* copy ifreq[] to target user */
3341
3342 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3343 for (i = 0; i < nb_ifreq ; i++) {
3344 thunk_convert(argptr + i * target_ifreq_size,
3345 host_ifc_buf + i * sizeof(struct ifreq),
3346 ifreq_arg_type, THUNK_TARGET);
3347 }
3348 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3349 }
3350
3351 if (free_buf) {
3352 free(host_ifconf);
3353 }
3354
3355 return ret;
3356 }
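
/*
 * Illustration only: the guest-side SIOCGIFCONF call serviced by the
 * wrapper above.  The caller supplies ifc_len/ifc_buf, and the kernel
 * rewrites ifc_len to the number of bytes it actually filled in, which is
 * exactly the value do_ioctl_ifconf() converts back to target sizes.
 * Plain Linux API; the helper name is made up for this sketch.
 */
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int sketch_count_interfaces(int sock)
{
    struct ifreq reqs[16];
    struct ifconf ifc;

    memset(&ifc, 0, sizeof(ifc));
    ifc.ifc_len = sizeof(reqs);       /* size of the buffer we provide */
    ifc.ifc_req = reqs;               /* where the kernel writes ifreq[] */
    if (ioctl(sock, SIOCGIFCONF, &ifc) < 0) {
        return -1;
    }
    return ifc.ifc_len / (int)sizeof(struct ifreq);
}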
3357
3358 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3359 abi_long cmd, abi_long arg)
3360 {
3361 void *argptr;
3362 struct dm_ioctl *host_dm;
3363 abi_long guest_data;
3364 uint32_t guest_data_size;
3365 int target_size;
3366 const argtype *arg_type = ie->arg_type;
3367 abi_long ret;
3368 void *big_buf = NULL;
3369 char *host_data;
3370
3371 arg_type++;
3372 target_size = thunk_type_size(arg_type, 0);
3373 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3374 if (!argptr) {
3375 ret = -TARGET_EFAULT;
3376 goto out;
3377 }
3378 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3379 unlock_user(argptr, arg, 0);
3380
3381 /* buf_temp is too small, so fetch things into a bigger buffer */
3382 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3383 memcpy(big_buf, buf_temp, target_size);
3384 buf_temp = big_buf;
3385 host_dm = big_buf;
3386
3387 guest_data = arg + host_dm->data_start;
3388 if ((guest_data - arg) < 0) {
3389 ret = -EINVAL;
3390 goto out;
3391 }
3392 guest_data_size = host_dm->data_size - host_dm->data_start;
3393 host_data = (char*)host_dm + host_dm->data_start;
3394
3395 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3396 switch (ie->host_cmd) {
3397 case DM_REMOVE_ALL:
3398 case DM_LIST_DEVICES:
3399 case DM_DEV_CREATE:
3400 case DM_DEV_REMOVE:
3401 case DM_DEV_SUSPEND:
3402 case DM_DEV_STATUS:
3403 case DM_DEV_WAIT:
3404 case DM_TABLE_STATUS:
3405 case DM_TABLE_CLEAR:
3406 case DM_TABLE_DEPS:
3407 case DM_LIST_VERSIONS:
3408 /* no input data */
3409 break;
3410 case DM_DEV_RENAME:
3411 case DM_DEV_SET_GEOMETRY:
3412 /* data contains only strings */
3413 memcpy(host_data, argptr, guest_data_size);
3414 break;
3415 case DM_TARGET_MSG:
3416 memcpy(host_data, argptr, guest_data_size);
3417 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3418 break;
3419 case DM_TABLE_LOAD:
3420 {
3421 void *gspec = argptr;
3422 void *cur_data = host_data;
3423 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3424 int spec_size = thunk_type_size(arg_type, 0);
3425 int i;
3426
3427 for (i = 0; i < host_dm->target_count; i++) {
3428 struct dm_target_spec *spec = cur_data;
3429 uint32_t next;
3430 int slen;
3431
3432 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3433 slen = strlen((char*)gspec + spec_size) + 1;
3434 next = spec->next;
3435 spec->next = sizeof(*spec) + slen;
3436 strcpy((char*)&spec[1], gspec + spec_size);
3437 gspec += next;
3438 cur_data += spec->next;
3439 }
3440 break;
3441 }
3442 default:
3443 ret = -TARGET_EINVAL;
3444 goto out;
3445 }
3446 unlock_user(argptr, guest_data, 0);
3447
3448 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3449 if (!is_error(ret)) {
3450 guest_data = arg + host_dm->data_start;
3451 guest_data_size = host_dm->data_size - host_dm->data_start;
3452 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3453 switch (ie->host_cmd) {
3454 case DM_REMOVE_ALL:
3455 case DM_DEV_CREATE:
3456 case DM_DEV_REMOVE:
3457 case DM_DEV_RENAME:
3458 case DM_DEV_SUSPEND:
3459 case DM_DEV_STATUS:
3460 case DM_TABLE_LOAD:
3461 case DM_TABLE_CLEAR:
3462 case DM_TARGET_MSG:
3463 case DM_DEV_SET_GEOMETRY:
3464 /* no return data */
3465 break;
3466 case DM_LIST_DEVICES:
3467 {
3468 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3469 uint32_t remaining_data = guest_data_size;
3470 void *cur_data = argptr;
3471 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3472 int nl_size = 12; /* can't use thunk_size due to alignment */
3473
3474 while (1) {
3475 uint32_t next = nl->next;
3476 if (next) {
3477 nl->next = nl_size + (strlen(nl->name) + 1);
3478 }
3479 if (remaining_data < nl->next) {
3480 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3481 break;
3482 }
3483 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3484 strcpy(cur_data + nl_size, nl->name);
3485 cur_data += nl->next;
3486 remaining_data -= nl->next;
3487 if (!next) {
3488 break;
3489 }
3490 nl = (void*)nl + next;
3491 }
3492 break;
3493 }
3494 case DM_DEV_WAIT:
3495 case DM_TABLE_STATUS:
3496 {
3497 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3498 void *cur_data = argptr;
3499 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3500 int spec_size = thunk_type_size(arg_type, 0);
3501 int i;
3502
3503 for (i = 0; i < host_dm->target_count; i++) {
3504 uint32_t next = spec->next;
3505 int slen = strlen((char*)&spec[1]) + 1;
3506 spec->next = (cur_data - argptr) + spec_size + slen;
3507 if (guest_data_size < spec->next) {
3508 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3509 break;
3510 }
3511 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3512 strcpy(cur_data + spec_size, (char*)&spec[1]);
3513 cur_data = argptr + spec->next;
3514 spec = (void*)host_dm + host_dm->data_start + next;
3515 }
3516 break;
3517 }
3518 case DM_TABLE_DEPS:
3519 {
3520 void *hdata = (void*)host_dm + host_dm->data_start;
3521 int count = *(uint32_t*)hdata;
3522 uint64_t *hdev = hdata + 8;
3523 uint64_t *gdev = argptr + 8;
3524 int i;
3525
3526 *(uint32_t*)argptr = tswap32(count);
3527 for (i = 0; i < count; i++) {
3528 *gdev = tswap64(*hdev);
3529 gdev++;
3530 hdev++;
3531 }
3532 break;
3533 }
3534 case DM_LIST_VERSIONS:
3535 {
3536 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3537 uint32_t remaining_data = guest_data_size;
3538 void *cur_data = argptr;
3539 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3540 int vers_size = thunk_type_size(arg_type, 0);
3541
3542 while (1) {
3543 uint32_t next = vers->next;
3544 if (next) {
3545 vers->next = vers_size + (strlen(vers->name) + 1);
3546 }
3547 if (remaining_data < vers->next) {
3548 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3549 break;
3550 }
3551 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3552 strcpy(cur_data + vers_size, vers->name);
3553 cur_data += vers->next;
3554 remaining_data -= vers->next;
3555 if (!next) {
3556 break;
3557 }
3558 vers = (void*)vers + next;
3559 }
3560 break;
3561 }
3562 default:
3563 ret = -TARGET_EINVAL;
3564 goto out;
3565 }
3566 unlock_user(argptr, guest_data, guest_data_size);
3567
3568 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3569 if (!argptr) {
3570 ret = -TARGET_EFAULT;
3571 goto out;
3572 }
3573 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3574 unlock_user(argptr, arg, target_size);
3575 }
3576 out:
3577 if (big_buf) {
3578 free(big_buf);
3579 }
3580 return ret;
3581 }
3582
3583 static IOCTLEntry ioctl_entries[] = {
3584 #define IOCTL(cmd, access, ...) \
3585 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3586 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3587 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3588 #include "ioctls.h"
3589 { 0, 0, },
3590 };
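
/*
 * Illustration only: one line of "ioctls.h" run through the IOCTL() macro
 * above.  FIOCLEX is used here as a hypothetical example entry; the real
 * list lives in ioctls.h.
 *
 *     IOCTL(FIOCLEX, 0, TYPE_NULL)
 *
 * expands to roughly
 *
 *     { TARGET_FIOCLEX, FIOCLEX, "FIOCLEX", 0, 0, { TYPE_NULL } },
 *
 * i.e. the target command number to match on, the host command to issue,
 * a printable name for logging, the access mode, no special do_ioctl
 * handler, and the thunk type description used to convert the argument.
 */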
3591
3592 /* ??? Implement proper locking for ioctls. */
3593 /* do_ioctl() Must return target values and target errnos. */
3594 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3595 {
3596 const IOCTLEntry *ie;
3597 const argtype *arg_type;
3598 abi_long ret;
3599 uint8_t buf_temp[MAX_STRUCT_SIZE];
3600 int target_size;
3601 void *argptr;
3602
3603 ie = ioctl_entries;
3604 for(;;) {
3605 if (ie->target_cmd == 0) {
3606 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3607 return -TARGET_ENOSYS;
3608 }
3609 if (ie->target_cmd == cmd)
3610 break;
3611 ie++;
3612 }
3613 arg_type = ie->arg_type;
3614 #if defined(DEBUG)
3615 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3616 #endif
3617 if (ie->do_ioctl) {
3618 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3619 }
3620
3621 switch(arg_type[0]) {
3622 case TYPE_NULL:
3623 /* no argument */
3624 ret = get_errno(ioctl(fd, ie->host_cmd));
3625 break;
3626 case TYPE_PTRVOID:
3627 case TYPE_INT:
3628 /* int argument */
3629 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3630 break;
3631 case TYPE_PTR:
3632 arg_type++;
3633 target_size = thunk_type_size(arg_type, 0);
3634 switch(ie->access) {
3635 case IOC_R:
3636 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3637 if (!is_error(ret)) {
3638 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3639 if (!argptr)
3640 return -TARGET_EFAULT;
3641 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3642 unlock_user(argptr, arg, target_size);
3643 }
3644 break;
3645 case IOC_W:
3646 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3647 if (!argptr)
3648 return -TARGET_EFAULT;
3649 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3650 unlock_user(argptr, arg, 0);
3651 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3652 break;
3653 default:
3654 case IOC_RW:
3655 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3656 if (!argptr)
3657 return -TARGET_EFAULT;
3658 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3659 unlock_user(argptr, arg, 0);
3660 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3661 if (!is_error(ret)) {
3662 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3663 if (!argptr)
3664 return -TARGET_EFAULT;
3665 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3666 unlock_user(argptr, arg, target_size);
3667 }
3668 break;
3669 }
3670 break;
3671 default:
3672 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3673 (long)cmd, arg_type[0]);
3674 ret = -TARGET_ENOSYS;
3675 break;
3676 }
3677 return ret;
3678 }
3679
3680 static const bitmask_transtbl iflag_tbl[] = {
3681 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3682 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3683 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3684 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3685 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3686 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3687 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3688 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3689 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3690 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3691 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3692 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3693 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3694 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3695 { 0, 0, 0, 0 }
3696 };
3697
3698 static const bitmask_transtbl oflag_tbl[] = {
3699 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3700 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3701 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3702 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3703 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3704 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3705 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3706 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3707 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3708 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3709 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3710 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3711 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3712 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3713 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3714 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3715 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3716 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3717 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3718 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3719 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3720 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3721 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3722 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3723 { 0, 0, 0, 0 }
3724 };
3725
3726 static const bitmask_transtbl cflag_tbl[] = {
3727 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3728 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3729 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3730 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3731 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3732 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3733 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3734 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3735 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3736 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3737 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3738 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3739 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3740 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3741 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3742 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3743 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3744 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3745 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3746 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3747 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3748 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3749 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3750 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3751 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3752 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3753 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3754 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3755 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3756 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3757 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3758 { 0, 0, 0, 0 }
3759 };
3760
3761 static const bitmask_transtbl lflag_tbl[] = {
3762 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3763 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3764 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3765 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3766 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3767 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3768 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3769 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3770 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3771 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3772 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3773 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3774 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3775 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3776 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3777 { 0, 0, 0, 0 }
3778 };
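
/*
 * Illustration only: a model of how a bitmask_transtbl such as the termios
 * tables above is typically walked; for every row whose target mask/bits
 * match, the corresponding host bits are OR-ed in, and the all-zero row
 * terminates the table.  This mirrors what target_to_host_bitmask() does
 * elsewhere in the file, but the helper below is a sketch, not the actual
 * QEMU routine.
 */
struct sketch_bitmask_row {
    unsigned int t_mask, t_bits;     /* matched against the target value */
    unsigned int h_mask, h_bits;     /* contribution to the host value */
};

static unsigned int sketch_target_to_host_bitmask(unsigned int target,
                                        const struct sketch_bitmask_row *tbl)
{
    unsigned int host = 0;

    for (; tbl->t_mask != 0; tbl++) {
        if ((target & tbl->t_mask) == tbl->t_bits) {
            host |= tbl->h_bits;
        }
    }
    return host;
}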
3779
3780 static void target_to_host_termios (void *dst, const void *src)
3781 {
3782 struct host_termios *host = dst;
3783 const struct target_termios *target = src;
3784
3785 host->c_iflag =
3786 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3787 host->c_oflag =
3788 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3789 host->c_cflag =
3790 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3791 host->c_lflag =
3792 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3793 host->c_line = target->c_line;
3794
3795 memset(host->c_cc, 0, sizeof(host->c_cc));
3796 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3797 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3798 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3799 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3800 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3801 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3802 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3803 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3804 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3805 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3806 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3807 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3808 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3809 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3810 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3811 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3812 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3813 }
3814
3815 static void host_to_target_termios (void *dst, const void *src)
3816 {
3817 struct target_termios *target = dst;
3818 const struct host_termios *host = src;
3819
3820 target->c_iflag =
3821 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3822 target->c_oflag =
3823 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3824 target->c_cflag =
3825 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3826 target->c_lflag =
3827 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3828 target->c_line = host->c_line;
3829
3830 memset(target->c_cc, 0, sizeof(target->c_cc));
3831 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3832 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3833 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3834 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3835 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3836 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3837 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3838 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3839 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3840 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3841 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3842 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3843 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3844 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3845 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3846 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3847 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3848 }
3849
3850 static const StructEntry struct_termios_def = {
3851 .convert = { host_to_target_termios, target_to_host_termios },
3852 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3853 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3854 };
3855
3856 static bitmask_transtbl mmap_flags_tbl[] = {
3857 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3858 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3859 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3860 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3861 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3862 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3863 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3864 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3865 { 0, 0, 0, 0 }
3866 };
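/*
 * The tables above drive target_to_host_bitmask() and
 * host_to_target_bitmask().  Schematically (field names paraphrased), each
 * row is { target_mask, target_bits, host_mask, host_bits } and the forward
 * direction is roughly:
 *
 *     unsigned int host_flags = 0;
 *     for (row = table; row->target_mask; row++) {
 *         if ((target_flags & row->target_mask) == row->target_bits)
 *             host_flags |= row->host_bits;
 *     }
 *
 * so, for example, a guest c_cflag of TARGET_CS8|TARGET_CREAD comes out as
 * the host's CS8|CREAD even when the numeric encodings differ.
 */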
3867
3868 #if defined(TARGET_I386)
3869
3870 /* NOTE: there is really only one LDT, shared by all threads */
3871 static uint8_t *ldt_table;
3872
3873 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3874 {
3875 int size;
3876 void *p;
3877
3878 if (!ldt_table)
3879 return 0;
3880 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3881 if (size > bytecount)
3882 size = bytecount;
3883 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3884 if (!p)
3885 return -TARGET_EFAULT;
3886 /* ??? Should this be byteswapped? */
3887 memcpy(p, ldt_table, size);
3888 unlock_user(p, ptr, size);
3889 return size;
3890 }
3891
3892 /* XXX: add locking support */
3893 static abi_long write_ldt(CPUX86State *env,
3894 abi_ulong ptr, unsigned long bytecount, int oldmode)
3895 {
3896 struct target_modify_ldt_ldt_s ldt_info;
3897 struct target_modify_ldt_ldt_s *target_ldt_info;
3898 int seg_32bit, contents, read_exec_only, limit_in_pages;
3899 int seg_not_present, useable, lm;
3900 uint32_t *lp, entry_1, entry_2;
3901
3902 if (bytecount != sizeof(ldt_info))
3903 return -TARGET_EINVAL;
3904 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3905 return -TARGET_EFAULT;
3906 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3907 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3908 ldt_info.limit = tswap32(target_ldt_info->limit);
3909 ldt_info.flags = tswap32(target_ldt_info->flags);
3910 unlock_user_struct(target_ldt_info, ptr, 0);
3911
3912 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3913 return -TARGET_EINVAL;
3914 seg_32bit = ldt_info.flags & 1;
3915 contents = (ldt_info.flags >> 1) & 3;
3916 read_exec_only = (ldt_info.flags >> 3) & 1;
3917 limit_in_pages = (ldt_info.flags >> 4) & 1;
3918 seg_not_present = (ldt_info.flags >> 5) & 1;
3919 useable = (ldt_info.flags >> 6) & 1;
3920 #ifdef TARGET_ABI32
3921 lm = 0;
3922 #else
3923 lm = (ldt_info.flags >> 7) & 1;
3924 #endif
3925 if (contents == 3) {
3926 if (oldmode)
3927 return -TARGET_EINVAL;
3928 if (seg_not_present == 0)
3929 return -TARGET_EINVAL;
3930 }
3931 /* allocate the LDT */
3932 if (!ldt_table) {
3933 env->ldt.base = target_mmap(0,
3934 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3935 PROT_READ|PROT_WRITE,
3936 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3937 if (env->ldt.base == -1)
3938 return -TARGET_ENOMEM;
3939 memset(g2h(env->ldt.base), 0,
3940 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3941 env->ldt.limit = 0xffff;
3942 ldt_table = g2h(env->ldt.base);
3943 }
3944
3945 /* NOTE: same code as Linux kernel */
3946 /* Allow LDTs to be cleared by the user. */
3947 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3948 if (oldmode ||
3949 (contents == 0 &&
3950 read_exec_only == 1 &&
3951 seg_32bit == 0 &&
3952 limit_in_pages == 0 &&
3953 seg_not_present == 1 &&
3954 useable == 0 )) {
3955 entry_1 = 0;
3956 entry_2 = 0;
3957 goto install;
3958 }
3959 }
3960
3961 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3962 (ldt_info.limit & 0x0ffff);
3963 entry_2 = (ldt_info.base_addr & 0xff000000) |
3964 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3965 (ldt_info.limit & 0xf0000) |
3966 ((read_exec_only ^ 1) << 9) |
3967 (contents << 10) |
3968 ((seg_not_present ^ 1) << 15) |
3969 (seg_32bit << 22) |
3970 (limit_in_pages << 23) |
3971 (lm << 21) |
3972 0x7000;
3973 if (!oldmode)
3974 entry_2 |= (useable << 20);
3975
3976 /* Install the new entry ... */
3977 install:
3978 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3979 lp[0] = tswap32(entry_1);
3980 lp[1] = tswap32(entry_2);
3981 return 0;
3982 }
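/*
 * For reference, the shifts above assemble a standard x86 segment
 * descriptor (read off the code, not re-derived elsewhere):
 *
 *   entry_1: bits 31..16 = base[15:0],  bits 15..0 = limit[15:0]
 *   entry_2: bits 31..24 = base[31:24], bit 23 = G (limit_in_pages),
 *            bit 22 = D/B (seg_32bit),  bit 21 = L (lm),
 *            bit 20 = AVL (useable),    bits 19..16 = limit[19:16],
 *            bit 15 = P (!seg_not_present),
 *            bits 14..13 = DPL = 3 and bit 12 = S = 1 (the constant 0x7000),
 *            bits 11..10 = contents (type), bit 9 = W/R (!read_exec_only),
 *            bits  7..0  = base[23:16]
 */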
3983
3984 /* specific and weird i386 syscalls */
3985 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3986 unsigned long bytecount)
3987 {
3988 abi_long ret;
3989
3990 switch (func) {
3991 case 0:
3992 ret = read_ldt(ptr, bytecount);
3993 break;
3994 case 1:
3995 ret = write_ldt(env, ptr, bytecount, 1);
3996 break;
3997 case 0x11:
3998 ret = write_ldt(env, ptr, bytecount, 0);
3999 break;
4000 default:
4001 ret = -TARGET_ENOSYS;
4002 break;
4003 }
4004 return ret;
4005 }
4006
4007 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4008 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4009 {
4010 uint64_t *gdt_table = g2h(env->gdt.base);
4011 struct target_modify_ldt_ldt_s ldt_info;
4012 struct target_modify_ldt_ldt_s *target_ldt_info;
4013 int seg_32bit, contents, read_exec_only, limit_in_pages;
4014 int seg_not_present, useable, lm;
4015 uint32_t *lp, entry_1, entry_2;
4016 int i;
4017
4018 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4019 if (!target_ldt_info)
4020 return -TARGET_EFAULT;
4021 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4022 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4023 ldt_info.limit = tswap32(target_ldt_info->limit);
4024 ldt_info.flags = tswap32(target_ldt_info->flags);
4025 if (ldt_info.entry_number == -1) {
4026 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4027 if (gdt_table[i] == 0) {
4028 ldt_info.entry_number = i;
4029 target_ldt_info->entry_number = tswap32(i);
4030 break;
4031 }
4032 }
4033 }
4034 unlock_user_struct(target_ldt_info, ptr, 1);
4035
4036 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4037 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4038 return -TARGET_EINVAL;
4039 seg_32bit = ldt_info.flags & 1;
4040 contents = (ldt_info.flags >> 1) & 3;
4041 read_exec_only = (ldt_info.flags >> 3) & 1;
4042 limit_in_pages = (ldt_info.flags >> 4) & 1;
4043 seg_not_present = (ldt_info.flags >> 5) & 1;
4044 useable = (ldt_info.flags >> 6) & 1;
4045 #ifdef TARGET_ABI32
4046 lm = 0;
4047 #else
4048 lm = (ldt_info.flags >> 7) & 1;
4049 #endif
4050
4051 if (contents == 3) {
4052 if (seg_not_present == 0)
4053 return -TARGET_EINVAL;
4054 }
4055
4056 /* NOTE: same code as Linux kernel */
4057 /* Allow LDTs to be cleared by the user. */
4058 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4059 if ((contents == 0 &&
4060 read_exec_only == 1 &&
4061 seg_32bit == 0 &&
4062 limit_in_pages == 0 &&
4063 seg_not_present == 1 &&
4064 useable == 0 )) {
4065 entry_1 = 0;
4066 entry_2 = 0;
4067 goto install;
4068 }
4069 }
4070
4071 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4072 (ldt_info.limit & 0x0ffff);
4073 entry_2 = (ldt_info.base_addr & 0xff000000) |
4074 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4075 (ldt_info.limit & 0xf0000) |
4076 ((read_exec_only ^ 1) << 9) |
4077 (contents << 10) |
4078 ((seg_not_present ^ 1) << 15) |
4079 (seg_32bit << 22) |
4080 (limit_in_pages << 23) |
4081 (useable << 20) |
4082 (lm << 21) |
4083 0x7000;
4084
4085 /* Install the new entry ... */
4086 install:
4087 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4088 lp[0] = tswap32(entry_1);
4089 lp[1] = tswap32(entry_2);
4090 return 0;
4091 }
4092
4093 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4094 {
4095 struct target_modify_ldt_ldt_s *target_ldt_info;
4096 uint64_t *gdt_table = g2h(env->gdt.base);
4097 uint32_t base_addr, limit, flags;
4098 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4099 int seg_not_present, useable, lm;
4100 uint32_t *lp, entry_1, entry_2;
4101
4102 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4103 if (!target_ldt_info)
4104 return -TARGET_EFAULT;
4105 idx = tswap32(target_ldt_info->entry_number);
4106 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4107 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4108 unlock_user_struct(target_ldt_info, ptr, 1);
4109 return -TARGET_EINVAL;
4110 }
4111 lp = (uint32_t *)(gdt_table + idx);
4112 entry_1 = tswap32(lp[0]);
4113 entry_2 = tswap32(lp[1]);
4114
4115 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4116 contents = (entry_2 >> 10) & 3;
4117 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4118 seg_32bit = (entry_2 >> 22) & 1;
4119 limit_in_pages = (entry_2 >> 23) & 1;
4120 useable = (entry_2 >> 20) & 1;
4121 #ifdef TARGET_ABI32
4122 lm = 0;
4123 #else
4124 lm = (entry_2 >> 21) & 1;
4125 #endif
4126 flags = (seg_32bit << 0) | (contents << 1) |
4127 (read_exec_only << 3) | (limit_in_pages << 4) |
4128 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4129 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4130 base_addr = (entry_1 >> 16) |
4131 (entry_2 & 0xff000000) |
4132 ((entry_2 & 0xff) << 16);
4133 target_ldt_info->base_addr = tswapal(base_addr);
4134 target_ldt_info->limit = tswap32(limit);
4135 target_ldt_info->flags = tswap32(flags);
4136 unlock_user_struct(target_ldt_info, ptr, 1);
4137 return 0;
4138 }
4139 #endif /* TARGET_I386 && TARGET_ABI32 */
4140
4141 #ifndef TARGET_ABI32
4142 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4143 {
4144 abi_long ret = 0;
4145 abi_ulong val;
4146 int idx;
4147
4148 switch(code) {
4149 case TARGET_ARCH_SET_GS:
4150 case TARGET_ARCH_SET_FS:
4151 if (code == TARGET_ARCH_SET_GS)
4152 idx = R_GS;
4153 else
4154 idx = R_FS;
4155 cpu_x86_load_seg(env, idx, 0);
4156 env->segs[idx].base = addr;
4157 break;
4158 case TARGET_ARCH_GET_GS:
4159 case TARGET_ARCH_GET_FS:
4160 if (code == TARGET_ARCH_GET_GS)
4161 idx = R_GS;
4162 else
4163 idx = R_FS;
4164 val = env->segs[idx].base;
4165 if (put_user(val, addr, abi_ulong))
4166 ret = -TARGET_EFAULT;
4167 break;
4168 default:
4169 ret = -TARGET_EINVAL;
4170 break;
4171 }
4172 return ret;
4173 }
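/*
 * Sketch of how this is reached: a 64-bit guest's TLS setup typically does
 * something like
 *
 *     arch_prctl(ARCH_SET_FS, tcb);   /- guest-side call, for illustration -/
 *
 * which lands here and simply records the requested base in the emulated
 * segment state; no host arch_prctl() is issued.
 */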
4174 #endif
4175
4176 #endif /* defined(TARGET_I386) */
4177
4178 #define NEW_STACK_SIZE 0x40000
4179
4180 #if defined(CONFIG_USE_NPTL)
4181
4182 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4183 typedef struct {
4184 CPUArchState *env;
4185 pthread_mutex_t mutex;
4186 pthread_cond_t cond;
4187 pthread_t thread;
4188 uint32_t tid;
4189 abi_ulong child_tidptr;
4190 abi_ulong parent_tidptr;
4191 sigset_t sigmask;
4192 } new_thread_info;
4193
4194 static void *clone_func(void *arg)
4195 {
4196 new_thread_info *info = arg;
4197 CPUArchState *env;
4198 TaskState *ts;
4199
4200 env = info->env;
4201 thread_env = env;
4202 ts = (TaskState *)thread_env->opaque;
4203 info->tid = gettid();
4204 env->host_tid = info->tid;
4205 task_settid(ts);
4206 if (info->child_tidptr)
4207 put_user_u32(info->tid, info->child_tidptr);
4208 if (info->parent_tidptr)
4209 put_user_u32(info->tid, info->parent_tidptr);
4210 /* Enable signals. */
4211 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4212 /* Signal to the parent that we're ready. */
4213 pthread_mutex_lock(&info->mutex);
4214 pthread_cond_broadcast(&info->cond);
4215 pthread_mutex_unlock(&info->mutex);
4216 /* Wait until the parent has finished initializing the TLS state. */
4217 pthread_mutex_lock(&clone_lock);
4218 pthread_mutex_unlock(&clone_lock);
4219 cpu_loop(env);
4220 /* never exits */
4221 return NULL;
4222 }
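/*
 * Handshake with do_fork(), as implemented above: the parent holds
 * clone_lock across pthread_create() and only releases it once the child's
 * TLS/tid bookkeeping is done; the child publishes its tid, signals
 * info->cond so the parent can return the tid, then briefly takes
 * clone_lock itself so it does not enter cpu_loop() before the parent has
 * finished.
 */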
4223 #else
4224
4225 static int clone_func(void *arg)
4226 {
4227 CPUArchState *env = arg;
4228 cpu_loop(env);
4229 /* never exits */
4230 return 0;
4231 }
4232 #endif
4233
4234 /* do_fork() must return host values and target errnos (unlike most
4235 do_*() functions). */
4236 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4237 abi_ulong parent_tidptr, target_ulong newtls,
4238 abi_ulong child_tidptr)
4239 {
4240 int ret;
4241 TaskState *ts;
4242 CPUArchState *new_env;
4243 #if defined(CONFIG_USE_NPTL)
4244 unsigned int nptl_flags;
4245 sigset_t sigmask;
4246 #else
4247 uint8_t *new_stack;
4248 #endif
4249
4250 /* Emulate vfork() with fork() */
4251 if (flags & CLONE_VFORK)
4252 flags &= ~(CLONE_VFORK | CLONE_VM);
4253
4254 if (flags & CLONE_VM) {
4255 TaskState *parent_ts = (TaskState *)env->opaque;
4256 #if defined(CONFIG_USE_NPTL)
4257 new_thread_info info;
4258 pthread_attr_t attr;
4259 #endif
4260 ts = g_malloc0(sizeof(TaskState));
4261 init_task_state(ts);
4262 /* we create a new CPU instance. */
4263 new_env = cpu_copy(env);
4264 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4265 cpu_state_reset(new_env);
4266 #endif
4267 /* Init regs that differ from the parent. */
4268 cpu_clone_regs(new_env, newsp);
4269 new_env->opaque = ts;
4270 ts->bprm = parent_ts->bprm;
4271 ts->info = parent_ts->info;
4272 #if defined(CONFIG_USE_NPTL)
4273 nptl_flags = flags;
4274 flags &= ~CLONE_NPTL_FLAGS2;
4275
4276 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4277 ts->child_tidptr = child_tidptr;
4278 }
4279
4280 if (nptl_flags & CLONE_SETTLS)
4281 cpu_set_tls (new_env, newtls);
4282
4283 /* Grab a mutex so that thread setup appears atomic. */
4284 pthread_mutex_lock(&clone_lock);
4285
4286 memset(&info, 0, sizeof(info));
4287 pthread_mutex_init(&info.mutex, NULL);
4288 pthread_mutex_lock(&info.mutex);
4289 pthread_cond_init(&info.cond, NULL);
4290 info.env = new_env;
4291 if (nptl_flags & CLONE_CHILD_SETTID)
4292 info.child_tidptr = child_tidptr;
4293 if (nptl_flags & CLONE_PARENT_SETTID)
4294 info.parent_tidptr = parent_tidptr;
4295
4296 ret = pthread_attr_init(&attr);
4297 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4298 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4299 /* It is not safe to deliver signals until the child has finished
4300 initializing, so temporarily block all signals. */
4301 sigfillset(&sigmask);
4302 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4303
4304 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4305 /* TODO: Free new CPU state if thread creation failed. */
4306
4307 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4308 pthread_attr_destroy(&attr);
4309 if (ret == 0) {
4310 /* Wait for the child to initialize. */
4311 pthread_cond_wait(&info.cond, &info.mutex);
4312 ret = info.tid;
4313 if (flags & CLONE_PARENT_SETTID)
4314 put_user_u32(ret, parent_tidptr);
4315 } else {
4316 ret = -1;
4317 }
4318 pthread_mutex_unlock(&info.mutex);
4319 pthread_cond_destroy(&info.cond);
4320 pthread_mutex_destroy(&info.mutex);
4321 pthread_mutex_unlock(&clone_lock);
4322 #else
4323 if (flags & CLONE_NPTL_FLAGS2)
4324 return -EINVAL;
4325 /* This is probably going to die very quickly, but do it anyway. */
4326 new_stack = g_malloc0 (NEW_STACK_SIZE);
4327 #ifdef __ia64__
4328 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4329 #else
4330 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4331 #endif
4332 #endif
4333 } else {
4334 /* if there is no CLONE_VM, we consider this to be a fork */
4335 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4336 return -EINVAL;
4337 fork_start();
4338 ret = fork();
4339 if (ret == 0) {
4340 /* Child Process. */
4341 cpu_clone_regs(env, newsp);
4342 fork_end(1);
4343 #if defined(CONFIG_USE_NPTL)
4344 /* There is a race condition here. The parent process could
4345 theoretically read the TID in the child process before the child
4346 tid is set. This would require using either ptrace
4347 (not implemented) or having *_tidptr point at a shared memory
4348 mapping. We can't repeat the spinlock hack used above because
4349 the child process gets its own copy of the lock. */
4350 if (flags & CLONE_CHILD_SETTID)
4351 put_user_u32(gettid(), child_tidptr);
4352 if (flags & CLONE_PARENT_SETTID)
4353 put_user_u32(gettid(), parent_tidptr);
4354 ts = (TaskState *)env->opaque;
4355 if (flags & CLONE_SETTLS)
4356 cpu_set_tls (env, newtls);
4357 if (flags & CLONE_CHILD_CLEARTID)
4358 ts->child_tidptr = child_tidptr;
4359 #endif
4360 } else {
4361 fork_end(0);
4362 }
4363 }
4364 return ret;
4365 }
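/*
 * Rough picture of the two paths above: with CLONE_VM the "child" is just a
 * new host thread, and the CLONE_NPTL_FLAGS2 bits (SETTLS, PARENT_SETTID,
 * CHILD_SETTID, CHILD_CLEARTID) are stripped from the host clone flags and
 * emulated here instead.  A guest pthread_create() typically arrives as
 * something like (the exact set depends on the guest libc):
 *
 *     clone(CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
 *           CLONE_SYSVSEM|CLONE_SETTLS|CLONE_PARENT_SETTID|
 *           CLONE_CHILD_CLEARTID, ...);
 *
 * Without CLONE_VM the request is treated as a plain fork(), and anything
 * beyond the exit-signal bits (CSIGNAL) and the NPTL flags is rejected.
 */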
4366
4367 /* warning: doesn't handle Linux-specific flags... */
4368 static int target_to_host_fcntl_cmd(int cmd)
4369 {
4370 switch(cmd) {
4371 case TARGET_F_DUPFD:
4372 case TARGET_F_GETFD:
4373 case TARGET_F_SETFD:
4374 case TARGET_F_GETFL:
4375 case TARGET_F_SETFL:
4376 return cmd;
4377 case TARGET_F_GETLK:
4378 return F_GETLK;
4379 case TARGET_F_SETLK:
4380 return F_SETLK;
4381 case TARGET_F_SETLKW:
4382 return F_SETLKW;
4383 case TARGET_F_GETOWN:
4384 return F_GETOWN;
4385 case TARGET_F_SETOWN:
4386 return F_SETOWN;
4387 case TARGET_F_GETSIG:
4388 return F_GETSIG;
4389 case TARGET_F_SETSIG:
4390 return F_SETSIG;
4391 #if TARGET_ABI_BITS == 32
4392 case TARGET_F_GETLK64:
4393 return F_GETLK64;
4394 case TARGET_F_SETLK64:
4395 return F_SETLK64;
4396 case TARGET_F_SETLKW64:
4397 return F_SETLKW64;
4398 #endif
4399 case TARGET_F_SETLEASE:
4400 return F_SETLEASE;
4401 case TARGET_F_GETLEASE:
4402 return F_GETLEASE;
4403 #ifdef F_DUPFD_CLOEXEC
4404 case TARGET_F_DUPFD_CLOEXEC:
4405 return F_DUPFD_CLOEXEC;
4406 #endif
4407 case TARGET_F_NOTIFY:
4408 return F_NOTIFY;
4409 default:
4410 return -TARGET_EINVAL;
4411 }
4412 return -TARGET_EINVAL;
4413 }
4414
4415 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4416 {
4417 struct flock fl;
4418 struct target_flock *target_fl;
4419 struct flock64 fl64;
4420 struct target_flock64 *target_fl64;
4421 abi_long ret;
4422 int host_cmd = target_to_host_fcntl_cmd(cmd);
4423
4424 if (host_cmd == -TARGET_EINVAL)
4425 return host_cmd;
4426
4427 switch(cmd) {
4428 case TARGET_F_GETLK:
4429 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4430 return -TARGET_EFAULT;
4431 fl.l_type = tswap16(target_fl->l_type);
4432 fl.l_whence = tswap16(target_fl->l_whence);
4433 fl.l_start = tswapal(target_fl->l_start);
4434 fl.l_len = tswapal(target_fl->l_len);
4435 fl.l_pid = tswap32(target_fl->l_pid);
4436 unlock_user_struct(target_fl, arg, 0);
4437 ret = get_errno(fcntl(fd, host_cmd, &fl));
4438 if (ret == 0) {
4439 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4440 return -TARGET_EFAULT;
4441 target_fl->l_type = tswap16(fl.l_type);
4442 target_fl->l_whence = tswap16(fl.l_whence);
4443 target_fl->l_start = tswapal(fl.l_start);
4444 target_fl->l_len = tswapal(fl.l_len);
4445 target_fl->l_pid = tswap32(fl.l_pid);
4446 unlock_user_struct(target_fl, arg, 1);
4447 }
4448 break;
4449
4450 case TARGET_F_SETLK:
4451 case TARGET_F_SETLKW:
4452 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4453 return -TARGET_EFAULT;
4454 fl.l_type = tswap16(target_fl->l_type);
4455 fl.l_whence = tswap16(target_fl->l_whence);
4456 fl.l_start = tswapal(target_fl->l_start);
4457 fl.l_len = tswapal(target_fl->l_len);
4458 fl.l_pid = tswap32(target_fl->l_pid);
4459 unlock_user_struct(target_fl, arg, 0);
4460 ret = get_errno(fcntl(fd, host_cmd, &fl));
4461 break;
4462
4463 case TARGET_F_GETLK64:
4464 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4465 return -TARGET_EFAULT;
4466 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4467 fl64.l_whence = tswap16(target_fl64->l_whence);
4468 fl64.l_start = tswap64(target_fl64->l_start);
4469 fl64.l_len = tswap64(target_fl64->l_len);
4470 fl64.l_pid = tswap32(target_fl64->l_pid);
4471 unlock_user_struct(target_fl64, arg, 0);
4472 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4473 if (ret == 0) {
4474 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4475 return -TARGET_EFAULT;
4476 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4477 target_fl64->l_whence = tswap16(fl64.l_whence);
4478 target_fl64->l_start = tswap64(fl64.l_start);
4479 target_fl64->l_len = tswap64(fl64.l_len);
4480 target_fl64->l_pid = tswap32(fl64.l_pid);
4481 unlock_user_struct(target_fl64, arg, 1);
4482 }
4483 break;
4484 case TARGET_F_SETLK64:
4485 case TARGET_F_SETLKW64:
4486 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4487 return -TARGET_EFAULT;
4488 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4489 fl64.l_whence = tswap16(target_fl64->l_whence);
4490 fl64.l_start = tswap64(target_fl64->l_start);
4491 fl64.l_len = tswap64(target_fl64->l_len);
4492 fl64.l_pid = tswap32(target_fl64->l_pid);
4493 unlock_user_struct(target_fl64, arg, 0);
4494 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4495 break;
4496
4497 case TARGET_F_GETFL:
4498 ret = get_errno(fcntl(fd, host_cmd, arg));
4499 if (ret >= 0) {
4500 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4501 }
4502 break;
4503
4504 case TARGET_F_SETFL:
4505 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4506 break;
4507
4508 case TARGET_F_SETOWN:
4509 case TARGET_F_GETOWN:
4510 case TARGET_F_SETSIG:
4511 case TARGET_F_GETSIG:
4512 case TARGET_F_SETLEASE:
4513 case TARGET_F_GETLEASE:
4514 ret = get_errno(fcntl(fd, host_cmd, arg));
4515 break;
4516
4517 default:
4518 ret = get_errno(fcntl(fd, cmd, arg));
4519 break;
4520 }
4521 return ret;
4522 }
4523
4524 #ifdef USE_UID16
4525
4526 static inline int high2lowuid(int uid)
4527 {
4528 if (uid > 65535)
4529 return 65534;
4530 else
4531 return uid;
4532 }
4533
4534 static inline int high2lowgid(int gid)
4535 {
4536 if (gid > 65535)
4537 return 65534;
4538 else
4539 return gid;
4540 }
4541
4542 static inline int low2highuid(int uid)
4543 {
4544 if ((int16_t)uid == -1)
4545 return -1;
4546 else
4547 return uid;
4548 }
4549
4550 static inline int low2highgid(int gid)
4551 {
4552 if ((int16_t)gid == -1)
4553 return -1;
4554 else
4555 return gid;
4556 }
4557 static inline int tswapid(int id)
4558 {
4559 return tswap16(id);
4560 }
4561 #else /* !USE_UID16 */
4562 static inline int high2lowuid(int uid)
4563 {
4564 return uid;
4565 }
4566 static inline int high2lowgid(int gid)
4567 {
4568 return gid;
4569 }
4570 static inline int low2highuid(int uid)
4571 {
4572 return uid;
4573 }
4574 static inline int low2highgid(int gid)
4575 {
4576 return gid;
4577 }
4578 static inline int tswapid(int id)
4579 {
4580 return tswap32(id);
4581 }
4582 #endif /* USE_UID16 */
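/*
 * Worked example of the 16-bit uid mapping above: a host uid of 70000 does
 * not fit in 16 bits and is reported to the guest as 65534 (the kernel's
 * "overflowuid" convention), while a guest-supplied 0xffff is widened back
 * to -1 so that a 16-bit -1 passed to e.g. setreuid() still means "leave
 * this id unchanged".
 */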
4583
4584 void syscall_init(void)
4585 {
4586 IOCTLEntry *ie;
4587 const argtype *arg_type;
4588 int size;
4589 int i;
4590
4591 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4592 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4593 #include "syscall_types.h"
4594 #undef STRUCT
4595 #undef STRUCT_SPECIAL
4596
4597 /* We patch the ioctl size if necessary. We rely on the fact that
4598 no ioctl has all bits set to '1' in its size field. */
4599 ie = ioctl_entries;
4600 while (ie->target_cmd != 0) {
4601 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4602 TARGET_IOC_SIZEMASK) {
4603 arg_type = ie->arg_type;
4604 if (arg_type[0] != TYPE_PTR) {
4605 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4606 ie->target_cmd);
4607 exit(1);
4608 }
4609 arg_type++;
4610 size = thunk_type_size(arg_type, 0);
4611 ie->target_cmd = (ie->target_cmd &
4612 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4613 (size << TARGET_IOC_SIZESHIFT);
4614 }
4615
4616 /* Build target_to_host_errno_table[] from
4617 * host_to_target_errno_table[]. */
4618 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4619 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4620
4621 /* automatic consistency check if same arch */
4622 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4623 (defined(__x86_64__) && defined(TARGET_X86_64))
4624 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4625 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4626 ie->name, ie->target_cmd, ie->host_cmd);
4627 }
4628 #endif
4629 ie++;
4630 }
4631 }
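/*
 * Example of the size patching above: an ioctl entry whose target command
 * was declared with an all-ones size field and whose argument is, say, a
 * 24-byte thunked structure ends up as
 *
 *     ie->target_cmd = (ie->target_cmd &
 *                       ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT))
 *                      | (24 << TARGET_IOC_SIZESHIFT);
 *
 * i.e. the size bits of the encoded command now match what the target's
 * _IOR()/_IOW() macros would have produced for that structure.
 */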
4632
4633 #if TARGET_ABI_BITS == 32
4634 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4635 {
4636 #ifdef TARGET_WORDS_BIGENDIAN
4637 return ((uint64_t)word0 << 32) | word1;
4638 #else
4639 return ((uint64_t)word1 << 32) | word0;
4640 #endif
4641 }
4642 #else /* TARGET_ABI_BITS == 32 */
4643 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4644 {
4645 return word0;
4646 }
4647 #endif /* TARGET_ABI_BITS != 32 */
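/*
 * Example: a 32-bit little-endian guest calling ftruncate64(fd, 1ULL << 32)
 * passes the offset as two registers, word0 = 0x00000000 (low half) and
 * word1 = 0x00000001 (high half); target_offset64() reassembles
 * 0x100000000.  regpairs_aligned() accounts for ABIs (e.g. 32-bit ARM EABI)
 * that pass 64-bit arguments in even/odd register pairs, which shifts the
 * halves one argument slot to the right.
 */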
4648
4649 #ifdef TARGET_NR_truncate64
4650 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4651 abi_long arg2,
4652 abi_long arg3,
4653 abi_long arg4)
4654 {
4655 if (regpairs_aligned(cpu_env)) {
4656 arg2 = arg3;
4657 arg3 = arg4;
4658 }
4659 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4660 }
4661 #endif
4662
4663 #ifdef TARGET_NR_ftruncate64
4664 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4665 abi_long arg2,
4666 abi_long arg3,
4667 abi_long arg4)
4668 {
4669 if (regpairs_aligned(cpu_env)) {
4670 arg2 = arg3;
4671 arg3 = arg4;
4672 }
4673 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4674 }
4675 #endif
4676
4677 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4678 abi_ulong target_addr)
4679 {
4680 struct target_timespec *target_ts;
4681
4682 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4683 return -TARGET_EFAULT;
4684 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4685 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4686 unlock_user_struct(target_ts, target_addr, 0);
4687 return 0;
4688 }
4689
4690 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4691 struct timespec *host_ts)
4692 {
4693 struct target_timespec *target_ts;
4694
4695 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4696 return -TARGET_EFAULT;
4697 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4698 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4699 unlock_user_struct(target_ts, target_addr, 1);
4700 return 0;
4701 }
4702
4703 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4704 static inline abi_long host_to_target_stat64(void *cpu_env,
4705 abi_ulong target_addr,
4706 struct stat *host_st)
4707 {
4708 #ifdef TARGET_ARM
4709 if (((CPUARMState *)cpu_env)->eabi) {
4710 struct target_eabi_stat64 *target_st;
4711
4712 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4713 return -TARGET_EFAULT;
4714 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4715 __put_user(host_st->st_dev, &target_st->st_dev);
4716 __put_user(host_st->st_ino, &target_st->st_ino);
4717 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4718 __put_user(host_st->st_ino, &target_st->__st_ino);
4719 #endif
4720 __put_user(host_st->st_mode, &target_st->st_mode);
4721 __put_user(host_st->st_nlink, &target_st->st_nlink);
4722 __put_user(host_st->st_uid, &target_st->st_uid);
4723 __put_user(host_st->st_gid, &target_st->st_gid);
4724 __put_user(host_st->st_rdev, &target_st->st_rdev);
4725 __put_user(host_st->st_size, &target_st->st_size);
4726 __put_user(host_st->st_blksize, &target_st->st_blksize);
4727 __put_user(host_st->st_blocks, &target_st->st_blocks);
4728 __put_user(host_st->st_atime, &target_st->target_st_atime);
4729 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4730 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4731 unlock_user_struct(target_st, target_addr, 1);
4732 } else
4733 #endif
4734 {
4735 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4736 struct target_stat *target_st;
4737 #else
4738 struct target_stat64 *target_st;
4739 #endif
4740
4741 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4742 return -TARGET_EFAULT;
4743 memset(target_st, 0, sizeof(*target_st));
4744 __put_user(host_st->st_dev, &target_st->st_dev);
4745 __put_user(host_st->st_ino, &target_st->st_ino);
4746 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4747 __put_user(host_st->st_ino, &target_st->__st_ino);
4748 #endif
4749 __put_user(host_st->st_mode, &target_st->st_mode);
4750 __put_user(host_st->st_nlink, &target_st->st_nlink);
4751 __put_user(host_st->st_uid, &target_st->st_uid);
4752 __put_user(host_st->st_gid, &target_st->st_gid);
4753 __put_user(host_st->st_rdev, &target_st->st_rdev);
4754 /* XXX: better use of kernel struct */
4755 __put_user(host_st->st_size, &target_st->st_size);
4756 __put_user(host_st->st_blksize, &target_st->st_blksize);
4757 __put_user(host_st->st_blocks, &target_st->st_blocks);
4758 __put_user(host_st->st_atime, &target_st->target_st_atime);
4759 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4760 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4761 unlock_user_struct(target_st, target_addr, 1);
4762 }
4763
4764 return 0;
4765 }
4766 #endif
4767
4768 #if defined(CONFIG_USE_NPTL)
4769 /* ??? Using host futex calls even when target atomic operations
4770 are not really atomic probably breaks things. However, implementing
4771 futexes locally would make futexes shared between multiple processes
4772 tricky. Then again, they're probably useless anyway because guest
4773 atomic operations won't work either. */
4774 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4775 target_ulong uaddr2, int val3)
4776 {
4777 struct timespec ts, *pts;
4778 int base_op;
4779
4780 /* ??? We assume FUTEX_* constants are the same on both host
4781 and target. */
4782 #ifdef FUTEX_CMD_MASK
4783 base_op = op & FUTEX_CMD_MASK;
4784 #else
4785 base_op = op;
4786 #endif
4787 switch (base_op) {
4788 case FUTEX_WAIT:
4789 if (timeout) {
4790 pts = &ts;
4791 target_to_host_timespec(pts, timeout);
4792 } else {
4793 pts = NULL;
4794 }
4795 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4796 pts, NULL, 0));
4797 case FUTEX_WAKE:
4798 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4799 case FUTEX_FD:
4800 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4801 case FUTEX_REQUEUE:
4802 case FUTEX_CMP_REQUEUE:
4803 case FUTEX_WAKE_OP:
4804 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4805 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4806 But the prototype takes a `struct timespec *'; insert casts
4807 to satisfy the compiler. We do not need to tswap TIMEOUT
4808 since it's not compared to guest memory. */
4809 pts = (struct timespec *)(uintptr_t) timeout;
4810 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4811 g2h(uaddr2),
4812 (base_op == FUTEX_CMP_REQUEUE
4813 ? tswap32(val3)
4814 : val3)));
4815 default:
4816 return -TARGET_ENOSYS;
4817 }
4818 }
4819 #endif
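/*
 * Typical path into the code above: a contended guest pthread_mutex_lock()
 * ends up issuing FUTEX_WAIT on a guest address.  g2h() turns that guest
 * address into the host pointer backing the same memory, so the host futex
 * call sleeps on exactly the word the guest's atomics operate on, and val
 * is tswap32()ed so that the kernel's comparison is made against the
 * guest-byte-order value stored there.
 */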
4820
4821 /* Map host to target signal numbers for the wait family of syscalls.
4822 Assume all other status bits are the same. */
4823 static int host_to_target_waitstatus(int status)
4824 {
4825 if (WIFSIGNALED(status)) {
4826 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4827 }
4828 if (WIFSTOPPED(status)) {
4829 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4830 | (status & 0xff);
4831 }
4832 return status;
4833 }
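/*
 * The wait status layout this relies on (assumed identical on host and
 * target apart from signal numbers): "exited" is (code << 8), "killed by
 * signal" keeps the signal in the low 7 bits (plus the core-dump bit 0x80),
 * and "stopped" is (signal << 8) | 0x7f.  Only the signal number itself is
 * remapped; a child stopped by SIGSTOP keeps the ...7f shape, but the byte
 * above it is rewritten to the target's SIGSTOP value.
 */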
4834
4835 int get_osversion(void)
4836 {
4837 static int osversion;
4838 struct new_utsname buf;
4839 const char *s;
4840 int i, n, tmp;
4841 if (osversion)
4842 return osversion;
4843 if (qemu_uname_release && *qemu_uname_release) {
4844 s = qemu_uname_release;
4845 } else {
4846 if (sys_uname(&buf))
4847 return 0;
4848 s = buf.release;
4849 }
4850 tmp = 0;
4851 for (i = 0; i < 3; i++) {
4852 n = 0;
4853 while (*s >= '0' && *s <= '9') {
4854 n *= 10;
4855 n += *s - '0';
4856 s++;
4857 }
4858 tmp = (tmp << 8) + n;
4859 if (*s == '.')
4860 s++;
4861 }
4862 osversion = tmp;
4863 return osversion;
4864 }
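/*
 * Packing example: a release string of "2.6.32-anything" yields
 * (((2 << 8) + 6) << 8) + 32 == 0x020620, the same KERNEL_VERSION()-style
 * encoding that callers compare against.
 */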
4865
4866
4867 static int open_self_maps(void *cpu_env, int fd)
4868 {
4869 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4870
4871 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4872 (unsigned long long)ts->info->stack_limit,
4873 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4874 & TARGET_PAGE_MASK,
4875 (unsigned long long)ts->stack_base);
4876
4877 return 0;
4878 }
4879
4880 static int open_self_stat(void *cpu_env, int fd)
4881 {
4882 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4883 abi_ulong start_stack = ts->info->start_stack;
4884 int i;
4885
4886 for (i = 0; i < 44; i++) {
4887 char buf[128];
4888 int len;
4889 uint64_t val = 0;
4890
4891 if (i == 0) {
4892 /* pid */
4893 val = getpid();
4894 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4895 } else if (i == 1) {
4896 /* app name */
4897 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4898 } else if (i == 27) {
4899 /* stack bottom */
4900 val = start_stack;
4901 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4902 } else {
4903 /* for the rest, there is MasterCard */
4904 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
4905 }
4906
4907 len = strlen(buf);
4908 if (write(fd, buf, len) != len) {
4909 return -1;
4910 }
4911 }
4912
4913 return 0;
4914 }
4915
4916 static int open_self_auxv(void *cpu_env, int fd)
4917 {
4918 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4919 abi_ulong auxv = ts->info->saved_auxv;
4920 abi_ulong len = ts->info->auxv_len;
4921 char *ptr;
4922
4923 /*
4924 * The auxiliary vector is stored on the target process stack.
4925 * Read in the whole auxv vector and copy it to the file.
4926 */
4927 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4928 if (ptr != NULL) {
4929 while (len > 0) {
4930 ssize_t r;
4931 r = write(fd, ptr, len);
4932 if (r <= 0) {
4933 break;
4934 }
4935 len -= r;
4936 ptr += r;
4937 }
4938 lseek(fd, 0, SEEK_SET);
4939 unlock_user(ptr, auxv, len);
4940 }
4941
4942 return 0;
4943 }
4944
4945 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
4946 {
4947 struct fake_open {
4948 const char *filename;
4949 int (*fill)(void *cpu_env, int fd);
4950 };
4951 const struct fake_open *fake_open;
4952 static const struct fake_open fakes[] = {
4953 { "/proc/self/maps", open_self_maps },
4954 { "/proc/self/stat", open_self_stat },
4955 { "/proc/self/auxv", open_self_auxv },
4956 { NULL, NULL }
4957 };
4958
4959 for (fake_open = fakes; fake_open->filename; fake_open++) {
4960 if (!strncmp(pathname, fake_open->filename,
4961 strlen(fake_open->filename))) {
4962 break;
4963 }
4964 }
4965
4966 if (fake_open->filename) {
4967 const char *tmpdir;
4968 char filename[PATH_MAX];
4969 int fd, r;
4970
4971 /* create a temporary file to hold the synthesized contents */
4972 tmpdir = getenv("TMPDIR");
4973 if (!tmpdir)
4974 tmpdir = "/tmp";
4975 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
4976 fd = mkstemp(filename);
4977 if (fd < 0) {
4978 return fd;
4979 }
4980 unlink(filename);
4981
4982 if ((r = fake_open->fill(cpu_env, fd))) {
4983 close(fd);
4984 return r;
4985 }
4986 lseek(fd, 0, SEEK_SET);
4987
4988 return fd;
4989 }
4990
4991 return get_errno(open(path(pathname), flags, mode));
4992 }
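/*
 * The faked /proc files above never touch the host's real /proc/self: the
 * synthesized contents are written into an mkstemp() file that is unlinked
 * immediately, so the guest just sees an ordinary readable fd describing
 * the emulated task rather than the QEMU process itself.
 */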
4993
4994 /* do_syscall() should always have a single exit point at the end so
4995 that actions, such as logging of syscall results, can be performed.
4996 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4997 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4998 abi_long arg2, abi_long arg3, abi_long arg4,
4999 abi_long arg5, abi_long arg6, abi_long arg7,
5000 abi_long arg8)
5001 {
5002 abi_long ret;
5003 struct stat st;
5004 struct statfs stfs;
5005 void *p;
5006
5007 #ifdef DEBUG
5008 gemu_log("syscall %d", num);
5009 #endif
5010 if(do_strace)
5011 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5012
5013 switch(num) {
5014 case TARGET_NR_exit:
5015 #ifdef CONFIG_USE_NPTL
5016 /* In old applications this may be used to implement _exit(2).
5017 However, in threaded applications it is used for thread termination,
5018 and _exit_group is used for application termination.
5019 Do thread termination if we have more than one thread. */
5020 /* FIXME: This probably breaks if a signal arrives. We should probably
5021 be disabling signals. */
5022 if (first_cpu->next_cpu) {
5023 TaskState *ts;
5024 CPUArchState **lastp;
5025 CPUArchState *p;
5026
5027 cpu_list_lock();
5028 lastp = &first_cpu;
5029 p = first_cpu;
5030 while (p && p != (CPUArchState *)cpu_env) {
5031 lastp = &p->next_cpu;
5032 p = p->next_cpu;
5033 }
5034 /* If we didn't find the CPU for this thread then something is
5035 horribly wrong. */
5036 if (!p)
5037 abort();
5038 /* Remove the CPU from the list. */
5039 *lastp = p->next_cpu;
5040 cpu_list_unlock();
5041 ts = ((CPUArchState *)cpu_env)->opaque;
5042 if (ts->child_tidptr) {
5043 put_user_u32(0, ts->child_tidptr);
5044 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5045 NULL, NULL, 0);
5046 }
5047 thread_env = NULL;
5048 g_free(cpu_env);
5049 g_free(ts);
5050 pthread_exit(NULL);
5051 }
5052 #endif
5053 #ifdef TARGET_GPROF
5054 _mcleanup();
5055 #endif
5056 gdb_exit(cpu_env, arg1);
5057 _exit(arg1);
5058 ret = 0; /* avoid warning */
5059 break;
5060 case TARGET_NR_read:
5061 if (arg3 == 0)
5062 ret = 0;
5063 else {
5064 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5065 goto efault;
5066 ret = get_errno(read(arg1, p, arg3));
5067 unlock_user(p, arg2, ret);
5068 }
5069 break;
5070 case TARGET_NR_write:
5071 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5072 goto efault;
5073 ret = get_errno(write(arg1, p, arg3));
5074 unlock_user(p, arg2, 0);
5075 break;
5076 case TARGET_NR_open:
5077 if (!(p = lock_user_string(arg1)))
5078 goto efault;
5079 ret = get_errno(do_open(cpu_env, p,
5080 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5081 arg3));
5082 unlock_user(p, arg1, 0);
5083 break;
5084 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5085 case TARGET_NR_openat:
5086 if (!(p = lock_user_string(arg2)))
5087 goto efault;
5088 ret = get_errno(sys_openat(arg1,
5089 path(p),
5090 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5091 arg4));
5092 unlock_user(p, arg2, 0);
5093 break;
5094 #endif
5095 case TARGET_NR_close:
5096 ret = get_errno(close(arg1));
5097 break;
5098 case TARGET_NR_brk:
5099 ret = do_brk(arg1);
5100 break;
5101 case TARGET_NR_fork:
5102 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5103 break;
5104 #ifdef TARGET_NR_waitpid
5105 case TARGET_NR_waitpid:
5106 {
5107 int status;
5108 ret = get_errno(waitpid(arg1, &status, arg3));
5109 if (!is_error(ret) && arg2 && ret
5110 && put_user_s32(host_to_target_waitstatus(status), arg2))
5111 goto efault;
5112 }
5113 break;
5114 #endif
5115 #ifdef TARGET_NR_waitid
5116 case TARGET_NR_waitid:
5117 {
5118 siginfo_t info;
5119 info.si_pid = 0;
5120 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5121 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5122 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5123 goto efault;
5124 host_to_target_siginfo(p, &info);
5125 unlock_user(p, arg3, sizeof(target_siginfo_t));
5126 }
5127 }
5128 break;
5129 #endif
5130 #ifdef TARGET_NR_creat /* not on alpha */
5131 case TARGET_NR_creat:
5132 if (!(p = lock_user_string(arg1)))
5133 goto efault;
5134 ret = get_errno(creat(p, arg2));
5135 unlock_user(p, arg1, 0);
5136 break;
5137 #endif
5138 case TARGET_NR_link:
5139 {
5140 void * p2;
5141 p = lock_user_string(arg1);
5142 p2 = lock_user_string(arg2);
5143 if (!p || !p2)
5144 ret = -TARGET_EFAULT;
5145 else
5146 ret = get_errno(link(p, p2));
5147 unlock_user(p2, arg2, 0);
5148 unlock_user(p, arg1, 0);
5149 }
5150 break;
5151 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5152 case TARGET_NR_linkat:
5153 {
5154 void * p2 = NULL;
5155 if (!arg2 || !arg4)
5156 goto efault;
5157 p = lock_user_string(arg2);
5158 p2 = lock_user_string(arg4);
5159 if (!p || !p2)
5160 ret = -TARGET_EFAULT;
5161 else
5162 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5163 unlock_user(p, arg2, 0);
5164 unlock_user(p2, arg4, 0);
5165 }
5166 break;
5167 #endif
5168 case TARGET_NR_unlink:
5169 if (!(p = lock_user_string(arg1)))
5170 goto efault;
5171 ret = get_errno(unlink(p));
5172 unlock_user(p, arg1, 0);
5173 break;
5174 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5175 case TARGET_NR_unlinkat:
5176 if (!(p = lock_user_string(arg2)))
5177 goto efault;
5178 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5179 unlock_user(p, arg2, 0);
5180 break;
5181 #endif
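    /* For execve the guest passes arrays of guest pointers; each argv/envp
       string is locked into host memory to build host char * vectors, and
       the summed string length is checked against the target's
       MAX_ARG_PAGES limit up front, since the host kernel would only
       enforce its own (possibly larger) limit. */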
5182 case TARGET_NR_execve:
5183 {
5184 char **argp, **envp;
5185 int argc, envc;
5186 abi_ulong gp;
5187 abi_ulong guest_argp;
5188 abi_ulong guest_envp;
5189 abi_ulong addr;
5190 char **q;
5191 int total_size = 0;
5192
5193 argc = 0;
5194 guest_argp = arg2;
5195 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5196 if (get_user_ual(addr, gp))
5197 goto efault;
5198 if (!addr)
5199 break;
5200 argc++;
5201 }
5202 envc = 0;
5203 guest_envp = arg3;
5204 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5205 if (get_user_ual(addr, gp))
5206 goto efault;
5207 if (!addr)
5208 break;
5209 envc++;
5210 }
5211
5212 argp = alloca((argc + 1) * sizeof(void *));
5213 envp = alloca((envc + 1) * sizeof(void *));
5214
5215 for (gp = guest_argp, q = argp; gp;
5216 gp += sizeof(abi_ulong), q++) {
5217 if (get_user_ual(addr, gp))
5218 goto execve_efault;
5219 if (!addr)
5220 break;
5221 if (!(*q = lock_user_string(addr)))
5222 goto execve_efault;
5223 total_size += strlen(*q) + 1;
5224 }
5225 *q = NULL;
5226
5227 for (gp = guest_envp, q = envp; gp;
5228 gp += sizeof(abi_ulong), q++) {
5229 if (get_user_ual(addr, gp))
5230 goto execve_efault;
5231 if (!addr)
5232 break;
5233 if (!(*q = lock_user_string(addr)))
5234 goto execve_efault;
5235 total_size += strlen(*q) + 1;
5236 }
5237 *q = NULL;
5238
5239 /* This case will not be caught by the host's execve() if its
5240 page size is bigger than the target's. */
5241 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5242 ret = -TARGET_E2BIG;
5243 goto execve_end;
5244 }
5245 if (!(p = lock_user_string(arg1)))
5246 goto execve_efault;
5247 ret = get_errno(execve(p, argp, envp));
5248 unlock_user(p, arg1, 0);
5249
5250 goto execve_end;
5251
5252 execve_efault:
5253 ret = -TARGET_EFAULT;
5254
5255 execve_end:
5256 for (gp = guest_argp, q = argp; *q;
5257 gp += sizeof(abi_ulong), q++) {
5258 if (get_user_ual(addr, gp)
5259 || !addr)
5260 break;
5261 unlock_user(*q, addr, 0);
5262 }
5263 for (gp = guest_envp, q = envp; *q;
5264 gp += sizeof(abi_ulong), q++) {
5265 if (get_user_ual(addr, gp)
5266 || !addr)
5267 break;
5268 unlock_user(*q, addr, 0);
5269 }
5270 }
5271 break;
5272 case TARGET_NR_chdir:
5273 if (!(p = lock_user_string(arg1)))
5274 goto efault;
5275 ret = get_errno(chdir(p));
5276 unlock_user(p, arg1, 0);
5277 break;
5278 #ifdef TARGET_NR_time
5279 case TARGET_NR_time:
5280 {
5281 time_t host_time;
5282 ret = get_errno(time(&host_time));
5283 if (!is_error(ret)
5284 && arg1
5285 && put_user_sal(host_time, arg1))
5286 goto efault;
5287 }
5288 break;
5289 #endif
5290 case TARGET_NR_mknod:
5291 if (!(p = lock_user_string(arg1)))
5292 goto efault;
5293 ret = get_errno(mknod(p, arg2, arg3));
5294 unlock_user(p, arg1, 0);
5295 break;
5296 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5297 case TARGET_NR_mknodat:
5298 if (!(p = lock_user_string(arg2)))
5299 goto efault;
5300 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5301 unlock_user(p, arg2, 0);
5302 break;
5303 #endif
5304 case TARGET_NR_chmod:
5305 if (!(p = lock_user_string(arg1)))
5306 goto efault;
5307 ret = get_errno(chmod(p, arg2));
5308 unlock_user(p, arg1, 0);
5309 break;
5310 #ifdef TARGET_NR_break
5311 case TARGET_NR_break:
5312 goto unimplemented;
5313 #endif
5314 #ifdef TARGET_NR_oldstat
5315 case TARGET_NR_oldstat:
5316 goto unimplemented;
5317 #endif
5318 case TARGET_NR_lseek:
5319 ret = get_errno(lseek(arg1, arg2, arg3));
5320 break;
5321 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5322 /* Alpha specific */
5323 case TARGET_NR_getxpid:
5324 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5325 ret = get_errno(getpid());
5326 break;
5327 #endif
5328 #ifdef TARGET_NR_getpid
5329 case TARGET_NR_getpid:
5330 ret = get_errno(getpid());
5331 break;
5332 #endif
5333 case TARGET_NR_mount:
5334 {
5335 /* need to look at the data field */
5336 void *p2, *p3;
5337 p = lock_user_string(arg1);
5338 p2 = lock_user_string(arg2);
5339 p3 = lock_user_string(arg3);
5340 if (!p || !p2 || !p3)
5341 ret = -TARGET_EFAULT;
5342 else {
5343 /* FIXME - arg5 should be locked, but it isn't clear how to
5344 * do that since it's not guaranteed to be a NULL-terminated
5345 * string.
5346 */
5347 if ( ! arg5 )
5348 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5349 else
5350 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5351 }
5352 unlock_user(p, arg1, 0);
5353 unlock_user(p2, arg2, 0);
5354 unlock_user(p3, arg3, 0);
5355 break;
5356 }
5357 #ifdef TARGET_NR_umount
5358 case TARGET_NR_umount:
5359 if (!(p = lock_user_string(arg1)))
5360 goto efault;
5361 ret = get_errno(umount(p));
5362 unlock_user(p, arg1, 0);
5363 break;
5364 #endif
5365 #ifdef TARGET_NR_stime /* not on alpha */
5366 case TARGET_NR_stime:
5367 {
5368 time_t host_time;
5369 if (get_user_sal(host_time, arg1))
5370 goto efault;
5371 ret = get_errno(stime(&host_time));
5372 }
5373 break;
5374 #endif
5375 case TARGET_NR_ptrace:
5376 goto unimplemented;
5377 #ifdef TARGET_NR_alarm /* not on alpha */
5378 case TARGET_NR_alarm:
5379 ret = alarm(arg1);
5380 break;
5381 #endif
5382 #ifdef TARGET_NR_oldfstat
5383 case TARGET_NR_oldfstat:
5384 goto unimplemented;
5385 #endif
5386 #ifdef TARGET_NR_pause /* not on alpha */
5387 case TARGET_NR_pause:
5388 ret = get_errno(pause());
5389 break;
5390 #endif
5391 #ifdef TARGET_NR_utime
5392 case TARGET_NR_utime:
5393 {
5394 struct utimbuf tbuf, *host_tbuf;
5395 struct target_utimbuf *target_tbuf;
5396 if (arg2) {
5397 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5398 goto efault;
5399 tbuf.actime = tswapal(target_tbuf->actime);
5400 tbuf.modtime = tswapal(target_tbuf->modtime);
5401 unlock_user_struct(target_tbuf, arg2, 0);
5402 host_tbuf = &tbuf;
5403 } else {
5404 host_tbuf = NULL;
5405 }
5406 if (!(p = lock_user_string(arg1)))
5407 goto efault;
5408 ret = get_errno(utime(p, host_tbuf));
5409 unlock_user(p, arg1, 0);
5410 }
5411 break;
5412 #endif
5413 case TARGET_NR_utimes:
5414 {
5415 struct timeval *tvp, tv[2];
5416 if (arg2) {
5417 if (copy_from_user_timeval(&tv[0], arg2)
5418 || copy_from_user_timeval(&tv[1],
5419 arg2 + sizeof(struct target_timeval)))
5420 goto efault;
5421 tvp = tv;
5422 } else {
5423 tvp = NULL;
5424 }
5425 if (!(p = lock_user_string(arg1)))
5426 goto efault;
5427 ret = get_errno(utimes(p, tvp));
5428 unlock_user(p, arg1, 0);
5429 }
5430 break;
5431 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5432 case TARGET_NR_futimesat:
5433 {
5434 struct timeval *tvp, tv[2];
5435 if (arg3) {
5436 if (copy_from_user_timeval(&tv[0], arg3)
5437 || copy_from_user_timeval(&tv[1],
5438 arg3 + sizeof(struct target_timeval)))
5439 goto efault;
5440 tvp = tv;
5441 } else {
5442 tvp = NULL;
5443 }
5444 if (!(p = lock_user_string(arg2)))
5445 goto efault;
5446 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5447 unlock_user(p, arg2, 0);
5448 }
5449 break;
5450 #endif
5451 #ifdef TARGET_NR_stty
5452 case TARGET_NR_stty:
5453 goto unimplemented;
5454 #endif
5455 #ifdef TARGET_NR_gtty
5456 case TARGET_NR_gtty:
5457 goto unimplemented;
5458 #endif
5459 case TARGET_NR_access:
5460 if (!(p = lock_user_string(arg1)))
5461 goto efault;
5462 ret = get_errno(access(path(p), arg2));
5463 unlock_user(p, arg1, 0);
5464 break;
5465 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5466 case TARGET_NR_faccessat:
5467 if (!(p = lock_user_string(arg2)))
5468 goto efault;
5469 ret = get_errno(sys_faccessat(arg1, p, arg3));
5470 unlock_user(p, arg2, 0);
5471 break;
5472 #endif
5473 #ifdef TARGET_NR_nice /* not on alpha */
5474 case TARGET_NR_nice:
5475 ret = get_errno(nice(arg1));
5476 break;
5477 #endif
5478 #ifdef TARGET_NR_ftime
5479 case TARGET_NR_ftime:
5480 goto unimplemented;
5481 #endif
5482 case TARGET_NR_sync:
5483 sync();
5484 ret = 0;
5485 break;
5486 case TARGET_NR_kill:
5487 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5488 break;
5489 case TARGET_NR_rename:
5490 {
5491 void *p2;
5492 p = lock_user_string(arg1);
5493 p2 = lock_user_string(arg2);
5494 if (!p || !p2)
5495 ret = -TARGET_EFAULT;
5496 else
5497 ret = get_errno(rename(p, p2));
5498 unlock_user(p2, arg2, 0);
5499 unlock_user(p, arg1, 0);
5500 }
5501 break;
5502 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5503 case TARGET_NR_renameat:
5504 {
5505 void *p2;
5506 p = lock_user_string(arg2);
5507 p2 = lock_user_string(arg4);
5508 if (!p || !p2)
5509 ret = -TARGET_EFAULT;
5510 else
5511 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5512 unlock_user(p2, arg4, 0);
5513 unlock_user(p, arg2, 0);
5514 }
5515 break;
5516 #endif
5517 case TARGET_NR_mkdir:
5518 if (!(p = lock_user_string(arg1)))
5519 goto efault;
5520 ret = get_errno(mkdir(p, arg2));
5521 unlock_user(p, arg1, 0);
5522 break;
5523 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5524 case TARGET_NR_mkdirat:
5525 if (!(p = lock_user_string(arg2)))
5526 goto efault;
5527 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5528 unlock_user(p, arg2, 0);
5529 break;
5530 #endif
5531 case TARGET_NR_rmdir:
5532 if (!(p = lock_user_string(arg1)))
5533 goto efault;
5534 ret = get_errno(rmdir(p));
5535 unlock_user(p, arg1, 0);
5536 break;
5537 case TARGET_NR_dup:
5538 ret = get_errno(dup(arg1));
5539 break;
5540 case TARGET_NR_pipe:
5541 ret = do_pipe(cpu_env, arg1, 0, 0);
5542 break;
5543 #ifdef TARGET_NR_pipe2
5544 case TARGET_NR_pipe2:
5545 ret = do_pipe(cpu_env, arg1, arg2, 1);
5546 break;
5547 #endif
5548 case TARGET_NR_times:
5549 {
5550 struct target_tms *tmsp;
5551 struct tms tms;
5552 ret = get_errno(times(&tms));
5553 if (arg1) {
5554 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5555 if (!tmsp)
5556 goto efault;
5557 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5558 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5559 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5560 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5561 }
5562 if (!is_error(ret))
5563 ret = host_to_target_clock_t(ret);
5564 }
5565 break;
5566 #ifdef TARGET_NR_prof
5567 case TARGET_NR_prof:
5568 goto unimplemented;
5569 #endif
5570 #ifdef TARGET_NR_signal
5571 case TARGET_NR_signal:
5572 goto unimplemented;
5573 #endif
5574 case TARGET_NR_acct:
5575 if (arg1 == 0) {
5576 ret = get_errno(acct(NULL));
5577 } else {
5578 if (!(p = lock_user_string(arg1)))
5579 goto efault;
5580 ret = get_errno(acct(path(p)));
5581 unlock_user(p, arg1, 0);
5582 }
5583 break;
5584 #ifdef TARGET_NR_umount2 /* not on alpha */
5585 case TARGET_NR_umount2:
5586 if (!(p = lock_user_string(arg1)))
5587 goto efault;
5588 ret = get_errno(umount2(p, arg2));
5589 unlock_user(p, arg1, 0);
5590 break;
5591 #endif
5592 #ifdef TARGET_NR_lock
5593 case TARGET_NR_lock:
5594 goto unimplemented;
5595 #endif
5596 case TARGET_NR_ioctl:
5597 ret = do_ioctl(arg1, arg2, arg3);
5598 break;
5599 case TARGET_NR_fcntl:
5600 ret = do_fcntl(arg1, arg2, arg3);
5601 break;
5602 #ifdef TARGET_NR_mpx
5603 case TARGET_NR_mpx:
5604 goto unimplemented;
5605 #endif
5606 case TARGET_NR_setpgid:
5607 ret = get_errno(setpgid(arg1, arg2));
5608 break;
5609 #ifdef TARGET_NR_ulimit
5610 case TARGET_NR_ulimit:
5611 goto unimplemented;
5612 #endif
5613 #ifdef TARGET_NR_oldolduname
5614 case TARGET_NR_oldolduname:
5615 goto unimplemented;
5616 #endif
5617 case TARGET_NR_umask:
5618 ret = get_errno(umask(arg1));
5619 break;
5620 case TARGET_NR_chroot:
5621 if (!(p = lock_user_string(arg1)))
5622 goto efault;
5623 ret = get_errno(chroot(p));
5624 unlock_user(p, arg1, 0);
5625 break;
5626 case TARGET_NR_ustat:
5627 goto unimplemented;
5628 case TARGET_NR_dup2:
5629 ret = get_errno(dup2(arg1, arg2));
5630 break;
5631 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5632 case TARGET_NR_dup3:
5633 ret = get_errno(dup3(arg1, arg2, arg3));
5634 break;
5635 #endif
5636 #ifdef TARGET_NR_getppid /* not on alpha */
5637 case TARGET_NR_getppid:
5638 ret = get_errno(getppid());
5639 break;
5640 #endif
5641 case TARGET_NR_getpgrp:
5642 ret = get_errno(getpgrp());
5643 break;
5644 case TARGET_NR_setsid:
5645 ret = get_errno(setsid());
5646 break;
5647 #ifdef TARGET_NR_sigaction
5648 case TARGET_NR_sigaction:
5649 {
5650 #if defined(TARGET_ALPHA)
5651 struct target_sigaction act, oact, *pact = 0;
5652 struct target_old_sigaction *old_act;
5653 if (arg2) {
5654 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5655 goto efault;
5656 act._sa_handler = old_act->_sa_handler;
5657 target_siginitset(&act.sa_mask, old_act->sa_mask);
5658 act.sa_flags = old_act->sa_flags;
5659 act.sa_restorer = 0;
5660 unlock_user_struct(old_act, arg2, 0);
5661 pact = &act;
5662 }
5663 ret = get_errno(do_sigaction(arg1, pact, &oact));
5664 if (!is_error(ret) && arg3) {
5665 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5666 goto efault;
5667 old_act->_sa_handler = oact._sa_handler;
5668 old_act->sa_mask = oact.sa_mask.sig[0];
5669 old_act->sa_flags = oact.sa_flags;
5670 unlock_user_struct(old_act, arg3, 1);
5671 }
5672 #elif defined(TARGET_MIPS)
5673 struct target_sigaction act, oact, *pact, *old_act;
5674
5675 if (arg2) {
5676 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5677 goto efault;
5678 act._sa_handler = old_act->_sa_handler;
5679 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5680 act.sa_flags = old_act->sa_flags;
5681 unlock_user_struct(old_act, arg2, 0);
5682 pact = &act;
5683 } else {
5684 pact = NULL;
5685 }
5686
5687 ret = get_errno(do_sigaction(arg1, pact, &oact));
5688
5689 if (!is_error(ret) && arg3) {
5690 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5691 goto efault;
5692 old_act->_sa_handler = oact._sa_handler;
5693 old_act->sa_flags = oact.sa_flags;
5694 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5695 old_act->sa_mask.sig[1] = 0;
5696 old_act->sa_mask.sig[2] = 0;
5697 old_act->sa_mask.sig[3] = 0;
5698 unlock_user_struct(old_act, arg3, 1);
5699 }
5700 #else
5701 struct target_old_sigaction *old_act;
5702 struct target_sigaction act, oact, *pact;
5703 if (arg2) {
5704 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5705 goto efault;
5706 act._sa_handler = old_act->_sa_handler;
5707 target_siginitset(&act.sa_mask, old_act->sa_mask);
5708 act.sa_flags = old_act->sa_flags;
5709 act.sa_restorer = old_act->sa_restorer;
5710 unlock_user_struct(old_act, arg2, 0);
5711 pact = &act;
5712 } else {
5713 pact = NULL;
5714 }
5715 ret = get_errno(do_sigaction(arg1, pact, &oact));
5716 if (!is_error(ret) && arg3) {
5717 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5718 goto efault;
5719 old_act->_sa_handler = oact._sa_handler;
5720 old_act->sa_mask = oact.sa_mask.sig[0];
5721 old_act->sa_flags = oact.sa_flags;
5722 old_act->sa_restorer = oact.sa_restorer;
5723 unlock_user_struct(old_act, arg3, 1);
5724 }
5725 #endif
5726 }
5727 break;
5728 #endif
5729 case TARGET_NR_rt_sigaction:
5730 {
5731 #if defined(TARGET_ALPHA)
5732 struct target_sigaction act, oact, *pact = 0;
5733 struct target_rt_sigaction *rt_act;
5734 /* ??? arg4 == sizeof(sigset_t). */
5735 if (arg2) {
5736 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5737 goto efault;
5738 act._sa_handler = rt_act->_sa_handler;
5739 act.sa_mask = rt_act->sa_mask;
5740 act.sa_flags = rt_act->sa_flags;
5741 act.sa_restorer = arg5;
5742 unlock_user_struct(rt_act, arg2, 0);
5743 pact = &act;
5744 }
5745 ret = get_errno(do_sigaction(arg1, pact, &oact));
5746 if (!is_error(ret) && arg3) {
5747 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5748 goto efault;
5749 rt_act->_sa_handler = oact._sa_handler;
5750 rt_act->sa_mask = oact.sa_mask;
5751 rt_act->sa_flags = oact.sa_flags;
5752 unlock_user_struct(rt_act, arg3, 1);
5753 }
5754 #else
5755 struct target_sigaction *act;
5756 struct target_sigaction *oact;
5757
5758 if (arg2) {
5759 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5760 goto efault;
5761 } else
5762 act = NULL;
5763 if (arg3) {
5764 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5765 ret = -TARGET_EFAULT;
5766 goto rt_sigaction_fail;
5767 }
5768 } else
5769 oact = NULL;
5770 ret = get_errno(do_sigaction(arg1, act, oact));
5771 rt_sigaction_fail:
5772 if (act)
5773 unlock_user_struct(act, arg2, 0);
5774 if (oact)
5775 unlock_user_struct(oact, arg3, 1);
5776 #endif
5777 }
5778 break;
5779 #ifdef TARGET_NR_sgetmask /* not on alpha */
5780 case TARGET_NR_sgetmask:
5781 {
5782 sigset_t cur_set;
5783 abi_ulong target_set;
5784 sigprocmask(0, NULL, &cur_set);
5785 host_to_target_old_sigset(&target_set, &cur_set);
5786 ret = target_set;
5787 }
5788 break;
5789 #endif
5790 #ifdef TARGET_NR_ssetmask /* not on alpha */
5791 case TARGET_NR_ssetmask:
5792 {
5793 sigset_t set, oset, cur_set;
5794 abi_ulong target_set = arg1;
5795 sigprocmask(0, NULL, &cur_set);
5796 target_to_host_old_sigset(&set, &target_set);
5797 sigorset(&set, &set, &cur_set);
5798 sigprocmask(SIG_SETMASK, &set, &oset);
5799 host_to_target_old_sigset(&target_set, &oset);
5800 ret = target_set;
5801 }
5802 break;
5803 #endif
5804 #ifdef TARGET_NR_sigprocmask
5805 case TARGET_NR_sigprocmask:
5806 {
5807 #if defined(TARGET_ALPHA)
5808 sigset_t set, oldset;
5809 abi_ulong mask;
5810 int how;
5811
5812 switch (arg1) {
5813 case TARGET_SIG_BLOCK:
5814 how = SIG_BLOCK;
5815 break;
5816 case TARGET_SIG_UNBLOCK:
5817 how = SIG_UNBLOCK;
5818 break;
5819 case TARGET_SIG_SETMASK:
5820 how = SIG_SETMASK;
5821 break;
5822 default:
5823 ret = -TARGET_EINVAL;
5824 goto fail;
5825 }
5826 mask = arg2;
5827 target_to_host_old_sigset(&set, &mask);
5828
5829 ret = get_errno(sigprocmask(how, &set, &oldset));
5830
5831 if (!is_error(ret)) {
5832 host_to_target_old_sigset(&mask, &oldset);
5833 ret = mask;
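/* The old mask is returned as the value itself and may look like a negative
   errno; zeroing v0 here appears to tell the Alpha syscall return path to
   skip its usual error translation for this result. */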
5834 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5835 }
5836 #else
5837 sigset_t set, oldset, *set_ptr;
5838 int how;
5839
5840 if (arg2) {
5841 switch (arg1) {
5842 case TARGET_SIG_BLOCK:
5843 how = SIG_BLOCK;
5844 break;
5845 case TARGET_SIG_UNBLOCK:
5846 how = SIG_UNBLOCK;
5847 break;
5848 case TARGET_SIG_SETMASK:
5849 how = SIG_SETMASK;
5850 break;
5851 default:
5852 ret = -TARGET_EINVAL;
5853 goto fail;
5854 }
5855 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5856 goto efault;
5857 target_to_host_old_sigset(&set, p);
5858 unlock_user(p, arg2, 0);
5859 set_ptr = &set;
5860 } else {
5861 how = 0;
5862 set_ptr = NULL;
5863 }
5864 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5865 if (!is_error(ret) && arg3) {
5866 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5867 goto efault;
5868 host_to_target_old_sigset(p, &oldset);
5869 unlock_user(p, arg3, sizeof(target_sigset_t));
5870 }
5871 #endif
5872 }
5873 break;
5874 #endif
5875 case TARGET_NR_rt_sigprocmask:
5876 {
5877 int how = arg1;
5878 sigset_t set, oldset, *set_ptr;
5879
5880 if (arg2) {
5881 switch(how) {
5882 case TARGET_SIG_BLOCK:
5883 how = SIG_BLOCK;
5884 break;
5885 case TARGET_SIG_UNBLOCK:
5886 how = SIG_UNBLOCK;
5887 break;
5888 case TARGET_SIG_SETMASK:
5889 how = SIG_SETMASK;
5890 break;
5891 default:
5892 ret = -TARGET_EINVAL;
5893 goto fail;
5894 }
5895 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5896 goto efault;
5897 target_to_host_sigset(&set, p);
5898 unlock_user(p, arg2, 0);
5899 set_ptr = &set;
5900 } else {
5901 how = 0;
5902 set_ptr = NULL;
5903 }
5904 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5905 if (!is_error(ret) && arg3) {
5906 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5907 goto efault;
5908 host_to_target_sigset(p, &oldset);
5909 unlock_user(p, arg3, sizeof(target_sigset_t));
5910 }
5911 }
5912 break;
5913 #ifdef TARGET_NR_sigpending
5914 case TARGET_NR_sigpending:
5915 {
5916 sigset_t set;
5917 ret = get_errno(sigpending(&set));
5918 if (!is_error(ret)) {
5919 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5920 goto efault;
5921 host_to_target_old_sigset(p, &set);
5922 unlock_user(p, arg1, sizeof(target_sigset_t));
5923 }
5924 }
5925 break;
5926 #endif
5927 case TARGET_NR_rt_sigpending:
5928 {
5929 sigset_t set;
5930 ret = get_errno(sigpending(&set));
5931 if (!is_error(ret)) {
5932 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5933 goto efault;
5934 host_to_target_sigset(p, &set);
5935 unlock_user(p, arg1, sizeof(target_sigset_t));
5936 }
5937 }
5938 break;
5939 #ifdef TARGET_NR_sigsuspend
5940 case TARGET_NR_sigsuspend:
5941 {
5942 sigset_t set;
5943 #if defined(TARGET_ALPHA)
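/* On Alpha the old sigsuspend passes the mask by value in the first
   argument rather than through a pointer. */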
5944 abi_ulong mask = arg1;
5945 target_to_host_old_sigset(&set, &mask);
5946 #else
5947 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5948 goto efault;
5949 target_to_host_old_sigset(&set, p);
5950 unlock_user(p, arg1, 0);
5951 #endif
5952 ret = get_errno(sigsuspend(&set));
5953 }
5954 break;
5955 #endif
5956 case TARGET_NR_rt_sigsuspend:
5957 {
5958 sigset_t set;
5959 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5960 goto efault;
5961 target_to_host_sigset(&set, p);
5962 unlock_user(p, arg1, 0);
5963 ret = get_errno(sigsuspend(&set));
5964 }
5965 break;
5966 case TARGET_NR_rt_sigtimedwait:
5967 {
5968 sigset_t set;
5969 struct timespec uts, *puts;
5970 siginfo_t uinfo;
5971
5972 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5973 goto efault;
5974 target_to_host_sigset(&set, p);
5975 unlock_user(p, arg1, 0);
5976 if (arg3) {
5977 puts = &uts;
5978 target_to_host_timespec(puts, arg3);
5979 } else {
5980 puts = NULL;
5981 }
5982 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5983 if (!is_error(ret) && arg2) {
5984 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5985 goto efault;
5986 host_to_target_siginfo(p, &uinfo);
5987 unlock_user(p, arg2, sizeof(target_siginfo_t));
5988 }
5989 }
5990 break;
5991 case TARGET_NR_rt_sigqueueinfo:
5992 {
5993 siginfo_t uinfo;
5994 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
5995 goto efault;
5996 target_to_host_siginfo(&uinfo, p);
5997 unlock_user(p, arg3, 0);
5998 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5999 }
6000 break;
6001 #ifdef TARGET_NR_sigreturn
6002 case TARGET_NR_sigreturn:
6003 /* NOTE: ret is eax, so no transcoding needs to be done */
6004 ret = do_sigreturn(cpu_env);
6005 break;
6006 #endif
6007 case TARGET_NR_rt_sigreturn:
6008 /* NOTE: ret is eax, so no transcoding needs to be done */
6009 ret = do_rt_sigreturn(cpu_env);
6010 break;
6011 case TARGET_NR_sethostname:
6012 if (!(p = lock_user_string(arg1)))
6013 goto efault;
6014 ret = get_errno(sethostname(p, arg2));
6015 unlock_user(p, arg1, 0);
6016 break;
6017 case TARGET_NR_setrlimit:
6018 {
6019 int resource = target_to_host_resource(arg1);
6020 struct target_rlimit *target_rlim;
6021 struct rlimit rlim;
6022 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6023 goto efault;
6024 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6025 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6026 unlock_user_struct(target_rlim, arg2, 0);
6027 ret = get_errno(setrlimit(resource, &rlim));
6028 }
6029 break;
6030 case TARGET_NR_getrlimit:
6031 {
6032 int resource = target_to_host_resource(arg1);
6033 struct target_rlimit *target_rlim;
6034 struct rlimit rlim;
6035
6036 ret = get_errno(getrlimit(resource, &rlim));
6037 if (!is_error(ret)) {
6038 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6039 goto efault;
6040 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6041 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6042 unlock_user_struct(target_rlim, arg2, 1);
6043 }
6044 }
6045 break;
6046 case TARGET_NR_getrusage:
6047 {
6048 struct rusage rusage;
6049 ret = get_errno(getrusage(arg1, &rusage));
6050 if (!is_error(ret)) {
6051 host_to_target_rusage(arg2, &rusage);
6052 }
6053 }
6054 break;
6055 case TARGET_NR_gettimeofday:
6056 {
6057 struct timeval tv;
6058 ret = get_errno(gettimeofday(&tv, NULL));
6059 if (!is_error(ret)) {
6060 if (copy_to_user_timeval(arg1, &tv))
6061 goto efault;
6062 }
6063 }
6064 break;
6065 case TARGET_NR_settimeofday:
6066 {
6067 struct timeval tv;
6068 if (copy_from_user_timeval(&tv, arg1))
6069 goto efault;
6070 ret = get_errno(settimeofday(&tv, NULL));
6071 }
6072 break;
6073 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6074 case TARGET_NR_select:
6075 {
6076 struct target_sel_arg_struct *sel;
6077 abi_ulong inp, outp, exp, tvp;
6078 long nsel;
6079
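/* The old select variant passes a single user structure holding all five
   arguments; unpack and byte-swap each field before calling do_select(). */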
6080 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6081 goto efault;
6082 nsel = tswapal(sel->n);
6083 inp = tswapal(sel->inp);
6084 outp = tswapal(sel->outp);
6085 exp = tswapal(sel->exp);
6086 tvp = tswapal(sel->tvp);
6087 unlock_user_struct(sel, arg1, 0);
6088 ret = do_select(nsel, inp, outp, exp, tvp);
6089 }
6090 break;
6091 #endif
6092 #ifdef TARGET_NR_pselect6
6093 case TARGET_NR_pselect6:
6094 {
6095 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6096 fd_set rfds, wfds, efds;
6097 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6098 struct timespec ts, *ts_ptr;
6099
6100 /*
6101 * The 6th arg is actually two args smashed together,
6102 * so we cannot use the C library.
6103 */
6104 sigset_t set;
6105 struct {
6106 sigset_t *set;
6107 size_t size;
6108 } sig, *sig_ptr;
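/* The guest's sixth argument is a pair of abi_ulong words: the sigset
   pointer followed by its size (read below as arg7[0] and arg7[1]). */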
6109
6110 abi_ulong arg_sigset, arg_sigsize, *arg7;
6111 target_sigset_t *target_sigset;
6112
6113 n = arg1;
6114 rfd_addr = arg2;
6115 wfd_addr = arg3;
6116 efd_addr = arg4;
6117 ts_addr = arg5;
6118
6119 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6120 if (ret) {
6121 goto fail;
6122 }
6123 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6124 if (ret) {
6125 goto fail;
6126 }
6127 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6128 if (ret) {
6129 goto fail;
6130 }
6131
6132 /*
6133 * This takes a timespec, and not a timeval, so we cannot
6134 * use the do_select() helper ...
6135 */
6136 if (ts_addr) {
6137 if (target_to_host_timespec(&ts, ts_addr)) {
6138 goto efault;
6139 }
6140 ts_ptr = &ts;
6141 } else {
6142 ts_ptr = NULL;
6143 }
6144
6145 /* Extract the two packed args for the sigset */
6146 if (arg6) {
6147 sig_ptr = &sig;
6148 sig.size = _NSIG / 8;
6149
6150 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6151 if (!arg7) {
6152 goto efault;
6153 }
6154 arg_sigset = tswapal(arg7[0]);
6155 arg_sigsize = tswapal(arg7[1]);
6156 unlock_user(arg7, arg6, 0);
6157
6158 if (arg_sigset) {
6159 sig.set = &set;
6160 if (arg_sigsize != sizeof(*target_sigset)) {
6161 /* Like the kernel, we enforce correct size sigsets */
6162 ret = -TARGET_EINVAL;
6163 goto fail;
6164 }
6165 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6166 sizeof(*target_sigset), 1);
6167 if (!target_sigset) {
6168 goto efault;
6169 }
6170 target_to_host_sigset(&set, target_sigset);
6171 unlock_user(target_sigset, arg_sigset, 0);
6172 } else {
6173 sig.set = NULL;
6174 }
6175 } else {
6176 sig_ptr = NULL;
6177 }
6178
6179 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6180 ts_ptr, sig_ptr));
6181
6182 if (!is_error(ret)) {
6183 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6184 goto efault;
6185 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6186 goto efault;
6187 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6188 goto efault;
6189
6190 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6191 goto efault;
6192 }
6193 }
6194 break;
6195 #endif
6196 case TARGET_NR_symlink:
6197 {
6198 void *p2;
6199 p = lock_user_string(arg1);
6200 p2 = lock_user_string(arg2);
6201 if (!p || !p2)
6202 ret = -TARGET_EFAULT;
6203 else
6204 ret = get_errno(symlink(p, p2));
6205 unlock_user(p2, arg2, 0);
6206 unlock_user(p, arg1, 0);
6207 }
6208 break;
6209 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6210 case TARGET_NR_symlinkat:
6211 {
6212 void *p2;
6213 p = lock_user_string(arg1);
6214 p2 = lock_user_string(arg3);
6215 if (!p || !p2)
6216 ret = -TARGET_EFAULT;
6217 else
6218 ret = get_errno(sys_symlinkat(p, arg2, p2));
6219 unlock_user(p2, arg3, 0);
6220 unlock_user(p, arg1, 0);
6221 }
6222 break;
6223 #endif
6224 #ifdef TARGET_NR_oldlstat
6225 case TARGET_NR_oldlstat:
6226 goto unimplemented;
6227 #endif
6228 case TARGET_NR_readlink:
6229 {
6230 void *p2, *temp;
6231 p = lock_user_string(arg1);
6232 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6233 if (!p || !p2)
6234 ret = -TARGET_EFAULT;
6235 else {
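/* Special-case /proc/self/exe: the guest expects the path of the emulated
   binary, not of QEMU itself, so substitute the resolved exec_path. */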
6236 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6237 char real[PATH_MAX];
6238 temp = realpath(exec_path,real);
6239 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6240 snprintf((char *)p2, arg3, "%s", real);
6241 }
6242 else
6243 ret = get_errno(readlink(path(p), p2, arg3));
6244 }
6245 unlock_user(p2, arg2, ret);
6246 unlock_user(p, arg1, 0);
6247 }
6248 break;
6249 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6250 case TARGET_NR_readlinkat:
6251 {
6252 void *p2;
6253 p = lock_user_string(arg2);
6254 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6255 if (!p || !p2)
6256 ret = -TARGET_EFAULT;
6257 else
6258 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6259 unlock_user(p2, arg3, ret);
6260 unlock_user(p, arg2, 0);
6261 }
6262 break;
6263 #endif
6264 #ifdef TARGET_NR_uselib
6265 case TARGET_NR_uselib:
6266 goto unimplemented;
6267 #endif
6268 #ifdef TARGET_NR_swapon
6269 case TARGET_NR_swapon:
6270 if (!(p = lock_user_string(arg1)))
6271 goto efault;
6272 ret = get_errno(swapon(p, arg2));
6273 unlock_user(p, arg1, 0);
6274 break;
6275 #endif
6276 case TARGET_NR_reboot:
6277 if (!(p = lock_user_string(arg4)))
6278 goto efault;
6279 ret = get_errno(reboot(arg1, arg2, arg3, p));
6280 unlock_user(p, arg4, 0);
6281 break;
6282 #ifdef TARGET_NR_readdir
6283 case TARGET_NR_readdir:
6284 goto unimplemented;
6285 #endif
6286 #ifdef TARGET_NR_mmap
6287 case TARGET_NR_mmap:
6288 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6289 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6290 || defined(TARGET_S390X)
6291 {
6292 abi_ulong *v;
6293 abi_ulong v1, v2, v3, v4, v5, v6;
6294 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6295 goto efault;
6296 v1 = tswapal(v[0]);
6297 v2 = tswapal(v[1]);
6298 v3 = tswapal(v[2]);
6299 v4 = tswapal(v[3]);
6300 v5 = tswapal(v[4]);
6301 v6 = tswapal(v[5]);
6302 unlock_user(v, arg1, 0);
6303 ret = get_errno(target_mmap(v1, v2, v3,
6304 target_to_host_bitmask(v4, mmap_flags_tbl),
6305 v5, v6));
6306 }
6307 #else
6308 ret = get_errno(target_mmap(arg1, arg2, arg3,
6309 target_to_host_bitmask(arg4, mmap_flags_tbl),
6310 arg5,
6311 arg6));
6312 #endif
6313 break;
6314 #endif
6315 #ifdef TARGET_NR_mmap2
6316 case TARGET_NR_mmap2:
6317 #ifndef MMAP_SHIFT
6318 #define MMAP_SHIFT 12
6319 #endif
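/* mmap2 takes its file offset in 4096-byte units; shifting by MMAP_SHIFT
   converts it to bytes (e.g. an offset argument of 3 becomes byte offset
   12288). */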
6320 ret = get_errno(target_mmap(arg1, arg2, arg3,
6321 target_to_host_bitmask(arg4, mmap_flags_tbl),
6322 arg5,
6323 arg6 << MMAP_SHIFT));
6324 break;
6325 #endif
6326 case TARGET_NR_munmap:
6327 ret = get_errno(target_munmap(arg1, arg2));
6328 break;
6329 case TARGET_NR_mprotect:
6330 {
6331 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6332 /* Special hack to detect libc making the stack executable. */
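/* glibc calls mprotect with PROT_GROWSDOWN on an address inside the stack;
   that flag is dropped here (the host mapping backing the guest stack need
   not be a grows-down mapping) and the range is extended down to the
   recorded guest stack limit instead. */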
6333 if ((arg3 & PROT_GROWSDOWN)
6334 && arg1 >= ts->info->stack_limit
6335 && arg1 <= ts->info->start_stack) {
6336 arg3 &= ~PROT_GROWSDOWN;
6337 arg2 = arg2 + arg1 - ts->info->stack_limit;
6338 arg1 = ts->info->stack_limit;
6339 }
6340 }
6341 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6342 break;
6343 #ifdef TARGET_NR_mremap
6344 case TARGET_NR_mremap:
6345 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6346 break;
6347 #endif
6348 /* ??? msync/mlock/munlock are broken for softmmu. */
6349 #ifdef TARGET_NR_msync
6350 case TARGET_NR_msync:
6351 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6352 break;
6353 #endif
6354 #ifdef TARGET_NR_mlock
6355 case TARGET_NR_mlock:
6356 ret = get_errno(mlock(g2h(arg1), arg2));
6357 break;
6358 #endif
6359 #ifdef TARGET_NR_munlock
6360 case TARGET_NR_munlock:
6361 ret = get_errno(munlock(g2h(arg1), arg2));
6362 break;
6363 #endif
6364 #ifdef TARGET_NR_mlockall
6365 case TARGET_NR_mlockall:
6366 ret = get_errno(mlockall(arg1));
6367 break;
6368 #endif
6369 #ifdef TARGET_NR_munlockall
6370 case TARGET_NR_munlockall:
6371 ret = get_errno(munlockall());
6372 break;
6373 #endif
6374 case TARGET_NR_truncate:
6375 if (!(p = lock_user_string(arg1)))
6376 goto efault;
6377 ret = get_errno(truncate(p, arg2));
6378 unlock_user(p, arg1, 0);
6379 break;
6380 case TARGET_NR_ftruncate:
6381 ret = get_errno(ftruncate(arg1, arg2));
6382 break;
6383 case TARGET_NR_fchmod:
6384 ret = get_errno(fchmod(arg1, arg2));
6385 break;
6386 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6387 case TARGET_NR_fchmodat:
6388 if (!(p = lock_user_string(arg2)))
6389 goto efault;
6390 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6391 unlock_user(p, arg2, 0);
6392 break;
6393 #endif
6394 case TARGET_NR_getpriority:
6395 /* libc does special remapping of the return value of
6396 * sys_getpriority() so it's just easiest to call
6397 * sys_getpriority() directly rather than through libc. */
6398 ret = get_errno(sys_getpriority(arg1, arg2));
6399 break;
6400 case TARGET_NR_setpriority:
6401 ret = get_errno(setpriority(arg1, arg2, arg3));
6402 break;
6403 #ifdef TARGET_NR_profil
6404 case TARGET_NR_profil:
6405 goto unimplemented;
6406 #endif
6407 case TARGET_NR_statfs:
6408 if (!(p = lock_user_string(arg1)))
6409 goto efault;
6410 ret = get_errno(statfs(path(p), &stfs));
6411 unlock_user(p, arg1, 0);
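/* fstatfs below jumps back to this label to share the target conversion. */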
6412 convert_statfs:
6413 if (!is_error(ret)) {
6414 struct target_statfs *target_stfs;
6415
6416 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6417 goto efault;
6418 __put_user(stfs.f_type, &target_stfs->f_type);
6419 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6420 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6421 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6422 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6423 __put_user(stfs.f_files, &target_stfs->f_files);
6424 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6425 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6426 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6427 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6428 unlock_user_struct(target_stfs, arg2, 1);
6429 }
6430 break;
6431 case TARGET_NR_fstatfs:
6432 ret = get_errno(fstatfs(arg1, &stfs));
6433 goto convert_statfs;
6434 #ifdef TARGET_NR_statfs64
6435 case TARGET_NR_statfs64:
6436 if (!(p = lock_user_string(arg1)))
6437 goto efault;
6438 ret = get_errno(statfs(path(p), &stfs));
6439 unlock_user(p, arg1, 0);
6440 convert_statfs64:
6441 if (!is_error(ret)) {
6442 struct target_statfs64 *target_stfs;
6443
6444 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6445 goto efault;
6446 __put_user(stfs.f_type, &target_stfs->f_type);
6447 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6448 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6449 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6450 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6451 __put_user(stfs.f_files, &target_stfs->f_files);
6452 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6453 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6454 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6455 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6456 unlock_user_struct(target_stfs, arg3, 1);
6457 }
6458 break;
6459 case TARGET_NR_fstatfs64:
6460 ret = get_errno(fstatfs(arg1, &stfs));
6461 goto convert_statfs64;
6462 #endif
6463 #ifdef TARGET_NR_ioperm
6464 case TARGET_NR_ioperm:
6465 goto unimplemented;
6466 #endif
6467 #ifdef TARGET_NR_socketcall
6468 case TARGET_NR_socketcall:
6469 ret = do_socketcall(arg1, arg2);
6470 break;
6471 #endif
6472 #ifdef TARGET_NR_accept
6473 case TARGET_NR_accept:
6474 ret = do_accept(arg1, arg2, arg3);
6475 break;
6476 #endif
6477 #ifdef TARGET_NR_bind
6478 case TARGET_NR_bind:
6479 ret = do_bind(arg1, arg2, arg3);
6480 break;
6481 #endif
6482 #ifdef TARGET_NR_connect
6483 case TARGET_NR_connect:
6484 ret = do_connect(arg1, arg2, arg3);
6485 break;
6486 #endif
6487 #ifdef TARGET_NR_getpeername
6488 case TARGET_NR_getpeername:
6489 ret = do_getpeername(arg1, arg2, arg3);
6490 break;
6491 #endif
6492 #ifdef TARGET_NR_getsockname
6493 case TARGET_NR_getsockname:
6494 ret = do_getsockname(arg1, arg2, arg3);
6495 break;
6496 #endif
6497 #ifdef TARGET_NR_getsockopt
6498 case TARGET_NR_getsockopt:
6499 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6500 break;
6501 #endif
6502 #ifdef TARGET_NR_listen
6503 case TARGET_NR_listen:
6504 ret = get_errno(listen(arg1, arg2));
6505 break;
6506 #endif
6507 #ifdef TARGET_NR_recv
6508 case TARGET_NR_recv:
6509 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6510 break;
6511 #endif
6512 #ifdef TARGET_NR_recvfrom
6513 case TARGET_NR_recvfrom:
6514 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6515 break;
6516 #endif
6517 #ifdef TARGET_NR_recvmsg
6518 case TARGET_NR_recvmsg:
6519 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6520 break;
6521 #endif
6522 #ifdef TARGET_NR_send
6523 case TARGET_NR_send:
6524 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6525 break;
6526 #endif
6527 #ifdef TARGET_NR_sendmsg
6528 case TARGET_NR_sendmsg:
6529 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6530 break;
6531 #endif
6532 #ifdef TARGET_NR_sendto
6533 case TARGET_NR_sendto:
6534 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6535 break;
6536 #endif
6537 #ifdef TARGET_NR_shutdown
6538 case TARGET_NR_shutdown:
6539 ret = get_errno(shutdown(arg1, arg2));
6540 break;
6541 #endif
6542 #ifdef TARGET_NR_socket
6543 case TARGET_NR_socket:
6544 ret = do_socket(arg1, arg2, arg3);
6545 break;
6546 #endif
6547 #ifdef TARGET_NR_socketpair
6548 case TARGET_NR_socketpair:
6549 ret = do_socketpair(arg1, arg2, arg3, arg4);
6550 break;
6551 #endif
6552 #ifdef TARGET_NR_setsockopt
6553 case TARGET_NR_setsockopt:
6554 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6555 break;
6556 #endif
6557
6558 case TARGET_NR_syslog:
6559 if (!(p = lock_user_string(arg2)))
6560 goto efault;
6561 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6562 unlock_user(p, arg2, 0);
6563 break;
6564
6565 case TARGET_NR_setitimer:
6566 {
6567 struct itimerval value, ovalue, *pvalue;
6568
6569 if (arg2) {
6570 pvalue = &value;
6571 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6572 || copy_from_user_timeval(&pvalue->it_value,
6573 arg2 + sizeof(struct target_timeval)))
6574 goto efault;
6575 } else {
6576 pvalue = NULL;
6577 }
6578 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6579 if (!is_error(ret) && arg3) {
6580 if (copy_to_user_timeval(arg3,
6581 &ovalue.it_interval)
6582 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6583 &ovalue.it_value))
6584 goto efault;
6585 }
6586 }
6587 break;
6588 case TARGET_NR_getitimer:
6589 {
6590 struct itimerval value;
6591
6592 ret = get_errno(getitimer(arg1, &value));
6593 if (!is_error(ret) && arg2) {
6594 if (copy_to_user_timeval(arg2,
6595 &value.it_interval)
6596 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6597 &value.it_value))
6598 goto efault;
6599 }
6600 }
6601 break;
6602 case TARGET_NR_stat:
6603 if (!(p = lock_user_string(arg1)))
6604 goto efault;
6605 ret = get_errno(stat(path(p), &st));
6606 unlock_user(p, arg1, 0);
6607 goto do_stat;
6608 case TARGET_NR_lstat:
6609 if (!(p = lock_user_string(arg1)))
6610 goto efault;
6611 ret = get_errno(lstat(path(p), &st));
6612 unlock_user(p, arg1, 0);
6613 goto do_stat;
6614 case TARGET_NR_fstat:
6615 {
6616 ret = get_errno(fstat(arg1, &st));
6617 do_stat:
6618 if (!is_error(ret)) {
6619 struct target_stat *target_st;
6620
6621 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6622 goto efault;
6623 memset(target_st, 0, sizeof(*target_st));
6624 __put_user(st.st_dev, &target_st->st_dev);
6625 __put_user(st.st_ino, &target_st->st_ino);
6626 __put_user(st.st_mode, &target_st->st_mode);
6627 __put_user(st.st_uid, &target_st->st_uid);
6628 __put_user(st.st_gid, &target_st->st_gid);
6629 __put_user(st.st_nlink, &target_st->st_nlink);
6630 __put_user(st.st_rdev, &target_st->st_rdev);
6631 __put_user(st.st_size, &target_st->st_size);
6632 __put_user(st.st_blksize, &target_st->st_blksize);
6633 __put_user(st.st_blocks, &target_st->st_blocks);
6634 __put_user(st.st_atime, &target_st->target_st_atime);
6635 __put_user(st.st_mtime, &target_st->target_st_mtime);
6636 __put_user(st.st_ctime, &target_st->target_st_ctime);
6637 unlock_user_struct(target_st, arg2, 1);
6638 }
6639 }
6640 break;
6641 #ifdef TARGET_NR_olduname
6642 case TARGET_NR_olduname:
6643 goto unimplemented;
6644 #endif
6645 #ifdef TARGET_NR_iopl
6646 case TARGET_NR_iopl:
6647 goto unimplemented;
6648 #endif
6649 case TARGET_NR_vhangup:
6650 ret = get_errno(vhangup());
6651 break;
6652 #ifdef TARGET_NR_idle
6653 case TARGET_NR_idle:
6654 goto unimplemented;
6655 #endif
6656 #ifdef TARGET_NR_syscall
6657 case TARGET_NR_syscall:
6658 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6659 arg6, arg7, arg8, 0);
6660 break;
6661 #endif
6662 case TARGET_NR_wait4:
6663 {
6664 int status;
6665 abi_long status_ptr = arg2;
6666 struct rusage rusage, *rusage_ptr;
6667 abi_ulong target_rusage = arg4;
6668 if (target_rusage)
6669 rusage_ptr = &rusage;
6670 else
6671 rusage_ptr = NULL;
6672 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6673 if (!is_error(ret)) {
6674 if (status_ptr && ret) {
6675 status = host_to_target_waitstatus(status);
6676 if (put_user_s32(status, status_ptr))
6677 goto efault;
6678 }
6679 if (target_rusage)
6680 host_to_target_rusage(target_rusage, &rusage);
6681 }
6682 }
6683 break;
6684 #ifdef TARGET_NR_swapoff
6685 case TARGET_NR_swapoff:
6686 if (!(p = lock_user_string(arg1)))
6687 goto efault;
6688 ret = get_errno(swapoff(p));
6689 unlock_user(p, arg1, 0);
6690 break;
6691 #endif
6692 case TARGET_NR_sysinfo:
6693 {
6694 struct target_sysinfo *target_value;
6695 struct sysinfo value;
6696 ret = get_errno(sysinfo(&value));
6697 if (!is_error(ret) && arg1)
6698 {
6699 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6700 goto efault;
6701 __put_user(value.uptime, &target_value->uptime);
6702 __put_user(value.loads[0], &target_value->loads[0]);
6703 __put_user(value.loads[1], &target_value->loads[1]);
6704 __put_user(value.loads[2], &target_value->loads[2]);
6705 __put_user(value.totalram, &target_value->totalram);
6706 __put_user(value.freeram, &target_value->freeram);
6707 __put_user(value.sharedram, &target_value->sharedram);
6708 __put_user(value.bufferram, &target_value->bufferram);
6709 __put_user(value.totalswap, &target_value->totalswap);
6710 __put_user(value.freeswap, &target_value->freeswap);
6711 __put_user(value.procs, &target_value->procs);
6712 __put_user(value.totalhigh, &target_value->totalhigh);
6713 __put_user(value.freehigh, &target_value->freehigh);
6714 __put_user(value.mem_unit, &target_value->mem_unit);
6715 unlock_user_struct(target_value, arg1, 1);
6716 }
6717 }
6718 break;
6719 #ifdef TARGET_NR_ipc
6720 case TARGET_NR_ipc:
6721 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6722 break;
6723 #endif
6724 #ifdef TARGET_NR_semget
6725 case TARGET_NR_semget:
6726 ret = get_errno(semget(arg1, arg2, arg3));
6727 break;
6728 #endif
6729 #ifdef TARGET_NR_semop
6730 case TARGET_NR_semop:
6731 ret = get_errno(do_semop(arg1, arg2, arg3));
6732 break;
6733 #endif
6734 #ifdef TARGET_NR_semctl
6735 case TARGET_NR_semctl:
6736 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6737 break;
6738 #endif
6739 #ifdef TARGET_NR_msgctl
6740 case TARGET_NR_msgctl:
6741 ret = do_msgctl(arg1, arg2, arg3);
6742 break;
6743 #endif
6744 #ifdef TARGET_NR_msgget
6745 case TARGET_NR_msgget:
6746 ret = get_errno(msgget(arg1, arg2));
6747 break;
6748 #endif
6749 #ifdef TARGET_NR_msgrcv
6750 case TARGET_NR_msgrcv:
6751 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6752 break;
6753 #endif
6754 #ifdef TARGET_NR_msgsnd
6755 case TARGET_NR_msgsnd:
6756 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6757 break;
6758 #endif
6759 #ifdef TARGET_NR_shmget
6760 case TARGET_NR_shmget:
6761 ret = get_errno(shmget(arg1, arg2, arg3));
6762 break;
6763 #endif
6764 #ifdef TARGET_NR_shmctl
6765 case TARGET_NR_shmctl:
6766 ret = do_shmctl(arg1, arg2, arg3);
6767 break;
6768 #endif
6769 #ifdef TARGET_NR_shmat
6770 case TARGET_NR_shmat:
6771 ret = do_shmat(arg1, arg2, arg3);
6772 break;
6773 #endif
6774 #ifdef TARGET_NR_shmdt
6775 case TARGET_NR_shmdt:
6776 ret = do_shmdt(arg1);
6777 break;
6778 #endif
6779 case TARGET_NR_fsync:
6780 ret = get_errno(fsync(arg1));
6781 break;
6782 case TARGET_NR_clone:
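/* Architectures disagree on the clone argument order (placement of the tls
   and child_tidptr arguments, and flags/newsp on CRIS), hence the
   per-target shuffles below. */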
6783 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6784 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6785 #elif defined(TARGET_CRIS)
6786 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6787 #elif defined(TARGET_S390X)
6788 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6789 #else
6790 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6791 #endif
6792 break;
6793 #ifdef __NR_exit_group
6794 /* new thread calls */
6795 case TARGET_NR_exit_group:
6796 #ifdef TARGET_GPROF
6797 _mcleanup();
6798 #endif
6799 gdb_exit(cpu_env, arg1);
6800 ret = get_errno(exit_group(arg1));
6801 break;
6802 #endif
6803 case TARGET_NR_setdomainname:
6804 if (!(p = lock_user_string(arg1)))
6805 goto efault;
6806 ret = get_errno(setdomainname(p, arg2));
6807 unlock_user(p, arg1, 0);
6808 break;
6809 case TARGET_NR_uname:
6810 /* no need to transcode because we use the linux syscall */
6811 {
6812 struct new_utsname * buf;
6813
6814 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6815 goto efault;
6816 ret = get_errno(sys_uname(buf));
6817 if (!is_error(ret)) {
6818 /* Overwrite the native machine name with whatever is being
6819 emulated. */
6820 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6821 /* Allow the user to override the reported release. */
6822 if (qemu_uname_release && *qemu_uname_release)
6823 strcpy (buf->release, qemu_uname_release);
6824 }
6825 unlock_user_struct(buf, arg1, 1);
6826 }
6827 break;
6828 #ifdef TARGET_I386
6829 case TARGET_NR_modify_ldt:
6830 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6831 break;
6832 #if !defined(TARGET_X86_64)
6833 case TARGET_NR_vm86old:
6834 goto unimplemented;
6835 case TARGET_NR_vm86:
6836 ret = do_vm86(cpu_env, arg1, arg2);
6837 break;
6838 #endif
6839 #endif
6840 case TARGET_NR_adjtimex:
6841 goto unimplemented;
6842 #ifdef TARGET_NR_create_module
6843 case TARGET_NR_create_module:
6844 #endif
6845 case TARGET_NR_init_module:
6846 case TARGET_NR_delete_module:
6847 #ifdef TARGET_NR_get_kernel_syms
6848 case TARGET_NR_get_kernel_syms:
6849 #endif
6850 goto unimplemented;
6851 case TARGET_NR_quotactl:
6852 goto unimplemented;
6853 case TARGET_NR_getpgid:
6854 ret = get_errno(getpgid(arg1));
6855 break;
6856 case TARGET_NR_fchdir:
6857 ret = get_errno(fchdir(arg1));
6858 break;
6859 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6860 case TARGET_NR_bdflush:
6861 goto unimplemented;
6862 #endif
6863 #ifdef TARGET_NR_sysfs
6864 case TARGET_NR_sysfs:
6865 goto unimplemented;
6866 #endif
6867 case TARGET_NR_personality:
6868 ret = get_errno(personality(arg1));
6869 break;
6870 #ifdef TARGET_NR_afs_syscall
6871 case TARGET_NR_afs_syscall:
6872 goto unimplemented;
6873 #endif
6874 #ifdef TARGET_NR__llseek /* Not on alpha */
6875 case TARGET_NR__llseek:
6876 {
6877 int64_t res;
6878 #if !defined(__NR_llseek)
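/* Hosts without _llseek (typically 64-bit) can seek directly: combine the
   guest's high (arg2) and low (arg3) halves into one 64-bit offset. */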
6879 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6880 if (res == -1) {
6881 ret = get_errno(res);
6882 } else {
6883 ret = 0;
6884 }
6885 #else
6886 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6887 #endif
6888 if ((ret == 0) && put_user_s64(res, arg4)) {
6889 goto efault;
6890 }
6891 }
6892 break;
6893 #endif
6894 case TARGET_NR_getdents:
6895 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6896 {
6897 struct target_dirent *target_dirp;
6898 struct linux_dirent *dirp;
6899 abi_long count = arg3;
6900
6901 dirp = malloc(count);
6902 if (!dirp) {
6903 ret = -TARGET_ENOMEM;
6904 goto fail;
6905 }
6906
6907 ret = get_errno(sys_getdents(arg1, dirp, count));
6908 if (!is_error(ret)) {
6909 struct linux_dirent *de;
6910 struct target_dirent *tde;
6911 int len = ret;
6912 int reclen, treclen;
6913 int count1, tnamelen;
6914
6915 count1 = 0;
6916 de = dirp;
6917 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6918 goto efault;
6919 tde = target_dirp;
6920 while (len > 0) {
6921 reclen = de->d_reclen;
6922 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
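/* Each record shrinks because d_ino and d_off narrow from host long to
   abi_long; adjust the record length by the two size differences. */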
6923 tde->d_reclen = tswap16(treclen);
6924 tde->d_ino = tswapal(de->d_ino);
6925 tde->d_off = tswapal(de->d_off);
6926 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6927 if (tnamelen > 256)
6928 tnamelen = 256;
6929 /* XXX: may not be correct */
6930 pstrcpy(tde->d_name, tnamelen, de->d_name);
6931 de = (struct linux_dirent *)((char *)de + reclen);
6932 len -= reclen;
6933 tde = (struct target_dirent *)((char *)tde + treclen);
6934 count1 += treclen;
6935 }
6936 ret = count1;
6937 unlock_user(target_dirp, arg2, ret);
6938 }
6939 free(dirp);
6940 }
6941 #else
6942 {
6943 struct linux_dirent *dirp;
6944 abi_long count = arg3;
6945
6946 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6947 goto efault;
6948 ret = get_errno(sys_getdents(arg1, dirp, count));
6949 if (!is_error(ret)) {
6950 struct linux_dirent *de;
6951 int len = ret;
6952 int reclen;
6953 de = dirp;
6954 while (len > 0) {
6955 reclen = de->d_reclen;
6956 if (reclen > len)
6957 break;
6958 de->d_reclen = tswap16(reclen);
6959 tswapls(&de->d_ino);
6960 tswapls(&de->d_off);
6961 de = (struct linux_dirent *)((char *)de + reclen);
6962 len -= reclen;
6963 }
6964 }
6965 unlock_user(dirp, arg2, ret);
6966 }
6967 #endif
6968 break;
6969 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6970 case TARGET_NR_getdents64:
6971 {
6972 struct linux_dirent64 *dirp;
6973 abi_long count = arg3;
6974 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6975 goto efault;
6976 ret = get_errno(sys_getdents64(arg1, dirp, count));
6977 if (!is_error(ret)) {
6978 struct linux_dirent64 *de;
6979 int len = ret;
6980 int reclen;
6981 de = dirp;
6982 while (len > 0) {
6983 reclen = de->d_reclen;
6984 if (reclen > len)
6985 break;
6986 de->d_reclen = tswap16(reclen);
6987 tswap64s((uint64_t *)&de->d_ino);
6988 tswap64s((uint64_t *)&de->d_off);
6989 de = (struct linux_dirent64 *)((char *)de + reclen);
6990 len -= reclen;
6991 }
6992 }
6993 unlock_user(dirp, arg2, ret);
6994 }
6995 break;
6996 #endif /* TARGET_NR_getdents64 */
6997 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6998 #ifdef TARGET_S390X
6999 case TARGET_NR_select:
7000 #else
7001 case TARGET_NR__newselect:
7002 #endif
7003 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7004 break;
7005 #endif
7006 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7007 # ifdef TARGET_NR_poll
7008 case TARGET_NR_poll:
7009 # endif
7010 # ifdef TARGET_NR_ppoll
7011 case TARGET_NR_ppoll:
7012 # endif
7013 {
7014 struct target_pollfd *target_pfd;
7015 unsigned int nfds = arg2;
7016 int timeout = arg3;
7017 struct pollfd *pfd;
7018 unsigned int i;
7019
7020 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7021 if (!target_pfd)
7022 goto efault;
7023
7024 pfd = alloca(sizeof(struct pollfd) * nfds);
7025 for(i = 0; i < nfds; i++) {
7026 pfd[i].fd = tswap32(target_pfd[i].fd);
7027 pfd[i].events = tswap16(target_pfd[i].events);
7028 }
7029
7030 # ifdef TARGET_NR_ppoll
7031 if (num == TARGET_NR_ppoll) {
7032 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7033 target_sigset_t *target_set;
7034 sigset_t _set, *set = &_set;
7035
7036 if (arg3) {
7037 if (target_to_host_timespec(timeout_ts, arg3)) {
7038 unlock_user(target_pfd, arg1, 0);
7039 goto efault;
7040 }
7041 } else {
7042 timeout_ts = NULL;
7043 }
7044
7045 if (arg4) {
7046 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7047 if (!target_set) {
7048 unlock_user(target_pfd, arg1, 0);
7049 goto efault;
7050 }
7051 target_to_host_sigset(set, target_set);
7052 } else {
7053 set = NULL;
7054 }
7055
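/* The raw ppoll syscall takes the sigset size as its final argument; the
   kernel expects it to match its own sigset size, i.e. _NSIG / 8 bytes. */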
7056 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7057
7058 if (!is_error(ret) && arg3) {
7059 host_to_target_timespec(arg3, timeout_ts);
7060 }
7061 if (arg4) {
7062 unlock_user(target_set, arg4, 0);
7063 }
7064 } else
7065 # endif
7066 ret = get_errno(poll(pfd, nfds, timeout));
7067
7068 if (!is_error(ret)) {
7069 for(i = 0; i < nfds; i++) {
7070 target_pfd[i].revents = tswap16(pfd[i].revents);
7071 }
7072 }
7073 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7074 }
7075 break;
7076 #endif
7077 case TARGET_NR_flock:
7078 /* NOTE: the flock constant seems to be the same for every
7079 Linux platform */
7080 ret = get_errno(flock(arg1, arg2));
7081 break;
7082 case TARGET_NR_readv:
7083 {
7084 int count = arg3;
7085 struct iovec *vec;
7086
7087 vec = alloca(count * sizeof(struct iovec));
7088 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
7089 goto efault;
7090 ret = get_errno(readv(arg1, vec, count));
7091 unlock_iovec(vec, arg2, count, 1);
7092 }
7093 break;
7094 case TARGET_NR_writev:
7095 {
7096 int count = arg3;
7097 struct iovec *vec;
7098
7099 vec = alloca(count * sizeof(struct iovec));
7100 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7101 goto efault;
7102 ret = get_errno(writev(arg1, vec, count));
7103 unlock_iovec(vec, arg2, count, 0);
7104 }
7105 break;
7106 case TARGET_NR_getsid:
7107 ret = get_errno(getsid(arg1));
7108 break;
7109 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7110 case TARGET_NR_fdatasync:
7111 ret = get_errno(fdatasync(arg1));
7112 break;
7113 #endif
7114 case TARGET_NR__sysctl:
7115 /* We don't implement this, but ENOTDIR is always a safe
7116 return value. */
7117 ret = -TARGET_ENOTDIR;
7118 break;
7119 case TARGET_NR_sched_getaffinity:
7120 {
7121 unsigned int mask_size;
7122 unsigned long *mask;
7123
7124 /*
7125 * sched_getaffinity needs multiples of ulong, so need to take
7126 * care of mismatches between target ulong and host ulong sizes.
7127 */
7128 if (arg2 & (sizeof(abi_ulong) - 1)) {
7129 ret = -TARGET_EINVAL;
7130 break;
7131 }
7132 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
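/* Round the guest-supplied length up to a whole number of host unsigned
   longs, e.g. a 6-byte request becomes 8 bytes on a 64-bit host. */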
7133
7134 mask = alloca(mask_size);
7135 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7136
7137 if (!is_error(ret)) {
7138 if (copy_to_user(arg3, mask, ret)) {
7139 goto efault;
7140 }
7141 }
7142 }
7143 break;
7144 case TARGET_NR_sched_setaffinity:
7145 {
7146 unsigned int mask_size;
7147 unsigned long *mask;
7148
7149 /*
7150 * sched_setaffinity needs multiples of ulong, so need to take
7151 * care of mismatches between target ulong and host ulong sizes.
7152 */
7153 if (arg2 & (sizeof(abi_ulong) - 1)) {
7154 ret = -TARGET_EINVAL;
7155 break;
7156 }
7157 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7158
7159 mask = alloca(mask_size);
7160 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7161 goto efault;
7162 }
7163 memcpy(mask, p, arg2);
7164 unlock_user_struct(p, arg3, 0);
7165
7166 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7167 }
7168 break;
7169 case TARGET_NR_sched_setparam:
7170 {
7171 struct sched_param *target_schp;
7172 struct sched_param schp;
7173
7174 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7175 goto efault;
7176 schp.sched_priority = tswap32(target_schp->sched_priority);
7177 unlock_user_struct(target_schp, arg2, 0);
7178 ret = get_errno(sched_setparam(arg1, &schp));
7179 }
7180 break;
7181 case TARGET_NR_sched_getparam:
7182 {
7183 struct sched_param *target_schp;
7184 struct sched_param schp;
7185 ret = get_errno(sched_getparam(arg1, &schp));
7186 if (!is_error(ret)) {
7187 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7188 goto efault;
7189 target_schp->sched_priority = tswap32(schp.sched_priority);
7190 unlock_user_struct(target_schp, arg2, 1);
7191 }
7192 }
7193 break;
7194 case TARGET_NR_sched_setscheduler:
7195 {
7196 struct sched_param *target_schp;
7197 struct sched_param schp;
7198 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7199 goto efault;
7200 schp.sched_priority = tswap32(target_schp->sched_priority);
7201 unlock_user_struct(target_schp, arg3, 0);
7202 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7203 }
7204 break;
7205 case TARGET_NR_sched_getscheduler:
7206 ret = get_errno(sched_getscheduler(arg1));
7207 break;
7208 case TARGET_NR_sched_yield:
7209 ret = get_errno(sched_yield());
7210 break;
7211 case TARGET_NR_sched_get_priority_max:
7212 ret = get_errno(sched_get_priority_max(arg1));
7213 break;
7214 case TARGET_NR_sched_get_priority_min:
7215 ret = get_errno(sched_get_priority_min(arg1));
7216 break;
7217 case TARGET_NR_sched_rr_get_interval:
7218 {
7219 struct timespec ts;
7220 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7221 if (!is_error(ret)) {
7222 host_to_target_timespec(arg2, &ts);
7223 }
7224 }
7225 break;
7226 case TARGET_NR_nanosleep:
7227 {
7228 struct timespec req, rem;
7229 target_to_host_timespec(&req, arg1);
7230 ret = get_errno(nanosleep(&req, &rem));
7231 if (is_error(ret) && arg2) {
7232 host_to_target_timespec(arg2, &rem);
7233 }
7234 }
7235 break;
7236 #ifdef TARGET_NR_query_module
7237 case TARGET_NR_query_module:
7238 goto unimplemented;
7239 #endif
7240 #ifdef TARGET_NR_nfsservctl
7241 case TARGET_NR_nfsservctl:
7242 goto unimplemented;
7243 #endif
7244 case TARGET_NR_prctl:
7245 switch (arg1) {
7246 case PR_GET_PDEATHSIG:
7247 {
7248 int deathsig;
7249 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7250 if (!is_error(ret) && arg2
7251 && put_user_ual(deathsig, arg2)) {
7252 goto efault;
7253 }
7254 break;
7255 }
7256 #ifdef PR_GET_NAME
7257 case PR_GET_NAME:
7258 {
7259 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
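/* The kernel thread name (comm) is at most 16 bytes including the trailing
   NUL, hence the fixed buffer size. */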
7260 if (!name) {
7261 goto efault;
7262 }
7263 ret = get_errno(prctl(arg1, (unsigned long)name,
7264 arg3, arg4, arg5));
7265 unlock_user(name, arg2, 16);
7266 break;
7267 }
7268 case PR_SET_NAME:
7269 {
7270 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7271 if (!name) {
7272 goto efault;
7273 }
7274 ret = get_errno(prctl(arg1, (unsigned long)name,
7275 arg3, arg4, arg5));
7276 unlock_user(name, arg2, 0);
7277 break;
7278 }
7279 #endif
7280 default:
7281 /* Most prctl options have no pointer arguments */
7282 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7283 break;
7284 }
7285 break;
7286 #ifdef TARGET_NR_arch_prctl
7287 case TARGET_NR_arch_prctl:
7288 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7289 ret = do_arch_prctl(cpu_env, arg1, arg2);
7290 break;
7291 #else
7292 goto unimplemented;
7293 #endif
7294 #endif
7295 #ifdef TARGET_NR_pread
7296 case TARGET_NR_pread:
7297 if (regpairs_aligned(cpu_env))
7298 arg4 = arg5;
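/* ABIs that pass 64-bit values in aligned register pairs insert a pad
   register, so the offset arrives one register later than usual. */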
7299 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7300 goto efault;
7301 ret = get_errno(pread(arg1, p, arg3, arg4));
7302 unlock_user(p, arg2, ret);
7303 break;
7304 case TARGET_NR_pwrite:
7305 if (regpairs_aligned(cpu_env))
7306 arg4 = arg5;
7307 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7308 goto efault;
7309 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7310 unlock_user(p, arg2, 0);
7311 break;
7312 #endif
7313 #ifdef TARGET_NR_pread64
7314 case TARGET_NR_pread64:
7315 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7316 goto efault;
7317 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7318 unlock_user(p, arg2, ret);
7319 break;
7320 case TARGET_NR_pwrite64:
7321 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7322 goto efault;
7323 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7324 unlock_user(p, arg2, 0);
7325 break;
7326 #endif
7327 case TARGET_NR_getcwd:
7328 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7329 goto efault;
7330 ret = get_errno(sys_getcwd1(p, arg2));
7331 unlock_user(p, arg1, ret);
7332 break;
7333 case TARGET_NR_capget:
7334 goto unimplemented;
7335 case TARGET_NR_capset:
7336 goto unimplemented;
7337 case TARGET_NR_sigaltstack:
7338 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7339 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7340 defined(TARGET_M68K) || defined(TARGET_S390X)
7341 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7342 break;
7343 #else
7344 goto unimplemented;
7345 #endif
7346 case TARGET_NR_sendfile:
7347 goto unimplemented;
7348 #ifdef TARGET_NR_getpmsg
7349 case TARGET_NR_getpmsg:
7350 goto unimplemented;
7351 #endif
7352 #ifdef TARGET_NR_putpmsg
7353 case TARGET_NR_putpmsg:
7354 goto unimplemented;
7355 #endif
7356 #ifdef TARGET_NR_vfork
7357 case TARGET_NR_vfork:
7358 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7359 0, 0, 0, 0));
7360 break;
7361 #endif
7362 #ifdef TARGET_NR_ugetrlimit
7363 case TARGET_NR_ugetrlimit:
7364 {
7365 struct rlimit rlim;
7366 int resource = target_to_host_resource(arg1);
7367 ret = get_errno(getrlimit(resource, &rlim));
7368 if (!is_error(ret)) {
7369 struct target_rlimit *target_rlim;
7370 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7371 goto efault;
7372 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7373 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7374 unlock_user_struct(target_rlim, arg2, 1);
7375 }
7376 break;
7377 }
7378 #endif
7379 #ifdef TARGET_NR_truncate64
7380 case TARGET_NR_truncate64:
7381 if (!(p = lock_user_string(arg1)))
7382 goto efault;
7383 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7384 unlock_user(p, arg1, 0);
7385 break;
7386 #endif
7387 #ifdef TARGET_NR_ftruncate64
7388 case TARGET_NR_ftruncate64:
7389 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7390 break;
7391 #endif
7392 #ifdef TARGET_NR_stat64
7393 case TARGET_NR_stat64:
7394 if (!(p = lock_user_string(arg1)))
7395 goto efault;
7396 ret = get_errno(stat(path(p), &st));
7397 unlock_user(p, arg1, 0);
7398 if (!is_error(ret))
7399 ret = host_to_target_stat64(cpu_env, arg2, &st);
7400 break;
7401 #endif
7402 #ifdef TARGET_NR_lstat64
7403 case TARGET_NR_lstat64:
7404 if (!(p = lock_user_string(arg1)))
7405 goto efault;
7406 ret = get_errno(lstat(path(p), &st));
7407 unlock_user(p, arg1, 0);
7408 if (!is_error(ret))
7409 ret = host_to_target_stat64(cpu_env, arg2, &st);
7410 break;
7411 #endif
7412 #ifdef TARGET_NR_fstat64
7413 case TARGET_NR_fstat64:
7414 ret = get_errno(fstat(arg1, &st));
7415 if (!is_error(ret))
7416 ret = host_to_target_stat64(cpu_env, arg2, &st);
7417 break;
7418 #endif
7419 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7420 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7421 #ifdef TARGET_NR_fstatat64
7422 case TARGET_NR_fstatat64:
7423 #endif
7424 #ifdef TARGET_NR_newfstatat
7425 case TARGET_NR_newfstatat:
7426 #endif
7427 if (!(p = lock_user_string(arg2)))
7428 goto efault;
7429 #ifdef __NR_fstatat64
7430 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7431 #else
7432 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7433 #endif
7434 if (!is_error(ret))
7435 ret = host_to_target_stat64(cpu_env, arg3, &st);
7436 break;
7437 #endif
7438 case TARGET_NR_lchown:
7439 if (!(p = lock_user_string(arg1)))
7440 goto efault;
7441 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7442 unlock_user(p, arg1, 0);
7443 break;
7444 #ifdef TARGET_NR_getuid
7445 case TARGET_NR_getuid:
7446 ret = get_errno(high2lowuid(getuid()));
7447 break;
7448 #endif
7449 #ifdef TARGET_NR_getgid
7450 case TARGET_NR_getgid:
7451 ret = get_errno(high2lowgid(getgid()));
7452 break;
7453 #endif
7454 #ifdef TARGET_NR_geteuid
7455 case TARGET_NR_geteuid:
7456 ret = get_errno(high2lowuid(geteuid()));
7457 break;
7458 #endif
7459 #ifdef TARGET_NR_getegid
7460 case TARGET_NR_getegid:
7461 ret = get_errno(high2lowgid(getegid()));
7462 break;
7463 #endif
7464 case TARGET_NR_setreuid:
7465 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7466 break;
7467 case TARGET_NR_setregid:
7468 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7469 break;
7470 case TARGET_NR_getgroups:
7471 {
7472 int gidsetsize = arg1;
7473 target_id *target_grouplist;
7474 gid_t *grouplist;
7475 int i;
7476
7477 grouplist = alloca(gidsetsize * sizeof(gid_t));
7478 ret = get_errno(getgroups(gidsetsize, grouplist));
7479 if (gidsetsize == 0)
7480 break;
7481 if (!is_error(ret)) {
7482 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
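/* These legacy syscall numbers use the 16-bit gid layout, so each target
   entry is assumed to be 2 bytes (target_id). */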
7483 if (!target_grouplist)
7484 goto efault;
7485 for(i = 0;i < ret; i++)
7486 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7487 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7488 }
7489 }
7490 break;
7491 case TARGET_NR_setgroups:
7492 {
7493 int gidsetsize = arg1;
7494 target_id *target_grouplist;
7495 gid_t *grouplist;
7496 int i;
7497
7498 grouplist = alloca(gidsetsize * sizeof(gid_t));
7499 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7500 if (!target_grouplist) {
7501 ret = -TARGET_EFAULT;
7502 goto fail;
7503 }
7504 for(i = 0;i < gidsetsize; i++)
7505 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7506 unlock_user(target_grouplist, arg2, 0);
7507 ret = get_errno(setgroups(gidsetsize, grouplist));
7508 }
7509 break;
7510 case TARGET_NR_fchown:
7511 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7512 break;
7513 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7514 case TARGET_NR_fchownat:
7515 if (!(p = lock_user_string(arg2)))
7516 goto efault;
7517 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7518 unlock_user(p, arg2, 0);
7519 break;
7520 #endif
7521 #ifdef TARGET_NR_setresuid
7522 case TARGET_NR_setresuid:
7523 ret = get_errno(setresuid(low2highuid(arg1),
7524 low2highuid(arg2),
7525 low2highuid(arg3)));
7526 break;
7527 #endif
7528 #ifdef TARGET_NR_getresuid
7529 case TARGET_NR_getresuid:
7530 {
7531 uid_t ruid, euid, suid;
7532 ret = get_errno(getresuid(&ruid, &euid, &suid));
7533 if (!is_error(ret)) {
7534 if (put_user_u16(high2lowuid(ruid), arg1)
7535 || put_user_u16(high2lowuid(euid), arg2)
7536 || put_user_u16(high2lowuid(suid), arg3))
7537 goto efault;
7538 }
7539 }
7540 break;
7541 #endif
7542 #ifdef TARGET_NR_getresgid
7543 case TARGET_NR_setresgid:
7544 ret = get_errno(setresgid(low2highgid(arg1),
7545 low2highgid(arg2),
7546 low2highgid(arg3)));
7547 break;
7548 #endif
7549 #ifdef TARGET_NR_getresgid
7550 case TARGET_NR_getresgid:
7551 {
7552 gid_t rgid, egid, sgid;
7553 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7554 if (!is_error(ret)) {
7555 if (put_user_u16(high2lowgid(rgid), arg1)
7556 || put_user_u16(high2lowgid(egid), arg2)
7557 || put_user_u16(high2lowgid(sgid), arg3))
7558 goto efault;
7559 }
7560 }
7561 break;
7562 #endif
7563 case TARGET_NR_chown:
7564 if (!(p = lock_user_string(arg1)))
7565 goto efault;
7566 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7567 unlock_user(p, arg1, 0);
7568 break;
7569 case TARGET_NR_setuid:
7570 ret = get_errno(setuid(low2highuid(arg1)));
7571 break;
7572 case TARGET_NR_setgid:
7573 ret = get_errno(setgid(low2highgid(arg1)));
7574 break;
7575 case TARGET_NR_setfsuid:
7576 ret = get_errno(setfsuid(arg1));
7577 break;
7578 case TARGET_NR_setfsgid:
7579 ret = get_errno(setfsgid(arg1));
7580 break;
7581
7582 #ifdef TARGET_NR_lchown32
7583 case TARGET_NR_lchown32:
7584 if (!(p = lock_user_string(arg1)))
7585 goto efault;
7586 ret = get_errno(lchown(p, arg2, arg3));
7587 unlock_user(p, arg1, 0);
7588 break;
7589 #endif
7590 #ifdef TARGET_NR_getuid32
7591 case TARGET_NR_getuid32:
7592 ret = get_errno(getuid());
7593 break;
7594 #endif
7595
7596 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7597 /* Alpha specific */
7598 case TARGET_NR_getxuid:
7599 {
7600 uid_t euid;
7601 euid=geteuid();
7602 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7603 }
7604 ret = get_errno(getuid());
7605 break;
7606 #endif
7607 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7608 /* Alpha specific */
7609 case TARGET_NR_getxgid:
7610 {
7611 gid_t egid;
7612 egid=getegid();
7613 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7614 }
7615 ret = get_errno(getgid());
7616 break;
7617 #endif
7618 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7619 /* Alpha specific */
7620 case TARGET_NR_osf_getsysinfo:
7621 ret = -TARGET_EOPNOTSUPP;
7622 switch (arg1) {
7623 case TARGET_GSI_IEEE_FP_CONTROL:
7624 {
7625 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7626
7627 /* Copied from linux ieee_fpcr_to_swcr. */
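/* The FPCR stores trap *disable* bits, so they are complemented (~fpcr)
   to produce the SWCR trap-enable bits. */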
7628 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7629 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7630 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7631 | SWCR_TRAP_ENABLE_DZE
7632 | SWCR_TRAP_ENABLE_OVF);
7633 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7634 | SWCR_TRAP_ENABLE_INE);
7635 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7636 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7637
7638 if (put_user_u64 (swcr, arg2))
7639 goto efault;
7640 ret = 0;
7641 }
7642 break;
7643
7644 /* case GSI_IEEE_STATE_AT_SIGNAL:
7645 -- Not implemented in linux kernel.
7646 case GSI_UACPROC:
7647 -- Retrieves current unaligned access state; not much used.
7648 case GSI_PROC_TYPE:
7649 -- Retrieves implver information; surely not used.
7650 case GSI_GET_HWRPB:
7651 -- Grabs a copy of the HWRPB; surely not used.
7652 */
7653 }
7654 break;
7655 #endif
7656 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7657 /* Alpha specific */
7658 case TARGET_NR_osf_setsysinfo:
7659 ret = -TARGET_EOPNOTSUPP;
7660 switch (arg1) {
7661 case TARGET_SSI_IEEE_FP_CONTROL:
7662 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7663 {
7664 uint64_t swcr, fpcr, orig_fpcr;
7665
7666 if (get_user_u64 (swcr, arg2))
7667 goto efault;
7668 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7669 fpcr = orig_fpcr & FPCR_DYN_MASK;
7670
7671 /* Copied from linux ieee_swcr_to_fpcr. */
7672 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7673 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7674 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7675 | SWCR_TRAP_ENABLE_DZE
7676 | SWCR_TRAP_ENABLE_OVF)) << 48;
7677 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7678 | SWCR_TRAP_ENABLE_INE)) << 57;
7679 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7680 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7681
7682 cpu_alpha_store_fpcr (cpu_env, fpcr);
7683 ret = 0;
7684
7685 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7686 /* Old exceptions are not signaled. */
7687 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7688
7689 /* If any exceptions set by this call, and are unmasked,
7690 send a signal. */
7691 /* ??? FIXME */
7692 }
7693 }
7694 break;
7695
7696 /* case SSI_NVPAIRS:
7697 -- Used with SSIN_UACPROC to enable unaligned accesses.
7698 case SSI_IEEE_STATE_AT_SIGNAL:
7699 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7700 -- Not implemented in linux kernel
7701 */
7702 }
7703 break;
7704 #endif
7705 #ifdef TARGET_NR_osf_sigprocmask
7706 /* Alpha specific. */
7707 case TARGET_NR_osf_sigprocmask:
7708 {
7709 abi_ulong mask;
7710 int how;
7711 sigset_t set, oldset;
7712
7713 switch (arg1) {
7714 case TARGET_SIG_BLOCK:
7715 how = SIG_BLOCK;
7716 break;
7717 case TARGET_SIG_UNBLOCK:
7718 how = SIG_UNBLOCK;
7719 break;
7720 case TARGET_SIG_SETMASK:
7721 how = SIG_SETMASK;
7722 break;
7723 default:
7724 ret = -TARGET_EINVAL;
7725 goto fail;
7726 }
7727 mask = arg2;
7728 target_to_host_old_sigset(&set, &mask);
7729 sigprocmask(how, &set, &oldset);
7730 host_to_target_old_sigset(&mask, &oldset);
7731 ret = mask;
7732 }
7733 break;
7734 #endif
7735
7736 #ifdef TARGET_NR_getgid32
7737 case TARGET_NR_getgid32:
7738 ret = get_errno(getgid());
7739 break;
7740 #endif
7741 #ifdef TARGET_NR_geteuid32
7742 case TARGET_NR_geteuid32:
7743 ret = get_errno(geteuid());
7744 break;
7745 #endif
7746 #ifdef TARGET_NR_getegid32
7747 case TARGET_NR_getegid32:
7748 ret = get_errno(getegid());
7749 break;
7750 #endif
7751 #ifdef TARGET_NR_setreuid32
7752 case TARGET_NR_setreuid32:
7753 ret = get_errno(setreuid(arg1, arg2));
7754 break;
7755 #endif
7756 #ifdef TARGET_NR_setregid32
7757 case TARGET_NR_setregid32:
7758 ret = get_errno(setregid(arg1, arg2));
7759 break;
7760 #endif
7761 #ifdef TARGET_NR_getgroups32
7762 case TARGET_NR_getgroups32:
7763 {
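        /* The 32-bit groups calls use fixed 32-bit gid slots in guest
           memory, so entries are byte-swapped with tswap32 through a
           temporary host gid_t array. */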
7764 int gidsetsize = arg1;
7765 uint32_t *target_grouplist;
7766 gid_t *grouplist;
7767 int i;
7768
7769 grouplist = alloca(gidsetsize * sizeof(gid_t));
7770 ret = get_errno(getgroups(gidsetsize, grouplist));
7771 if (gidsetsize == 0)
7772 break;
7773 if (!is_error(ret)) {
7774 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7775 if (!target_grouplist) {
7776 ret = -TARGET_EFAULT;
7777 goto fail;
7778 }
7779 for (i = 0; i < ret; i++)
7780 target_grouplist[i] = tswap32(grouplist[i]);
7781 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7782 }
7783 }
7784 break;
7785 #endif
7786 #ifdef TARGET_NR_setgroups32
7787 case TARGET_NR_setgroups32:
7788 {
7789 int gidsetsize = arg1;
7790 uint32_t *target_grouplist;
7791 gid_t *grouplist;
7792 int i;
7793
7794 grouplist = alloca(gidsetsize * sizeof(gid_t));
7795 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7796 if (!target_grouplist) {
7797 ret = -TARGET_EFAULT;
7798 goto fail;
7799 }
7800 for (i = 0; i < gidsetsize; i++)
7801 grouplist[i] = tswap32(target_grouplist[i]);
7802 unlock_user(target_grouplist, arg2, 0);
7803 ret = get_errno(setgroups(gidsetsize, grouplist));
7804 }
7805 break;
7806 #endif
7807 #ifdef TARGET_NR_fchown32
7808 case TARGET_NR_fchown32:
7809 ret = get_errno(fchown(arg1, arg2, arg3));
7810 break;
7811 #endif
7812 #ifdef TARGET_NR_setresuid32
7813 case TARGET_NR_setresuid32:
7814 ret = get_errno(setresuid(arg1, arg2, arg3));
7815 break;
7816 #endif
7817 #ifdef TARGET_NR_getresuid32
7818 case TARGET_NR_getresuid32:
7819 {
7820 uid_t ruid, euid, suid;
7821 ret = get_errno(getresuid(&ruid, &euid, &suid));
7822 if (!is_error(ret)) {
7823 if (put_user_u32(ruid, arg1)
7824 || put_user_u32(euid, arg2)
7825 || put_user_u32(suid, arg3))
7826 goto efault;
7827 }
7828 }
7829 break;
7830 #endif
7831 #ifdef TARGET_NR_setresgid32
7832 case TARGET_NR_setresgid32:
7833 ret = get_errno(setresgid(arg1, arg2, arg3));
7834 break;
7835 #endif
7836 #ifdef TARGET_NR_getresgid32
7837 case TARGET_NR_getresgid32:
7838 {
7839 gid_t rgid, egid, sgid;
7840 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7841 if (!is_error(ret)) {
7842 if (put_user_u32(rgid, arg1)
7843 || put_user_u32(egid, arg2)
7844 || put_user_u32(sgid, arg3))
7845 goto efault;
7846 }
7847 }
7848 break;
7849 #endif
7850 #ifdef TARGET_NR_chown32
7851 case TARGET_NR_chown32:
7852 if (!(p = lock_user_string(arg1)))
7853 goto efault;
7854 ret = get_errno(chown(p, arg2, arg3));
7855 unlock_user(p, arg1, 0);
7856 break;
7857 #endif
7858 #ifdef TARGET_NR_setuid32
7859 case TARGET_NR_setuid32:
7860 ret = get_errno(setuid(arg1));
7861 break;
7862 #endif
7863 #ifdef TARGET_NR_setgid32
7864 case TARGET_NR_setgid32:
7865 ret = get_errno(setgid(arg1));
7866 break;
7867 #endif
7868 #ifdef TARGET_NR_setfsuid32
7869 case TARGET_NR_setfsuid32:
7870 ret = get_errno(setfsuid(arg1));
7871 break;
7872 #endif
7873 #ifdef TARGET_NR_setfsgid32
7874 case TARGET_NR_setfsgid32:
7875 ret = get_errno(setfsgid(arg1));
7876 break;
7877 #endif
7878
7879 case TARGET_NR_pivot_root:
7880 goto unimplemented;
7881 #ifdef TARGET_NR_mincore
7882 case TARGET_NR_mincore:
7883 {
7884 void *a;
7885 ret = -TARGET_EFAULT;
7886 if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
7887 goto efault;
7888 if (!(p = lock_user_string(arg3)))
7889 goto mincore_fail;
7890 ret = get_errno(mincore(a, arg2, p));
7891 unlock_user(p, arg3, ret);
7892 mincore_fail:
7893 unlock_user(a, arg1, 0);
7894 }
7895 break;
7896 #endif
7897 #ifdef TARGET_NR_arm_fadvise64_64
7898 case TARGET_NR_arm_fadvise64_64:
7899 {
7900 /*
7901 * arm_fadvise64_64 looks like fadvise64_64 but
7902 * with different argument order
7903 */
7904 abi_long temp;
7905 temp = arg3;
7906 arg3 = arg4;
7907 arg4 = temp;
7908 }
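        /* No break: fall through to the common fadvise handling below. */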
7909 #endif
7910 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7911 #ifdef TARGET_NR_fadvise64_64
7912 case TARGET_NR_fadvise64_64:
7913 #endif
7914 #ifdef TARGET_NR_fadvise64
7915 case TARGET_NR_fadvise64:
7916 #endif
7917 #ifdef TARGET_S390X
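        /* s390x guests define POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather
           than the generic 4/5, so translate them to the host constants
           and turn the guest's 4/5 into values the host will reject. */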
7918 switch (arg4) {
7919 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7920 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7921 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7922 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7923 default: break;
7924 }
7925 #endif
7926 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7927 break;
7928 #endif
7929 #ifdef TARGET_NR_madvise
7930 case TARGET_NR_madvise:
7931 /* A straight passthrough may not be safe because qemu sometimes
7932 turns private file-backed mappings into anonymous mappings.
7933 This will break MADV_DONTNEED.
7934 This is a hint, so ignoring and returning success is ok. */
7935 ret = get_errno(0);
7936 break;
7937 #endif
7938 #if TARGET_ABI_BITS == 32
7939 case TARGET_NR_fcntl64:
7940 {
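        /* Only reached on 32-bit ABIs: convert the guest's 64-bit flock
           layout (including the padded ARM EABI variant) to the host's
           struct flock64, issue the host fcntl, and convert the result
           back for F_GETLK64.  Other commands go through do_fcntl(). */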
7941 int cmd;
7942 struct flock64 fl;
7943 struct target_flock64 *target_fl;
7944 #ifdef TARGET_ARM
7945 struct target_eabi_flock64 *target_efl;
7946 #endif
7947
7948 cmd = target_to_host_fcntl_cmd(arg2);
7949 if (cmd == -TARGET_EINVAL) {
7950 ret = cmd;
7951 break;
7952 }
7953
7954 switch (arg2) {
7955 case TARGET_F_GETLK64:
7956 #ifdef TARGET_ARM
7957 if (((CPUARMState *)cpu_env)->eabi) {
7958 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7959 goto efault;
7960 fl.l_type = tswap16(target_efl->l_type);
7961 fl.l_whence = tswap16(target_efl->l_whence);
7962 fl.l_start = tswap64(target_efl->l_start);
7963 fl.l_len = tswap64(target_efl->l_len);
7964 fl.l_pid = tswap32(target_efl->l_pid);
7965 unlock_user_struct(target_efl, arg3, 0);
7966 } else
7967 #endif
7968 {
7969 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7970 goto efault;
7971 fl.l_type = tswap16(target_fl->l_type);
7972 fl.l_whence = tswap16(target_fl->l_whence);
7973 fl.l_start = tswap64(target_fl->l_start);
7974 fl.l_len = tswap64(target_fl->l_len);
7975 fl.l_pid = tswap32(target_fl->l_pid);
7976 unlock_user_struct(target_fl, arg3, 0);
7977 }
7978 ret = get_errno(fcntl(arg1, cmd, &fl));
7979 if (ret == 0) {
7980 #ifdef TARGET_ARM
7981 if (((CPUARMState *)cpu_env)->eabi) {
7982 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7983 goto efault;
7984 target_efl->l_type = tswap16(fl.l_type);
7985 target_efl->l_whence = tswap16(fl.l_whence);
7986 target_efl->l_start = tswap64(fl.l_start);
7987 target_efl->l_len = tswap64(fl.l_len);
7988 target_efl->l_pid = tswap32(fl.l_pid);
7989 unlock_user_struct(target_efl, arg3, 1);
7990 } else
7991 #endif
7992 {
7993 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7994 goto efault;
7995 target_fl->l_type = tswap16(fl.l_type);
7996 target_fl->l_whence = tswap16(fl.l_whence);
7997 target_fl->l_start = tswap64(fl.l_start);
7998 target_fl->l_len = tswap64(fl.l_len);
7999 target_fl->l_pid = tswap32(fl.l_pid);
8000 unlock_user_struct(target_fl, arg3, 1);
8001 }
8002 }
8003 break;
8004
8005 case TARGET_F_SETLK64:
8006 case TARGET_F_SETLKW64:
8007 #ifdef TARGET_ARM
8008 if (((CPUARMState *)cpu_env)->eabi) {
8009 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8010 goto efault;
8011 fl.l_type = tswap16(target_efl->l_type);
8012 fl.l_whence = tswap16(target_efl->l_whence);
8013 fl.l_start = tswap64(target_efl->l_start);
8014 fl.l_len = tswap64(target_efl->l_len);
8015 fl.l_pid = tswap32(target_efl->l_pid);
8016 unlock_user_struct(target_efl, arg3, 0);
8017 } else
8018 #endif
8019 {
8020 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8021 goto efault;
8022 fl.l_type = tswap16(target_fl->l_type);
8023 fl.l_whence = tswap16(target_fl->l_whence);
8024 fl.l_start = tswap64(target_fl->l_start);
8025 fl.l_len = tswap64(target_fl->l_len);
8026 fl.l_pid = tswap32(target_fl->l_pid);
8027 unlock_user_struct(target_fl, arg3, 0);
8028 }
8029 ret = get_errno(fcntl(arg1, cmd, &fl));
8030 break;
8031 default:
8032 ret = do_fcntl(arg1, arg2, arg3);
8033 break;
8034 }
8035 break;
8036 }
8037 #endif
8038 #ifdef TARGET_NR_cacheflush
8039 case TARGET_NR_cacheflush:
8040 /* self-modifying code is handled automatically, so nothing needed */
8041 ret = 0;
8042 break;
8043 #endif
8044 #ifdef TARGET_NR_security
8045 case TARGET_NR_security:
8046 goto unimplemented;
8047 #endif
8048 #ifdef TARGET_NR_getpagesize
8049 case TARGET_NR_getpagesize:
8050 ret = TARGET_PAGE_SIZE;
8051 break;
8052 #endif
8053 case TARGET_NR_gettid:
8054 ret = get_errno(gettid());
8055 break;
8056 #ifdef TARGET_NR_readahead
8057 case TARGET_NR_readahead:
8058 #if TARGET_ABI_BITS == 32
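        /* On 32-bit ABIs the 64-bit offset is split across two registers
           (low word in arg2); ABIs that align 64-bit register pairs shift
           the arguments up by one slot, hence regpairs_aligned(). */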
8059 if (regpairs_aligned(cpu_env)) {
8060 arg2 = arg3;
8061 arg3 = arg4;
8062 arg4 = arg5;
8063 }
8064 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8065 #else
8066 ret = get_errno(readahead(arg1, arg2, arg3));
8067 #endif
8068 break;
8069 #endif
8070 #ifdef CONFIG_ATTR
8071 #ifdef TARGET_NR_setxattr
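    /* Extended attribute syscalls: guest name/value buffers are locked
       into host memory and passed straight through; a zero buffer
       address is preserved as NULL for the usual size query. */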
8072 case TARGET_NR_listxattr:
8073 case TARGET_NR_llistxattr:
8074 {
8075 void *p, *b = 0;
8076 if (arg2) {
8077 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8078 if (!b) {
8079 ret = -TARGET_EFAULT;
8080 break;
8081 }
8082 }
8083 p = lock_user_string(arg1);
8084 if (p) {
8085 if (num == TARGET_NR_listxattr) {
8086 ret = get_errno(listxattr(p, b, arg3));
8087 } else {
8088 ret = get_errno(llistxattr(p, b, arg3));
8089 }
8090 } else {
8091 ret = -TARGET_EFAULT;
8092 }
8093 unlock_user(p, arg1, 0);
8094 unlock_user(b, arg2, arg3);
8095 break;
8096 }
8097 case TARGET_NR_flistxattr:
8098 {
8099 void *b = 0;
8100 if (arg2) {
8101 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8102 if (!b) {
8103 ret = -TARGET_EFAULT;
8104 break;
8105 }
8106 }
8107 ret = get_errno(flistxattr(arg1, b, arg3));
8108 unlock_user(b, arg2, arg3);
8109 break;
8110 }
8111 case TARGET_NR_setxattr:
8112 case TARGET_NR_lsetxattr:
8113 {
8114 void *p, *n, *v = 0;
8115 if (arg3) {
8116 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8117 if (!v) {
8118 ret = -TARGET_EFAULT;
8119 break;
8120 }
8121 }
8122 p = lock_user_string(arg1);
8123 n = lock_user_string(arg2);
8124 if (p && n) {
8125 if (num == TARGET_NR_setxattr) {
8126 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8127 } else {
8128 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8129 }
8130 } else {
8131 ret = -TARGET_EFAULT;
8132 }
8133 unlock_user(p, arg1, 0);
8134 unlock_user(n, arg2, 0);
8135 unlock_user(v, arg3, 0);
8136 }
8137 break;
8138 case TARGET_NR_fsetxattr:
8139 {
8140 void *n, *v = 0;
8141 if (arg3) {
8142 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8143 if (!v) {
8144 ret = -TARGET_EFAULT;
8145 break;
8146 }
8147 }
8148 n = lock_user_string(arg2);
8149 if (n) {
8150 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8151 } else {
8152 ret = -TARGET_EFAULT;
8153 }
8154 unlock_user(n, arg2, 0);
8155 unlock_user(v, arg3, 0);
8156 }
8157 break;
8158 case TARGET_NR_getxattr:
8159 case TARGET_NR_lgetxattr:
8160 {
8161 void *p, *n, *v = 0;
8162 if (arg3) {
8163 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8164 if (!v) {
8165 ret = -TARGET_EFAULT;
8166 break;
8167 }
8168 }
8169 p = lock_user_string(arg1);
8170 n = lock_user_string(arg2);
8171 if (p && n) {
8172 if (num == TARGET_NR_getxattr) {
8173 ret = get_errno(getxattr(p, n, v, arg4));
8174 } else {
8175 ret = get_errno(lgetxattr(p, n, v, arg4));
8176 }
8177 } else {
8178 ret = -TARGET_EFAULT;
8179 }
8180 unlock_user(p, arg1, 0);
8181 unlock_user(n, arg2, 0);
8182 unlock_user(v, arg3, arg4);
8183 }
8184 break;
8185 case TARGET_NR_fgetxattr:
8186 {
8187 void *n, *v = 0;
8188 if (arg3) {
8189 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8190 if (!v) {
8191 ret = -TARGET_EFAULT;
8192 break;
8193 }
8194 }
8195 n = lock_user_string(arg2);
8196 if (n) {
8197 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8198 } else {
8199 ret = -TARGET_EFAULT;
8200 }
8201 unlock_user(n, arg2, 0);
8202 unlock_user(v, arg3, arg4);
8203 }
8204 break;
8205 case TARGET_NR_removexattr:
8206 case TARGET_NR_lremovexattr:
8207 {
8208 void *p, *n;
8209 p = lock_user_string(arg1);
8210 n = lock_user_string(arg2);
8211 if (p && n) {
8212 if (num == TARGET_NR_removexattr) {
8213 ret = get_errno(removexattr(p, n));
8214 } else {
8215 ret = get_errno(lremovexattr(p, n));
8216 }
8217 } else {
8218 ret = -TARGET_EFAULT;
8219 }
8220 unlock_user(p, arg1, 0);
8221 unlock_user(n, arg2, 0);
8222 }
8223 break;
8224 case TARGET_NR_fremovexattr:
8225 {
8226 void *n;
8227 n = lock_user_string(arg2);
8228 if (n) {
8229 ret = get_errno(fremovexattr(arg1, n));
8230 } else {
8231 ret = -TARGET_EFAULT;
8232 }
8233 unlock_user(n, arg2, 0);
8234 }
8235 break;
8236 #endif
8237 #endif /* CONFIG_ATTR */
8238 #ifdef TARGET_NR_set_thread_area
8239 case TARGET_NR_set_thread_area:
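        /* Handled per target: MIPS and CRIS simply record the TLS pointer
           in the CPU state, while 32-bit x86 manages TLS descriptors via
           do_set_thread_area(). */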
8240 #if defined(TARGET_MIPS)
8241 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8242 ret = 0;
8243 break;
8244 #elif defined(TARGET_CRIS)
8245 if (arg1 & 0xff)
8246 ret = -TARGET_EINVAL;
8247 else {
8248 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8249 ret = 0;
8250 }
8251 break;
8252 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8253 ret = do_set_thread_area(cpu_env, arg1);
8254 break;
8255 #else
8256 goto unimplemented_nowarn;
8257 #endif
8258 #endif
8259 #ifdef TARGET_NR_get_thread_area
8260 case TARGET_NR_get_thread_area:
8261 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8262 ret = do_get_thread_area(cpu_env, arg1);
8263 #else
8264 goto unimplemented_nowarn;
8265 #endif
8266 #endif
8267 #ifdef TARGET_NR_getdomainname
8268 case TARGET_NR_getdomainname:
8269 goto unimplemented_nowarn;
8270 #endif
8271
8272 #ifdef TARGET_NR_clock_gettime
8273 case TARGET_NR_clock_gettime:
8274 {
8275 struct timespec ts;
8276 ret = get_errno(clock_gettime(arg1, &ts));
8277 if (!is_error(ret)) {
8278 host_to_target_timespec(arg2, &ts);
8279 }
8280 break;
8281 }
8282 #endif
8283 #ifdef TARGET_NR_clock_getres
8284 case TARGET_NR_clock_getres:
8285 {
8286 struct timespec ts;
8287 ret = get_errno(clock_getres(arg1, &ts));
8288 if (!is_error(ret)) {
8289 host_to_target_timespec(arg2, &ts);
8290 }
8291 break;
8292 }
8293 #endif
8294 #ifdef TARGET_NR_clock_nanosleep
8295 case TARGET_NR_clock_nanosleep:
8296 {
8297 struct timespec ts;
8298 target_to_host_timespec(&ts, arg3);
8299 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8300 if (arg4)
8301 host_to_target_timespec(arg4, &ts);
8302 break;
8303 }
8304 #endif
8305
8306 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8307 case TARGET_NR_set_tid_address:
8308 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8309 break;
8310 #endif
8311
8312 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8313 case TARGET_NR_tkill:
8314 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8315 break;
8316 #endif
8317
8318 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8319 case TARGET_NR_tgkill:
8320 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8321 target_to_host_signal(arg3)));
8322 break;
8323 #endif
8324
8325 #ifdef TARGET_NR_set_robust_list
8326 case TARGET_NR_set_robust_list:
8327 goto unimplemented_nowarn;
8328 #endif
8329
8330 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8331 case TARGET_NR_utimensat:
8332 {
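        /* A zero times pointer is passed through as NULL ("set to now");
           a zero path is likewise passed as NULL, which the kernel uses
           for futimens()-style operation on the fd itself. */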
8333 struct timespec *tsp, ts[2];
8334 if (!arg3) {
8335 tsp = NULL;
8336 } else {
8337 target_to_host_timespec(ts, arg3);
8338 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8339 tsp = ts;
8340 }
8341 if (!arg2)
8342 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8343 else {
8344 if (!(p = lock_user_string(arg2))) {
8345 ret = -TARGET_EFAULT;
8346 goto fail;
8347 }
8348 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8349 unlock_user(p, arg2, 0);
8350 }
8351 }
8352 break;
8353 #endif
8354 #if defined(CONFIG_USE_NPTL)
8355 case TARGET_NR_futex:
8356 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8357 break;
8358 #endif
8359 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8360 case TARGET_NR_inotify_init:
8361 ret = get_errno(sys_inotify_init());
8362 break;
8363 #endif
8364 #ifdef CONFIG_INOTIFY1
8365 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8366 case TARGET_NR_inotify_init1:
8367 ret = get_errno(sys_inotify_init1(arg1));
8368 break;
8369 #endif
8370 #endif
8371 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8372 case TARGET_NR_inotify_add_watch:
8373 p = lock_user_string(arg2);
8374 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8375 unlock_user(p, arg2, 0);
8376 break;
8377 #endif
8378 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8379 case TARGET_NR_inotify_rm_watch:
8380 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8381 break;
8382 #endif
8383
8384 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8385 case TARGET_NR_mq_open:
8386 {
8387 struct mq_attr posix_mq_attr;
8388
8389 p = lock_user_string(arg1 - 1);
8390 if (arg4 != 0)
8391 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8392 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8393 unlock_user (p, arg1, 0);
8394 }
8395 break;
8396
8397 case TARGET_NR_mq_unlink:
8398 p = lock_user_string(arg1 - 1);
8399 ret = get_errno(mq_unlink(p));
8400 unlock_user (p, arg1, 0);
8401 break;
8402
8403 case TARGET_NR_mq_timedsend:
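    /* mq_timedsend/mq_timedreceive: with a timeout the timespec is
       converted in both directions around the call; without one they
       fall back to plain mq_send()/mq_receive(). */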
8404 {
8405 struct timespec ts;
8406
8407 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8408 if (arg5 != 0) {
8409 target_to_host_timespec(&ts, arg5);
8410 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8411 host_to_target_timespec(arg5, &ts);
8412 }
8413 else
8414 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8415 unlock_user (p, arg2, arg3);
8416 }
8417 break;
8418
8419 case TARGET_NR_mq_timedreceive:
8420 {
8421 struct timespec ts;
8422 unsigned int prio;
8423
8424 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8425 if (arg5 != 0) {
8426 target_to_host_timespec(&ts, arg5);
8427 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8428 host_to_target_timespec(arg5, &ts);
8429 }
8430 else
8431 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8432 unlock_user (p, arg2, arg3);
8433 if (arg4 != 0)
8434 put_user_u32(prio, arg4);
8435 }
8436 break;
8437
8438 /* Not implemented for now... */
8439 /* case TARGET_NR_mq_notify: */
8440 /* break; */
8441
8442 case TARGET_NR_mq_getsetattr:
8443 {
8444 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8445 ret = 0;
8446 if (arg3 != 0) {
8447 ret = mq_getattr(arg1, &posix_mq_attr_out);
8448 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8449 }
8450 if (arg2 != 0) {
8451 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8452 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8453 }
8454
8455 }
8456 break;
8457 #endif
8458
8459 #ifdef CONFIG_SPLICE
8460 #ifdef TARGET_NR_tee
8461 case TARGET_NR_tee:
8462 {
8463 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8464 }
8465 break;
8466 #endif
8467 #ifdef TARGET_NR_splice
8468 case TARGET_NR_splice:
8469 {
8470 loff_t loff_in, loff_out;
8471 loff_t *ploff_in = NULL, *ploff_out = NULL;
8472 if (arg2) {
8473 get_user_u64(loff_in, arg2);
8474 ploff_in = &loff_in;
8475 }
8476 if (arg4) {
8477 get_user_u64(loff_out, arg4);
8478 ploff_out = &loff_out;
8479 }
8480 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8481 }
8482 break;
8483 #endif
8484 #ifdef TARGET_NR_vmsplice
8485 case TARGET_NR_vmsplice:
8486 {
8487 int count = arg3;
8488 struct iovec *vec;
8489
8490 vec = alloca(count * sizeof(struct iovec));
8491 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
8492 goto efault;
8493 ret = get_errno(vmsplice(arg1, vec, count, arg4));
8494 unlock_iovec(vec, arg2, count, 0);
8495 }
8496 break;
8497 #endif
8498 #endif /* CONFIG_SPLICE */
8499 #ifdef CONFIG_EVENTFD
8500 #if defined(TARGET_NR_eventfd)
8501 case TARGET_NR_eventfd:
8502 ret = get_errno(eventfd(arg1, 0));
8503 break;
8504 #endif
8505 #if defined(TARGET_NR_eventfd2)
8506 case TARGET_NR_eventfd2:
8507 ret = get_errno(eventfd(arg1, arg2));
8508 break;
8509 #endif
8510 #endif /* CONFIG_EVENTFD */
8511 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8512 case TARGET_NR_fallocate:
8513 #if TARGET_ABI_BITS == 32
8514 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8515 target_offset64(arg5, arg6)));
8516 #else
8517 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8518 #endif
8519 break;
8520 #endif
8521 #if defined(CONFIG_SYNC_FILE_RANGE)
8522 #if defined(TARGET_NR_sync_file_range)
8523 case TARGET_NR_sync_file_range:
8524 #if TARGET_ABI_BITS == 32
8525 #if defined(TARGET_MIPS)
8526 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8527 target_offset64(arg5, arg6), arg7));
8528 #else
8529 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8530 target_offset64(arg4, arg5), arg6));
8531 #endif /* !TARGET_MIPS */
8532 #else
8533 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8534 #endif
8535 break;
8536 #endif
8537 #if defined(TARGET_NR_sync_file_range2)
8538 case TARGET_NR_sync_file_range2:
8539 /* This is like sync_file_range but the arguments are reordered */
8540 #if TARGET_ABI_BITS == 32
8541 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8542 target_offset64(arg5, arg6), arg2));
8543 #else
8544 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8545 #endif
8546 break;
8547 #endif
8548 #endif
8549 #if defined(CONFIG_EPOLL)
8550 #if defined(TARGET_NR_epoll_create)
8551 case TARGET_NR_epoll_create:
8552 ret = get_errno(epoll_create(arg1));
8553 break;
8554 #endif
8555 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8556 case TARGET_NR_epoll_create1:
8557 ret = get_errno(epoll_create1(arg1));
8558 break;
8559 #endif
8560 #if defined(TARGET_NR_epoll_ctl)
8561 case TARGET_NR_epoll_ctl:
8562 {
8563 struct epoll_event ep;
8564 struct epoll_event *epp = 0;
8565 if (arg4) {
8566 struct target_epoll_event *target_ep;
8567 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8568 goto efault;
8569 }
8570 ep.events = tswap32(target_ep->events);
8571 /* The epoll_data_t union is just opaque data to the kernel,
8572 * so we transfer all 64 bits across and need not worry what
8573 * actual data type it is.
8574 */
8575 ep.data.u64 = tswap64(target_ep->data.u64);
8576 unlock_user_struct(target_ep, arg4, 0);
8577 epp = &ep;
8578 }
8579 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8580 break;
8581 }
8582 #endif
8583
8584 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8585 #define IMPLEMENT_EPOLL_PWAIT
8586 #endif
8587 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8588 #if defined(TARGET_NR_epoll_wait)
8589 case TARGET_NR_epoll_wait:
8590 #endif
8591 #if defined(IMPLEMENT_EPOLL_PWAIT)
8592 case TARGET_NR_epoll_pwait:
8593 #endif
8594 {
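        /* epoll_wait and epoll_pwait share this code: events are collected
           into a host-side array and byte-swapped back into the guest's
           target_epoll_event buffer; epoll_pwait additionally converts the
           guest sigset before the call. */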
8595 struct target_epoll_event *target_ep;
8596 struct epoll_event *ep;
8597 int epfd = arg1;
8598 int maxevents = arg3;
8599 int timeout = arg4;
8600
8601 target_ep = lock_user(VERIFY_WRITE, arg2,
8602 maxevents * sizeof(struct target_epoll_event), 1);
8603 if (!target_ep) {
8604 goto efault;
8605 }
8606
8607 ep = alloca(maxevents * sizeof(struct epoll_event));
8608
8609 switch (num) {
8610 #if defined(IMPLEMENT_EPOLL_PWAIT)
8611 case TARGET_NR_epoll_pwait:
8612 {
8613 target_sigset_t *target_set;
8614 sigset_t _set, *set = &_set;
8615
8616 if (arg5) {
8617 target_set = lock_user(VERIFY_READ, arg5,
8618 sizeof(target_sigset_t), 1);
8619 if (!target_set) {
8620 unlock_user(target_ep, arg2, 0);
8621 goto efault;
8622 }
8623 target_to_host_sigset(set, target_set);
8624 unlock_user(target_set, arg5, 0);
8625 } else {
8626 set = NULL;
8627 }
8628
8629 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8630 break;
8631 }
8632 #endif
8633 #if defined(TARGET_NR_epoll_wait)
8634 case TARGET_NR_epoll_wait:
8635 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8636 break;
8637 #endif
8638 default:
8639 ret = -TARGET_ENOSYS;
8640 }
8641 if (!is_error(ret)) {
8642 int i;
8643 for (i = 0; i < ret; i++) {
8644 target_ep[i].events = tswap32(ep[i].events);
8645 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8646 }
8647 }
8648 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8649 break;
8650 }
8651 #endif
8652 #endif
8653 #ifdef TARGET_NR_prlimit64
8654 case TARGET_NR_prlimit64:
8655 {
8656 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
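        /* The limits are 64-bit even for 32-bit guests, hence the tswap64
           conversions; a zero new-limit pointer turns this into a pure
           query of the old limits. */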
8657 struct target_rlimit64 *target_rnew, *target_rold;
8658 struct host_rlimit64 rnew, rold, *rnewp = 0;
8659 if (arg3) {
8660 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8661 goto efault;
8662 }
8663 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8664 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8665 unlock_user_struct(target_rnew, arg3, 0);
8666 rnewp = &rnew;
8667 }
8668
8669 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8670 if (!is_error(ret) && arg4) {
8671 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8672 goto efault;
8673 }
8674 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8675 target_rold->rlim_max = tswap64(rold.rlim_max);
8676 unlock_user_struct(target_rold, arg4, 1);
8677 }
8678 break;
8679 }
8680 #endif
8681 default:
8682 unimplemented:
8683 gemu_log("qemu: Unsupported syscall: %d\n", num);
8684 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8685 unimplemented_nowarn:
8686 #endif
8687 ret = -TARGET_ENOSYS;
8688 break;
8689 }
8690 fail:
8691 #ifdef DEBUG
8692 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8693 #endif
8694 if(do_strace)
8695 print_syscall_ret(num, ret);
8696 return ret;
8697 efault:
8698 ret = -TARGET_EFAULT;
8699 goto fail;
8700 }