/*
 * qemu.git — linux-user/syscall.c
 * (snapshot via git.proxmox.com gitweb; merge of 'mst/tags/for_anthony')
 */
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu-xattr.h"
75 #endif
76
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include "linux_loop.h"
100 #include "cpu-uname.h"
101
102 #include "qemu.h"
103
/* Clone flags that the NPTL emulation layer knows how to handle; without
 * NPTL support none of them are honoured, so they collapse to zero. */
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values. */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

/* VFAT directory ioctls, declared locally because <linux/msdos_fs.h> is
 * not available on every build host. */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
117
118
/* Local replacements for the libc _syscallN() macros: each expands to a
 * small static wrapper that issues the raw host syscall via syscall(2).
 * Any host-header versions are #undef'd first so ours always win. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
		  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
172
173
/* Map the local sys_* names used by the wrappers below onto the raw host
 * syscall numbers, so _syscallN(type, sys_foo, ...) resolves correctly. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

/* These 64-bit hosts have no separate _llseek syscall; plain lseek
 * already takes a 64-bit offset. */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
254
/* Translation table between target and host open(2)/fcntl() flag
 * encodings; each row is (target mask, target bits, host mask, host bits).
 * The table is terminated by an all-zero entry. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};
274
/* Copy one utsname field, always leaving a NUL terminator.
 * __NEW_UTS_LEN does not include the terminating NUL, so the
 * destination buffers are one byte longer than that. */
static void copy_utsname_field(char *dest, const char *src)
{
    (void) strncpy(dest, src, __NEW_UTS_LEN);
    dest[__NEW_UTS_LEN] = '\0';
}

/*
 * Fill in a kernel-style struct new_utsname from the host's uname(2).
 * The two structures are kept distinct because the libc's struct utsname
 * and the kernel's struct new_utsname may differ in layout.
 * Returns 0 on success, -1 (with errno set by uname) on failure.
 */
static int sys_uname(struct new_utsname *buf)
{
    struct utsname host_uts;

    if (uname(&host_uts) < 0) {
        return -1;
    }

    memset(buf, 0, sizeof(*buf));
    copy_utsname_field(buf->sysname, host_uts.sysname);
    copy_utsname_field(buf->nodename, host_uts.nodename);
    copy_utsname_field(buf->release, host_uts.release);
    copy_utsname_field(buf->version, host_uts.version);
    copy_utsname_field(buf->machine, host_uts.machine);
#ifdef _GNU_SOURCE
    /* The host struct utsname only carries a domainname under GNU. */
    copy_utsname_field(buf->domainname, host_uts.domainname);
#endif
    return 0;
}
308
/*
 * getcwd() with the kernel's return convention: on success the length
 * of the path *including* the trailing NUL, on failure -1 with errno
 * already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* errno was set by getcwd() */
        return -1;
    }
    return strlen(cwd) + 1;
}
317
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

/* Each wrapper below forwards straight to the host libc *at() function,
 * filling in a zero flags argument where the target syscall does not
 * carry one.  Each is compiled only if the target actually defines the
 * corresponding syscall number. */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
/* Same host call as sys_fstatat64; the two names mirror the two guest
 * syscall numbers (32-bit vs 64-bit ABIs). */
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
    return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
/* The host libc has no *at() wrappers, so declare raw-syscall stubs for
 * any of them the target needs and the host kernel provides. */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
         const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
      int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
482
#ifdef CONFIG_UTIMENSAT
/* Forward to the host utimensat(); a NULL pathname means "operate on the
 * fd itself", which the libc exposes as futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
/* No host libc wrapper available: issue the raw syscall instead. */
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */
498
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin forwarders to the host inotify API; each is compiled only when
 * both the target and the host define the corresponding syscall. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
/* Undefining the TARGET_NR_* numbers makes the big syscall switch fall
 * through to ENOSYS for these calls. */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
535
#if defined(TARGET_NR_ppoll)
/* If the host kernel predates ppoll, define its number as -1 so the stub
 * still compiles; the runtime call will then simply fail. */
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
/* Same -1 fallback trick as for ppoll above. */
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
569
/* Host libc functions used below that lack prototypes in the headers we
 * include. */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Only the EABI variant of ARM requires the alignment. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
586
/* Size of the errno translation tables; must exceed every host and target
 * errno value that can appear. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 * An entry of 0 (the default) means "same number on both sides".
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
};
705
706 static inline int host_to_target_errno(int err)
707 {
708 if(host_to_target_errno_table[err])
709 return host_to_target_errno_table[err];
710 return err;
711 }
712
713 static inline int target_to_host_errno(int err)
714 {
715 if (target_to_host_errno_table[err])
716 return target_to_host_errno_table[err];
717 return err;
718 }
719
720 static inline abi_long get_errno(abi_long ret)
721 {
722 if (ret == -1)
723 return -host_to_target_errno(errno);
724 else
725 return ret;
726 }
727
/* True if `ret` encodes a (negated) errno: like the kernel, values in the
 * top 4096 of the unsigned range are treated as error codes rather than
 * results. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
732
733 char *target_strerror(int err)
734 {
735 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
736 return NULL;
737 }
738 return strerror(target_to_host_errno(err));
739 }
740
/* Guest heap state for the brk emulation below. */
static abi_ulong target_brk;          /* current guest break */
static abi_ulong target_original_brk; /* initial break; brk never goes below it */
static abi_ulong brk_page;            /* first page past memory reserved for the heap */
744
/* Record the initial guest break, rounded up to a host page boundary.
 * NOTE(review): presumably called once during image load, before any
 * do_brk() call — confirm against the loader. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
750
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos.
 *
 * Emulates the guest's brk(2): grows or shrinks the heap within the pages
 * already reserved (brk_page), and mmap()s additional anonymous memory
 * directly after the heap when more is needed.  On any failure the
 * previous break is returned unchanged (except on Alpha, see below). */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the traditional "query current break". */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* The break may never be moved below its initial value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}
831
/* Rebuild a host fd_set from the guest fd_set bitmap at target_fds_addr.
 * n is the number of descriptors to consider (as for select(2)).
 * Returns 0, or -TARGET_EFAULT if the guest memory cannot be read. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of abi_ulong words covering n bits, rounded up. */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;                              /* running fd number */
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
863
864 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
865 abi_ulong target_fds_addr,
866 int n)
867 {
868 if (target_fds_addr) {
869 if (copy_from_user_fdset(fds, target_fds_addr, n))
870 return -TARGET_EFAULT;
871 *fds_ptr = fds;
872 } else {
873 *fds_ptr = NULL;
874 }
875 return 0;
876 }
877
/* Write a host fd_set back to the guest fd_set bitmap at target_fds_addr.
 * n is the number of descriptors to consider.
 * Returns 0, or -TARGET_EFAULT if the guest memory cannot be written. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    /* Number of abi_ulong words covering n bits, rounded up. */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;                              /* running fd number */
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
907
/* Tick rate the host kernel uses for clock_t values: 1024 on Alpha,
 * 100 everywhere else. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from host HZ to target HZ.  The 64-bit
 * intermediate avoids overflow in the multiplication. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
922
/* Copy a host struct rusage into the guest structure at target_addr,
 * byte-swapping every field for the guest's endianness.
 * Returns 0, or -TARGET_EFAULT if the guest memory cannot be written. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
952
/* Convert a guest rlimit value (guest byte order) to the host rlim_t,
 * mapping the guest's infinity encoding — and any value that does not
 * survive the conversion to rlim_t — to the host RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;
    
    target_rlim_swap = tswapal(target_rlim);  /* guest byte order -> host */
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Saturate to infinity rather than silently impose a truncated,
     * smaller limit. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;
    
    return result;
}
968
/* Convert a host rlim_t to the guest encoding (guest byte order); values
 * that cannot be represented become TARGET_RLIM_INFINITY.
 * NOTE(review): the round-trip check casts to the *signed* abi_long, so
 * host values above the signed range also map to infinity — confirm this
 * matches the target's RLIM_INFINITY convention. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;
    
    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);  /* host byte order -> guest */
    
    return result;
}
982
/* Map a guest RLIMIT_* resource code to the host's numbering.  Unknown
 * codes are passed through unchanged and left for the host syscall to
 * reject. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1020
1021 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1022 abi_ulong target_tv_addr)
1023 {
1024 struct target_timeval *target_tv;
1025
1026 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1027 return -TARGET_EFAULT;
1028
1029 __get_user(tv->tv_sec, &target_tv->tv_sec);
1030 __get_user(tv->tv_usec, &target_tv->tv_usec);
1031
1032 unlock_user_struct(target_tv, target_tv_addr, 0);
1033
1034 return 0;
1035 }
1036
1037 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1038 const struct timeval *tv)
1039 {
1040 struct target_timeval *target_tv;
1041
1042 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1043 return -TARGET_EFAULT;
1044
1045 __put_user(tv->tv_sec, &target_tv->tv_sec);
1046 __put_user(tv->tv_usec, &target_tv->tv_usec);
1047
1048 unlock_user_struct(target_tv, target_tv_addr, 1);
1049
1050 return 0;
1051 }
1052
1053 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1054 #include <mqueue.h>
1055
1056 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1057 abi_ulong target_mq_attr_addr)
1058 {
1059 struct target_mq_attr *target_mq_attr;
1060
1061 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1062 target_mq_attr_addr, 1))
1063 return -TARGET_EFAULT;
1064
1065 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1066 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1067 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1068 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1069
1070 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1071
1072 return 0;
1073 }
1074
1075 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1076 const struct mq_attr *attr)
1077 {
1078 struct target_mq_attr *target_mq_attr;
1079
1080 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1081 target_mq_attr_addr, 0))
1082 return -TARGET_EFAULT;
1083
1084 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1085 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1086 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1087 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1088
1089 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1090
1091 return 0;
1092 }
1093 #endif
1094
1095 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1096 /* do_select() must return target values and target errnos. */
/* Run host select() on up to 'n' descriptors described by guest fd_set
 * and timeval pointers (any of which may be 0).  On success the fd_sets
 * and the timeout are copied back to the guest. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    /* NOTE(review): presumably the helper leaves *_ptr NULL when the
     * guest address is 0, matching the addr checks in the copy-back
     * below — confirm against copy_from_user_fdset_ptr(). */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* A 0 timeout address means block indefinitely (NULL timeout). */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        /* Copy the (possibly modified) sets and the remaining timeout
         * back to the guest; only for addresses the guest supplied. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
1143 #endif
1144
/* Create a pipe honouring 'flags' via the host pipe2(2).  Returns the
 * raw host result (0, or -1 with errno set), or -ENOSYS when QEMU was
 * built without pipe2 support. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1153
/* Common implementation of pipe/pipe2.  For pipe2 (is_pipe2 != 0) or a
 * non-zero flags value, pipe2 semantics are used; otherwise plain
 * pipe().  Returns 0 (or the read fd, on the targets below) on
 * success, or a target errno. */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd is returned in a register; first fd is the
         * syscall's return value. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1184
1185 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1186 abi_ulong target_addr,
1187 socklen_t len)
1188 {
1189 struct target_ip_mreqn *target_smreqn;
1190
1191 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1192 if (!target_smreqn)
1193 return -TARGET_EFAULT;
1194 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1195 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1196 if (len == sizeof(struct target_ip_mreqn))
1197 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1198 unlock_user(target_smreqn, target_addr, 0);
1199
1200 return 0;
1201 }
1202
/* Copy a guest sockaddr of 'len' bytes into the host buffer 'addr',
 * fixing up the family field's byte order and, for AF_UNIX, the
 * sun_path termination (which may grow len by one byte — callers must
 * allocate len + 1 bytes).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte non-NUL but the next one is NUL: include the
             * terminator in the copied length. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        /* Never copy more than a host sockaddr_un can hold. */
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    /* Only the family field is byte-swapped; the rest of the address
     * body is copied as-is. */
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1242
1243 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1244 struct sockaddr *addr,
1245 socklen_t len)
1246 {
1247 struct target_sockaddr *target_saddr;
1248
1249 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1250 if (!target_saddr)
1251 return -TARGET_EFAULT;
1252 memcpy(target_saddr, addr, len);
1253 target_saddr->sa_family = tswap16(addr->sa_family);
1254 unlock_user(target_saddr, target_addr, len);
1255
1256 return 0;
1257 }
1258
1259 /* ??? Should this also swap msgh->name? */
/* Convert the ancillary-data (control message) block of a guest msghdr
 * into the host msghdr, walking both chains in parallel.  Only
 * SCM_RIGHTS (fd passing) gets field-level conversion; other message
 * types are copied raw with a warning.  msgh->msg_controllen is set to
 * the total host space consumed. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Too small to hold even one header: treat as empty. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = guest cmsg_len minus the guest header size. */
        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        /* Stop before overrunning the host control buffer. */
        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        /* NOTE(review): the level is compared against TARGET_SOL_SOCKET
         * after being converted to a host value — verify the two
         * constants coincide for all supported targets. */
        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            /* SCM_RIGHTS: the payload is an array of 32-bit fds that
             * must be individually byte-swapped. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1315
1316 /* ??? Should this also swap msgh->name? */
/* Convert the ancillary-data block of a host msghdr back into the
 * guest msghdr — the mirror of target_to_host_cmsg().  Only SCM_RIGHTS
 * gets field-level conversion; other types are copied raw with a
 * warning.  The guest's msg_controllen is set to the space consumed. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Guest buffer too small for even one header: nothing to copy. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = host cmsg_len minus the host header size. */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        /* Stop before overrunning the guest control buffer. */
        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            /* SCM_RIGHTS: byte-swap each 32-bit fd individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1371
1372 /* do_setsockopt() Must return target values and target errnos. */
1373 static abi_long do_setsockopt(int sockfd, int level, int optname,
1374 abi_ulong optval_addr, socklen_t optlen)
1375 {
1376 abi_long ret;
1377 int val;
1378 struct ip_mreqn *ip_mreq;
1379 struct ip_mreq_source *ip_mreq_source;
1380
1381 switch(level) {
1382 case SOL_TCP:
1383 /* TCP options all take an 'int' value. */
1384 if (optlen < sizeof(uint32_t))
1385 return -TARGET_EINVAL;
1386
1387 if (get_user_u32(val, optval_addr))
1388 return -TARGET_EFAULT;
1389 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1390 break;
1391 case SOL_IP:
1392 switch(optname) {
1393 case IP_TOS:
1394 case IP_TTL:
1395 case IP_HDRINCL:
1396 case IP_ROUTER_ALERT:
1397 case IP_RECVOPTS:
1398 case IP_RETOPTS:
1399 case IP_PKTINFO:
1400 case IP_MTU_DISCOVER:
1401 case IP_RECVERR:
1402 case IP_RECVTOS:
1403 #ifdef IP_FREEBIND
1404 case IP_FREEBIND:
1405 #endif
1406 case IP_MULTICAST_TTL:
1407 case IP_MULTICAST_LOOP:
1408 val = 0;
1409 if (optlen >= sizeof(uint32_t)) {
1410 if (get_user_u32(val, optval_addr))
1411 return -TARGET_EFAULT;
1412 } else if (optlen >= 1) {
1413 if (get_user_u8(val, optval_addr))
1414 return -TARGET_EFAULT;
1415 }
1416 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1417 break;
1418 case IP_ADD_MEMBERSHIP:
1419 case IP_DROP_MEMBERSHIP:
1420 if (optlen < sizeof (struct target_ip_mreq) ||
1421 optlen > sizeof (struct target_ip_mreqn))
1422 return -TARGET_EINVAL;
1423
1424 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1425 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1426 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1427 break;
1428
1429 case IP_BLOCK_SOURCE:
1430 case IP_UNBLOCK_SOURCE:
1431 case IP_ADD_SOURCE_MEMBERSHIP:
1432 case IP_DROP_SOURCE_MEMBERSHIP:
1433 if (optlen != sizeof (struct target_ip_mreq_source))
1434 return -TARGET_EINVAL;
1435
1436 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1437 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1438 unlock_user (ip_mreq_source, optval_addr, 0);
1439 break;
1440
1441 default:
1442 goto unimplemented;
1443 }
1444 break;
1445 case TARGET_SOL_SOCKET:
1446 switch (optname) {
1447 /* Options with 'int' argument. */
1448 case TARGET_SO_DEBUG:
1449 optname = SO_DEBUG;
1450 break;
1451 case TARGET_SO_REUSEADDR:
1452 optname = SO_REUSEADDR;
1453 break;
1454 case TARGET_SO_TYPE:
1455 optname = SO_TYPE;
1456 break;
1457 case TARGET_SO_ERROR:
1458 optname = SO_ERROR;
1459 break;
1460 case TARGET_SO_DONTROUTE:
1461 optname = SO_DONTROUTE;
1462 break;
1463 case TARGET_SO_BROADCAST:
1464 optname = SO_BROADCAST;
1465 break;
1466 case TARGET_SO_SNDBUF:
1467 optname = SO_SNDBUF;
1468 break;
1469 case TARGET_SO_RCVBUF:
1470 optname = SO_RCVBUF;
1471 break;
1472 case TARGET_SO_KEEPALIVE:
1473 optname = SO_KEEPALIVE;
1474 break;
1475 case TARGET_SO_OOBINLINE:
1476 optname = SO_OOBINLINE;
1477 break;
1478 case TARGET_SO_NO_CHECK:
1479 optname = SO_NO_CHECK;
1480 break;
1481 case TARGET_SO_PRIORITY:
1482 optname = SO_PRIORITY;
1483 break;
1484 #ifdef SO_BSDCOMPAT
1485 case TARGET_SO_BSDCOMPAT:
1486 optname = SO_BSDCOMPAT;
1487 break;
1488 #endif
1489 case TARGET_SO_PASSCRED:
1490 optname = SO_PASSCRED;
1491 break;
1492 case TARGET_SO_TIMESTAMP:
1493 optname = SO_TIMESTAMP;
1494 break;
1495 case TARGET_SO_RCVLOWAT:
1496 optname = SO_RCVLOWAT;
1497 break;
1498 case TARGET_SO_RCVTIMEO:
1499 optname = SO_RCVTIMEO;
1500 break;
1501 case TARGET_SO_SNDTIMEO:
1502 optname = SO_SNDTIMEO;
1503 break;
1504 break;
1505 default:
1506 goto unimplemented;
1507 }
1508 if (optlen < sizeof(uint32_t))
1509 return -TARGET_EINVAL;
1510
1511 if (get_user_u32(val, optval_addr))
1512 return -TARGET_EFAULT;
1513 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1514 break;
1515 default:
1516 unimplemented:
1517 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1518 ret = -TARGET_ENOPROTOOPT;
1519 }
1520 return ret;
1521 }
1522
1523 /* do_getsockopt() Must return target values and target errnos. */
/* Implement getsockopt(2) for the guest.  Translates TARGET_SOL_SOCKET
 * option names to host ones; SOL_TCP and SOL_IP integer options pass
 * through.  The result and its length are written back to guest
 * memory.  Returns 0 on success or a target errno. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* Returns a struct ucred; each field is copied to the
             * guest's target_ucred individually. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Guest asked for more than the host produced: clamp. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            /* Unknown names are passed through unchanged. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): 'lv = sizeof(lv)' works only because socklen_t
         * and int have the same size on Linux; sizeof(val) would state
         * the intent directly. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        /* Write back 4 bytes if the guest asked for at least that,
         * otherwise a single byte. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small guest buffer and a byte-sized value: return one
             * byte; otherwise return a (possibly clamped) int. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
1704
1705 /* FIXME
1706 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1707 * other lock functions have a return code of 0 for failure.
1708 */
/* Build a host iovec array from a guest target_iovec array at
 * target_addr, locking each non-empty buffer into host memory.
 * 'type' is VERIFY_READ/VERIFY_WRITE and 'copy' whether to copy guest
 * data in.  Returns 0, or -TARGET_EFAULT if the iovec array itself is
 * unreadable.  Pair with unlock_iovec(). */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapal(target_vec[i].iov_base);
        vec[i].iov_len = tswapal(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if a element has invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}
1734
1735 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1736 int count, int copy)
1737 {
1738 struct target_iovec *target_vec;
1739 abi_ulong base;
1740 int i;
1741
1742 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1743 if (!target_vec)
1744 return -TARGET_EFAULT;
1745 for(i = 0;i < count; i++) {
1746 if (target_vec[i].iov_base) {
1747 base = tswapal(target_vec[i].iov_base);
1748 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1749 }
1750 }
1751 unlock_user (target_vec, target_addr, 0);
1752
1753 return 0;
1754 }
1755
1756 /* do_socket() Must return target values and target errnos. */
/* do_socket() Must return target values and target errnos.
 * On MIPS the SOCK_* type constants differ from the host's and are
 * translated first; PF_NETLINK sockets are refused outright. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    /* Netlink sockets are not emulated; refuse them. */
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
    return get_errno(socket(domain, type, protocol));
}
1785
1786 /* do_bind() Must return target values and target errnos. */
1787 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1788 socklen_t addrlen)
1789 {
1790 void *addr;
1791 abi_long ret;
1792
1793 if ((int)addrlen < 0) {
1794 return -TARGET_EINVAL;
1795 }
1796
1797 addr = alloca(addrlen+1);
1798
1799 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1800 if (ret)
1801 return ret;
1802
1803 return get_errno(bind(sockfd, addr, addrlen));
1804 }
1805
1806 /* do_connect() Must return target values and target errnos. */
1807 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1808 socklen_t addrlen)
1809 {
1810 void *addr;
1811 abi_long ret;
1812
1813 if ((int)addrlen < 0) {
1814 return -TARGET_EINVAL;
1815 }
1816
1817 addr = alloca(addrlen);
1818
1819 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1820 if (ret)
1821 return ret;
1822
1823 return get_errno(connect(sockfd, addr, addrlen));
1824 }
1825
1826 /* do_sendrecvmsg() Must return target values and target errnos. */
/* Common implementation of sendmsg/recvmsg ('send' selects direction).
 * Converts the guest msghdr (name, iovec array, control messages) to a
 * host one, performs the call, and for recvmsg converts the control
 * messages back.  Returns the byte count or a target errno. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* FIXME */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host control buffer is twice the guest size — host cmsg headers
     * and alignment can be larger than the guest's. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapal(msgp->msg_iov);
    /* NOTE(review): lock_iovec()'s return value is ignored here, so an
     * unreadable guest iovec array proceeds with undefined vec
     * contents — verify/fix alongside the FIXME above. */
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count across the cmsg conversion. */
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
1884
1885 /* do_accept() Must return target values and target errnos. */
1886 static abi_long do_accept(int fd, abi_ulong target_addr,
1887 abi_ulong target_addrlen_addr)
1888 {
1889 socklen_t addrlen;
1890 void *addr;
1891 abi_long ret;
1892
1893 if (target_addr == 0)
1894 return get_errno(accept(fd, NULL, NULL));
1895
1896 /* linux returns EINVAL if addrlen pointer is invalid */
1897 if (get_user_u32(addrlen, target_addrlen_addr))
1898 return -TARGET_EINVAL;
1899
1900 if ((int)addrlen < 0) {
1901 return -TARGET_EINVAL;
1902 }
1903
1904 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1905 return -TARGET_EINVAL;
1906
1907 addr = alloca(addrlen);
1908
1909 ret = get_errno(accept(fd, addr, &addrlen));
1910 if (!is_error(ret)) {
1911 host_to_target_sockaddr(target_addr, addr, addrlen);
1912 if (put_user_u32(addrlen, target_addrlen_addr))
1913 ret = -TARGET_EFAULT;
1914 }
1915 return ret;
1916 }
1917
1918 /* do_getpeername() Must return target values and target errnos. */
1919 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1920 abi_ulong target_addrlen_addr)
1921 {
1922 socklen_t addrlen;
1923 void *addr;
1924 abi_long ret;
1925
1926 if (get_user_u32(addrlen, target_addrlen_addr))
1927 return -TARGET_EFAULT;
1928
1929 if ((int)addrlen < 0) {
1930 return -TARGET_EINVAL;
1931 }
1932
1933 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1934 return -TARGET_EFAULT;
1935
1936 addr = alloca(addrlen);
1937
1938 ret = get_errno(getpeername(fd, addr, &addrlen));
1939 if (!is_error(ret)) {
1940 host_to_target_sockaddr(target_addr, addr, addrlen);
1941 if (put_user_u32(addrlen, target_addrlen_addr))
1942 ret = -TARGET_EFAULT;
1943 }
1944 return ret;
1945 }
1946
1947 /* do_getsockname() Must return target values and target errnos. */
1948 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1949 abi_ulong target_addrlen_addr)
1950 {
1951 socklen_t addrlen;
1952 void *addr;
1953 abi_long ret;
1954
1955 if (get_user_u32(addrlen, target_addrlen_addr))
1956 return -TARGET_EFAULT;
1957
1958 if ((int)addrlen < 0) {
1959 return -TARGET_EINVAL;
1960 }
1961
1962 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1963 return -TARGET_EFAULT;
1964
1965 addr = alloca(addrlen);
1966
1967 ret = get_errno(getsockname(fd, addr, &addrlen));
1968 if (!is_error(ret)) {
1969 host_to_target_sockaddr(target_addr, addr, addrlen);
1970 if (put_user_u32(addrlen, target_addrlen_addr))
1971 ret = -TARGET_EFAULT;
1972 }
1973 return ret;
1974 }
1975
1976 /* do_socketpair() Must return target values and target errnos. */
1977 static abi_long do_socketpair(int domain, int type, int protocol,
1978 abi_ulong target_tab_addr)
1979 {
1980 int tab[2];
1981 abi_long ret;
1982
1983 ret = get_errno(socketpair(domain, type, protocol, tab));
1984 if (!is_error(ret)) {
1985 if (put_user_s32(tab[0], target_tab_addr)
1986 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1987 ret = -TARGET_EFAULT;
1988 }
1989 return ret;
1990 }
1991
1992 /* do_sendto() Must return target values and target errnos. */
1993 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1994 abi_ulong target_addr, socklen_t addrlen)
1995 {
1996 void *addr;
1997 void *host_msg;
1998 abi_long ret;
1999
2000 if ((int)addrlen < 0) {
2001 return -TARGET_EINVAL;
2002 }
2003
2004 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2005 if (!host_msg)
2006 return -TARGET_EFAULT;
2007 if (target_addr) {
2008 addr = alloca(addrlen);
2009 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2010 if (ret) {
2011 unlock_user(host_msg, msg, 0);
2012 return ret;
2013 }
2014 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2015 } else {
2016 ret = get_errno(send(fd, host_msg, len, flags));
2017 }
2018 unlock_user(host_msg, msg, 0);
2019 return ret;
2020 }
2021
2022 /* do_recvfrom() Must return target values and target errnos. */
/* Common implementation of recvfrom (target_addr != 0) and recv
 * (target_addr == 0).  On success the received bytes, and optionally
 * the sender's address and its length, are copied to the guest.
 * Returns the byte count or a target errno. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received data back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
    fail:
        /* Error path: release the guest buffer without copying. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
2065
2066 #ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the socketcall(2) syscall: NUM selects the socket
 * operation and VPTR points at an array of abi_ulong argument slots in
 * guest memory.  Each case loads its arguments with get_user_ual()
 * (any faulting access yields -TARGET_EFAULT) and dispatches to the
 * matching do_*() helper or directly to the host syscall.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    /* Width of one slot in the guest argument array. */
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            /* No pointer arguments: call the host directly. */
            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            /* send(2) is sendto(2) with a NULL destination address. */
            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            /* recv(2) is recvfrom(2) with no source-address output. */
            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            abi_ulong fd;
            abi_ulong target_msg;
            abi_ulong flags;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            /* Shared helper; the flag selects send vs receive. */
            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
2320 #endif
2321
#define N_SHM_REGIONS 32

/* Bookkeeping for guest shmat() attaches: one slot per live segment
 * (start == 0 marks a free slot) so do_shmdt() can restore the page
 * flags over the correct length.  Filled by do_shmat(). */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];
2328
/* Guest-ABI layout of struct ipc_perm as embedded in the *_ds IPC
 * structures below; __pad/__unused fields are layout fillers. */
struct target_ipc_perm
{
    abi_long __key;             /* key passed to semget/msgget/shmget */
    abi_ulong uid;              /* owner's uid/gid */
    abi_ulong gid;
    abi_ulong cuid;             /* creator's uid/gid */
    abi_ulong cgid;
    unsigned short int mode;    /* access permission bits */
    unsigned short int __pad1;
    unsigned short int __seq;   /* slot usage sequence number */
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};
2343
/* Guest-ABI layout of struct semid_ds (semctl IPC_STAT/IPC_SET). */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;  /* ownership and permissions */
    abi_ulong sem_otime;              /* last semop time */
    abi_ulong __unused1;
    abi_ulong sem_ctime;              /* last change time */
    abi_ulong __unused2;
    abi_ulong sem_nsems;              /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
2355
2356 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2357 abi_ulong target_addr)
2358 {
2359 struct target_ipc_perm *target_ip;
2360 struct target_semid_ds *target_sd;
2361
2362 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2363 return -TARGET_EFAULT;
2364 target_ip = &(target_sd->sem_perm);
2365 host_ip->__key = tswapal(target_ip->__key);
2366 host_ip->uid = tswapal(target_ip->uid);
2367 host_ip->gid = tswapal(target_ip->gid);
2368 host_ip->cuid = tswapal(target_ip->cuid);
2369 host_ip->cgid = tswapal(target_ip->cgid);
2370 host_ip->mode = tswap16(target_ip->mode);
2371 unlock_user_struct(target_sd, target_addr, 0);
2372 return 0;
2373 }
2374
2375 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2376 struct ipc_perm *host_ip)
2377 {
2378 struct target_ipc_perm *target_ip;
2379 struct target_semid_ds *target_sd;
2380
2381 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2382 return -TARGET_EFAULT;
2383 target_ip = &(target_sd->sem_perm);
2384 target_ip->__key = tswapal(host_ip->__key);
2385 target_ip->uid = tswapal(host_ip->uid);
2386 target_ip->gid = tswapal(host_ip->gid);
2387 target_ip->cuid = tswapal(host_ip->cuid);
2388 target_ip->cgid = tswapal(host_ip->cgid);
2389 target_ip->mode = tswap16(host_ip->mode);
2390 unlock_user_struct(target_sd, target_addr, 1);
2391 return 0;
2392 }
2393
/* Convert a guest struct semid_ds at TARGET_ADDR into *HOST_SD.
 * NOTE(review): target_addr is locked a second time inside
 * target_to_host_ipc_perm(), and on that helper's failure path the
 * outer lock is never released — verify lock_user_struct nesting is
 * benign here. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
2409
/* Convert *HOST_SD and write it to the guest struct semid_ds at
 * TARGET_ADDR.  Same nested-lock caveat as target_to_host_semid_ds. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
2425
/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;     /* max number of semaphore sets */
    int semmns;     /* max number of semaphores system-wide */
    int semmnu;
    int semmsl;     /* max semaphores per set */
    int semopm;     /* max ops per semop call */
    int semume;
    int semusz;
    int semvmx;     /* max semaphore value */
    int semaem;
};
2438
2439 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2440 struct seminfo *host_seminfo)
2441 {
2442 struct target_seminfo *target_seminfo;
2443 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2444 return -TARGET_EFAULT;
2445 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2446 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2447 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2448 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2449 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2450 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2451 __put_user(host_seminfo->semume, &target_seminfo->semume);
2452 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2453 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2454 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2455 unlock_user_struct(target_seminfo, target_addr, 1);
2456 return 0;
2457 }
2458
/* Host-side union semun: glibc does not define it, callers must. */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side union semun: pointer members are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
2472
2473 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2474 abi_ulong target_addr)
2475 {
2476 int nsems;
2477 unsigned short *array;
2478 union semun semun;
2479 struct semid_ds semid_ds;
2480 int i, ret;
2481
2482 semun.buf = &semid_ds;
2483
2484 ret = semctl(semid, 0, IPC_STAT, semun);
2485 if (ret == -1)
2486 return get_errno(ret);
2487
2488 nsems = semid_ds.sem_nsems;
2489
2490 *host_array = malloc(nsems*sizeof(unsigned short));
2491 array = lock_user(VERIFY_READ, target_addr,
2492 nsems*sizeof(unsigned short), 1);
2493 if (!array)
2494 return -TARGET_EFAULT;
2495
2496 for(i=0; i<nsems; i++) {
2497 __get_user((*host_array)[i], &array[i]);
2498 }
2499 unlock_user(array, target_addr, 0);
2500
2501 return 0;
2502 }
2503
2504 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2505 unsigned short **host_array)
2506 {
2507 int nsems;
2508 unsigned short *array;
2509 union semun semun;
2510 struct semid_ds semid_ds;
2511 int i, ret;
2512
2513 semun.buf = &semid_ds;
2514
2515 ret = semctl(semid, 0, IPC_STAT, semun);
2516 if (ret == -1)
2517 return get_errno(ret);
2518
2519 nsems = semid_ds.sem_nsems;
2520
2521 array = lock_user(VERIFY_WRITE, target_addr,
2522 nsems*sizeof(unsigned short), 0);
2523 if (!array)
2524 return -TARGET_EFAULT;
2525
2526 for(i=0; i<nsems; i++) {
2527 __put_user((*host_array)[i], &array[i]);
2528 }
2529 free(*host_array);
2530 unlock_user(array, target_addr, 1);
2531
2532 return 0;
2533 }
2534
/* Emulate semctl(2).  TARGET_SU is the guest's union semun passed by
 * value; its pointer-carrying members (buf/array/__buf) hold guest
 * addresses that are converted through the helpers above.  Returns
 * the host result or a target errno (-TARGET_EINVAL for commands not
 * handled by the switch). */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip the IPC_64 flag (and any other high bits) from the command. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* NOTE(review): on 64-bit cross-endian guests reading .val may
         * pick up the wrong half of the union — TODO confirm.  The
         * store back into target_su below only updates the by-value
         * local copy, so GETVAL's result reaches the guest via ret. */
        arg.val = tswap32(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswap32(arg.val);
        break;
    case GETALL:
    case SETALL:
        /* Bounce the whole value array through a host buffer. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands carry no argument that needs converting. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
2594
/* Guest-ABI layout of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation (signed delta) */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
2600
2601 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2602 abi_ulong target_addr,
2603 unsigned nsops)
2604 {
2605 struct target_sembuf *target_sembuf;
2606 int i;
2607
2608 target_sembuf = lock_user(VERIFY_READ, target_addr,
2609 nsops*sizeof(struct target_sembuf), 1);
2610 if (!target_sembuf)
2611 return -TARGET_EFAULT;
2612
2613 for(i=0; i<nsops; i++) {
2614 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2615 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2616 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2617 }
2618
2619 unlock_user(target_sembuf, target_addr, 0);
2620
2621 return 0;
2622 }
2623
2624 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2625 {
2626 struct sembuf sops[nsops];
2627
2628 if (target_to_host_sembuf(sops, ptr, nsops))
2629 return -TARGET_EFAULT;
2630
2631 return semop(semid, sops, nsops);
2632 }
2633
/* Guest-ABI layout of struct msqid_ds (msgctl IPC_STAT/IPC_SET).
 * On 32-bit ABIs the time fields are padded to 64 bits. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* bytes currently on queue */
    abi_ulong msg_qnum;         /* messages currently on queue */
    abi_ulong msg_qbytes;       /* max bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd */
    abi_ulong msg_lrpid;        /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
2657
/* Convert a guest struct msqid_ds at TARGET_ADDR into *HOST_MD.
 * NOTE(review): same nested lock_user_struct pattern as the semid_ds
 * converters — the outer lock is not released if the embedded
 * ipc_perm conversion faults. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
2678
/* Convert *HOST_MD and write it to the guest struct msqid_ds at
 * TARGET_ADDR.  Same nested-lock caveat as target_to_host_msqid_ds. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
2699
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;     /* max message size */
    int msgmnb;     /* default max bytes per queue */
    int msgmni;     /* max number of queues */
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2710
2711 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2712 struct msginfo *host_msginfo)
2713 {
2714 struct target_msginfo *target_msginfo;
2715 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2716 return -TARGET_EFAULT;
2717 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2718 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2719 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2720 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2721 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2722 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2723 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2724 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2725 unlock_user_struct(target_msginfo, target_addr, 1);
2726 return 0;
2727 }
2728
2729 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2730 {
2731 struct msqid_ds dsarg;
2732 struct msginfo msginfo;
2733 abi_long ret = -TARGET_EINVAL;
2734
2735 cmd &= 0xff;
2736
2737 switch (cmd) {
2738 case IPC_STAT:
2739 case IPC_SET:
2740 case MSG_STAT:
2741 if (target_to_host_msqid_ds(&dsarg,ptr))
2742 return -TARGET_EFAULT;
2743 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2744 if (host_to_target_msqid_ds(ptr,&dsarg))
2745 return -TARGET_EFAULT;
2746 break;
2747 case IPC_RMID:
2748 ret = get_errno(msgctl(msgid, cmd, NULL));
2749 break;
2750 case IPC_INFO:
2751 case MSG_INFO:
2752 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2753 if (host_to_target_msginfo(ptr, &msginfo))
2754 return -TARGET_EFAULT;
2755 break;
2756 }
2757
2758 return ret;
2759 }
2760
/* Guest-ABI layout of struct msgbuf; mtext is a flexible payload
 * declared with the traditional [1] idiom. */
struct target_msgbuf {
    abi_long mtype;     /* message type, must be > 0 */
    char mtext[1];      /* message payload (variable length) */
};
2765
2766 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2767 unsigned int msgsz, int msgflg)
2768 {
2769 struct target_msgbuf *target_mb;
2770 struct msgbuf *host_mb;
2771 abi_long ret = 0;
2772
2773 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2774 return -TARGET_EFAULT;
2775 host_mb = malloc(msgsz+sizeof(long));
2776 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2777 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2778 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2779 free(host_mb);
2780 unlock_user_struct(target_mb, msgp, 0);
2781
2782 return ret;
2783 }
2784
2785 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2786 unsigned int msgsz, abi_long msgtyp,
2787 int msgflg)
2788 {
2789 struct target_msgbuf *target_mb;
2790 char *target_mtext;
2791 struct msgbuf *host_mb;
2792 abi_long ret = 0;
2793
2794 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2795 return -TARGET_EFAULT;
2796
2797 host_mb = malloc(msgsz+sizeof(long));
2798 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2799
2800 if (ret > 0) {
2801 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2802 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2803 if (!target_mtext) {
2804 ret = -TARGET_EFAULT;
2805 goto end;
2806 }
2807 memcpy(target_mb->mtext, host_mb->mtext, ret);
2808 unlock_user(target_mtext, target_mtext_addr, ret);
2809 }
2810
2811 target_mb->mtype = tswapal(host_mb->mtype);
2812 free(host_mb);
2813
2814 end:
2815 if (target_mb)
2816 unlock_user_struct(target_mb, msgp, 1);
2817 return ret;
2818 }
2819
/* Guest-ABI layout of struct shmid_ds (shmctl IPC_STAT/IPC_SET).
 * On 32-bit ABIs the time fields are padded to 64 bits. */
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;        /* segment size in bytes */
    abi_ulong shm_atime;        /* last attach time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;        /* last detach time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    int shm_cpid;               /* creator pid */
    int shm_lpid;               /* pid of last shmat/shmdt */
    abi_ulong shm_nattch;       /* number of current attaches */
    unsigned long int __unused4;
    unsigned long int __unused5;
};
2842
2843 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2844 abi_ulong target_addr)
2845 {
2846 struct target_shmid_ds *target_sd;
2847
2848 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2849 return -TARGET_EFAULT;
2850 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2851 return -TARGET_EFAULT;
2852 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2853 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2854 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2855 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2856 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2857 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2858 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2859 unlock_user_struct(target_sd, target_addr, 0);
2860 return 0;
2861 }
2862
2863 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2864 struct shmid_ds *host_sd)
2865 {
2866 struct target_shmid_ds *target_sd;
2867
2868 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2869 return -TARGET_EFAULT;
2870 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2871 return -TARGET_EFAULT;
2872 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2873 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2874 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2875 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2876 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2877 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2878 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2879 unlock_user_struct(target_sd, target_addr, 1);
2880 return 0;
2881 }
2882
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
2890
2891 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2892 struct shminfo *host_shminfo)
2893 {
2894 struct target_shminfo *target_shminfo;
2895 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2896 return -TARGET_EFAULT;
2897 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2898 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2899 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2900 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2901 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2902 unlock_user_struct(target_shminfo, target_addr, 1);
2903 return 0;
2904 }
2905
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* segments currently in use */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
2914
2915 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2916 struct shm_info *host_shm_info)
2917 {
2918 struct target_shm_info *target_shm_info;
2919 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2920 return -TARGET_EFAULT;
2921 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2922 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2923 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2924 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2925 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2926 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2927 unlock_user_struct(target_shm_info, target_addr, 1);
2928 return 0;
2929 }
2930
/* Emulate shmctl(2): convert the guest argument at BUF according to
 * CMD, call the host, convert the result back.  Returns the host
 * result or a target errno (-TARGET_EINVAL for unhandled commands). */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and other version flags. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel returns a struct shminfo through the shmid_ds
         * pointer for this command, hence the cast. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No argument to convert for these. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
2969
/* Emulate shmat(2): attach segment SHMID either at the guest-supplied
 * SHMADDR or, when SHMADDR is 0, at a free region located with
 * mmap_find_vma().  Records the mapping in shm_regions[] so
 * do_shmdt() can undo the page flags.  Returns the guest address of
 * the attach or a target errno. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Serialize against other guest-address-space changes. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP presumably because the found region may
             * already carry a placeholder mapping — TODO confirm
             * against mmap_find_vma(). */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid and readable, writable unless the
     * segment was attached read-only. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember start/size in the first free bookkeeping slot. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
3022
3023 static inline abi_long do_shmdt(abi_ulong shmaddr)
3024 {
3025 int i;
3026
3027 for (i = 0; i < N_SHM_REGIONS; ++i) {
3028 if (shm_regions[i].start == shmaddr) {
3029 shm_regions[i].start = 0;
3030 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3031 break;
3032 }
3033 }
3034
3035 return get_errno(shmdt(g2h(shmaddr)));
3036 }
3037
3038 #ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the ipc(2) syscall: CALL encodes both the
 * operation (low 16 bits) and an interface version (high 16 bits).
 * FIRST/SECOND/THIRD are pass-through integer arguments; PTR and
 * FIFTH carry guest addresses for the pointer-taking operations. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        /* ptr carries the guest's union semun by value. */
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv: ptr points at a kludge struct
                 * bundling the msgbuf pointer and the message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2-style shmat is not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
3137 #endif
3138
/* kernel structure types definitions */

/* First expansion of syscall_types.h: give every STRUCT(name, ...)
 * entry a STRUCT_<name> enum identifier. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a TYPE_NULL-terminated field description
 * table (struct_<name>_def) per struct, consumed by the thunk
 * conversion code; STRUCT_SPECIAL entries provide theirs by hand. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
3154
/* Table-driven ioctl emulation: each IOCTLEntry maps a guest ioctl
 * request number onto its host counterpart plus a description of how
 * to convert the argument. */
typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler hook for ioctls whose arguments cannot be converted
 * generically by the thunk machinery. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

struct IOCTLEntry {
    unsigned int target_cmd;    /* request number in the guest ABI */
    unsigned int host_cmd;      /* corresponding host request number */
    const char *name;           /* for logging */
    int access;                 /* IOC_R/IOC_W/IOC_RW argument direction */
    do_ioctl_fn *do_ioctl;      /* non-NULL => use this custom handler */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed temporary conversion buffer (buf_temp). */
#define MAX_STRUCT_SIZE 4096
3174
3175 #ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Custom ioctl handler for FS_IOC_FIEMAP.  Returns the host ioctl
 * result or a target errno. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size fiemap header from guest memory. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count before computing the output size. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
3262 #endif
3263
3264 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3265 int fd, abi_long cmd, abi_long arg)
3266 {
3267 const argtype *arg_type = ie->arg_type;
3268 int target_size;
3269 void *argptr;
3270 int ret;
3271 struct ifconf *host_ifconf;
3272 uint32_t outbufsz;
3273 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3274 int target_ifreq_size;
3275 int nb_ifreq;
3276 int free_buf = 0;
3277 int i;
3278 int target_ifc_len;
3279 abi_long target_ifc_buf;
3280 int host_ifc_len;
3281 char *host_ifc_buf;
3282
3283 assert(arg_type[0] == TYPE_PTR);
3284 assert(ie->access == IOC_RW);
3285
3286 arg_type++;
3287 target_size = thunk_type_size(arg_type, 0);
3288
3289 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3290 if (!argptr)
3291 return -TARGET_EFAULT;
3292 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3293 unlock_user(argptr, arg, 0);
3294
3295 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3296 target_ifc_len = host_ifconf->ifc_len;
3297 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3298
3299 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3300 nb_ifreq = target_ifc_len / target_ifreq_size;
3301 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3302
3303 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3304 if (outbufsz > MAX_STRUCT_SIZE) {
3305 /* We can't fit all the extents into the fixed size buffer.
3306 * Allocate one that is large enough and use it instead.
3307 */
3308 host_ifconf = malloc(outbufsz);
3309 if (!host_ifconf) {
3310 return -TARGET_ENOMEM;
3311 }
3312 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3313 free_buf = 1;
3314 }
3315 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3316
3317 host_ifconf->ifc_len = host_ifc_len;
3318 host_ifconf->ifc_buf = host_ifc_buf;
3319
3320 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3321 if (!is_error(ret)) {
3322 /* convert host ifc_len to target ifc_len */
3323
3324 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3325 target_ifc_len = nb_ifreq * target_ifreq_size;
3326 host_ifconf->ifc_len = target_ifc_len;
3327
3328 /* restore target ifc_buf */
3329
3330 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3331
3332 /* copy struct ifconf to target user */
3333
3334 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3335 if (!argptr)
3336 return -TARGET_EFAULT;
3337 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3338 unlock_user(argptr, arg, target_size);
3339
3340 /* copy ifreq[] to target user */
3341
3342 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3343 for (i = 0; i < nb_ifreq ; i++) {
3344 thunk_convert(argptr + i * target_ifreq_size,
3345 host_ifc_buf + i * sizeof(struct ifreq),
3346 ifreq_arg_type, THUNK_TARGET);
3347 }
3348 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3349 }
3350
3351 if (free_buf) {
3352 free(host_ifconf);
3353 }
3354
3355 return ret;
3356 }
3357
3358 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3359 abi_long cmd, abi_long arg)
3360 {
3361 void *argptr;
3362 struct dm_ioctl *host_dm;
3363 abi_long guest_data;
3364 uint32_t guest_data_size;
3365 int target_size;
3366 const argtype *arg_type = ie->arg_type;
3367 abi_long ret;
3368 void *big_buf = NULL;
3369 char *host_data;
3370
3371 arg_type++;
3372 target_size = thunk_type_size(arg_type, 0);
3373 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3374 if (!argptr) {
3375 ret = -TARGET_EFAULT;
3376 goto out;
3377 }
3378 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3379 unlock_user(argptr, arg, 0);
3380
3381 /* buf_temp is too small, so fetch things into a bigger buffer */
3382 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3383 memcpy(big_buf, buf_temp, target_size);
3384 buf_temp = big_buf;
3385 host_dm = big_buf;
3386
3387 guest_data = arg + host_dm->data_start;
3388 if ((guest_data - arg) < 0) {
3389 ret = -EINVAL;
3390 goto out;
3391 }
3392 guest_data_size = host_dm->data_size - host_dm->data_start;
3393 host_data = (char*)host_dm + host_dm->data_start;
3394
3395 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3396 switch (ie->host_cmd) {
3397 case DM_REMOVE_ALL:
3398 case DM_LIST_DEVICES:
3399 case DM_DEV_CREATE:
3400 case DM_DEV_REMOVE:
3401 case DM_DEV_SUSPEND:
3402 case DM_DEV_STATUS:
3403 case DM_DEV_WAIT:
3404 case DM_TABLE_STATUS:
3405 case DM_TABLE_CLEAR:
3406 case DM_TABLE_DEPS:
3407 case DM_LIST_VERSIONS:
3408 /* no input data */
3409 break;
3410 case DM_DEV_RENAME:
3411 case DM_DEV_SET_GEOMETRY:
3412 /* data contains only strings */
3413 memcpy(host_data, argptr, guest_data_size);
3414 break;
3415 case DM_TARGET_MSG:
3416 memcpy(host_data, argptr, guest_data_size);
3417 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3418 break;
3419 case DM_TABLE_LOAD:
3420 {
3421 void *gspec = argptr;
3422 void *cur_data = host_data;
3423 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3424 int spec_size = thunk_type_size(arg_type, 0);
3425 int i;
3426
3427 for (i = 0; i < host_dm->target_count; i++) {
3428 struct dm_target_spec *spec = cur_data;
3429 uint32_t next;
3430 int slen;
3431
3432 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3433 slen = strlen((char*)gspec + spec_size) + 1;
3434 next = spec->next;
3435 spec->next = sizeof(*spec) + slen;
3436 strcpy((char*)&spec[1], gspec + spec_size);
3437 gspec += next;
3438 cur_data += spec->next;
3439 }
3440 break;
3441 }
3442 default:
3443 ret = -TARGET_EINVAL;
3444 goto out;
3445 }
3446 unlock_user(argptr, guest_data, 0);
3447
3448 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3449 if (!is_error(ret)) {
3450 guest_data = arg + host_dm->data_start;
3451 guest_data_size = host_dm->data_size - host_dm->data_start;
3452 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3453 switch (ie->host_cmd) {
3454 case DM_REMOVE_ALL:
3455 case DM_DEV_CREATE:
3456 case DM_DEV_REMOVE:
3457 case DM_DEV_RENAME:
3458 case DM_DEV_SUSPEND:
3459 case DM_DEV_STATUS:
3460 case DM_TABLE_LOAD:
3461 case DM_TABLE_CLEAR:
3462 case DM_TARGET_MSG:
3463 case DM_DEV_SET_GEOMETRY:
3464 /* no return data */
3465 break;
3466 case DM_LIST_DEVICES:
3467 {
3468 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3469 uint32_t remaining_data = guest_data_size;
3470 void *cur_data = argptr;
3471 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3472 int nl_size = 12; /* can't use thunk_size due to alignment */
3473
3474 while (1) {
3475 uint32_t next = nl->next;
3476 if (next) {
3477 nl->next = nl_size + (strlen(nl->name) + 1);
3478 }
3479 if (remaining_data < nl->next) {
3480 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3481 break;
3482 }
3483 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3484 strcpy(cur_data + nl_size, nl->name);
3485 cur_data += nl->next;
3486 remaining_data -= nl->next;
3487 if (!next) {
3488 break;
3489 }
3490 nl = (void*)nl + next;
3491 }
3492 break;
3493 }
3494 case DM_DEV_WAIT:
3495 case DM_TABLE_STATUS:
3496 {
3497 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3498 void *cur_data = argptr;
3499 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3500 int spec_size = thunk_type_size(arg_type, 0);
3501 int i;
3502
3503 for (i = 0; i < host_dm->target_count; i++) {
3504 uint32_t next = spec->next;
3505 int slen = strlen((char*)&spec[1]) + 1;
3506 spec->next = (cur_data - argptr) + spec_size + slen;
3507 if (guest_data_size < spec->next) {
3508 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3509 break;
3510 }
3511 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3512 strcpy(cur_data + spec_size, (char*)&spec[1]);
3513 cur_data = argptr + spec->next;
3514 spec = (void*)host_dm + host_dm->data_start + next;
3515 }
3516 break;
3517 }
3518 case DM_TABLE_DEPS:
3519 {
3520 void *hdata = (void*)host_dm + host_dm->data_start;
3521 int count = *(uint32_t*)hdata;
3522 uint64_t *hdev = hdata + 8;
3523 uint64_t *gdev = argptr + 8;
3524 int i;
3525
3526 *(uint32_t*)argptr = tswap32(count);
3527 for (i = 0; i < count; i++) {
3528 *gdev = tswap64(*hdev);
3529 gdev++;
3530 hdev++;
3531 }
3532 break;
3533 }
3534 case DM_LIST_VERSIONS:
3535 {
3536 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3537 uint32_t remaining_data = guest_data_size;
3538 void *cur_data = argptr;
3539 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3540 int vers_size = thunk_type_size(arg_type, 0);
3541
3542 while (1) {
3543 uint32_t next = vers->next;
3544 if (next) {
3545 vers->next = vers_size + (strlen(vers->name) + 1);
3546 }
3547 if (remaining_data < vers->next) {
3548 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3549 break;
3550 }
3551 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3552 strcpy(cur_data + vers_size, vers->name);
3553 cur_data += vers->next;
3554 remaining_data -= vers->next;
3555 if (!next) {
3556 break;
3557 }
3558 vers = (void*)vers + next;
3559 }
3560 break;
3561 }
3562 default:
3563 ret = -TARGET_EINVAL;
3564 goto out;
3565 }
3566 unlock_user(argptr, guest_data, guest_data_size);
3567
3568 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3569 if (!argptr) {
3570 ret = -TARGET_EFAULT;
3571 goto out;
3572 }
3573 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3574 unlock_user(argptr, arg, target_size);
3575 }
3576 out:
3577 if (big_buf) {
3578 free(big_buf);
3579 }
3580 return ret;
3581 }
3582
/* Table of all supported ioctls, expanded from ioctls.h and terminated
 * by a zero target_cmd entry.  IOCTL() entries are handled by the
 * generic thunk-based conversion in do_ioctl(); IOCTL_SPECIAL() entries
 * carry a custom do_ioctl_*() handler in the dofn slot. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
3591
3592 /* ??? Implement proper locking for ioctls. */
3593 /* do_ioctl() Must return target values and target errnos. */
3594 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3595 {
3596 const IOCTLEntry *ie;
3597 const argtype *arg_type;
3598 abi_long ret;
3599 uint8_t buf_temp[MAX_STRUCT_SIZE];
3600 int target_size;
3601 void *argptr;
3602
3603 ie = ioctl_entries;
3604 for(;;) {
3605 if (ie->target_cmd == 0) {
3606 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3607 return -TARGET_ENOSYS;
3608 }
3609 if (ie->target_cmd == cmd)
3610 break;
3611 ie++;
3612 }
3613 arg_type = ie->arg_type;
3614 #if defined(DEBUG)
3615 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3616 #endif
3617 if (ie->do_ioctl) {
3618 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3619 }
3620
3621 switch(arg_type[0]) {
3622 case TYPE_NULL:
3623 /* no argument */
3624 ret = get_errno(ioctl(fd, ie->host_cmd));
3625 break;
3626 case TYPE_PTRVOID:
3627 case TYPE_INT:
3628 /* int argment */
3629 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3630 break;
3631 case TYPE_PTR:
3632 arg_type++;
3633 target_size = thunk_type_size(arg_type, 0);
3634 switch(ie->access) {
3635 case IOC_R:
3636 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3637 if (!is_error(ret)) {
3638 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3639 if (!argptr)
3640 return -TARGET_EFAULT;
3641 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3642 unlock_user(argptr, arg, target_size);
3643 }
3644 break;
3645 case IOC_W:
3646 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3647 if (!argptr)
3648 return -TARGET_EFAULT;
3649 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3650 unlock_user(argptr, arg, 0);
3651 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3652 break;
3653 default:
3654 case IOC_RW:
3655 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3656 if (!argptr)
3657 return -TARGET_EFAULT;
3658 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3659 unlock_user(argptr, arg, 0);
3660 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3661 if (!is_error(ret)) {
3662 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3663 if (!argptr)
3664 return -TARGET_EFAULT;
3665 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3666 unlock_user(argptr, arg, target_size);
3667 }
3668 break;
3669 }
3670 break;
3671 default:
3672 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3673 (long)cmd, arg_type[0]);
3674 ret = -TARGET_ENOSYS;
3675 break;
3676 }
3677 return ret;
3678 }
3679
/* Target <-> host translation table for the termios c_iflag word;
 * each row is { target mask, target bits, host mask, host bits }. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
3697
/* Target <-> host translation table for the termios c_oflag word.
 * Multi-bit delay fields (NLDLY, CRDLY, ...) get one row per value. */
static const bitmask_transtbl oflag_tbl[] = {
        { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
        { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
        { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
        { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
        { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
        { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
        { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
        { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
        { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
        { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
        { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
        { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
        { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
        { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
        { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
        { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
        { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
        { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
        { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
        { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
        { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
        { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
        { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
        { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
        { 0, 0, 0, 0 }
};
3725
/* Target <-> host translation table for the termios c_cflag word:
 * the CBAUD speed field (one row per baud rate), the CSIZE character
 * size field, and the remaining single-bit flags. */
static const bitmask_transtbl cflag_tbl[] = {
        { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
        { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
        { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
        { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
        { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
        { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
        { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
        { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
        { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
        { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
        { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
        { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
        { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
        { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
        { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
        { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
        { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
        { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
        { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
        { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
        { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
        { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
        { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
        { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
        { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
        { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
        { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
        { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
        { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
        { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
        { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
        { 0, 0, 0, 0 }
};
3760
/* Target <-> host translation table for the termios c_lflag word. */
static const bitmask_transtbl lflag_tbl[] = {
        { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
        { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
        { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
        { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
        { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
        { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
        { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
        { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
        { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
        { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
        { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
        { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
        { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
        { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
        { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
        { 0, 0, 0, 0 }
};
3779
3780 static void target_to_host_termios (void *dst, const void *src)
3781 {
3782 struct host_termios *host = dst;
3783 const struct target_termios *target = src;
3784
3785 host->c_iflag =
3786 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3787 host->c_oflag =
3788 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3789 host->c_cflag =
3790 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3791 host->c_lflag =
3792 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3793 host->c_line = target->c_line;
3794
3795 memset(host->c_cc, 0, sizeof(host->c_cc));
3796 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3797 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3798 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3799 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3800 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3801 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3802 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3803 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3804 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3805 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3806 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3807 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3808 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3809 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3810 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3811 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3812 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3813 }
3814
3815 static void host_to_target_termios (void *dst, const void *src)
3816 {
3817 struct target_termios *target = dst;
3818 const struct host_termios *host = src;
3819
3820 target->c_iflag =
3821 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3822 target->c_oflag =
3823 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3824 target->c_cflag =
3825 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3826 target->c_lflag =
3827 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3828 target->c_line = host->c_line;
3829
3830 memset(target->c_cc, 0, sizeof(target->c_cc));
3831 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3832 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3833 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3834 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3835 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3836 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3837 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3838 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3839 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3840 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3841 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3842 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3843 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3844 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3845 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3846 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3847 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3848 }
3849
/* Struct conversion descriptor for termios, used by the thunk layer
 * (TCGETS/TCSETS and friends) to convert between the target and host
 * layouts via the two converters above. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
3855
/* Target <-> host translation table for mmap(2) flag bits. */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ 0, 0, 0, 0 }
};
3867
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
/* Host-side copy of the guest LDT; lazily allocated by write_ldt(). */
static uint8_t *ldt_table;
3872
3873 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3874 {
3875 int size;
3876 void *p;
3877
3878 if (!ldt_table)
3879 return 0;
3880 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3881 if (size > bytecount)
3882 size = bytecount;
3883 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3884 if (!p)
3885 return -TARGET_EFAULT;
3886 /* ??? Should this by byteswapped? */
3887 memcpy(p, ldt_table, size);
3888 unlock_user(p, ptr, size);
3889 return size;
3890 }
3891
/* XXX: add locking support */
/* Install or clear one LDT descriptor from a guest struct user_desc
 * (target_modify_ldt_ldt_s), mirroring the Linux kernel's write_ldt().
 * 'oldmode' selects the legacy modify_ldt(func=1) semantics, in which
 * the 'useable' bit is ignored.  The LDT backing store is allocated
 * lazily on the first write. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    /* Copy the request in from guest memory, swapping each field from
       target byte order. */
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the kernel's
       struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    /* 64-bit code segment bit, only meaningful outside the 32-bit ABI */
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 is only accepted as a not-present placeholder, and
       never in the legacy interface (matches kernel behaviour). */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode the two 32-bit descriptor words: entry_1 carries
       base[15:0] and limit[15:0]; entry_2 carries the high base/limit
       bits plus the attribute bits (0x7000 = present, DPL 3). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  Descriptor words are stored in target
       byte order since the guest reads them directly. */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
3983
3984 /* specific and weird i386 syscalls */
3985 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3986 unsigned long bytecount)
3987 {
3988 abi_long ret;
3989
3990 switch (func) {
3991 case 0:
3992 ret = read_ldt(ptr, bytecount);
3993 break;
3994 case 1:
3995 ret = write_ldt(env, ptr, bytecount, 1);
3996 break;
3997 case 0x11:
3998 ret = write_ldt(env, ptr, bytecount, 0);
3999 break;
4000 default:
4001 ret = -TARGET_ENOSYS;
4002 break;
4003 }
4004 return ret;
4005 }
4006
4007 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor in the guest GDT
 * from a struct user_desc at 'ptr'.  If entry_number is -1, pick the
 * first free TLS slot and write the chosen index back to the guest.
 * Largely parallels write_ldt() above, but targets the GDT TLS range. */
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: we may store the allocated entry_number back */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* entry_number == -1 means "allocate a free TLS slot for me" */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flags word (struct user_desc bit layout). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    /* contents == 3 is only valid as a not-present placeholder */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode the descriptor words; 0x7000 = present, DPL 3 attribute
       bits (see write_ldt() for the field layout). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
4092
/* Emulate get_thread_area(2): read the TLS descriptor selected by the
 * guest's entry_number out of the GDT, decode it back into struct
 * user_desc fields and store them at 'ptr'.  Inverse of
 * do_set_thread_area(). */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Fetch the two descriptor words (stored in target byte order). */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the attribute bits (inverse of the encoding in
       do_set_thread_area()). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Repack into the struct user_desc flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    /* Reassemble the scattered limit and base fields. */
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
4139 #endif /* TARGET_I386 && TARGET_ABI32 */
4140
4141 #ifndef TARGET_ABI32
4142 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4143 {
4144 abi_long ret = 0;
4145 abi_ulong val;
4146 int idx;
4147
4148 switch(code) {
4149 case TARGET_ARCH_SET_GS:
4150 case TARGET_ARCH_SET_FS:
4151 if (code == TARGET_ARCH_SET_GS)
4152 idx = R_GS;
4153 else
4154 idx = R_FS;
4155 cpu_x86_load_seg(env, idx, 0);
4156 env->segs[idx].base = addr;
4157 break;
4158 case TARGET_ARCH_GET_GS:
4159 case TARGET_ARCH_GET_FS:
4160 if (code == TARGET_ARCH_GET_GS)
4161 idx = R_GS;
4162 else
4163 idx = R_FS;
4164 val = env->segs[idx].base;
4165 if (put_user(val, addr, abi_ulong))
4166 ret = -TARGET_EFAULT;
4167 break;
4168 default:
4169 ret = -TARGET_EINVAL;
4170 break;
4171 }
4172 return ret;
4173 }
4174 #endif
4175
4176 #endif /* defined(TARGET_I386) */
4177
4178 #define NEW_STACK_SIZE 0x40000
4179
4180 #if defined(CONFIG_USE_NPTL)
4181
4182 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4183 typedef struct {
4184 CPUArchState *env;
4185 pthread_mutex_t mutex;
4186 pthread_cond_t cond;
4187 pthread_t thread;
4188 uint32_t tid;
4189 abi_ulong child_tidptr;
4190 abi_ulong parent_tidptr;
4191 sigset_t sigmask;
4192 } new_thread_info;
4193
4194 static void *clone_func(void *arg)
4195 {
4196 new_thread_info *info = arg;
4197 CPUArchState *env;
4198 TaskState *ts;
4199
4200 env = info->env;
4201 thread_env = env;
4202 ts = (TaskState *)thread_env->opaque;
4203 info->tid = gettid();
4204 env->host_tid = info->tid;
4205 task_settid(ts);
4206 if (info->child_tidptr)
4207 put_user_u32(info->tid, info->child_tidptr);
4208 if (info->parent_tidptr)
4209 put_user_u32(info->tid, info->parent_tidptr);
4210 /* Enable signals. */
4211 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4212 /* Signal to the parent that we're ready. */
4213 pthread_mutex_lock(&info->mutex);
4214 pthread_cond_broadcast(&info->cond);
4215 pthread_mutex_unlock(&info->mutex);
4216 /* Wait until the parent has finshed initializing the tls state. */
4217 pthread_mutex_lock(&clone_lock);
4218 pthread_mutex_unlock(&clone_lock);
4219 cpu_loop(env);
4220 /* never exits */
4221 return NULL;
4222 }
4223 #else
4224
/* Non-NPTL variant: entry point passed to the raw host clone();
   simply runs the guest CPU loop on the new kernel thread.
   Never returns.  */
static int clone_func(void *arg)
{
    CPUArchState *env = arg;
    cpu_loop(env);
    /* never exits */
    return 0;
}
4232 #endif
4233
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Implements the guest clone()/fork()/vfork() family:
     - CLONE_VM set   -> new guest thread (host pthread under NPTL,
                         raw host clone() otherwise);
     - CLONE_VM clear -> real host fork().
   env/flags/newsp/newtls and the tid pointers mirror the kernel
   clone() arguments.  Returns the child TID/PID, or a negative
   errno.  */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    int ret;
    TaskState *ts;
    CPUArchState *new_env;
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;
    sigset_t sigmask;
#else
    uint8_t *new_stack;
#endif

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* Thread creation: share the address space, new CPU state.  */
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;
#endif
        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        cpu_reset(ENV_GET_CPU(new_env));
#endif
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        /* Keep the NPTL-only bits for local decisions, but don't pass
           them along in 'flags'.  */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the three attr-setup return values below are
           overwritten without being checked.  */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed into cpu_loop().  */
        pthread_mutex_unlock(&clone_lock);
#else
        if (flags & CLONE_NPTL_FLAGS2)
            return -EINVAL;
        /* This is probably going to die very quickly, but do it anyway. */
        new_stack = g_malloc0 (NEW_STACK_SIZE);
#ifdef __ia64__
        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
#else
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#endif
#endif
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            return -EINVAL;
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
#endif
        } else {
            fork_end(0);
        }
    }
    return ret;
}
4366
4367 /* warning : doesn't handle linux specific flags... */
4368 static int target_to_host_fcntl_cmd(int cmd)
4369 {
4370 switch(cmd) {
4371 case TARGET_F_DUPFD:
4372 case TARGET_F_GETFD:
4373 case TARGET_F_SETFD:
4374 case TARGET_F_GETFL:
4375 case TARGET_F_SETFL:
4376 return cmd;
4377 case TARGET_F_GETLK:
4378 return F_GETLK;
4379 case TARGET_F_SETLK:
4380 return F_SETLK;
4381 case TARGET_F_SETLKW:
4382 return F_SETLKW;
4383 case TARGET_F_GETOWN:
4384 return F_GETOWN;
4385 case TARGET_F_SETOWN:
4386 return F_SETOWN;
4387 case TARGET_F_GETSIG:
4388 return F_GETSIG;
4389 case TARGET_F_SETSIG:
4390 return F_SETSIG;
4391 #if TARGET_ABI_BITS == 32
4392 case TARGET_F_GETLK64:
4393 return F_GETLK64;
4394 case TARGET_F_SETLK64:
4395 return F_SETLK64;
4396 case TARGET_F_SETLKW64:
4397 return F_SETLKW64;
4398 #endif
4399 case TARGET_F_SETLEASE:
4400 return F_SETLEASE;
4401 case TARGET_F_GETLEASE:
4402 return F_GETLEASE;
4403 #ifdef F_DUPFD_CLOEXEC
4404 case TARGET_F_DUPFD_CLOEXEC:
4405 return F_DUPFD_CLOEXEC;
4406 #endif
4407 case TARGET_F_NOTIFY:
4408 return F_NOTIFY;
4409 default:
4410 return -TARGET_EINVAL;
4411 }
4412 return -TARGET_EINVAL;
4413 }
4414
/* Implement the guest fcntl() syscall: translate the command, convert
   any struct flock/flock64 argument between guest and host layout and
   byte order, and forward to the host fcntl().  Returns the (possibly
   bitmask-translated) result or a -TARGET_* errno.  */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy the lock description in, query, then write the (possibly
           updated) description back to the guest on success.  */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type = tswap16(fl.l_type);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        /* Input-only: copy in the lock description and apply it.  */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        /* NOTE(review): the ">> 1" applied to l_type here and below is
           suspicious — it is applied in the same direction on both read
           and write-back, so it is not a round-trip conversion.  Confirm
           against the target_flock64 layout for the affected targets.  */
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type = tswap16(fl64.l_type) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* The O_* status flags differ between ABIs; translate them.  */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Plain integer argument; pass through unchanged.  */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
4523
4524 #ifdef USE_UID16
4525
/* Narrow a 32-bit uid to the 16-bit ABI: values that do not fit are
   reported as the kernel's overflow uid, 65534.  */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
4533
/* Narrow a 32-bit gid to the 16-bit ABI: values that do not fit are
   reported as the kernel's overflow gid, 65534.  */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
4541
/* Widen a 16-bit uid: the 16-bit "-1" sentinel (0xffff) expands to the
   full-width -1; every other value passes through unchanged.  */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
4549
/* Widen a 16-bit gid: the 16-bit "-1" sentinel (0xffff) expands to the
   full-width -1; every other value passes through unchanged.  */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between guest and host byte order.  */
static inline int tswapid(int id)
{
    return tswap16(id);
}
4561 #else /* !USE_UID16 */
/* Without USE_UID16 the guest ABI carries full-width IDs, so no 16-bit
   clamping or sentinel translation is needed: the conversion helpers
   all degenerate to the identity function.  */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between guest and host byte order.  */
static inline int tswapid(int id)
{
    return tswap32(id);
}
4582 #endif /* USE_UID16 */
4583
4584 void syscall_init(void)
4585 {
4586 IOCTLEntry *ie;
4587 const argtype *arg_type;
4588 int size;
4589 int i;
4590
4591 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4592 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4593 #include "syscall_types.h"
4594 #undef STRUCT
4595 #undef STRUCT_SPECIAL
4596
4597 /* we patch the ioctl size if necessary. We rely on the fact that
4598 no ioctl has all the bits at '1' in the size field */
4599 ie = ioctl_entries;
4600 while (ie->target_cmd != 0) {
4601 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4602 TARGET_IOC_SIZEMASK) {
4603 arg_type = ie->arg_type;
4604 if (arg_type[0] != TYPE_PTR) {
4605 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4606 ie->target_cmd);
4607 exit(1);
4608 }
4609 arg_type++;
4610 size = thunk_type_size(arg_type, 0);
4611 ie->target_cmd = (ie->target_cmd &
4612 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4613 (size << TARGET_IOC_SIZESHIFT);
4614 }
4615
4616 /* Build target_to_host_errno_table[] table from
4617 * host_to_target_errno_table[]. */
4618 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4619 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4620
4621 /* automatic consistency check if same arch */
4622 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4623 (defined(__x86_64__) && defined(TARGET_X86_64))
4624 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4625 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4626 ie->name, ie->target_cmd, ie->host_cmd);
4627 }
4628 #endif
4629 ie++;
4630 }
4631 }
4632
4633 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two 32-bit register halves
   the guest passed, honouring the guest's word order.  */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t hi, lo;

#ifdef TARGET_WORDS_BIGENDIAN
    hi = word0;
    lo = word1;
#else
    hi = word1;
    lo = word0;
#endif
    return (hi << 32) | lo;
}
4642 #else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already arrives whole in the first
   register; the second argument is unused padding.  */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    (void)word1;
    return word0;
}
4647 #endif /* TARGET_ABI_BITS != 32 */
4648
4649 #ifdef TARGET_NR_truncate64
/* truncate64 wrapper: on ABIs that pass 64-bit values in aligned
   register pairs the offset halves arrive in arg3/arg4 (arg2 is
   padding), otherwise in arg2/arg3.  */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        /* Skip the alignment padding register.  */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
4661 #endif
4662
4663 #ifdef TARGET_NR_ftruncate64
/* ftruncate64 wrapper: same register-pair alignment handling as
   target_truncate64() above, but taking an fd in arg1.  */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        /* Skip the alignment padding register.  */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
4675 #endif
4676
/* Copy a struct timespec from guest memory at target_addr into
   *host_ts, byte-swapping as needed.  Returns 0 on success or
   -TARGET_EFAULT if the guest address is not readable.  */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
4689
/* Copy *host_ts out to the guest struct timespec at target_addr,
   byte-swapping as needed.  Returns 0 on success or -TARGET_EFAULT
   if the guest address is not writable.  */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
4702
4703 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat into the guest's stat64 buffer at
   target_addr, converting field widths and byte order.  ARM EABI uses
   its own padded layout and is handled separately.  Returns 0 or
   -TARGET_EFAULT.  */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        /* ARM EABI variant of stat64 with different padding/alignment.  */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry the inode in a second, differently-sized
           field as well.  */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
4766 #endif
4767
4768 #if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
/* Implement the guest futex() syscall by forwarding to the host futex
   on the guest memory translated with g2h().  Returns host result or
   -TARGET_ENOSYS for unhandled operations.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory at uaddr, which is in
           guest byte order — hence the tswap32().  */
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
4819 #endif
4820
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits; remap just those.  */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; keep the 0x7f marker byte.  */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
            | (status & 0xff);
    }
    return status;
}
4834
/* Return the kernel release packed one byte per component, e.g.
   "2.6.32" -> 0x020620.  Honours the user-supplied fake release
   (qemu_uname_release) if set, else uses the host uname.  The result
   is cached; 0 is returned if uname fails.  */
int get_osversion(void)
{
    static int osversion;    /* cached result; 0 means not yet computed */
    struct new_utsname buf;
    const char *s;
    int i, n, tmp;
    if (osversion)
        return osversion;
    if (qemu_uname_release && *qemu_uname_release) {
        /* User requested a specific (fake) kernel version.  */
        s = qemu_uname_release;
    } else {
        if (sys_uname(&buf))
            return 0;
        s = buf.release;
    }
    tmp = 0;
    /* Parse up to three dot-separated decimal components.  */
    for (i = 0; i < 3; i++) {
        n = 0;
        while (*s >= '0' && *s <= '9') {
            n *= 10;
            n += *s - '0';
            s++;
        }
        tmp = (tmp << 8) + n;
        if (*s == '.')
            s++;
    }
    osversion = tmp;
    return osversion;
}
4865
4866
4867 static int open_self_maps(void *cpu_env, int fd)
4868 {
4869 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4870 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4871 #endif
4872 FILE *fp;
4873 char *line = NULL;
4874 size_t len = 0;
4875 ssize_t read;
4876
4877 fp = fopen("/proc/self/maps", "r");
4878 if (fp == NULL) {
4879 return -EACCES;
4880 }
4881
4882 while ((read = getline(&line, &len, fp)) != -1) {
4883 int fields, dev_maj, dev_min, inode;
4884 uint64_t min, max, offset;
4885 char flag_r, flag_w, flag_x, flag_p;
4886 char path[512] = "";
4887 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4888 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4889 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4890
4891 if ((fields < 10) || (fields > 11)) {
4892 continue;
4893 }
4894 if (!strncmp(path, "[stack]", 7)) {
4895 continue;
4896 }
4897 if (h2g_valid(min) && h2g_valid(max)) {
4898 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4899 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
4900 h2g(min), h2g(max), flag_r, flag_w,
4901 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4902 path[0] ? " " : "", path);
4903 }
4904 }
4905
4906 free(line);
4907 fclose(fp);
4908
4909 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4910 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4911 (unsigned long long)ts->info->stack_limit,
4912 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4913 & TARGET_PAGE_MASK,
4914 (unsigned long long)0);
4915 #endif
4916
4917 return 0;
4918 }
4919
/* Emulated /proc/self/stat: emit the 44 space-separated fields, but
   only pid (field 0), comm (field 1) and startstack (field 27) carry
   real values; every other field is reported as 0.  Returns 0, or -1
   on a short write.  */
static int open_self_stat(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
4955
/* Emulated /proc/self/auxv: copy the auxiliary vector that was saved
   on the guest stack at load time out to fd.  Always returns 0; a
   failed lock or short write silently yields partial/empty content.  */
static int open_self_auxv(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so this does
           not mirror the lock_user() call exactly — confirm against
           unlock_user()'s expectations.  */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
4984
/* open(2) with interception of a few /proc/self files that must
   reflect the guest rather than QEMU itself.  Matched paths are served
   from an unlinked temporary file filled by the matching fake_open
   handler; everything else falls through to the host open().  */
static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                 /* guest path to intercept */
        int (*fill)(void *cpu_env, int fd);   /* writes fake content to fd */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "/proc/self/maps", open_self_maps },
        { "/proc/self/stat", open_self_stat },
        { "/proc/self/auxv", open_self_auxv },
        { NULL, NULL }
    };

    /* NOTE(review): strncmp() with the entry's own length makes this a
       prefix match, so e.g. "/proc/self/statXYZ" would also be
       intercepted — confirm whether an exact match was intended.  */
    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (!strncmp(pathname, fake_open->filename,
                     strlen(fake_open->filename))) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive and nothing
           is left behind once it is closed.  */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            close(fd);
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return get_errno(open(path(pathname), flags, mode));
}
5033
5034 /* do_syscall() should always have a single exit point at the end so
5035 that actions, such as logging of syscall results, can be performed.
5036 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5037 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5038 abi_long arg2, abi_long arg3, abi_long arg4,
5039 abi_long arg5, abi_long arg6, abi_long arg7,
5040 abi_long arg8)
5041 {
5042 abi_long ret;
5043 struct stat st;
5044 struct statfs stfs;
5045 void *p;
5046
5047 #ifdef DEBUG
5048 gemu_log("syscall %d", num);
5049 #endif
5050 if(do_strace)
5051 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5052
5053 switch(num) {
5054 case TARGET_NR_exit:
5055 #ifdef CONFIG_USE_NPTL
5056 /* In old applications this may be used to implement _exit(2).
5057 However in threaded applictions it is used for thread termination,
5058 and _exit_group is used for application termination.
5059 Do thread termination if we have more then one thread. */
5060 /* FIXME: This probably breaks if a signal arrives. We should probably
5061 be disabling signals. */
5062 if (first_cpu->next_cpu) {
5063 TaskState *ts;
5064 CPUArchState **lastp;
5065 CPUArchState *p;
5066
5067 cpu_list_lock();
5068 lastp = &first_cpu;
5069 p = first_cpu;
5070 while (p && p != (CPUArchState *)cpu_env) {
5071 lastp = &p->next_cpu;
5072 p = p->next_cpu;
5073 }
5074 /* If we didn't find the CPU for this thread then something is
5075 horribly wrong. */
5076 if (!p)
5077 abort();
5078 /* Remove the CPU from the list. */
5079 *lastp = p->next_cpu;
5080 cpu_list_unlock();
5081 ts = ((CPUArchState *)cpu_env)->opaque;
5082 if (ts->child_tidptr) {
5083 put_user_u32(0, ts->child_tidptr);
5084 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5085 NULL, NULL, 0);
5086 }
5087 thread_env = NULL;
5088 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5089 g_free(ts);
5090 pthread_exit(NULL);
5091 }
5092 #endif
5093 #ifdef TARGET_GPROF
5094 _mcleanup();
5095 #endif
5096 gdb_exit(cpu_env, arg1);
5097 _exit(arg1);
5098 ret = 0; /* avoid warning */
5099 break;
5100 case TARGET_NR_read:
5101 if (arg3 == 0)
5102 ret = 0;
5103 else {
5104 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5105 goto efault;
5106 ret = get_errno(read(arg1, p, arg3));
5107 unlock_user(p, arg2, ret);
5108 }
5109 break;
5110 case TARGET_NR_write:
5111 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5112 goto efault;
5113 ret = get_errno(write(arg1, p, arg3));
5114 unlock_user(p, arg2, 0);
5115 break;
5116 case TARGET_NR_open:
5117 if (!(p = lock_user_string(arg1)))
5118 goto efault;
5119 ret = get_errno(do_open(cpu_env, p,
5120 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5121 arg3));
5122 unlock_user(p, arg1, 0);
5123 break;
5124 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5125 case TARGET_NR_openat:
5126 if (!(p = lock_user_string(arg2)))
5127 goto efault;
5128 ret = get_errno(sys_openat(arg1,
5129 path(p),
5130 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5131 arg4));
5132 unlock_user(p, arg2, 0);
5133 break;
5134 #endif
5135 case TARGET_NR_close:
5136 ret = get_errno(close(arg1));
5137 break;
5138 case TARGET_NR_brk:
5139 ret = do_brk(arg1);
5140 break;
5141 case TARGET_NR_fork:
5142 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5143 break;
5144 #ifdef TARGET_NR_waitpid
5145 case TARGET_NR_waitpid:
5146 {
5147 int status;
5148 ret = get_errno(waitpid(arg1, &status, arg3));
5149 if (!is_error(ret) && arg2 && ret
5150 && put_user_s32(host_to_target_waitstatus(status), arg2))
5151 goto efault;
5152 }
5153 break;
5154 #endif
5155 #ifdef TARGET_NR_waitid
5156 case TARGET_NR_waitid:
5157 {
5158 siginfo_t info;
5159 info.si_pid = 0;
5160 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5161 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5162 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5163 goto efault;
5164 host_to_target_siginfo(p, &info);
5165 unlock_user(p, arg3, sizeof(target_siginfo_t));
5166 }
5167 }
5168 break;
5169 #endif
5170 #ifdef TARGET_NR_creat /* not on alpha */
5171 case TARGET_NR_creat:
5172 if (!(p = lock_user_string(arg1)))
5173 goto efault;
5174 ret = get_errno(creat(p, arg2));
5175 unlock_user(p, arg1, 0);
5176 break;
5177 #endif
5178 case TARGET_NR_link:
5179 {
5180 void * p2;
5181 p = lock_user_string(arg1);
5182 p2 = lock_user_string(arg2);
5183 if (!p || !p2)
5184 ret = -TARGET_EFAULT;
5185 else
5186 ret = get_errno(link(p, p2));
5187 unlock_user(p2, arg2, 0);
5188 unlock_user(p, arg1, 0);
5189 }
5190 break;
5191 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5192 case TARGET_NR_linkat:
5193 {
5194 void * p2 = NULL;
5195 if (!arg2 || !arg4)
5196 goto efault;
5197 p = lock_user_string(arg2);
5198 p2 = lock_user_string(arg4);
5199 if (!p || !p2)
5200 ret = -TARGET_EFAULT;
5201 else
5202 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5203 unlock_user(p, arg2, 0);
5204 unlock_user(p2, arg4, 0);
5205 }
5206 break;
5207 #endif
5208 case TARGET_NR_unlink:
5209 if (!(p = lock_user_string(arg1)))
5210 goto efault;
5211 ret = get_errno(unlink(p));
5212 unlock_user(p, arg1, 0);
5213 break;
5214 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5215 case TARGET_NR_unlinkat:
5216 if (!(p = lock_user_string(arg2)))
5217 goto efault;
5218 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5219 unlock_user(p, arg2, 0);
5220 break;
5221 #endif
5222 case TARGET_NR_execve:
5223 {
5224 char **argp, **envp;
5225 int argc, envc;
5226 abi_ulong gp;
5227 abi_ulong guest_argp;
5228 abi_ulong guest_envp;
5229 abi_ulong addr;
5230 char **q;
5231 int total_size = 0;
5232
5233 argc = 0;
5234 guest_argp = arg2;
5235 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5236 if (get_user_ual(addr, gp))
5237 goto efault;
5238 if (!addr)
5239 break;
5240 argc++;
5241 }
5242 envc = 0;
5243 guest_envp = arg3;
5244 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5245 if (get_user_ual(addr, gp))
5246 goto efault;
5247 if (!addr)
5248 break;
5249 envc++;
5250 }
5251
5252 argp = alloca((argc + 1) * sizeof(void *));
5253 envp = alloca((envc + 1) * sizeof(void *));
5254
5255 for (gp = guest_argp, q = argp; gp;
5256 gp += sizeof(abi_ulong), q++) {
5257 if (get_user_ual(addr, gp))
5258 goto execve_efault;
5259 if (!addr)
5260 break;
5261 if (!(*q = lock_user_string(addr)))
5262 goto execve_efault;
5263 total_size += strlen(*q) + 1;
5264 }
5265 *q = NULL;
5266
5267 for (gp = guest_envp, q = envp; gp;
5268 gp += sizeof(abi_ulong), q++) {
5269 if (get_user_ual(addr, gp))
5270 goto execve_efault;
5271 if (!addr)
5272 break;
5273 if (!(*q = lock_user_string(addr)))
5274 goto execve_efault;
5275 total_size += strlen(*q) + 1;
5276 }
5277 *q = NULL;
5278
5279 /* This case will not be caught by the host's execve() if its
5280 page size is bigger than the target's. */
5281 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5282 ret = -TARGET_E2BIG;
5283 goto execve_end;
5284 }
5285 if (!(p = lock_user_string(arg1)))
5286 goto execve_efault;
5287 ret = get_errno(execve(p, argp, envp));
5288 unlock_user(p, arg1, 0);
5289
5290 goto execve_end;
5291
5292 execve_efault:
5293 ret = -TARGET_EFAULT;
5294
5295 execve_end:
5296 for (gp = guest_argp, q = argp; *q;
5297 gp += sizeof(abi_ulong), q++) {
5298 if (get_user_ual(addr, gp)
5299 || !addr)
5300 break;
5301 unlock_user(*q, addr, 0);
5302 }
5303 for (gp = guest_envp, q = envp; *q;
5304 gp += sizeof(abi_ulong), q++) {
5305 if (get_user_ual(addr, gp)
5306 || !addr)
5307 break;
5308 unlock_user(*q, addr, 0);
5309 }
5310 }
5311 break;
5312 case TARGET_NR_chdir:
5313 if (!(p = lock_user_string(arg1)))
5314 goto efault;
5315 ret = get_errno(chdir(p));
5316 unlock_user(p, arg1, 0);
5317 break;
5318 #ifdef TARGET_NR_time
5319 case TARGET_NR_time:
5320 {
5321 time_t host_time;
5322 ret = get_errno(time(&host_time));
5323 if (!is_error(ret)
5324 && arg1
5325 && put_user_sal(host_time, arg1))
5326 goto efault;
5327 }
5328 break;
5329 #endif
5330 case TARGET_NR_mknod:
5331 if (!(p = lock_user_string(arg1)))
5332 goto efault;
5333 ret = get_errno(mknod(p, arg2, arg3));
5334 unlock_user(p, arg1, 0);
5335 break;
5336 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5337 case TARGET_NR_mknodat:
5338 if (!(p = lock_user_string(arg2)))
5339 goto efault;
5340 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5341 unlock_user(p, arg2, 0);
5342 break;
5343 #endif
5344 case TARGET_NR_chmod:
5345 if (!(p = lock_user_string(arg1)))
5346 goto efault;
5347 ret = get_errno(chmod(p, arg2));
5348 unlock_user(p, arg1, 0);
5349 break;
5350 #ifdef TARGET_NR_break
5351 case TARGET_NR_break:
5352 goto unimplemented;
5353 #endif
5354 #ifdef TARGET_NR_oldstat
5355 case TARGET_NR_oldstat:
5356 goto unimplemented;
5357 #endif
5358 case TARGET_NR_lseek:
5359 ret = get_errno(lseek(arg1, arg2, arg3));
5360 break;
5361 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5362 /* Alpha specific */
5363 case TARGET_NR_getxpid:
5364 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5365 ret = get_errno(getpid());
5366 break;
5367 #endif
5368 #ifdef TARGET_NR_getpid
5369 case TARGET_NR_getpid:
5370 ret = get_errno(getpid());
5371 break;
5372 #endif
    case TARGET_NR_mount:
        {
            /* Translate mount(2): source (arg1), target (arg2) and
               filesystem type (arg3) are guest strings; arg4 is the
               mountflags word passed through unchanged.  */
            /* need to look at the data field */
            void *p2, *p3;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            p3 = lock_user_string(arg3);
            if (!p || !p2 || !p3)
                ret = -TARGET_EFAULT;
            else {
                /* FIXME - arg5 should be locked, but it isn't clear how to
                 * do that since it's not guaranteed to be a NULL-terminated
                 * string.
                 */
                /* NOTE(review): g2h() converts the guest address directly,
                   bypassing the usual lock_user access checks — a bad arg5
                   would fault the host rather than return EFAULT; confirm
                   this is the intended trade-off.  */
                if ( ! arg5 )
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
                else
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
            }
            /* presumably unlock_user tolerates a NULL host pointer here
               when one of the lock_user_string calls failed — verify.  */
            unlock_user(p, arg1, 0);
            unlock_user(p2, arg2, 0);
            unlock_user(p3, arg3, 0);
            break;
        }
5397 #ifdef TARGET_NR_umount
5398 case TARGET_NR_umount:
5399 if (!(p = lock_user_string(arg1)))
5400 goto efault;
5401 ret = get_errno(umount(p));
5402 unlock_user(p, arg1, 0);
5403 break;
5404 #endif
5405 #ifdef TARGET_NR_stime /* not on alpha */
5406 case TARGET_NR_stime:
5407 {
5408 time_t host_time;
5409 if (get_user_sal(host_time, arg1))
5410 goto efault;
5411 ret = get_errno(stime(&host_time));
5412 }
5413 break;
5414 #endif
5415 case TARGET_NR_ptrace:
5416 goto unimplemented;
5417 #ifdef TARGET_NR_alarm /* not on alpha */
5418 case TARGET_NR_alarm:
5419 ret = alarm(arg1);
5420 break;
5421 #endif
5422 #ifdef TARGET_NR_oldfstat
5423 case TARGET_NR_oldfstat:
5424 goto unimplemented;
5425 #endif
5426 #ifdef TARGET_NR_pause /* not on alpha */
5427 case TARGET_NR_pause:
5428 ret = get_errno(pause());
5429 break;
5430 #endif
5431 #ifdef TARGET_NR_utime
5432 case TARGET_NR_utime:
5433 {
5434 struct utimbuf tbuf, *host_tbuf;
5435 struct target_utimbuf *target_tbuf;
5436 if (arg2) {
5437 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5438 goto efault;
5439 tbuf.actime = tswapal(target_tbuf->actime);
5440 tbuf.modtime = tswapal(target_tbuf->modtime);
5441 unlock_user_struct(target_tbuf, arg2, 0);
5442 host_tbuf = &tbuf;
5443 } else {
5444 host_tbuf = NULL;
5445 }
5446 if (!(p = lock_user_string(arg1)))
5447 goto efault;
5448 ret = get_errno(utime(p, host_tbuf));
5449 unlock_user(p, arg1, 0);
5450 }
5451 break;
5452 #endif
5453 case TARGET_NR_utimes:
5454 {
5455 struct timeval *tvp, tv[2];
5456 if (arg2) {
5457 if (copy_from_user_timeval(&tv[0], arg2)
5458 || copy_from_user_timeval(&tv[1],
5459 arg2 + sizeof(struct target_timeval)))
5460 goto efault;
5461 tvp = tv;
5462 } else {
5463 tvp = NULL;
5464 }
5465 if (!(p = lock_user_string(arg1)))
5466 goto efault;
5467 ret = get_errno(utimes(p, tvp));
5468 unlock_user(p, arg1, 0);
5469 }
5470 break;
5471 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5472 case TARGET_NR_futimesat:
5473 {
5474 struct timeval *tvp, tv[2];
5475 if (arg3) {
5476 if (copy_from_user_timeval(&tv[0], arg3)
5477 || copy_from_user_timeval(&tv[1],
5478 arg3 + sizeof(struct target_timeval)))
5479 goto efault;
5480 tvp = tv;
5481 } else {
5482 tvp = NULL;
5483 }
5484 if (!(p = lock_user_string(arg2)))
5485 goto efault;
5486 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5487 unlock_user(p, arg2, 0);
5488 }
5489 break;
5490 #endif
5491 #ifdef TARGET_NR_stty
5492 case TARGET_NR_stty:
5493 goto unimplemented;
5494 #endif
5495 #ifdef TARGET_NR_gtty
5496 case TARGET_NR_gtty:
5497 goto unimplemented;
5498 #endif
5499 case TARGET_NR_access:
5500 if (!(p = lock_user_string(arg1)))
5501 goto efault;
5502 ret = get_errno(access(path(p), arg2));
5503 unlock_user(p, arg1, 0);
5504 break;
5505 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5506 case TARGET_NR_faccessat:
5507 if (!(p = lock_user_string(arg2)))
5508 goto efault;
5509 ret = get_errno(sys_faccessat(arg1, p, arg3));
5510 unlock_user(p, arg2, 0);
5511 break;
5512 #endif
5513 #ifdef TARGET_NR_nice /* not on alpha */
5514 case TARGET_NR_nice:
5515 ret = get_errno(nice(arg1));
5516 break;
5517 #endif
5518 #ifdef TARGET_NR_ftime
5519 case TARGET_NR_ftime:
5520 goto unimplemented;
5521 #endif
5522 case TARGET_NR_sync:
5523 sync();
5524 ret = 0;
5525 break;
5526 case TARGET_NR_kill:
5527 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5528 break;
5529 case TARGET_NR_rename:
5530 {
5531 void *p2;
5532 p = lock_user_string(arg1);
5533 p2 = lock_user_string(arg2);
5534 if (!p || !p2)
5535 ret = -TARGET_EFAULT;
5536 else
5537 ret = get_errno(rename(p, p2));
5538 unlock_user(p2, arg2, 0);
5539 unlock_user(p, arg1, 0);
5540 }
5541 break;
5542 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5543 case TARGET_NR_renameat:
5544 {
5545 void *p2;
5546 p = lock_user_string(arg2);
5547 p2 = lock_user_string(arg4);
5548 if (!p || !p2)
5549 ret = -TARGET_EFAULT;
5550 else
5551 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5552 unlock_user(p2, arg4, 0);
5553 unlock_user(p, arg2, 0);
5554 }
5555 break;
5556 #endif
5557 case TARGET_NR_mkdir:
5558 if (!(p = lock_user_string(arg1)))
5559 goto efault;
5560 ret = get_errno(mkdir(p, arg2));
5561 unlock_user(p, arg1, 0);
5562 break;
5563 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5564 case TARGET_NR_mkdirat:
5565 if (!(p = lock_user_string(arg2)))
5566 goto efault;
5567 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5568 unlock_user(p, arg2, 0);
5569 break;
5570 #endif
5571 case TARGET_NR_rmdir:
5572 if (!(p = lock_user_string(arg1)))
5573 goto efault;
5574 ret = get_errno(rmdir(p));
5575 unlock_user(p, arg1, 0);
5576 break;
5577 case TARGET_NR_dup:
5578 ret = get_errno(dup(arg1));
5579 break;
5580 case TARGET_NR_pipe:
5581 ret = do_pipe(cpu_env, arg1, 0, 0);
5582 break;
5583 #ifdef TARGET_NR_pipe2
5584 case TARGET_NR_pipe2:
5585 ret = do_pipe(cpu_env, arg1, arg2, 1);
5586 break;
5587 #endif
5588 case TARGET_NR_times:
5589 {
5590 struct target_tms *tmsp;
5591 struct tms tms;
5592 ret = get_errno(times(&tms));
5593 if (arg1) {
5594 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5595 if (!tmsp)
5596 goto efault;
5597 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5598 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5599 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5600 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5601 }
5602 if (!is_error(ret))
5603 ret = host_to_target_clock_t(ret);
5604 }
5605 break;
5606 #ifdef TARGET_NR_prof
5607 case TARGET_NR_prof:
5608 goto unimplemented;
5609 #endif
5610 #ifdef TARGET_NR_signal
5611 case TARGET_NR_signal:
5612 goto unimplemented;
5613 #endif
5614 case TARGET_NR_acct:
5615 if (arg1 == 0) {
5616 ret = get_errno(acct(NULL));
5617 } else {
5618 if (!(p = lock_user_string(arg1)))
5619 goto efault;
5620 ret = get_errno(acct(path(p)));
5621 unlock_user(p, arg1, 0);
5622 }
5623 break;
5624 #ifdef TARGET_NR_umount2 /* not on alpha */
5625 case TARGET_NR_umount2:
5626 if (!(p = lock_user_string(arg1)))
5627 goto efault;
5628 ret = get_errno(umount2(p, arg2));
5629 unlock_user(p, arg1, 0);
5630 break;
5631 #endif
5632 #ifdef TARGET_NR_lock
5633 case TARGET_NR_lock:
5634 goto unimplemented;
5635 #endif
5636 case TARGET_NR_ioctl:
5637 ret = do_ioctl(arg1, arg2, arg3);
5638 break;
5639 case TARGET_NR_fcntl:
5640 ret = do_fcntl(arg1, arg2, arg3);
5641 break;
5642 #ifdef TARGET_NR_mpx
5643 case TARGET_NR_mpx:
5644 goto unimplemented;
5645 #endif
5646 case TARGET_NR_setpgid:
5647 ret = get_errno(setpgid(arg1, arg2));
5648 break;
5649 #ifdef TARGET_NR_ulimit
5650 case TARGET_NR_ulimit:
5651 goto unimplemented;
5652 #endif
5653 #ifdef TARGET_NR_oldolduname
5654 case TARGET_NR_oldolduname:
5655 goto unimplemented;
5656 #endif
5657 case TARGET_NR_umask:
5658 ret = get_errno(umask(arg1));
5659 break;
5660 case TARGET_NR_chroot:
5661 if (!(p = lock_user_string(arg1)))
5662 goto efault;
5663 ret = get_errno(chroot(p));
5664 unlock_user(p, arg1, 0);
5665 break;
5666 case TARGET_NR_ustat:
5667 goto unimplemented;
5668 case TARGET_NR_dup2:
5669 ret = get_errno(dup2(arg1, arg2));
5670 break;
5671 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5672 case TARGET_NR_dup3:
5673 ret = get_errno(dup3(arg1, arg2, arg3));
5674 break;
5675 #endif
5676 #ifdef TARGET_NR_getppid /* not on alpha */
5677 case TARGET_NR_getppid:
5678 ret = get_errno(getppid());
5679 break;
5680 #endif
5681 case TARGET_NR_getpgrp:
5682 ret = get_errno(getpgrp());
5683 break;
5684 case TARGET_NR_setsid:
5685 ret = get_errno(setsid());
5686 break;
5687 #ifdef TARGET_NR_sigaction
5688 case TARGET_NR_sigaction:
5689 {
5690 #if defined(TARGET_ALPHA)
5691 struct target_sigaction act, oact, *pact = 0;
5692 struct target_old_sigaction *old_act;
5693 if (arg2) {
5694 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5695 goto efault;
5696 act._sa_handler = old_act->_sa_handler;
5697 target_siginitset(&act.sa_mask, old_act->sa_mask);
5698 act.sa_flags = old_act->sa_flags;
5699 act.sa_restorer = 0;
5700 unlock_user_struct(old_act, arg2, 0);
5701 pact = &act;
5702 }
5703 ret = get_errno(do_sigaction(arg1, pact, &oact));
5704 if (!is_error(ret) && arg3) {
5705 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5706 goto efault;
5707 old_act->_sa_handler = oact._sa_handler;
5708 old_act->sa_mask = oact.sa_mask.sig[0];
5709 old_act->sa_flags = oact.sa_flags;
5710 unlock_user_struct(old_act, arg3, 1);
5711 }
5712 #elif defined(TARGET_MIPS)
5713 struct target_sigaction act, oact, *pact, *old_act;
5714
5715 if (arg2) {
5716 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5717 goto efault;
5718 act._sa_handler = old_act->_sa_handler;
5719 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5720 act.sa_flags = old_act->sa_flags;
5721 unlock_user_struct(old_act, arg2, 0);
5722 pact = &act;
5723 } else {
5724 pact = NULL;
5725 }
5726
5727 ret = get_errno(do_sigaction(arg1, pact, &oact));
5728
5729 if (!is_error(ret) && arg3) {
5730 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5731 goto efault;
5732 old_act->_sa_handler = oact._sa_handler;
5733 old_act->sa_flags = oact.sa_flags;
5734 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5735 old_act->sa_mask.sig[1] = 0;
5736 old_act->sa_mask.sig[2] = 0;
5737 old_act->sa_mask.sig[3] = 0;
5738 unlock_user_struct(old_act, arg3, 1);
5739 }
5740 #else
5741 struct target_old_sigaction *old_act;
5742 struct target_sigaction act, oact, *pact;
5743 if (arg2) {
5744 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5745 goto efault;
5746 act._sa_handler = old_act->_sa_handler;
5747 target_siginitset(&act.sa_mask, old_act->sa_mask);
5748 act.sa_flags = old_act->sa_flags;
5749 act.sa_restorer = old_act->sa_restorer;
5750 unlock_user_struct(old_act, arg2, 0);
5751 pact = &act;
5752 } else {
5753 pact = NULL;
5754 }
5755 ret = get_errno(do_sigaction(arg1, pact, &oact));
5756 if (!is_error(ret) && arg3) {
5757 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5758 goto efault;
5759 old_act->_sa_handler = oact._sa_handler;
5760 old_act->sa_mask = oact.sa_mask.sig[0];
5761 old_act->sa_flags = oact.sa_flags;
5762 old_act->sa_restorer = oact.sa_restorer;
5763 unlock_user_struct(old_act, arg3, 1);
5764 }
5765 #endif
5766 }
5767 break;
5768 #endif
5769 case TARGET_NR_rt_sigaction:
5770 {
5771 #if defined(TARGET_ALPHA)
5772 struct target_sigaction act, oact, *pact = 0;
5773 struct target_rt_sigaction *rt_act;
5774 /* ??? arg4 == sizeof(sigset_t). */
5775 if (arg2) {
5776 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5777 goto efault;
5778 act._sa_handler = rt_act->_sa_handler;
5779 act.sa_mask = rt_act->sa_mask;
5780 act.sa_flags = rt_act->sa_flags;
5781 act.sa_restorer = arg5;
5782 unlock_user_struct(rt_act, arg2, 0);
5783 pact = &act;
5784 }
5785 ret = get_errno(do_sigaction(arg1, pact, &oact));
5786 if (!is_error(ret) && arg3) {
5787 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5788 goto efault;
5789 rt_act->_sa_handler = oact._sa_handler;
5790 rt_act->sa_mask = oact.sa_mask;
5791 rt_act->sa_flags = oact.sa_flags;
5792 unlock_user_struct(rt_act, arg3, 1);
5793 }
5794 #else
5795 struct target_sigaction *act;
5796 struct target_sigaction *oact;
5797
5798 if (arg2) {
5799 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5800 goto efault;
5801 } else
5802 act = NULL;
5803 if (arg3) {
5804 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5805 ret = -TARGET_EFAULT;
5806 goto rt_sigaction_fail;
5807 }
5808 } else
5809 oact = NULL;
5810 ret = get_errno(do_sigaction(arg1, act, oact));
5811 rt_sigaction_fail:
5812 if (act)
5813 unlock_user_struct(act, arg2, 0);
5814 if (oact)
5815 unlock_user_struct(oact, arg3, 1);
5816 #endif
5817 }
5818 break;
5819 #ifdef TARGET_NR_sgetmask /* not on alpha */
5820 case TARGET_NR_sgetmask:
5821 {
5822 sigset_t cur_set;
5823 abi_ulong target_set;
5824 sigprocmask(0, NULL, &cur_set);
5825 host_to_target_old_sigset(&target_set, &cur_set);
5826 ret = target_set;
5827 }
5828 break;
5829 #endif
5830 #ifdef TARGET_NR_ssetmask /* not on alpha */
5831 case TARGET_NR_ssetmask:
5832 {
5833 sigset_t set, oset, cur_set;
5834 abi_ulong target_set = arg1;
5835 sigprocmask(0, NULL, &cur_set);
5836 target_to_host_old_sigset(&set, &target_set);
5837 sigorset(&set, &set, &cur_set);
5838 sigprocmask(SIG_SETMASK, &set, &oset);
5839 host_to_target_old_sigset(&target_set, &oset);
5840 ret = target_set;
5841 }
5842 break;
5843 #endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha's osf_sigprocmask takes the mask by value in arg2 and
               returns the old mask in the value register.  */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = get_errno(sigprocmask(how, &set, &oldset));

            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                /* Fix: was "->[IR_V0]", a syntax error — the register file
                   is the "ir" array of CPUAlphaState (cf. the getxpid arm
                   above).  V0 is cleared so the guest sees success even
                   though the returned mask may look like an error value.  */
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic sigprocmask: old (non-rt) sigset layout, arg2 is the
               new-set pointer (may be 0), arg3 receives the old set.  */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* No new set: "how" is ignored by the kernel in this case. */
                how = 0;
                set_ptr = NULL;
            }
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        break;
#endif
5915 case TARGET_NR_rt_sigprocmask:
5916 {
5917 int how = arg1;
5918 sigset_t set, oldset, *set_ptr;
5919
5920 if (arg2) {
5921 switch(how) {
5922 case TARGET_SIG_BLOCK:
5923 how = SIG_BLOCK;
5924 break;
5925 case TARGET_SIG_UNBLOCK:
5926 how = SIG_UNBLOCK;
5927 break;
5928 case TARGET_SIG_SETMASK:
5929 how = SIG_SETMASK;
5930 break;
5931 default:
5932 ret = -TARGET_EINVAL;
5933 goto fail;
5934 }
5935 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5936 goto efault;
5937 target_to_host_sigset(&set, p);
5938 unlock_user(p, arg2, 0);
5939 set_ptr = &set;
5940 } else {
5941 how = 0;
5942 set_ptr = NULL;
5943 }
5944 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5945 if (!is_error(ret) && arg3) {
5946 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5947 goto efault;
5948 host_to_target_sigset(p, &oldset);
5949 unlock_user(p, arg3, sizeof(target_sigset_t));
5950 }
5951 }
5952 break;
5953 #ifdef TARGET_NR_sigpending
5954 case TARGET_NR_sigpending:
5955 {
5956 sigset_t set;
5957 ret = get_errno(sigpending(&set));
5958 if (!is_error(ret)) {
5959 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5960 goto efault;
5961 host_to_target_old_sigset(p, &set);
5962 unlock_user(p, arg1, sizeof(target_sigset_t));
5963 }
5964 }
5965 break;
5966 #endif
5967 case TARGET_NR_rt_sigpending:
5968 {
5969 sigset_t set;
5970 ret = get_errno(sigpending(&set));
5971 if (!is_error(ret)) {
5972 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5973 goto efault;
5974 host_to_target_sigset(p, &set);
5975 unlock_user(p, arg1, sizeof(target_sigset_t));
5976 }
5977 }
5978 break;
5979 #ifdef TARGET_NR_sigsuspend
5980 case TARGET_NR_sigsuspend:
5981 {
5982 sigset_t set;
5983 #if defined(TARGET_ALPHA)
5984 abi_ulong mask = arg1;
5985 target_to_host_old_sigset(&set, &mask);
5986 #else
5987 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5988 goto efault;
5989 target_to_host_old_sigset(&set, p);
5990 unlock_user(p, arg1, 0);
5991 #endif
5992 ret = get_errno(sigsuspend(&set));
5993 }
5994 break;
5995 #endif
5996 case TARGET_NR_rt_sigsuspend:
5997 {
5998 sigset_t set;
5999 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6000 goto efault;
6001 target_to_host_sigset(&set, p);
6002 unlock_user(p, arg1, 0);
6003 ret = get_errno(sigsuspend(&set));
6004 }
6005 break;
6006 case TARGET_NR_rt_sigtimedwait:
6007 {
6008 sigset_t set;
6009 struct timespec uts, *puts;
6010 siginfo_t uinfo;
6011
6012 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6013 goto efault;
6014 target_to_host_sigset(&set, p);
6015 unlock_user(p, arg1, 0);
6016 if (arg3) {
6017 puts = &uts;
6018 target_to_host_timespec(puts, arg3);
6019 } else {
6020 puts = NULL;
6021 }
6022 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6023 if (!is_error(ret) && arg2) {
6024 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6025 goto efault;
6026 host_to_target_siginfo(p, &uinfo);
6027 unlock_user(p, arg2, sizeof(target_siginfo_t));
6028 }
6029 }
6030 break;
6031 case TARGET_NR_rt_sigqueueinfo:
6032 {
6033 siginfo_t uinfo;
6034 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6035 goto efault;
6036 target_to_host_siginfo(&uinfo, p);
6037 unlock_user(p, arg1, 0);
6038 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6039 }
6040 break;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret already lands in the guest's return register (e.g. eax),
           so no transcoding needs to be done.  */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret already lands in the guest's return register (e.g. eax),
           so no transcoding needs to be done.  */
        ret = do_rt_sigreturn(cpu_env);
        break;
6051 case TARGET_NR_sethostname:
6052 if (!(p = lock_user_string(arg1)))
6053 goto efault;
6054 ret = get_errno(sethostname(p, arg2));
6055 unlock_user(p, arg1, 0);
6056 break;
6057 case TARGET_NR_setrlimit:
6058 {
6059 int resource = target_to_host_resource(arg1);
6060 struct target_rlimit *target_rlim;
6061 struct rlimit rlim;
6062 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6063 goto efault;
6064 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6065 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6066 unlock_user_struct(target_rlim, arg2, 0);
6067 ret = get_errno(setrlimit(resource, &rlim));
6068 }
6069 break;
6070 case TARGET_NR_getrlimit:
6071 {
6072 int resource = target_to_host_resource(arg1);
6073 struct target_rlimit *target_rlim;
6074 struct rlimit rlim;
6075
6076 ret = get_errno(getrlimit(resource, &rlim));
6077 if (!is_error(ret)) {
6078 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6079 goto efault;
6080 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6081 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6082 unlock_user_struct(target_rlim, arg2, 1);
6083 }
6084 }
6085 break;
6086 case TARGET_NR_getrusage:
6087 {
6088 struct rusage rusage;
6089 ret = get_errno(getrusage(arg1, &rusage));
6090 if (!is_error(ret)) {
6091 host_to_target_rusage(arg2, &rusage);
6092 }
6093 }
6094 break;
6095 case TARGET_NR_gettimeofday:
6096 {
6097 struct timeval tv;
6098 ret = get_errno(gettimeofday(&tv, NULL));
6099 if (!is_error(ret)) {
6100 if (copy_to_user_timeval(arg1, &tv))
6101 goto efault;
6102 }
6103 }
6104 break;
6105 case TARGET_NR_settimeofday:
6106 {
6107 struct timeval tv;
6108 if (copy_from_user_timeval(&tv, arg1))
6109 goto efault;
6110 ret = get_errno(settimeofday(&tv, NULL));
6111 }
6112 break;
6113 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6114 case TARGET_NR_select:
6115 {
6116 struct target_sel_arg_struct *sel;
6117 abi_ulong inp, outp, exp, tvp;
6118 long nsel;
6119
6120 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6121 goto efault;
6122 nsel = tswapal(sel->n);
6123 inp = tswapal(sel->inp);
6124 outp = tswapal(sel->outp);
6125 exp = tswapal(sel->exp);
6126 tvp = tswapal(sel->tvp);
6127 unlock_user_struct(sel, arg1, 0);
6128 ret = do_select(nsel, inp, outp, exp, tvp);
6129 }
6130 break;
6131 #endif
6132 #ifdef TARGET_NR_pselect6
6133 case TARGET_NR_pselect6:
6134 {
6135 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6136 fd_set rfds, wfds, efds;
6137 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6138 struct timespec ts, *ts_ptr;
6139
6140 /*
6141 * The 6th arg is actually two args smashed together,
6142 * so we cannot use the C library.
6143 */
6144 sigset_t set;
6145 struct {
6146 sigset_t *set;
6147 size_t size;
6148 } sig, *sig_ptr;
6149
6150 abi_ulong arg_sigset, arg_sigsize, *arg7;
6151 target_sigset_t *target_sigset;
6152
6153 n = arg1;
6154 rfd_addr = arg2;
6155 wfd_addr = arg3;
6156 efd_addr = arg4;
6157 ts_addr = arg5;
6158
6159 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6160 if (ret) {
6161 goto fail;
6162 }
6163 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6164 if (ret) {
6165 goto fail;
6166 }
6167 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6168 if (ret) {
6169 goto fail;
6170 }
6171
6172 /*
6173 * This takes a timespec, and not a timeval, so we cannot
6174 * use the do_select() helper ...
6175 */
6176 if (ts_addr) {
6177 if (target_to_host_timespec(&ts, ts_addr)) {
6178 goto efault;
6179 }
6180 ts_ptr = &ts;
6181 } else {
6182 ts_ptr = NULL;
6183 }
6184
6185 /* Extract the two packed args for the sigset */
6186 if (arg6) {
6187 sig_ptr = &sig;
6188 sig.size = _NSIG / 8;
6189
6190 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6191 if (!arg7) {
6192 goto efault;
6193 }
6194 arg_sigset = tswapal(arg7[0]);
6195 arg_sigsize = tswapal(arg7[1]);
6196 unlock_user(arg7, arg6, 0);
6197
6198 if (arg_sigset) {
6199 sig.set = &set;
6200 if (arg_sigsize != sizeof(*target_sigset)) {
6201 /* Like the kernel, we enforce correct size sigsets */
6202 ret = -TARGET_EINVAL;
6203 goto fail;
6204 }
6205 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6206 sizeof(*target_sigset), 1);
6207 if (!target_sigset) {
6208 goto efault;
6209 }
6210 target_to_host_sigset(&set, target_sigset);
6211 unlock_user(target_sigset, arg_sigset, 0);
6212 } else {
6213 sig.set = NULL;
6214 }
6215 } else {
6216 sig_ptr = NULL;
6217 }
6218
6219 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6220 ts_ptr, sig_ptr));
6221
6222 if (!is_error(ret)) {
6223 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6224 goto efault;
6225 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6226 goto efault;
6227 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6228 goto efault;
6229
6230 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6231 goto efault;
6232 }
6233 }
6234 break;
6235 #endif
6236 case TARGET_NR_symlink:
6237 {
6238 void *p2;
6239 p = lock_user_string(arg1);
6240 p2 = lock_user_string(arg2);
6241 if (!p || !p2)
6242 ret = -TARGET_EFAULT;
6243 else
6244 ret = get_errno(symlink(p, p2));
6245 unlock_user(p2, arg2, 0);
6246 unlock_user(p, arg1, 0);
6247 }
6248 break;
6249 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6250 case TARGET_NR_symlinkat:
6251 {
6252 void *p2;
6253 p = lock_user_string(arg1);
6254 p2 = lock_user_string(arg3);
6255 if (!p || !p2)
6256 ret = -TARGET_EFAULT;
6257 else
6258 ret = get_errno(sys_symlinkat(p, arg2, p2));
6259 unlock_user(p2, arg3, 0);
6260 unlock_user(p, arg1, 0);
6261 }
6262 break;
6263 #endif
6264 #ifdef TARGET_NR_oldlstat
6265 case TARGET_NR_oldlstat:
6266 goto unimplemented;
6267 #endif
6268 case TARGET_NR_readlink:
6269 {
6270 void *p2, *temp;
6271 p = lock_user_string(arg1);
6272 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6273 if (!p || !p2)
6274 ret = -TARGET_EFAULT;
6275 else {
6276 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6277 char real[PATH_MAX];
6278 temp = realpath(exec_path,real);
6279 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
6280 snprintf((char *)p2, arg3, "%s", real);
6281 }
6282 else
6283 ret = get_errno(readlink(path(p), p2, arg3));
6284 }
6285 unlock_user(p2, arg2, ret);
6286 unlock_user(p, arg1, 0);
6287 }
6288 break;
6289 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6290 case TARGET_NR_readlinkat:
6291 {
6292 void *p2;
6293 p = lock_user_string(arg2);
6294 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6295 if (!p || !p2)
6296 ret = -TARGET_EFAULT;
6297 else
6298 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6299 unlock_user(p2, arg3, ret);
6300 unlock_user(p, arg2, 0);
6301 }
6302 break;
6303 #endif
6304 #ifdef TARGET_NR_uselib
6305 case TARGET_NR_uselib:
6306 goto unimplemented;
6307 #endif
6308 #ifdef TARGET_NR_swapon
6309 case TARGET_NR_swapon:
6310 if (!(p = lock_user_string(arg1)))
6311 goto efault;
6312 ret = get_errno(swapon(p, arg2));
6313 unlock_user(p, arg1, 0);
6314 break;
6315 #endif
6316 case TARGET_NR_reboot:
6317 if (!(p = lock_user_string(arg4)))
6318 goto efault;
6319 ret = reboot(arg1, arg2, arg3, p);
6320 unlock_user(p, arg4, 0);
6321 break;
6322 #ifdef TARGET_NR_readdir
6323 case TARGET_NR_readdir:
6324 goto unimplemented;
6325 #endif
6326 #ifdef TARGET_NR_mmap
6327 case TARGET_NR_mmap:
6328 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6329 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6330 || defined(TARGET_S390X)
6331 {
6332 abi_ulong *v;
6333 abi_ulong v1, v2, v3, v4, v5, v6;
6334 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6335 goto efault;
6336 v1 = tswapal(v[0]);
6337 v2 = tswapal(v[1]);
6338 v3 = tswapal(v[2]);
6339 v4 = tswapal(v[3]);
6340 v5 = tswapal(v[4]);
6341 v6 = tswapal(v[5]);
6342 unlock_user(v, arg1, 0);
6343 ret = get_errno(target_mmap(v1, v2, v3,
6344 target_to_host_bitmask(v4, mmap_flags_tbl),
6345 v5, v6));
6346 }
6347 #else
6348 ret = get_errno(target_mmap(arg1, arg2, arg3,
6349 target_to_host_bitmask(arg4, mmap_flags_tbl),
6350 arg5,
6351 arg6));
6352 #endif
6353 break;
6354 #endif
6355 #ifdef TARGET_NR_mmap2
6356 case TARGET_NR_mmap2:
6357 #ifndef MMAP_SHIFT
6358 #define MMAP_SHIFT 12
6359 #endif
6360 ret = get_errno(target_mmap(arg1, arg2, arg3,
6361 target_to_host_bitmask(arg4, mmap_flags_tbl),
6362 arg5,
6363 arg6 << MMAP_SHIFT));
6364 break;
6365 #endif
6366 case TARGET_NR_munmap:
6367 ret = get_errno(target_munmap(arg1, arg2));
6368 break;
6369 case TARGET_NR_mprotect:
6370 {
6371 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6372 /* Special hack to detect libc making the stack executable. */
6373 if ((arg3 & PROT_GROWSDOWN)
6374 && arg1 >= ts->info->stack_limit
6375 && arg1 <= ts->info->start_stack) {
6376 arg3 &= ~PROT_GROWSDOWN;
6377 arg2 = arg2 + arg1 - ts->info->stack_limit;
6378 arg1 = ts->info->stack_limit;
6379 }
6380 }
6381 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6382 break;
6383 #ifdef TARGET_NR_mremap
6384 case TARGET_NR_mremap:
6385 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6386 break;
6387 #endif
6388 /* ??? msync/mlock/munlock are broken for softmmu. */
6389 #ifdef TARGET_NR_msync
6390 case TARGET_NR_msync:
6391 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6392 break;
6393 #endif
6394 #ifdef TARGET_NR_mlock
6395 case TARGET_NR_mlock:
6396 ret = get_errno(mlock(g2h(arg1), arg2));
6397 break;
6398 #endif
6399 #ifdef TARGET_NR_munlock
6400 case TARGET_NR_munlock:
6401 ret = get_errno(munlock(g2h(arg1), arg2));
6402 break;
6403 #endif
6404 #ifdef TARGET_NR_mlockall
6405 case TARGET_NR_mlockall:
6406 ret = get_errno(mlockall(arg1));
6407 break;
6408 #endif
6409 #ifdef TARGET_NR_munlockall
6410 case TARGET_NR_munlockall:
6411 ret = get_errno(munlockall());
6412 break;
6413 #endif
6414 case TARGET_NR_truncate:
6415 if (!(p = lock_user_string(arg1)))
6416 goto efault;
6417 ret = get_errno(truncate(p, arg2));
6418 unlock_user(p, arg1, 0);
6419 break;
6420 case TARGET_NR_ftruncate:
6421 ret = get_errno(ftruncate(arg1, arg2));
6422 break;
6423 case TARGET_NR_fchmod:
6424 ret = get_errno(fchmod(arg1, arg2));
6425 break;
6426 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6427 case TARGET_NR_fchmodat:
6428 if (!(p = lock_user_string(arg2)))
6429 goto efault;
6430 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6431 unlock_user(p, arg2, 0);
6432 break;
6433 #endif
6434 case TARGET_NR_getpriority:
6435 /* libc does special remapping of the return value of
6436 * sys_getpriority() so it's just easiest to call
6437 * sys_getpriority() directly rather than through libc. */
6438 ret = get_errno(sys_getpriority(arg1, arg2));
6439 break;
6440 case TARGET_NR_setpriority:
6441 ret = get_errno(setpriority(arg1, arg2, arg3));
6442 break;
6443 #ifdef TARGET_NR_profil
6444 case TARGET_NR_profil:
6445 goto unimplemented;
6446 #endif
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Shared tail: convert the host result in 'stfs' into the target's
         * struct target_statfs at guest address arg2, field by field with
         * byte swapping.  TARGET_NR_fstatfs jumps here after filling 'stfs'
         * itself, which is why this is a label rather than plain fallthrough. */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            /* f_fsid is an array-wrapped struct on both sides; copy both words. */
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg2, 1);
        }
        break;
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        /* 64-bit variant of convert_statfs.  NOTE: the guest output buffer
         * is arg3 here (statfs64 takes a size argument in arg2), not arg2.
         * TARGET_NR_fstatfs64 jumps here. */
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg3, 1);
        }
        break;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
6503 #ifdef TARGET_NR_ioperm
6504 case TARGET_NR_ioperm:
6505 goto unimplemented;
6506 #endif
6507 #ifdef TARGET_NR_socketcall
6508 case TARGET_NR_socketcall:
6509 ret = do_socketcall(arg1, arg2);
6510 break;
6511 #endif
6512 #ifdef TARGET_NR_accept
6513 case TARGET_NR_accept:
6514 ret = do_accept(arg1, arg2, arg3);
6515 break;
6516 #endif
6517 #ifdef TARGET_NR_bind
6518 case TARGET_NR_bind:
6519 ret = do_bind(arg1, arg2, arg3);
6520 break;
6521 #endif
6522 #ifdef TARGET_NR_connect
6523 case TARGET_NR_connect:
6524 ret = do_connect(arg1, arg2, arg3);
6525 break;
6526 #endif
6527 #ifdef TARGET_NR_getpeername
6528 case TARGET_NR_getpeername:
6529 ret = do_getpeername(arg1, arg2, arg3);
6530 break;
6531 #endif
6532 #ifdef TARGET_NR_getsockname
6533 case TARGET_NR_getsockname:
6534 ret = do_getsockname(arg1, arg2, arg3);
6535 break;
6536 #endif
6537 #ifdef TARGET_NR_getsockopt
6538 case TARGET_NR_getsockopt:
6539 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6540 break;
6541 #endif
6542 #ifdef TARGET_NR_listen
6543 case TARGET_NR_listen:
6544 ret = get_errno(listen(arg1, arg2));
6545 break;
6546 #endif
6547 #ifdef TARGET_NR_recv
6548 case TARGET_NR_recv:
6549 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6550 break;
6551 #endif
6552 #ifdef TARGET_NR_recvfrom
6553 case TARGET_NR_recvfrom:
6554 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6555 break;
6556 #endif
6557 #ifdef TARGET_NR_recvmsg
6558 case TARGET_NR_recvmsg:
6559 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6560 break;
6561 #endif
6562 #ifdef TARGET_NR_send
6563 case TARGET_NR_send:
6564 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6565 break;
6566 #endif
6567 #ifdef TARGET_NR_sendmsg
6568 case TARGET_NR_sendmsg:
6569 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6570 break;
6571 #endif
6572 #ifdef TARGET_NR_sendto
6573 case TARGET_NR_sendto:
6574 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6575 break;
6576 #endif
6577 #ifdef TARGET_NR_shutdown
6578 case TARGET_NR_shutdown:
6579 ret = get_errno(shutdown(arg1, arg2));
6580 break;
6581 #endif
6582 #ifdef TARGET_NR_socket
6583 case TARGET_NR_socket:
6584 ret = do_socket(arg1, arg2, arg3);
6585 break;
6586 #endif
6587 #ifdef TARGET_NR_socketpair
6588 case TARGET_NR_socketpair:
6589 ret = do_socketpair(arg1, arg2, arg3, arg4);
6590 break;
6591 #endif
6592 #ifdef TARGET_NR_setsockopt
6593 case TARGET_NR_setsockopt:
6594 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6595 break;
6596 #endif
6597
6598 case TARGET_NR_syslog:
6599 if (!(p = lock_user_string(arg2)))
6600 goto efault;
6601 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6602 unlock_user(p, arg2, 0);
6603 break;
6604
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            /* arg2 == NULL is legal for setitimer (query/clear semantics
             * are left to the host call); only convert when non-NULL.
             * A target itimerval is two consecutive target_timevals
             * (it_interval followed by it_value), hence the offset below. */
            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    goto efault;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* Write the old value back only on success and if the guest
             * supplied an output buffer (arg3). */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            /* Same two-timeval layout as setitimer's output conversion. */
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    goto efault;
            }
        }
        break;
6642 case TARGET_NR_stat:
6643 if (!(p = lock_user_string(arg1)))
6644 goto efault;
6645 ret = get_errno(stat(path(p), &st));
6646 unlock_user(p, arg1, 0);
6647 goto do_stat;
6648 case TARGET_NR_lstat:
6649 if (!(p = lock_user_string(arg1)))
6650 goto efault;
6651 ret = get_errno(lstat(path(p), &st));
6652 unlock_user(p, arg1, 0);
6653 goto do_stat;
6654 case TARGET_NR_fstat:
6655 {
6656 ret = get_errno(fstat(arg1, &st));
6657 do_stat:
6658 if (!is_error(ret)) {
6659 struct target_stat *target_st;
6660
6661 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6662 goto efault;
6663 memset(target_st, 0, sizeof(*target_st));
6664 __put_user(st.st_dev, &target_st->st_dev);
6665 __put_user(st.st_ino, &target_st->st_ino);
6666 __put_user(st.st_mode, &target_st->st_mode);
6667 __put_user(st.st_uid, &target_st->st_uid);
6668 __put_user(st.st_gid, &target_st->st_gid);
6669 __put_user(st.st_nlink, &target_st->st_nlink);
6670 __put_user(st.st_rdev, &target_st->st_rdev);
6671 __put_user(st.st_size, &target_st->st_size);
6672 __put_user(st.st_blksize, &target_st->st_blksize);
6673 __put_user(st.st_blocks, &target_st->st_blocks);
6674 __put_user(st.st_atime, &target_st->target_st_atime);
6675 __put_user(st.st_mtime, &target_st->target_st_mtime);
6676 __put_user(st.st_ctime, &target_st->target_st_ctime);
6677 unlock_user_struct(target_st, arg2, 1);
6678 }
6679 }
6680 break;
6681 #ifdef TARGET_NR_olduname
6682 case TARGET_NR_olduname:
6683 goto unimplemented;
6684 #endif
6685 #ifdef TARGET_NR_iopl
6686 case TARGET_NR_iopl:
6687 goto unimplemented;
6688 #endif
6689 case TARGET_NR_vhangup:
6690 ret = get_errno(vhangup());
6691 break;
6692 #ifdef TARGET_NR_idle
6693 case TARGET_NR_idle:
6694 goto unimplemented;
6695 #endif
6696 #ifdef TARGET_NR_syscall
6697 case TARGET_NR_syscall:
6698 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6699 arg6, arg7, arg8, 0);
6700 break;
6701 #endif
6702 case TARGET_NR_wait4:
6703 {
6704 int status;
6705 abi_long status_ptr = arg2;
6706 struct rusage rusage, *rusage_ptr;
6707 abi_ulong target_rusage = arg4;
6708 if (target_rusage)
6709 rusage_ptr = &rusage;
6710 else
6711 rusage_ptr = NULL;
6712 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6713 if (!is_error(ret)) {
6714 if (status_ptr && ret) {
6715 status = host_to_target_waitstatus(status);
6716 if (put_user_s32(status, status_ptr))
6717 goto efault;
6718 }
6719 if (target_rusage)
6720 host_to_target_rusage(target_rusage, &rusage);
6721 }
6722 }
6723 break;
6724 #ifdef TARGET_NR_swapoff
6725 case TARGET_NR_swapoff:
6726 if (!(p = lock_user_string(arg1)))
6727 goto efault;
6728 ret = get_errno(swapoff(p));
6729 unlock_user(p, arg1, 0);
6730 break;
6731 #endif
6732 case TARGET_NR_sysinfo:
6733 {
6734 struct target_sysinfo *target_value;
6735 struct sysinfo value;
6736 ret = get_errno(sysinfo(&value));
6737 if (!is_error(ret) && arg1)
6738 {
6739 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6740 goto efault;
6741 __put_user(value.uptime, &target_value->uptime);
6742 __put_user(value.loads[0], &target_value->loads[0]);
6743 __put_user(value.loads[1], &target_value->loads[1]);
6744 __put_user(value.loads[2], &target_value->loads[2]);
6745 __put_user(value.totalram, &target_value->totalram);
6746 __put_user(value.freeram, &target_value->freeram);
6747 __put_user(value.sharedram, &target_value->sharedram);
6748 __put_user(value.bufferram, &target_value->bufferram);
6749 __put_user(value.totalswap, &target_value->totalswap);
6750 __put_user(value.freeswap, &target_value->freeswap);
6751 __put_user(value.procs, &target_value->procs);
6752 __put_user(value.totalhigh, &target_value->totalhigh);
6753 __put_user(value.freehigh, &target_value->freehigh);
6754 __put_user(value.mem_unit, &target_value->mem_unit);
6755 unlock_user_struct(target_value, arg1, 1);
6756 }
6757 }
6758 break;
6759 #ifdef TARGET_NR_ipc
6760 case TARGET_NR_ipc:
6761 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6762 break;
6763 #endif
6764 #ifdef TARGET_NR_semget
6765 case TARGET_NR_semget:
6766 ret = get_errno(semget(arg1, arg2, arg3));
6767 break;
6768 #endif
6769 #ifdef TARGET_NR_semop
6770 case TARGET_NR_semop:
6771 ret = get_errno(do_semop(arg1, arg2, arg3));
6772 break;
6773 #endif
6774 #ifdef TARGET_NR_semctl
6775 case TARGET_NR_semctl:
6776 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6777 break;
6778 #endif
6779 #ifdef TARGET_NR_msgctl
6780 case TARGET_NR_msgctl:
6781 ret = do_msgctl(arg1, arg2, arg3);
6782 break;
6783 #endif
6784 #ifdef TARGET_NR_msgget
6785 case TARGET_NR_msgget:
6786 ret = get_errno(msgget(arg1, arg2));
6787 break;
6788 #endif
6789 #ifdef TARGET_NR_msgrcv
6790 case TARGET_NR_msgrcv:
6791 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6792 break;
6793 #endif
6794 #ifdef TARGET_NR_msgsnd
6795 case TARGET_NR_msgsnd:
6796 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6797 break;
6798 #endif
6799 #ifdef TARGET_NR_shmget
6800 case TARGET_NR_shmget:
6801 ret = get_errno(shmget(arg1, arg2, arg3));
6802 break;
6803 #endif
6804 #ifdef TARGET_NR_shmctl
6805 case TARGET_NR_shmctl:
6806 ret = do_shmctl(arg1, arg2, arg3);
6807 break;
6808 #endif
6809 #ifdef TARGET_NR_shmat
6810 case TARGET_NR_shmat:
6811 ret = do_shmat(arg1, arg2, arg3);
6812 break;
6813 #endif
6814 #ifdef TARGET_NR_shmdt
6815 case TARGET_NR_shmdt:
6816 ret = do_shmdt(arg1);
6817 break;
6818 #endif
6819 case TARGET_NR_fsync:
6820 ret = get_errno(fsync(arg1));
6821 break;
6822 case TARGET_NR_clone:
6823 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6824 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6825 #elif defined(TARGET_CRIS)
6826 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6827 #elif defined(TARGET_S390X)
6828 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6829 #else
6830 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6831 #endif
6832 break;
6833 #ifdef __NR_exit_group
6834 /* new thread calls */
6835 case TARGET_NR_exit_group:
6836 #ifdef TARGET_GPROF
6837 _mcleanup();
6838 #endif
6839 gdb_exit(cpu_env, arg1);
6840 ret = get_errno(exit_group(arg1));
6841 break;
6842 #endif
6843 case TARGET_NR_setdomainname:
6844 if (!(p = lock_user_string(arg1)))
6845 goto efault;
6846 ret = get_errno(setdomainname(p, arg2));
6847 unlock_user(p, arg1, 0);
6848 break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy (buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
6868 #ifdef TARGET_I386
6869 case TARGET_NR_modify_ldt:
6870 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6871 break;
6872 #if !defined(TARGET_X86_64)
6873 case TARGET_NR_vm86old:
6874 goto unimplemented;
6875 case TARGET_NR_vm86:
6876 ret = do_vm86(cpu_env, arg1, arg2);
6877 break;
6878 #endif
6879 #endif
6880 case TARGET_NR_adjtimex:
6881 goto unimplemented;
6882 #ifdef TARGET_NR_create_module
6883 case TARGET_NR_create_module:
6884 #endif
6885 case TARGET_NR_init_module:
6886 case TARGET_NR_delete_module:
6887 #ifdef TARGET_NR_get_kernel_syms
6888 case TARGET_NR_get_kernel_syms:
6889 #endif
6890 goto unimplemented;
6891 case TARGET_NR_quotactl:
6892 goto unimplemented;
6893 case TARGET_NR_getpgid:
6894 ret = get_errno(getpgid(arg1));
6895 break;
6896 case TARGET_NR_fchdir:
6897 ret = get_errno(fchdir(arg1));
6898 break;
6899 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6900 case TARGET_NR_bdflush:
6901 goto unimplemented;
6902 #endif
6903 #ifdef TARGET_NR_sysfs
6904 case TARGET_NR_sysfs:
6905 goto unimplemented;
6906 #endif
6907 case TARGET_NR_personality:
6908 ret = get_errno(personality(arg1));
6909 break;
6910 #ifdef TARGET_NR_afs_syscall
6911 case TARGET_NR_afs_syscall:
6912 goto unimplemented;
6913 #endif
6914 #ifdef TARGET_NR__llseek /* Not on alpha */
6915 case TARGET_NR__llseek:
6916 {
6917 int64_t res;
6918 #if !defined(__NR_llseek)
6919 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6920 if (res == -1) {
6921 ret = get_errno(res);
6922 } else {
6923 ret = 0;
6924 }
6925 #else
6926 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6927 #endif
6928 if ((ret == 0) && put_user_s64(res, arg4)) {
6929 goto efault;
6930 }
6931 }
6932 break;
6933 #endif
6934 case TARGET_NR_getdents:
6935 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6936 {
6937 struct target_dirent *target_dirp;
6938 struct linux_dirent *dirp;
6939 abi_long count = arg3;
6940
6941 dirp = malloc(count);
6942 if (!dirp) {
6943 ret = -TARGET_ENOMEM;
6944 goto fail;
6945 }
6946
6947 ret = get_errno(sys_getdents(arg1, dirp, count));
6948 if (!is_error(ret)) {
6949 struct linux_dirent *de;
6950 struct target_dirent *tde;
6951 int len = ret;
6952 int reclen, treclen;
6953 int count1, tnamelen;
6954
6955 count1 = 0;
6956 de = dirp;
6957 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6958 goto efault;
6959 tde = target_dirp;
6960 while (len > 0) {
6961 reclen = de->d_reclen;
6962 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6963 tde->d_reclen = tswap16(treclen);
6964 tde->d_ino = tswapal(de->d_ino);
6965 tde->d_off = tswapal(de->d_off);
6966 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6967 if (tnamelen > 256)
6968 tnamelen = 256;
6969 /* XXX: may not be correct */
6970 pstrcpy(tde->d_name, tnamelen, de->d_name);
6971 de = (struct linux_dirent *)((char *)de + reclen);
6972 len -= reclen;
6973 tde = (struct target_dirent *)((char *)tde + treclen);
6974 count1 += treclen;
6975 }
6976 ret = count1;
6977 unlock_user(target_dirp, arg2, ret);
6978 }
6979 free(dirp);
6980 }
6981 #else
6982 {
6983 struct linux_dirent *dirp;
6984 abi_long count = arg3;
6985
6986 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6987 goto efault;
6988 ret = get_errno(sys_getdents(arg1, dirp, count));
6989 if (!is_error(ret)) {
6990 struct linux_dirent *de;
6991 int len = ret;
6992 int reclen;
6993 de = dirp;
6994 while (len > 0) {
6995 reclen = de->d_reclen;
6996 if (reclen > len)
6997 break;
6998 de->d_reclen = tswap16(reclen);
6999 tswapls(&de->d_ino);
7000 tswapls(&de->d_off);
7001 de = (struct linux_dirent *)((char *)de + reclen);
7002 len -= reclen;
7003 }
7004 }
7005 unlock_user(dirp, arg2, ret);
7006 }
7007 #endif
7008 break;
7009 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7010 case TARGET_NR_getdents64:
7011 {
7012 struct linux_dirent64 *dirp;
7013 abi_long count = arg3;
7014 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7015 goto efault;
7016 ret = get_errno(sys_getdents64(arg1, dirp, count));
7017 if (!is_error(ret)) {
7018 struct linux_dirent64 *de;
7019 int len = ret;
7020 int reclen;
7021 de = dirp;
7022 while (len > 0) {
7023 reclen = de->d_reclen;
7024 if (reclen > len)
7025 break;
7026 de->d_reclen = tswap16(reclen);
7027 tswap64s((uint64_t *)&de->d_ino);
7028 tswap64s((uint64_t *)&de->d_off);
7029 de = (struct linux_dirent64 *)((char *)de + reclen);
7030 len -= reclen;
7031 }
7032 }
7033 unlock_user(dirp, arg2, ret);
7034 }
7035 break;
7036 #endif /* TARGET_NR_getdents64 */
7037 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
7038 #ifdef TARGET_S390X
7039 case TARGET_NR_select:
7040 #else
7041 case TARGET_NR__newselect:
7042 #endif
7043 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7044 break;
7045 #endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        /* poll and ppoll share the pollfd array conversion; they diverge
         * only on timeout/sigmask handling, selected on 'num' below. */
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            /* For plain poll(), arg3 is the millisecond timeout; for ppoll()
             * the same register holds the guest timespec pointer instead. */
            int timeout = arg3;
            struct pollfd *pfd;
            unsigned int i;

            target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
            if (!target_pfd)
                goto efault;

            /* Build a host pollfd array from the guest one (fd/events in,
             * revents written back after the call). */
            pfd = alloca(sizeof(struct pollfd) * nfds);
            for(i = 0; i < nfds; i++) {
                pfd[i].fd = tswap32(target_pfd[i].fd);
                pfd[i].events = tswap16(target_pfd[i].events);
            }

# ifdef TARGET_NR_ppoll
            if (num == TARGET_NR_ppoll) {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                /* NULL timespec means block indefinitely. On any efault exit
                 * target_pfd must be unlocked first (no write-back: len 0). */
                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                } else {
                    timeout_ts = NULL;
                }

                /* Optional signal mask to apply atomically for the wait. */
                if (arg4) {
                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));

                /* Propagate the (possibly updated) remaining time back. */
                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
            } else
# endif
                ret = get_errno(poll(pfd, nfds, timeout));

            /* Copy revents back into the guest pollfd array on success. */
            if (!is_error(ret)) {
                for(i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        break;
#endif
7117 case TARGET_NR_flock:
7118 /* NOTE: the flock constant seems to be the same for every
7119 Linux platform */
7120 ret = get_errno(flock(arg1, arg2));
7121 break;
7122 case TARGET_NR_readv:
7123 {
7124 int count = arg3;
7125 struct iovec *vec;
7126
7127 vec = alloca(count * sizeof(struct iovec));
7128 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
7129 goto efault;
7130 ret = get_errno(readv(arg1, vec, count));
7131 unlock_iovec(vec, arg2, count, 1);
7132 }
7133 break;
7134 case TARGET_NR_writev:
7135 {
7136 int count = arg3;
7137 struct iovec *vec;
7138
7139 vec = alloca(count * sizeof(struct iovec));
7140 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7141 goto efault;
7142 ret = get_errno(writev(arg1, vec, count));
7143 unlock_iovec(vec, arg2, count, 0);
7144 }
7145 break;
7146 case TARGET_NR_getsid:
7147 ret = get_errno(getsid(arg1));
7148 break;
7149 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7150 case TARGET_NR_fdatasync:
7151 ret = get_errno(fdatasync(arg1));
7152 break;
7153 #endif
7154 case TARGET_NR__sysctl:
7155 /* We don't implement this, but ENOTDIR is always a safe
7156 return value. */
7157 ret = -TARGET_ENOTDIR;
7158 break;
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            /* Round the guest-supplied size up to a host-ulong multiple. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                /* NOTE(review): 'ret' is used here as the number of bytes
                 * the kernel wrote into the mask — the raw syscall's return
                 * value; only that many bytes are copied to the guest. */
                if (copy_to_user(arg3, mask, ret)) {
                    goto efault;
                }
            }
        }
        break;
7184 case TARGET_NR_sched_setaffinity:
7185 {
7186 unsigned int mask_size;
7187 unsigned long *mask;
7188
7189 /*
7190 * sched_setaffinity needs multiples of ulong, so need to take
7191 * care of mismatches between target ulong and host ulong sizes.
7192 */
7193 if (arg2 & (sizeof(abi_ulong) - 1)) {
7194 ret = -TARGET_EINVAL;
7195 break;
7196 }
7197 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7198
7199 mask = alloca(mask_size);
7200 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7201 goto efault;
7202 }
7203 memcpy(mask, p, arg2);
7204 unlock_user_struct(p, arg2, 0);
7205
7206 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7207 }
7208 break;
7209 case TARGET_NR_sched_setparam:
7210 {
7211 struct sched_param *target_schp;
7212 struct sched_param schp;
7213
7214 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7215 goto efault;
7216 schp.sched_priority = tswap32(target_schp->sched_priority);
7217 unlock_user_struct(target_schp, arg2, 0);
7218 ret = get_errno(sched_setparam(arg1, &schp));
7219 }
7220 break;
7221 case TARGET_NR_sched_getparam:
7222 {
7223 struct sched_param *target_schp;
7224 struct sched_param schp;
7225 ret = get_errno(sched_getparam(arg1, &schp));
7226 if (!is_error(ret)) {
7227 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7228 goto efault;
7229 target_schp->sched_priority = tswap32(schp.sched_priority);
7230 unlock_user_struct(target_schp, arg2, 1);
7231 }
7232 }
7233 break;
7234 case TARGET_NR_sched_setscheduler:
7235 {
7236 struct sched_param *target_schp;
7237 struct sched_param schp;
7238 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7239 goto efault;
7240 schp.sched_priority = tswap32(target_schp->sched_priority);
7241 unlock_user_struct(target_schp, arg3, 0);
7242 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7243 }
7244 break;
7245 case TARGET_NR_sched_getscheduler:
7246 ret = get_errno(sched_getscheduler(arg1));
7247 break;
7248 case TARGET_NR_sched_yield:
7249 ret = get_errno(sched_yield());
7250 break;
7251 case TARGET_NR_sched_get_priority_max:
7252 ret = get_errno(sched_get_priority_max(arg1));
7253 break;
7254 case TARGET_NR_sched_get_priority_min:
7255 ret = get_errno(sched_get_priority_min(arg1));
7256 break;
7257 case TARGET_NR_sched_rr_get_interval:
7258 {
7259 struct timespec ts;
7260 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7261 if (!is_error(ret)) {
7262 host_to_target_timespec(arg2, &ts);
7263 }
7264 }
7265 break;
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(nanosleep(&req, &rem));
            /* 'rem' (time remaining) is only written back to the guest when
             * the call failed — i.e. the sleep was cut short — and the guest
             * actually passed an output pointer in arg2. */
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
7276 #ifdef TARGET_NR_query_module
7277 case TARGET_NR_query_module:
7278 goto unimplemented;
7279 #endif
7280 #ifdef TARGET_NR_nfsservctl
7281 case TARGET_NR_nfsservctl:
7282 goto unimplemented;
7283 #endif
7284 case TARGET_NR_prctl:
7285 switch (arg1) {
7286 case PR_GET_PDEATHSIG:
7287 {
7288 int deathsig;
7289 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7290 if (!is_error(ret) && arg2
7291 && put_user_ual(deathsig, arg2)) {
7292 goto efault;
7293 }
7294 break;
7295 }
7296 #ifdef PR_GET_NAME
7297 case PR_GET_NAME:
7298 {
7299 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7300 if (!name) {
7301 goto efault;
7302 }
7303 ret = get_errno(prctl(arg1, (unsigned long)name,
7304 arg3, arg4, arg5));
7305 unlock_user(name, arg2, 16);
7306 break;
7307 }
7308 case PR_SET_NAME:
7309 {
7310 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7311 if (!name) {
7312 goto efault;
7313 }
7314 ret = get_errno(prctl(arg1, (unsigned long)name,
7315 arg3, arg4, arg5));
7316 unlock_user(name, arg2, 0);
7317 break;
7318 }
7319 #endif
7320 default:
7321 /* Most prctl options have no pointer arguments */
7322 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7323 break;
7324 }
7325 break;
7326 #ifdef TARGET_NR_arch_prctl
7327 case TARGET_NR_arch_prctl:
7328 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7329 ret = do_arch_prctl(cpu_env, arg1, arg2);
7330 break;
7331 #else
7332 goto unimplemented;
7333 #endif
7334 #endif
7335 #ifdef TARGET_NR_pread
7336 case TARGET_NR_pread:
7337 if (regpairs_aligned(cpu_env))
7338 arg4 = arg5;
7339 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7340 goto efault;
7341 ret = get_errno(pread(arg1, p, arg3, arg4));
7342 unlock_user(p, arg2, ret);
7343 break;
7344 case TARGET_NR_pwrite:
7345 if (regpairs_aligned(cpu_env))
7346 arg4 = arg5;
7347 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7348 goto efault;
7349 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7350 unlock_user(p, arg2, 0);
7351 break;
7352 #endif
7353 #ifdef TARGET_NR_pread64
7354 case TARGET_NR_pread64:
7355 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7356 goto efault;
7357 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7358 unlock_user(p, arg2, ret);
7359 break;
7360 case TARGET_NR_pwrite64:
7361 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7362 goto efault;
7363 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7364 unlock_user(p, arg2, 0);
7365 break;
7366 #endif
7367 case TARGET_NR_getcwd:
7368 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7369 goto efault;
7370 ret = get_errno(sys_getcwd1(p, arg2));
7371 unlock_user(p, arg1, ret);
7372 break;
7373 case TARGET_NR_capget:
7374 goto unimplemented;
7375 case TARGET_NR_capset:
7376 goto unimplemented;
7377 case TARGET_NR_sigaltstack:
7378 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7379 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7380 defined(TARGET_M68K) || defined(TARGET_S390X)
7381 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7382 break;
7383 #else
7384 goto unimplemented;
7385 #endif
7386 case TARGET_NR_sendfile:
7387 goto unimplemented;
7388 #ifdef TARGET_NR_getpmsg
7389 case TARGET_NR_getpmsg:
7390 goto unimplemented;
7391 #endif
7392 #ifdef TARGET_NR_putpmsg
7393 case TARGET_NR_putpmsg:
7394 goto unimplemented;
7395 #endif
7396 #ifdef TARGET_NR_vfork
7397 case TARGET_NR_vfork:
7398 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7399 0, 0, 0, 0));
7400 break;
7401 #endif
7402 #ifdef TARGET_NR_ugetrlimit
7403 case TARGET_NR_ugetrlimit:
7404 {
7405 struct rlimit rlim;
7406 int resource = target_to_host_resource(arg1);
7407 ret = get_errno(getrlimit(resource, &rlim));
7408 if (!is_error(ret)) {
7409 struct target_rlimit *target_rlim;
7410 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7411 goto efault;
7412 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7413 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7414 unlock_user_struct(target_rlim, arg2, 1);
7415 }
7416 break;
7417 }
7418 #endif
7419 #ifdef TARGET_NR_truncate64
7420 case TARGET_NR_truncate64:
7421 if (!(p = lock_user_string(arg1)))
7422 goto efault;
7423 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7424 unlock_user(p, arg1, 0);
7425 break;
7426 #endif
7427 #ifdef TARGET_NR_ftruncate64
7428 case TARGET_NR_ftruncate64:
7429 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7430 break;
7431 #endif
7432 #ifdef TARGET_NR_stat64
7433 case TARGET_NR_stat64:
7434 if (!(p = lock_user_string(arg1)))
7435 goto efault;
7436 ret = get_errno(stat(path(p), &st));
7437 unlock_user(p, arg1, 0);
7438 if (!is_error(ret))
7439 ret = host_to_target_stat64(cpu_env, arg2, &st);
7440 break;
7441 #endif
7442 #ifdef TARGET_NR_lstat64
7443 case TARGET_NR_lstat64:
7444 if (!(p = lock_user_string(arg1)))
7445 goto efault;
7446 ret = get_errno(lstat(path(p), &st));
7447 unlock_user(p, arg1, 0);
7448 if (!is_error(ret))
7449 ret = host_to_target_stat64(cpu_env, arg2, &st);
7450 break;
7451 #endif
7452 #ifdef TARGET_NR_fstat64
7453 case TARGET_NR_fstat64:
7454 ret = get_errno(fstat(arg1, &st));
7455 if (!is_error(ret))
7456 ret = host_to_target_stat64(cpu_env, arg2, &st);
7457 break;
7458 #endif
7459 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7460 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7461 #ifdef TARGET_NR_fstatat64
7462 case TARGET_NR_fstatat64:
7463 #endif
7464 #ifdef TARGET_NR_newfstatat
7465 case TARGET_NR_newfstatat:
7466 #endif
7467 if (!(p = lock_user_string(arg2)))
7468 goto efault;
7469 #ifdef __NR_fstatat64
7470 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7471 #else
7472 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7473 #endif
7474 if (!is_error(ret))
7475 ret = host_to_target_stat64(cpu_env, arg3, &st);
7476 break;
7477 #endif
7478 case TARGET_NR_lchown:
7479 if (!(p = lock_user_string(arg1)))
7480 goto efault;
7481 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7482 unlock_user(p, arg1, 0);
7483 break;
7484 #ifdef TARGET_NR_getuid
7485 case TARGET_NR_getuid:
7486 ret = get_errno(high2lowuid(getuid()));
7487 break;
7488 #endif
7489 #ifdef TARGET_NR_getgid
7490 case TARGET_NR_getgid:
7491 ret = get_errno(high2lowgid(getgid()));
7492 break;
7493 #endif
7494 #ifdef TARGET_NR_geteuid
7495 case TARGET_NR_geteuid:
7496 ret = get_errno(high2lowuid(geteuid()));
7497 break;
7498 #endif
7499 #ifdef TARGET_NR_getegid
7500 case TARGET_NR_getegid:
7501 ret = get_errno(high2lowgid(getegid()));
7502 break;
7503 #endif
7504 case TARGET_NR_setreuid:
7505 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7506 break;
7507 case TARGET_NR_setregid:
7508 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7509 break;
7510 case TARGET_NR_getgroups:
7511 {
7512 int gidsetsize = arg1;
7513 target_id *target_grouplist;
7514 gid_t *grouplist;
7515 int i;
7516
7517 grouplist = alloca(gidsetsize * sizeof(gid_t));
7518 ret = get_errno(getgroups(gidsetsize, grouplist));
7519 if (gidsetsize == 0)
7520 break;
7521 if (!is_error(ret)) {
7522 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7523 if (!target_grouplist)
7524 goto efault;
7525 for(i = 0;i < ret; i++)
7526 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7527 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7528 }
7529 }
7530 break;
7531 case TARGET_NR_setgroups:
7532 {
7533 int gidsetsize = arg1;
7534 target_id *target_grouplist;
7535 gid_t *grouplist;
7536 int i;
7537
7538 grouplist = alloca(gidsetsize * sizeof(gid_t));
7539 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7540 if (!target_grouplist) {
7541 ret = -TARGET_EFAULT;
7542 goto fail;
7543 }
7544 for(i = 0;i < gidsetsize; i++)
7545 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7546 unlock_user(target_grouplist, arg2, 0);
7547 ret = get_errno(setgroups(gidsetsize, grouplist));
7548 }
7549 break;
7550 case TARGET_NR_fchown:
7551 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7552 break;
7553 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7554 case TARGET_NR_fchownat:
7555 if (!(p = lock_user_string(arg2)))
7556 goto efault;
7557 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7558 unlock_user(p, arg2, 0);
7559 break;
7560 #endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* Legacy 16-bit-uid variant: widen each id before the host call.  */
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg2),
                                  low2highuid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Narrow each host id back to the target's 16-bit slot.  */
                if (put_user_u16(high2lowuid(ruid), arg1)
                    || put_user_u16(high2lowuid(euid), arg2)
                    || put_user_u16(high2lowuid(suid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid
    /* Bug fix: this case was guarded by #ifdef TARGET_NR_getresgid, so
       setresgid emulation was compiled in/out based on the wrong syscall
       number being defined for the target.  */
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowgid(rgid), arg1)
                    || put_user_u16(high2lowgid(egid), arg2)
                    || put_user_u16(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
    /* Legacy 16-bit-id family: uids/gids are widened with low2high*.  */
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;
7621
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit-uid variant: ids pass through without 16<->32 conversion.  */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif
7635
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxuid returns the real uid in v0 (via ret) and
       the effective uid in the a4 register.  */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxgid likewise returns rgid in v0, egid in a4.  */
    case TARGET_NR_getxgid:
        {
            /* Bug fix: was declared uid_t; use the proper gid type.  */
            gid_t egid;
            egid=getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
        }
        ret = get_errno(getgid());
        break;
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Default to EOPNOTSUPP; only the implemented selector overrides.  */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                /* Translate the CPU's FPCR into the OSF software
                   completion control word (swcr) for the guest.  */
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr. */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                         | SWCR_TRAP_ENABLE_DZE
                                         | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                         | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64 (swcr, arg2))
                    goto efault;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                /* Inverse of osf_getsysinfo: build a new FPCR from the
                   guest-supplied software completion control word.  */
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64 (swcr, arg2))
                    goto efault;
                orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
                /* Preserve only the dynamic rounding mode bits.  */
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr. */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr (cpu_env, fpcr);
                ret = 0;

                if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
                    /* Old exceptions are not signaled. */
                    fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                    /* If any exceptions set by this call, and are unmasked,
                       send a signal. */
                    /* ??? FIXME */
                }
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        break;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Map the target's how-constant onto the host's.  */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            /* NOTE(review): sigprocmask()'s return value is ignored and the
               previous mask is returned unconditionally -- confirm this
               matches the OSF calling convention.  */
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
        break;
#endif
7775
/* 32-bit-uid syscall family: ids pass straight through, no 16<->32
   conversion is required.  */
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* getgroups(2) with 32-bit target gids.  */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            /* gidsetsize == 0 only queries the group count; nothing is
               copied back to the guest in that case.  */
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* setgroups(2) with 32-bit target gids: byte-swap each entry
               into a host gid_t vector.  */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
/* Remaining 32-bit-uid variants: ids pass through unconverted, results
   are copied back as 32-bit values.  */
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
7918
    case TARGET_NR_pivot_root:
        /* Not emulated; reports ENOSYS via the common path.  */
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            /* mincore(2) fills one status byte per page of the probed
               range; that is the size the output buffer must be locked
               for writing.  Bug fix: the old code used lock_user_string()
               (the vector is not a C string) and unlocked with length
               `ret', which is 0 on success, so the result bytes were
               never copied back to the guest.  */
            abi_ulong numpages = (arg2 + TARGET_PAGE_SIZE - 1) / TARGET_PAGE_SIZE;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user(VERIFY_WRITE, arg3, numpages, 0)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, numpages);
        mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
        /* deliberate fall-through into the common fadvise handling */
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390 uses different numeric values for some advice constants.  */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        /* posix_fadvise returns the error number directly, not via errno.  */
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        /* fcntl64: convert the target's flock64 layout (EABI layout on
           ARM) to the host struct, run the host fcntl, and for F_GETLK64
           convert the result back.  */
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            /* On success, copy the (possibly updated) lock description
               back out to the guest.  */
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            /* All other commands share the 32-bit fcntl path.  */
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the target's page size, not the host's.  */
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* On 32-bit ABIs the 64-bit offset is passed as a register pair;
           some ABIs align such pairs to even registers, shifting all
           following arguments up by one.  */
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    /* Extended-attribute family.  Names are C strings; values/lists are
       opaque byte buffers of an explicit length, passed through to the
       host calls unmodified.  */
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;  /* note: this block-local p shadows the outer p */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    {
        void *p, *n, *v = 0;
        /* arg3 == 0 means a NULL value pointer (zero-length attribute).  */
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_setxattr) {
                ret = get_errno(setxattr(p, n, v, arg4, arg5));
            } else {
                ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    break;
    case TARGET_NR_fsetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    break;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_getxattr) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = get_errno(lgetxattr(p, n, v, arg4));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    break;
    case TARGET_NR_fgetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fgetxattr(arg1, n, v, arg4));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    break;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    {
        void *p, *n;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_removexattr) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = get_errno(lremovexattr(p, n));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
    }
    break;
    case TARGET_NR_fremovexattr:
    {
        void *n;
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fremovexattr(arg1, n));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
    }
    break;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        /* MIPS keeps the TLS pointer in CPU state; no host call needed.  */
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        /* CRIS requires a 256-byte-aligned TLS value.  */
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        /* Bug fix: a `break' was missing here, so on i386/ABI32 control
           fell through into the next enabled case and clobbered ret.  */
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
8311
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        /* ts doubles as the remaining-time out parameter when the guest
           supplied a non-NULL `rem' pointer (arg4).  */
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
8345
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* g2h translates the guest address so the kernel's clear-child-tid
           write lands in the right host memory.  */
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
8369
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            /* A NULL times pointer means "set both timestamps to now".  */
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            /* A NULL pathname operates on the dirfd itself.  */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* NOTE(review): lock_user_string() result is not NULL-checked here,
           unlike most other path-taking cases -- confirm intent.  */
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
8423
8424 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8425 case TARGET_NR_mq_open:
8426 {
8427 struct mq_attr posix_mq_attr;
8428
8429 p = lock_user_string(arg1 - 1);
8430 if (arg4 != 0)
8431 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8432 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8433 unlock_user (p, arg1, 0);
8434 }
8435 break;
8436
    case TARGET_NR_mq_unlink:
        /* NOTE(review): same `arg1 - 1' name-offset hack as mq_open --
           confirm.  */
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* NOTE(review): lock_user() result is not NULL-checked before
               use -- confirm.  */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            /* A NULL timeout selects the non-timed variant.  */
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            /* The message priority is an optional out parameter.  */
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /* break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                /* NOTE(review): raw results are OR-ed and never run through
                   get_errno(), so the specific errno is lost -- confirm.  */
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }

        }
        break;
#endif
8498
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* tee(2): fd-to-fd pipe duplication, no guest memory involved.  */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* splice(2): off_in (arg2) and off_out (arg4) are optional
               guest pointers to 64-bit offsets.  */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    goto efault;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                /* Bug fix: the output offset was read from arg2 instead
                   of arg4, so loff_out was loaded with the *input* offset
                   (or faulted on the wrong address).  */
                if (get_user_u64(loff_out, arg4)) {
                    goto efault;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            /* vmsplice(2): lock the guest iovec array and its buffers,
               pass the converted host iovec through.  */
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        /* NOTE(review): arg2 flags are passed through without target-to-host
           translation -- confirm the flag values coincide.  */
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 64-bit offset and length arrive as register pairs on 32-bit ABIs.  */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 pads before 64-bit register pairs, so the offsets
           start at arg3 instead of arg2.  */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        /* EPOLL_CTL_DEL may pass a NULL event pointer.  */
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
8623
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared handler: wait on the host, then byte-swap each returned
           event back into the guest's array.  */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        /* Dispatch on the actual syscall number since both cases share
           this body.  */
        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        /* Copy the previous limits back out only on success.  */
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
 /* Common exit: optionally trace the result, then return it.  */
 fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    return ret;
 /* Shared bad-guest-address exit used by the cases above.  */
 efault:
    ret = -TARGET_EFAULT;
    goto fail;
}